/* SSA Dominator optimizations for trees
   Copyright (C) 2001-2014 Free Software Foundation, Inc.
   Contributed by Diego Novillo <dnovillo@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "hash-table.h"
#include "tm.h"
#include "tree.h"
#include "stor-layout.h"
#include "flags.h"
#include "tm_p.h"
#include "basic-block.h"
#include "cfgloop.h"
#include "inchash.h"
#include "function.h"
#include "gimple-pretty-print.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-fold.h"
#include "tree-eh.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-into-ssa.h"
#include "domwalk.h"
#include "tree-pass.h"
#include "tree-ssa-propagate.h"
#include "tree-ssa-threadupdate.h"
#include "langhooks.h"
#include "params.h"
#include "tree-ssa-threadedge.h"
#include "tree-ssa-dom.h"

/* This file implements optimizations on the dominator tree.  */

/* Representation of a "naked" right-hand-side expression, to be used
   in recording available expressions in the expression hash table.  */

enum expr_kind
{
  EXPR_SINGLE,
  EXPR_UNARY,
  EXPR_BINARY,
  EXPR_TERNARY,
  EXPR_CALL,
  EXPR_PHI
};

struct hashable_expr
{
  tree type;
  enum expr_kind kind;
  union {
    struct { tree rhs; } single;
    struct { enum tree_code op;  tree opnd; } unary;
    struct { enum tree_code op;  tree opnd0, opnd1; } binary;
    struct { enum tree_code op;  tree opnd0, opnd1, opnd2; } ternary;
    struct { gimple fn_from; bool pure; size_t nargs; tree *args; } call;
    struct { size_t nargs; tree *args; } phi;
  } ops;
};

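/* For example, an assignment such as the (illustrative) GIMPLE

     a_1 = b_2 + c_3;

   is described by a hashable_expr with kind == EXPR_BINARY,
   type == TREE_TYPE (a_1), ops.binary.op == PLUS_EXPR and
   ops.binary.opnd0/opnd1 referring to b_2 and c_3; see
   initialize_hash_element below for the exact mapping.  */
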
/* Structure for recording known values of a conditional expression
   at the exits from its block.  */

typedef struct cond_equivalence_s
{
  struct hashable_expr cond;
  tree value;
} cond_equivalence;


/* Structure for recording edge equivalences as well as any pending
   edge redirections during the dominator optimizer.

   Computing and storing the edge equivalences instead of creating
   them on-demand can save significant amounts of time, particularly
   for pathological cases involving switch statements.

   These structures live for a single iteration of the dominator
   optimizer in the edge's AUX field.  At the end of an iteration we
   free each of these structures and update the AUX field to point
   to any requested redirection target (the code for updating the
   CFG and SSA graph for edge redirection expects redirection edge
   targets to be in the AUX field for each edge).  */

struct edge_info
{
  /* If this edge creates a simple equivalence, the LHS and RHS of
     the equivalence will be stored here.  */
  tree lhs;
  tree rhs;

  /* Traversing an edge may also indicate one or more particular conditions
     are true or false.  */
  vec<cond_equivalence> cond_equivalences;
};

/* Stack of available expressions in AVAIL_EXPRs.  Each block pushes any
   expressions it enters into the hash table along with a marker entry
   (null).  When we finish processing the block, we pop off entries and
   remove the expressions from the global hash table until we hit the
   marker.  */
typedef struct expr_hash_elt * expr_hash_elt_t;

static vec<expr_hash_elt_t> avail_exprs_stack;

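/* To illustrate the unwinding discipline: while visiting a dominator
   path through blocks B1 and B2 that each record one expression, the
   stack holds (bottom to top)

     marker, B1's expr, marker, B2's expr

   and popping back to the topmost marker when leaving B2 removes only
   B2's expression from the hash table.  (Illustrative sketch.)  */
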
/* Structure for entries in the expression hash table.  */

struct expr_hash_elt
{
  /* The value (lhs) of this expression.  */
  tree lhs;

  /* The expression (rhs) we want to record.  */
  struct hashable_expr expr;

  /* The stmt pointer if this element corresponds to a statement.  */
  gimple stmt;

  /* The hash value for RHS.  */
  hashval_t hash;

  /* A unique stamp, typically the address of the hash
     element itself, used in removing entries from the table.  */
  struct expr_hash_elt *stamp;
};

/* Hashtable helpers.  */

static bool hashable_expr_equal_p (const struct hashable_expr *,
                                   const struct hashable_expr *);
static void free_expr_hash_elt (void *);

struct expr_elt_hasher
{
  typedef expr_hash_elt *value_type;
  typedef expr_hash_elt *compare_type;
  typedef int store_values_directly;
  static inline hashval_t hash (const value_type &);
  static inline bool equal (const value_type &, const compare_type &);
  static inline void remove (value_type &);
};

inline hashval_t
expr_elt_hasher::hash (const value_type &p)
{
  return p->hash;
}

inline bool
expr_elt_hasher::equal (const value_type &p1, const compare_type &p2)
{
  gimple stmt1 = p1->stmt;
  const struct hashable_expr *expr1 = &p1->expr;
  const struct expr_hash_elt *stamp1 = p1->stamp;
  gimple stmt2 = p2->stmt;
  const struct hashable_expr *expr2 = &p2->expr;
  const struct expr_hash_elt *stamp2 = p2->stamp;

  /* This case should apply only when removing entries from the table.  */
  if (stamp1 == stamp2)
    return true;

  /* FIXME tuples:
     We add stmts to a hash table and then modify them.  To detect the case
     that we modify a stmt and then search for it, we assume that the hash
     is always modified by that change.
     We have to fully check why this doesn't happen on trunk or rewrite
     this in a more reliable (and easier to understand) way.  */
  if (((const struct expr_hash_elt *)p1)->hash
      != ((const struct expr_hash_elt *)p2)->hash)
    return false;

  /* In case of a collision, both RHS have to be identical and have the
     same VUSE operands.  */
  if (hashable_expr_equal_p (expr1, expr2)
      && types_compatible_p (expr1->type, expr2->type))
    {
      /* Note that STMT1 and/or STMT2 may be NULL.  */
      return ((stmt1 ? gimple_vuse (stmt1) : NULL_TREE)
              == (stmt2 ? gimple_vuse (stmt2) : NULL_TREE));
    }

  return false;
}

/* Delete an expr_hash_elt and reclaim its storage.  */

inline void
expr_elt_hasher::remove (value_type &element)
{
  free_expr_hash_elt (element);
}

/* Hash table with expressions made available during the renaming process.
   When an assignment of the form X_i = EXPR is found, the statement is
   stored in this table.  If the same expression EXPR is later found on the
   RHS of another statement, it is replaced with X_i (thus performing
   global redundancy elimination).  Similarly as we pass through conditionals
   we record the conditional itself as having either a true or false value
   in this table.  */
static hash_table<expr_elt_hasher> *avail_exprs;

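/* A concrete (illustrative) example of the redundancy elimination
   described above:

     x_1 = a_2 + b_3;
     ...
     y_4 = a_2 + b_3;   <-- RHS replaced by x_1

   subject to the VUSE check in expr_elt_hasher::equal, so that an
   intervening aliased store cannot invalidate the recorded
   expression.  */
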
/* Stack of dest,src pairs that need to be restored during finalization.

   A NULL entry is used to mark the end of pairs which need to be
   restored during finalization of this block.  */
static vec<tree> const_and_copies_stack;

/* Track whether or not we have changed the control flow graph.  */
static bool cfg_altered;

/* Bitmap of blocks that have had EH statements cleaned.  We should
   remove their dead edges eventually.  */
static bitmap need_eh_cleanup;

/* Statistics for dominator optimizations.  */
struct opt_stats_d
{
  long num_stmts;
  long num_exprs_considered;
  long num_re;
  long num_const_prop;
  long num_copy_prop;
};

static struct opt_stats_d opt_stats;

/* Local functions.  */
static void optimize_stmt (basic_block, gimple_stmt_iterator);
static tree lookup_avail_expr (gimple, bool);
static hashval_t avail_expr_hash (const void *);
static void htab_statistics (FILE *,
                             const hash_table<expr_elt_hasher> &);
static void record_cond (cond_equivalence *);
static void record_const_or_copy (tree, tree);
static void record_equality (tree, tree);
static void record_equivalences_from_phis (basic_block);
static void record_equivalences_from_incoming_edge (basic_block);
static void eliminate_redundant_computations (gimple_stmt_iterator *);
static void record_equivalences_from_stmt (gimple, int);
static void remove_local_expressions_from_table (void);
static void restore_vars_to_original_value (void);
static edge single_incoming_edge_ignoring_loop_edges (basic_block);

/* Given a statement STMT, initialize the hash table element pointed to
   by ELEMENT.  */

static void
initialize_hash_element (gimple stmt, tree lhs,
                         struct expr_hash_elt *element)
{
  enum gimple_code code = gimple_code (stmt);
  struct hashable_expr *expr = &element->expr;

  if (code == GIMPLE_ASSIGN)
    {
      enum tree_code subcode = gimple_assign_rhs_code (stmt);

      switch (get_gimple_rhs_class (subcode))
        {
        case GIMPLE_SINGLE_RHS:
          expr->kind = EXPR_SINGLE;
          expr->type = TREE_TYPE (gimple_assign_rhs1 (stmt));
          expr->ops.single.rhs = gimple_assign_rhs1 (stmt);
          break;
        case GIMPLE_UNARY_RHS:
          expr->kind = EXPR_UNARY;
          expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
          expr->ops.unary.op = subcode;
          expr->ops.unary.opnd = gimple_assign_rhs1 (stmt);
          break;
        case GIMPLE_BINARY_RHS:
          expr->kind = EXPR_BINARY;
          expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
          expr->ops.binary.op = subcode;
          expr->ops.binary.opnd0 = gimple_assign_rhs1 (stmt);
          expr->ops.binary.opnd1 = gimple_assign_rhs2 (stmt);
          break;
        case GIMPLE_TERNARY_RHS:
          expr->kind = EXPR_TERNARY;
          expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
          expr->ops.ternary.op = subcode;
          expr->ops.ternary.opnd0 = gimple_assign_rhs1 (stmt);
          expr->ops.ternary.opnd1 = gimple_assign_rhs2 (stmt);
          expr->ops.ternary.opnd2 = gimple_assign_rhs3 (stmt);
          break;
        default:
          gcc_unreachable ();
        }
    }
  else if (code == GIMPLE_COND)
    {
      expr->type = boolean_type_node;
      expr->kind = EXPR_BINARY;
      expr->ops.binary.op = gimple_cond_code (stmt);
      expr->ops.binary.opnd0 = gimple_cond_lhs (stmt);
      expr->ops.binary.opnd1 = gimple_cond_rhs (stmt);
    }
  else if (code == GIMPLE_CALL)
    {
      size_t nargs = gimple_call_num_args (stmt);
      size_t i;

      gcc_assert (gimple_call_lhs (stmt));

      expr->type = TREE_TYPE (gimple_call_lhs (stmt));
      expr->kind = EXPR_CALL;
      expr->ops.call.fn_from = stmt;

      if (gimple_call_flags (stmt) & (ECF_CONST | ECF_PURE))
        expr->ops.call.pure = true;
      else
        expr->ops.call.pure = false;

      expr->ops.call.nargs = nargs;
      expr->ops.call.args = XCNEWVEC (tree, nargs);
      for (i = 0; i < nargs; i++)
        expr->ops.call.args[i] = gimple_call_arg (stmt, i);
    }
  else if (gimple_switch swtch_stmt = dyn_cast <gimple_switch> (stmt))
    {
      expr->type = TREE_TYPE (gimple_switch_index (swtch_stmt));
      expr->kind = EXPR_SINGLE;
      expr->ops.single.rhs = gimple_switch_index (swtch_stmt);
    }
  else if (code == GIMPLE_GOTO)
    {
      expr->type = TREE_TYPE (gimple_goto_dest (stmt));
      expr->kind = EXPR_SINGLE;
      expr->ops.single.rhs = gimple_goto_dest (stmt);
    }
  else if (code == GIMPLE_PHI)
    {
      size_t nargs = gimple_phi_num_args (stmt);
      size_t i;

      expr->type = TREE_TYPE (gimple_phi_result (stmt));
      expr->kind = EXPR_PHI;
      expr->ops.phi.nargs = nargs;
      expr->ops.phi.args = XCNEWVEC (tree, nargs);

      for (i = 0; i < nargs; i++)
        expr->ops.phi.args[i] = gimple_phi_arg_def (stmt, i);
    }
  else
    gcc_unreachable ();

  element->lhs = lhs;
  element->stmt = stmt;
  element->hash = avail_expr_hash (element);
  element->stamp = element;
}

/* Given a conditional expression COND as a tree, initialize
   a hashable_expr expression EXPR.  The conditional must be a
   comparison or logical negation.  A constant or a variable is
   not permitted.  */

static void
initialize_expr_from_cond (tree cond, struct hashable_expr *expr)
{
  expr->type = boolean_type_node;

  if (COMPARISON_CLASS_P (cond))
    {
      expr->kind = EXPR_BINARY;
      expr->ops.binary.op = TREE_CODE (cond);
      expr->ops.binary.opnd0 = TREE_OPERAND (cond, 0);
      expr->ops.binary.opnd1 = TREE_OPERAND (cond, 1);
    }
  else if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
    {
      expr->kind = EXPR_UNARY;
      expr->ops.unary.op = TRUTH_NOT_EXPR;
      expr->ops.unary.opnd = TREE_OPERAND (cond, 0);
    }
  else
    gcc_unreachable ();
}

/* Given a hashable_expr expression EXPR and an LHS,
   initialize the hash table element pointed to by ELEMENT.  */

static void
initialize_hash_element_from_expr (struct hashable_expr *expr,
                                   tree lhs,
                                   struct expr_hash_elt *element)
{
  element->expr = *expr;
  element->lhs = lhs;
  element->stmt = NULL;
  element->hash = avail_expr_hash (element);
  element->stamp = element;
}

/* Compare two hashable_expr structures for equivalence.  They are
   considered equivalent when the expressions they denote must
   necessarily be equal.  The logic is intended to follow that of
   operand_equal_p in fold-const.c  */

static bool
hashable_expr_equal_p (const struct hashable_expr *expr0,
                       const struct hashable_expr *expr1)
{
  tree type0 = expr0->type;
  tree type1 = expr1->type;

  /* If either type is NULL, there is nothing to check.  */
  if ((type0 == NULL_TREE) ^ (type1 == NULL_TREE))
    return false;

  /* If both types don't have the same signedness, precision, and mode,
     then we can't consider them equal.  */
  if (type0 != type1
      && (TREE_CODE (type0) == ERROR_MARK
          || TREE_CODE (type1) == ERROR_MARK
          || TYPE_UNSIGNED (type0) != TYPE_UNSIGNED (type1)
          || TYPE_PRECISION (type0) != TYPE_PRECISION (type1)
          || TYPE_MODE (type0) != TYPE_MODE (type1)))
    return false;

  if (expr0->kind != expr1->kind)
    return false;

  switch (expr0->kind)
    {
    case EXPR_SINGLE:
      return operand_equal_p (expr0->ops.single.rhs,
                              expr1->ops.single.rhs, 0);

    case EXPR_UNARY:
      if (expr0->ops.unary.op != expr1->ops.unary.op)
        return false;

      if ((CONVERT_EXPR_CODE_P (expr0->ops.unary.op)
           || expr0->ops.unary.op == NON_LVALUE_EXPR)
          && TYPE_UNSIGNED (expr0->type) != TYPE_UNSIGNED (expr1->type))
        return false;

      return operand_equal_p (expr0->ops.unary.opnd,
                              expr1->ops.unary.opnd, 0);

    case EXPR_BINARY:
      if (expr0->ops.binary.op != expr1->ops.binary.op)
        return false;

      if (operand_equal_p (expr0->ops.binary.opnd0,
                           expr1->ops.binary.opnd0, 0)
          && operand_equal_p (expr0->ops.binary.opnd1,
                              expr1->ops.binary.opnd1, 0))
        return true;

      /* For commutative ops, allow the other order.  */
      return (commutative_tree_code (expr0->ops.binary.op)
              && operand_equal_p (expr0->ops.binary.opnd0,
                                  expr1->ops.binary.opnd1, 0)
              && operand_equal_p (expr0->ops.binary.opnd1,
                                  expr1->ops.binary.opnd0, 0));

    case EXPR_TERNARY:
      if (expr0->ops.ternary.op != expr1->ops.ternary.op
          || !operand_equal_p (expr0->ops.ternary.opnd2,
                               expr1->ops.ternary.opnd2, 0))
        return false;

      if (operand_equal_p (expr0->ops.ternary.opnd0,
                           expr1->ops.ternary.opnd0, 0)
          && operand_equal_p (expr0->ops.ternary.opnd1,
                              expr1->ops.ternary.opnd1, 0))
        return true;

      /* For commutative ops, allow the other order.  */
      return (commutative_ternary_tree_code (expr0->ops.ternary.op)
              && operand_equal_p (expr0->ops.ternary.opnd0,
                                  expr1->ops.ternary.opnd1, 0)
              && operand_equal_p (expr0->ops.ternary.opnd1,
                                  expr1->ops.ternary.opnd0, 0));

    case EXPR_CALL:
      {
        size_t i;

        /* If the calls are to different functions, then they
           clearly cannot be equal.  */
        if (!gimple_call_same_target_p (expr0->ops.call.fn_from,
                                        expr1->ops.call.fn_from))
          return false;

        if (! expr0->ops.call.pure)
          return false;

        if (expr0->ops.call.nargs != expr1->ops.call.nargs)
          return false;

        for (i = 0; i < expr0->ops.call.nargs; i++)
          if (! operand_equal_p (expr0->ops.call.args[i],
                                 expr1->ops.call.args[i], 0))
            return false;

        if (stmt_could_throw_p (expr0->ops.call.fn_from))
          {
            int lp0 = lookup_stmt_eh_lp (expr0->ops.call.fn_from);
            int lp1 = lookup_stmt_eh_lp (expr1->ops.call.fn_from);
            if ((lp0 > 0 || lp1 > 0) && lp0 != lp1)
              return false;
          }

        return true;
      }

    case EXPR_PHI:
      {
        size_t i;

        if (expr0->ops.phi.nargs != expr1->ops.phi.nargs)
          return false;

        for (i = 0; i < expr0->ops.phi.nargs; i++)
          if (! operand_equal_p (expr0->ops.phi.args[i],
                                 expr1->ops.phi.args[i], 0))
            return false;

        return true;
      }

    default:
      gcc_unreachable ();
    }
}

/* Generate a hash value for a pair of expressions.  This can be used
   iteratively by passing a previous result in HSTATE.

   The same hash value is always returned for a given pair of expressions,
   regardless of the order in which they are presented.  This is useful in
   hashing the operands of commutative functions.  */

namespace inchash
{

static void
add_expr_commutative (const_tree t1, const_tree t2, hash &hstate)
{
  hash one, two;

  inchash::add_expr (t1, one);
  inchash::add_expr (t2, two);
  hstate.add_commutative (one, two);
}

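/* Consequently the pair (a_1, b_2) hashes identically to (b_2, a_1),
   which matches hashable_expr_equal_p accepting either operand order
   for commutative codes such as PLUS_EXPR.  */
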
/* Compute a hash value for a hashable_expr value EXPR and a
   previously accumulated hash value VAL.  If two hashable_expr
   values compare equal with hashable_expr_equal_p, they must
   hash to the same value, given an identical value of VAL.
   The logic is intended to follow inchash::add_expr in tree.c.  */

static void
add_hashable_expr (const struct hashable_expr *expr, hash &hstate)
{
  switch (expr->kind)
    {
    case EXPR_SINGLE:
      inchash::add_expr (expr->ops.single.rhs, hstate);
      break;

    case EXPR_UNARY:
      hstate.add_object (expr->ops.unary.op);

      /* Make sure to include signedness in the hash computation.
         Don't hash the type, that can lead to having nodes which
         compare equal according to operand_equal_p, but which
         have different hash codes.  */
      if (CONVERT_EXPR_CODE_P (expr->ops.unary.op)
          || expr->ops.unary.op == NON_LVALUE_EXPR)
        hstate.add_int (TYPE_UNSIGNED (expr->type));

      inchash::add_expr (expr->ops.unary.opnd, hstate);
      break;

    case EXPR_BINARY:
      hstate.add_object (expr->ops.binary.op);
      if (commutative_tree_code (expr->ops.binary.op))
        inchash::add_expr_commutative (expr->ops.binary.opnd0,
                                       expr->ops.binary.opnd1, hstate);
      else
        {
          inchash::add_expr (expr->ops.binary.opnd0, hstate);
          inchash::add_expr (expr->ops.binary.opnd1, hstate);
        }
      break;

    case EXPR_TERNARY:
      hstate.add_object (expr->ops.ternary.op);
      if (commutative_ternary_tree_code (expr->ops.ternary.op))
        inchash::add_expr_commutative (expr->ops.ternary.opnd0,
                                       expr->ops.ternary.opnd1, hstate);
      else
        {
          inchash::add_expr (expr->ops.ternary.opnd0, hstate);
          inchash::add_expr (expr->ops.ternary.opnd1, hstate);
        }
      inchash::add_expr (expr->ops.ternary.opnd2, hstate);
      break;

    case EXPR_CALL:
      {
        size_t i;
        enum tree_code code = CALL_EXPR;
        gimple fn_from;

        hstate.add_object (code);
        fn_from = expr->ops.call.fn_from;
        if (gimple_call_internal_p (fn_from))
          hstate.merge_hash ((hashval_t) gimple_call_internal_fn (fn_from));
        else
          inchash::add_expr (gimple_call_fn (fn_from), hstate);
        for (i = 0; i < expr->ops.call.nargs; i++)
          inchash::add_expr (expr->ops.call.args[i], hstate);
      }
      break;

    case EXPR_PHI:
      {
        size_t i;

        for (i = 0; i < expr->ops.phi.nargs; i++)
          inchash::add_expr (expr->ops.phi.args[i], hstate);
      }
      break;

    default:
      gcc_unreachable ();
    }
}

}

/* Print a diagnostic dump of an expression hash table entry.  */

static void
print_expr_hash_elt (FILE * stream, const struct expr_hash_elt *element)
{
  if (element->stmt)
    fprintf (stream, "STMT ");
  else
    fprintf (stream, "COND ");

  if (element->lhs)
    {
      print_generic_expr (stream, element->lhs, 0);
      fprintf (stream, " = ");
    }

  switch (element->expr.kind)
    {
    case EXPR_SINGLE:
      print_generic_expr (stream, element->expr.ops.single.rhs, 0);
      break;

    case EXPR_UNARY:
      fprintf (stream, "%s ", get_tree_code_name (element->expr.ops.unary.op));
      print_generic_expr (stream, element->expr.ops.unary.opnd, 0);
      break;

    case EXPR_BINARY:
      print_generic_expr (stream, element->expr.ops.binary.opnd0, 0);
      fprintf (stream, " %s ", get_tree_code_name (element->expr.ops.binary.op));
      print_generic_expr (stream, element->expr.ops.binary.opnd1, 0);
      break;

    case EXPR_TERNARY:
      fprintf (stream, " %s <", get_tree_code_name (element->expr.ops.ternary.op));
      print_generic_expr (stream, element->expr.ops.ternary.opnd0, 0);
      fputs (", ", stream);
      print_generic_expr (stream, element->expr.ops.ternary.opnd1, 0);
      fputs (", ", stream);
      print_generic_expr (stream, element->expr.ops.ternary.opnd2, 0);
      fputs (">", stream);
      break;

    case EXPR_CALL:
      {
        size_t i;
        size_t nargs = element->expr.ops.call.nargs;
        gimple fn_from;

        fn_from = element->expr.ops.call.fn_from;
        if (gimple_call_internal_p (fn_from))
          fputs (internal_fn_name (gimple_call_internal_fn (fn_from)),
                 stream);
        else
          print_generic_expr (stream, gimple_call_fn (fn_from), 0);
        fprintf (stream, " (");
        for (i = 0; i < nargs; i++)
          {
            print_generic_expr (stream, element->expr.ops.call.args[i], 0);
            if (i + 1 < nargs)
              fprintf (stream, ", ");
          }
        fprintf (stream, ")");
      }
      break;

    case EXPR_PHI:
      {
        size_t i;
        size_t nargs = element->expr.ops.phi.nargs;

        fprintf (stream, "PHI <");
        for (i = 0; i < nargs; i++)
          {
            print_generic_expr (stream, element->expr.ops.phi.args[i], 0);
            if (i + 1 < nargs)
              fprintf (stream, ", ");
          }
        fprintf (stream, ">");
      }
      break;
    }
  fprintf (stream, "\n");

  if (element->stmt)
    {
      fprintf (stream, "          ");
      print_gimple_stmt (stream, element->stmt, 0, 0);
    }
}

/* Delete variable sized pieces of the expr_hash_elt ELEMENT.  */

static void
free_expr_hash_elt_contents (struct expr_hash_elt *element)
{
  if (element->expr.kind == EXPR_CALL)
    free (element->expr.ops.call.args);
  else if (element->expr.kind == EXPR_PHI)
    free (element->expr.ops.phi.args);
}

/* Delete an expr_hash_elt and reclaim its storage.  */

static void
free_expr_hash_elt (void *elt)
{
  struct expr_hash_elt *element = ((struct expr_hash_elt *)elt);
  free_expr_hash_elt_contents (element);
  free (element);
}

/* Allocate an EDGE_INFO for edge E and attach it to E.
   Return the new EDGE_INFO structure.  */

static struct edge_info *
allocate_edge_info (edge e)
{
  struct edge_info *edge_info;

  edge_info = XCNEW (struct edge_info);

  e->aux = edge_info;
  return edge_info;
}

/* Free all EDGE_INFO structures associated with edges in the CFG.
   If a particular edge can be threaded, copy the redirection
   target from the EDGE_INFO structure into the edge's AUX field
   as required by code to update the CFG and SSA graph for
   jump threading.  */

static void
free_all_edge_infos (void)
{
  basic_block bb;
  edge_iterator ei;
  edge e;

  FOR_EACH_BB_FN (bb, cfun)
    {
      FOR_EACH_EDGE (e, ei, bb->preds)
        {
          struct edge_info *edge_info = (struct edge_info *) e->aux;

          if (edge_info)
            {
              edge_info->cond_equivalences.release ();
              free (edge_info);
              e->aux = NULL;
            }
        }
    }
}

class dom_opt_dom_walker : public dom_walker
{
public:
  dom_opt_dom_walker (cdi_direction direction)
    : dom_walker (direction), m_dummy_cond (NULL) {}

  virtual void before_dom_children (basic_block);
  virtual void after_dom_children (basic_block);

private:
  void thread_across_edge (edge);

  gimple_cond m_dummy_cond;
};

/* Jump threading, redundancy elimination and const/copy propagation.

   This pass may expose new symbols that need to be renamed into SSA.  For
   every new symbol exposed, its corresponding bit will be set in
   VARS_TO_RENAME.  */

namespace {

const pass_data pass_data_dominator =
{
  GIMPLE_PASS, /* type */
  "dom", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_SSA_DOMINATOR_OPTS, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
};

class pass_dominator : public gimple_opt_pass
{
public:
  pass_dominator (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_dominator, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_dominator (m_ctxt); }
  virtual bool gate (function *) { return flag_tree_dom != 0; }
  virtual unsigned int execute (function *);

}; // class pass_dominator

unsigned int
pass_dominator::execute (function *fun)
{
  memset (&opt_stats, 0, sizeof (opt_stats));

  /* Create our hash tables.  */
  avail_exprs = new hash_table<expr_elt_hasher> (1024);
  avail_exprs_stack.create (20);
  const_and_copies_stack.create (20);
  need_eh_cleanup = BITMAP_ALLOC (NULL);

  calculate_dominance_info (CDI_DOMINATORS);
  cfg_altered = false;

  /* We need to know loop structures in order to avoid destroying them
     in jump threading.  Note that we still can e.g. thread through loop
     headers to an exit edge, or through loop header to the loop body, assuming
     that we update the loop info.

     TODO: We don't need to set LOOPS_HAVE_PREHEADERS generally, but due
     to several overly conservative bail-outs in jump threading, case
     gcc.dg/tree-ssa/pr21417.c can't be threaded if loop preheader is
     missing.  We should improve jump threading in future then
     LOOPS_HAVE_PREHEADERS won't be needed here.  */
  loop_optimizer_init (LOOPS_HAVE_PREHEADERS | LOOPS_HAVE_SIMPLE_LATCHES);

  /* Initialize the value-handle array.  */
  threadedge_initialize_values ();

  /* We need accurate information regarding back edges in the CFG
     for jump threading; this may include back edges that are not part of
     a single loop.  */
  mark_dfs_back_edges ();

  /* Recursively walk the dominator tree optimizing statements.  */
  dom_opt_dom_walker (CDI_DOMINATORS).walk (fun->cfg->x_entry_block_ptr);

  {
    gimple_stmt_iterator gsi;
    basic_block bb;
    FOR_EACH_BB_FN (bb, fun)
      {
        for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
          update_stmt_if_modified (gsi_stmt (gsi));
      }
  }

  /* If we exposed any new variables, go ahead and put them into
     SSA form now, before we handle jump threading.  This simplifies
     interactions between rewriting of _DECL nodes into SSA form
     and rewriting SSA_NAME nodes into SSA form after block
     duplication and CFG manipulation.  */
  update_ssa (TODO_update_ssa);

  free_all_edge_infos ();

  /* Thread jumps, creating duplicate blocks as needed.  */
  cfg_altered |= thread_through_all_blocks (first_pass_instance);

  if (cfg_altered)
    free_dominance_info (CDI_DOMINATORS);

  /* Removal of statements may make some EH edges dead.  Purge
     such edges from the CFG as needed.  */
  if (!bitmap_empty_p (need_eh_cleanup))
    {
      unsigned i;
      bitmap_iterator bi;

      /* Jump threading may have created forwarder blocks from blocks
         needing EH cleanup; the new successor of these blocks, which
         has inherited from the original block, needs the cleanup.
         Don't clear bits in the bitmap, as that can break the bitmap
         iterator.  */
      EXECUTE_IF_SET_IN_BITMAP (need_eh_cleanup, 0, i, bi)
        {
          basic_block bb = BASIC_BLOCK_FOR_FN (fun, i);
          if (bb == NULL)
            continue;
          while (single_succ_p (bb)
                 && (single_succ_edge (bb)->flags & EDGE_EH) == 0)
            bb = single_succ (bb);
          if (bb == EXIT_BLOCK_PTR_FOR_FN (fun))
            continue;
          if ((unsigned) bb->index != i)
            bitmap_set_bit (need_eh_cleanup, bb->index);
        }

      gimple_purge_all_dead_eh_edges (need_eh_cleanup);
      bitmap_clear (need_eh_cleanup);
    }

  statistics_counter_event (fun, "Redundant expressions eliminated",
                            opt_stats.num_re);
  statistics_counter_event (fun, "Constants propagated",
                            opt_stats.num_const_prop);
  statistics_counter_event (fun, "Copies propagated",
                            opt_stats.num_copy_prop);

  /* Debugging dumps.  */
  if (dump_file && (dump_flags & TDF_STATS))
    dump_dominator_optimization_stats (dump_file);

  loop_optimizer_finalize ();

  /* Delete our main hashtable.  */
  delete avail_exprs;
  avail_exprs = NULL;

  /* Free asserted bitmaps and stacks.  */
  BITMAP_FREE (need_eh_cleanup);

  avail_exprs_stack.release ();
  const_and_copies_stack.release ();

  /* Free the value-handle array.  */
  threadedge_finalize_values ();

  return 0;
}

} // anon namespace

gimple_opt_pass *
make_pass_dominator (gcc::context *ctxt)
{
  return new pass_dominator (ctxt);
}

/* Given a conditional statement CONDSTMT, convert the
   condition to a canonical form.  */

static void
canonicalize_comparison (gimple_cond condstmt)
{
  tree op0;
  tree op1;
  enum tree_code code;

  gcc_assert (gimple_code (condstmt) == GIMPLE_COND);

  op0 = gimple_cond_lhs (condstmt);
  op1 = gimple_cond_rhs (condstmt);

  code = gimple_cond_code (condstmt);

  /* If it would be profitable to swap the operands, then do so to
     canonicalize the statement, enabling better optimization.

     By placing canonicalization of such expressions here we
     transparently keep statements in canonical form, even
     when the statement is modified.  */
  if (tree_swap_operands_p (op0, op1, false))
    {
      /* For relationals we need to swap the operands
         and change the code.  */
      if (code == LT_EXPR
          || code == GT_EXPR
          || code == LE_EXPR
          || code == GE_EXPR)
        {
          code = swap_tree_comparison (code);

          gimple_cond_set_code (condstmt, code);
          gimple_cond_set_lhs (condstmt, op1);
          gimple_cond_set_rhs (condstmt, op0);

          update_stmt (condstmt);
        }
    }
}

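/* For instance (illustrative GIMPLE), a condition written as

     if (10 < x_4)

   is canonicalized to

     if (x_4 > 10)

   where swap_tree_comparison supplies the inverted comparison code to
   go with the swapped operands.  */
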
/* Initialize local stacks for this optimizer and record equivalences
   upon entry to BB.  Equivalences can come from the edge traversed to
   reach BB or they may come from PHI nodes at the start of BB.  */

/* Remove all the expressions in LOCALS from TABLE, stopping when there are
   LIMIT entries left in LOCALs.  */

static void
remove_local_expressions_from_table (void)
{
  /* Remove all the expressions made available in this block.  */
  while (avail_exprs_stack.length () > 0)
    {
      expr_hash_elt_t victim = avail_exprs_stack.pop ();
      expr_hash_elt **slot;

      if (victim == NULL)
        break;

      /* This must precede the actual removal from the hash table,
         as ELEMENT and the table entry may share a call argument
         vector which will be freed during removal.  */
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "<<<< ");
          print_expr_hash_elt (dump_file, victim);
        }

      slot = avail_exprs->find_slot (victim, NO_INSERT);
      gcc_assert (slot && *slot == victim);
      avail_exprs->clear_slot (slot);
    }
}

/* Use the source/dest pairs in CONST_AND_COPIES_STACK to restore
   CONST_AND_COPIES to its original state, stopping when we hit a
   NULL marker.  */

static void
restore_vars_to_original_value (void)
{
  while (const_and_copies_stack.length () > 0)
    {
      tree prev_value, dest;

      dest = const_and_copies_stack.pop ();

      if (dest == NULL)
        break;

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "<<<< COPY ");
          print_generic_expr (dump_file, dest, 0);
          fprintf (dump_file, " = ");
          print_generic_expr (dump_file, SSA_NAME_VALUE (dest), 0);
          fprintf (dump_file, "\n");
        }

      prev_value = const_and_copies_stack.pop ();
      set_ssa_name_value (dest, prev_value);
    }
}

/* A trivial wrapper so that we can present the generic jump
   threading code with a simple API for simplifying statements.  */
static tree
simplify_stmt_for_jump_threading (gimple stmt,
                                  gimple within_stmt ATTRIBUTE_UNUSED)
{
  return lookup_avail_expr (stmt, false);
}

/* Record into the equivalence tables any equivalences implied by
   traversing edge E (which are cached in E->aux).

   Callers are responsible for managing the unwinding markers.  */
static void
record_temporary_equivalences (edge e)
{
  int i;
  struct edge_info *edge_info = (struct edge_info *) e->aux;

  /* If we have info associated with this edge, record it into
     our equivalence tables.  */
  if (edge_info)
    {
      cond_equivalence *eq;
      tree lhs = edge_info->lhs;
      tree rhs = edge_info->rhs;

      /* If we have a simple NAME = VALUE equivalence, record it.  */
      if (lhs && TREE_CODE (lhs) == SSA_NAME)
        record_const_or_copy (lhs, rhs);

      /* If we have 0 = COND or 1 = COND equivalences, record them
         into our expression hash tables.  */
      for (i = 0; edge_info->cond_equivalences.iterate (i, &eq); ++i)
        record_cond (eq);
    }
}

/* Wrapper for common code to attempt to thread an edge.  For example,
   it handles lazily building the dummy condition and the bookkeeping
   when jump threading is successful.  */

void
dom_opt_dom_walker::thread_across_edge (edge e)
{
  if (! m_dummy_cond)
    m_dummy_cond =
        gimple_build_cond (NE_EXPR,
                           integer_zero_node, integer_zero_node,
                           NULL, NULL);

  /* Push a marker on both stacks so we can unwind the tables back to their
     current state.  */
  avail_exprs_stack.safe_push (NULL);
  const_and_copies_stack.safe_push (NULL_TREE);

  /* Traversing E may result in equivalences we can utilize.  */
  record_temporary_equivalences (e);

  /* With all the edge equivalences in the tables, go ahead and attempt
     to thread through E->dest.  */
  ::thread_across_edge (m_dummy_cond, e, false,
                        &const_and_copies_stack,
                        simplify_stmt_for_jump_threading);

  /* And restore the various tables to their state before
     we threaded this edge.

     XXX The code in tree-ssa-threadedge.c will restore the state of
     the const_and_copies table.  We just have to restore the expression
     table.  */
  remove_local_expressions_from_table ();
}

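/* To illustrate what threading across an edge can prove (illustrative
   GIMPLE):

     bb1: if (x_1 == 0) goto bb2; else goto bb3;
     ...
     bb2: if (x_1 == 0) goto bb4; else goto bb5;

   Traversing bb1->bb2 implies x_1 == 0, so the condition in bb2 is
   known true along that path and the edge can be threaded (via a
   duplicate of bb2) directly toward bb4.  */
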
/* PHI nodes can create equivalences too.

   Ignoring any alternatives which are the same as the result, if
   all the alternatives are equal, then the PHI node creates an
   equivalence.  */

static void
record_equivalences_from_phis (basic_block bb)
{
  gimple_phi_iterator gsi;

  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple_phi phi = gsi.phi ();

      tree lhs = gimple_phi_result (phi);
      tree rhs = NULL;
      size_t i;

      for (i = 0; i < gimple_phi_num_args (phi); i++)
        {
          tree t = gimple_phi_arg_def (phi, i);

          /* Ignore alternatives which are the same as our LHS.  Since
             LHS is a PHI_RESULT, it is known to be a SSA_NAME, so we
             can simply compare pointers.  */
          if (lhs == t)
            continue;

          /* If we have not processed an alternative yet, then set
             RHS to this alternative.  */
          if (rhs == NULL)
            rhs = t;
          /* If we have processed an alternative (stored in RHS), then
             see if it is equal to this one.  If it isn't, then stop
             the search.  */
          else if (! operand_equal_for_phi_arg_p (rhs, t))
            break;
        }

      /* If we had no interesting alternatives, then all the RHS alternatives
         must have been the same as LHS.  */
      if (!rhs)
        rhs = lhs;

      /* If we managed to iterate through each PHI alternative without
         breaking out of the loop, then we have a PHI which may create
         a useful equivalence.  We do not need to record unwind data for
         this, since this is a true assignment and not an equivalence
         inferred from a comparison.  All uses of this ssa name are dominated
         by this assignment, so unwinding just costs time and space.  */
      if (i == gimple_phi_num_args (phi)
          && may_propagate_copy (lhs, rhs))
        set_ssa_name_value (lhs, rhs);
    }
}

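/* For instance, a degenerate PHI such as the (illustrative) GIMPLE

     x_5 = PHI <y_2, y_2, x_5>

   records the equivalence x_5 == y_2: the self-referencing alternative
   is skipped and all remaining alternatives agree.  */
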
/* Ignoring loop backedges, if BB has precisely one incoming edge then
   return that edge.  Otherwise return NULL.  */
static edge
single_incoming_edge_ignoring_loop_edges (basic_block bb)
{
  edge retval = NULL;
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      /* A loop back edge can be identified by the destination of
         the edge dominating the source of the edge.  */
      if (dominated_by_p (CDI_DOMINATORS, e->src, e->dest))
        continue;

      /* If we have already seen a non-loop edge, then we must have
         multiple incoming non-loop edges and thus we return NULL.  */
      if (retval)
        return NULL;

      /* This is the first non-loop incoming edge we have found.  Record
         it.  */
      retval = e;
    }

  return retval;
}

/* Record any equivalences created by the incoming edge to BB.  If BB
   has more than one incoming edge, then no equivalence is created.  */

static void
record_equivalences_from_incoming_edge (basic_block bb)
{
  edge e;
  basic_block parent;
  struct edge_info *edge_info;

  /* If our parent block ended with a control statement, then we may be
     able to record some equivalences based on which outgoing edge from
     the parent was followed.  */
  parent = get_immediate_dominator (CDI_DOMINATORS, bb);

  e = single_incoming_edge_ignoring_loop_edges (bb);

  /* If we had a single incoming edge from our parent block, then enter
     any data associated with the edge into our tables.  */
  if (e && e->src == parent)
    {
      unsigned int i;

      edge_info = (struct edge_info *) e->aux;

      if (edge_info)
        {
          tree lhs = edge_info->lhs;
          tree rhs = edge_info->rhs;
          cond_equivalence *eq;

          if (lhs)
            record_equality (lhs, rhs);

          /* If LHS is an SSA_NAME and RHS is a constant integer and LHS was
             set via a widening type conversion, then we may be able to record
             additional equivalences.  */
          if (lhs
              && TREE_CODE (lhs) == SSA_NAME
              && is_gimple_constant (rhs)
              && TREE_CODE (rhs) == INTEGER_CST)
            {
              gimple defstmt = SSA_NAME_DEF_STMT (lhs);

              if (defstmt
                  && is_gimple_assign (defstmt)
                  && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (defstmt)))
                {
                  tree old_rhs = gimple_assign_rhs1 (defstmt);

                  /* If the conversion widens the original value and
                     the constant is in the range of the type of OLD_RHS,
                     then convert the constant and record the equivalence.

                     Note that int_fits_type_p does not check the precision
                     if the upper and lower bounds are OK.  */
                  if (INTEGRAL_TYPE_P (TREE_TYPE (old_rhs))
                      && (TYPE_PRECISION (TREE_TYPE (lhs))
                          > TYPE_PRECISION (TREE_TYPE (old_rhs)))
                      && int_fits_type_p (rhs, TREE_TYPE (old_rhs)))
                    {
                      tree newval = fold_convert (TREE_TYPE (old_rhs), rhs);
                      record_equality (old_rhs, newval);
                    }
                }
            }

          for (i = 0; edge_info->cond_equivalences.iterate (i, &eq); ++i)
            record_cond (eq);
        }
    }
}

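/* An (illustrative) example of the widening-conversion case above:

     c_2 = ...;              <-- c_2 has type char
     x_3 = (int) c_2;
     if (x_3 == 5) ...

   On the true edge we record x_3 == 5, and because 5 fits in char we
   may additionally record c_2 == 5.  */
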
/* Dump SSA statistics on FILE.  */

void
dump_dominator_optimization_stats (FILE *file)
{
  fprintf (file, "Total number of statements:                   %6ld\n\n",
           opt_stats.num_stmts);
  fprintf (file, "Exprs considered for dominator optimizations: %6ld\n",
           opt_stats.num_exprs_considered);

  fprintf (file, "\nHash table statistics:\n");

  fprintf (file, "    avail_exprs: ");
  htab_statistics (file, *avail_exprs);
}

/* Dump SSA statistics on stderr.  */

DEBUG_FUNCTION void
debug_dominator_optimization_stats (void)
{
  dump_dominator_optimization_stats (stderr);
}

/* Dump statistics for the hash table HTAB.  */

static void
htab_statistics (FILE *file, const hash_table<expr_elt_hasher> &htab)
{
  fprintf (file, "size %ld, %ld elements, %f collision/search ratio\n",
           (long) htab.size (),
           (long) htab.elements (),
           htab.collisions ());
}

/* Enter condition equivalence into the expression hash table.
   This indicates that a conditional expression has a known
   boolean value.  */

static void
record_cond (cond_equivalence *p)
{
  struct expr_hash_elt *element = XCNEW (struct expr_hash_elt);
  expr_hash_elt **slot;

  initialize_hash_element_from_expr (&p->cond, p->value, element);

  slot = avail_exprs->find_slot_with_hash (element, element->hash, INSERT);
  if (*slot == NULL)
    {
      *slot = element;

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "1>>> ");
          print_expr_hash_elt (dump_file, element);
        }

      avail_exprs_stack.safe_push (element);
    }
  else
    free_expr_hash_elt (element);
}

/* Build a cond_equivalence record indicating that the comparison
   CODE holds between operands OP0 and OP1 and push it to **P.  */

static void
build_and_record_new_cond (enum tree_code code,
                           tree op0, tree op1,
                           vec<cond_equivalence> *p)
{
  cond_equivalence c;
  struct hashable_expr *cond = &c.cond;

  gcc_assert (TREE_CODE_CLASS (code) == tcc_comparison);

  cond->type = boolean_type_node;
  cond->kind = EXPR_BINARY;
  cond->ops.binary.op = code;
  cond->ops.binary.opnd0 = op0;
  cond->ops.binary.opnd1 = op1;

  c.value = boolean_true_node;
  p->safe_push (c);
}

/* Record that COND is true and INVERTED is false into the edge information
   structure.  Also record that any conditions dominated by COND are true
   as well.

   For example, if a < b is true, then a <= b must also be true.  */

static void
record_conditions (struct edge_info *edge_info, tree cond, tree inverted)
{
  tree op0, op1;
  cond_equivalence c;

  if (!COMPARISON_CLASS_P (cond))
    return;

  op0 = TREE_OPERAND (cond, 0);
  op1 = TREE_OPERAND (cond, 1);

  switch (TREE_CODE (cond))
    {
    case LT_EXPR:
    case GT_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
        {
          build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
          build_and_record_new_cond (LTGT_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
        }

      build_and_record_new_cond ((TREE_CODE (cond) == LT_EXPR
                                  ? LE_EXPR : GE_EXPR),
                                 op0, op1, &edge_info->cond_equivalences);
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case GE_EXPR:
    case LE_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
        {
          build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
        }
      break;

    case EQ_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
        {
          build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
        }
      build_and_record_new_cond (LE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (GE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case UNORDERED_EXPR:
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNLE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNEQ_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNLT_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGT_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case UNLT_EXPR:
    case UNGT_EXPR:
      build_and_record_new_cond ((TREE_CODE (cond) == UNLT_EXPR
                                  ? UNLE_EXPR : UNGE_EXPR),
                                 op0, op1, &edge_info->cond_equivalences);
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case UNEQ_EXPR:
      build_and_record_new_cond (UNLE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case LTGT_EXPR:
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    default:
      break;
    }

  /* Now store the original true and false conditions into the first
     two slots.  */
  initialize_expr_from_cond (cond, &c.cond);
  c.value = boolean_true_node;
  edge_info->cond_equivalences.safe_push (c);

  /* It is possible for INVERTED to be the negation of a comparison,
     and not a valid RHS or GIMPLE_COND condition.  This happens because
     invert_truthvalue may return such an expression when asked to invert
     a floating-point comparison.  These comparisons are not assumed to
     obey the trichotomy law.  */
  initialize_expr_from_cond (inverted, &c.cond);
  c.value = boolean_false_node;
  edge_info->cond_equivalences.safe_push (c);
}

/* A helper function for record_const_or_copy and record_equality.
   Do the work of recording the value and undo info.  */

static void
record_const_or_copy_1 (tree x, tree y, tree prev_x)
{
  set_ssa_name_value (x, y);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "0>>> COPY ");
      print_generic_expr (dump_file, x, 0);
      fprintf (dump_file, " = ");
      print_generic_expr (dump_file, y, 0);
      fprintf (dump_file, "\n");
    }

  const_and_copies_stack.reserve (2);
  const_and_copies_stack.quick_push (prev_x);
  const_and_copies_stack.quick_push (x);
}

/* Record that X is equal to Y in const_and_copies.  Record undo
   information in the block-local vector.  */

static void
record_const_or_copy (tree x, tree y)
{
  tree prev_x = SSA_NAME_VALUE (x);

  gcc_assert (TREE_CODE (x) == SSA_NAME);

  if (TREE_CODE (y) == SSA_NAME)
    {
      tree tmp = SSA_NAME_VALUE (y);
      if (tmp)
        y = tmp;
    }

  record_const_or_copy_1 (x, y, prev_x);
}

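/* Note the look-through above: if y_2 is already known to equal 7 and
   we now learn x_1 == y_2, then x_1 is recorded as 7 directly, keeping
   chains in the const_and_copies table one step deep.  (Illustrative
   example.)  */
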
/* Return the loop depth of the basic block of the defining statement of X.
   This number should not be treated as absolutely correct because the loop
   information may not be completely up-to-date when dom runs.  However, it
   will be relatively correct, and as more passes are taught to keep loop info
   up to date, the result will become more and more accurate.  */

static int
loop_depth_of_name (tree x)
{
  gimple defstmt;
  basic_block defbb;

  /* If it's not an SSA_NAME, we have no clue where the definition is.  */
  if (TREE_CODE (x) != SSA_NAME)
    return 0;

  /* Otherwise return the loop depth of the defining statement's bb.
     Note that there may not actually be a bb for this statement, if the
     ssa_name is live on entry.  */
  defstmt = SSA_NAME_DEF_STMT (x);
  defbb = gimple_bb (defstmt);
  if (!defbb)
    return 0;

  return bb_loop_depth (defbb);
}

/* Similarly, but assume that X and Y are the two operands of an EQ_EXPR.
   This constrains the cases in which we may treat this as assignment.  */

static void
record_equality (tree x, tree y)
{
  tree prev_x = NULL, prev_y = NULL;

  if (TREE_CODE (x) == SSA_NAME)
    prev_x = SSA_NAME_VALUE (x);
  if (TREE_CODE (y) == SSA_NAME)
    prev_y = SSA_NAME_VALUE (y);

  /* If one of the previous values is invariant, or invariant in more loops
     (by depth), then use that.
     Otherwise it doesn't matter which value we choose, just so
     long as we canonicalize on one value.  */
  if (is_gimple_min_invariant (y))
    ;
  else if (is_gimple_min_invariant (x)
           /* ???  When threading over backedges the following is important
              for correctness.  See PR61757.  */
           || (loop_depth_of_name (x) <= loop_depth_of_name (y)))
    prev_x = x, x = y, y = prev_x, prev_x = prev_y;
  else if (prev_x && is_gimple_min_invariant (prev_x))
    x = y, y = prev_x, prev_x = prev_y;
  else if (prev_y)
    y = prev_y;

  /* After the swapping, we must have one SSA_NAME.  */
  if (TREE_CODE (x) != SSA_NAME)
    return;

  /* For IEEE, -0.0 == 0.0, so we don't necessarily know the sign of a
     variable compared against zero.  If we're honoring signed zeros,
     then we cannot record this value unless we know that the value is
     nonzero.  */
  if (HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (x)))
      && (TREE_CODE (y) != REAL_CST
          || REAL_VALUES_EQUAL (dconst0, TREE_REAL_CST (y))))
    return;

  record_const_or_copy_1 (x, y, prev_x);
}

/* Returns true when STMT is a simple iv increment.  It detects the
   following situation:

   i_1 = phi (..., i_2)
   i_2 = i_1 +/- ...  */

bool
simple_iv_increment_p (gimple stmt)
{
  enum tree_code code;
  tree lhs, preinc;
  gimple phi;
  size_t i;

  if (gimple_code (stmt) != GIMPLE_ASSIGN)
    return false;

  lhs = gimple_assign_lhs (stmt);
  if (TREE_CODE (lhs) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (code != PLUS_EXPR
      && code != MINUS_EXPR
      && code != POINTER_PLUS_EXPR)
    return false;

  preinc = gimple_assign_rhs1 (stmt);
  if (TREE_CODE (preinc) != SSA_NAME)
    return false;

  phi = SSA_NAME_DEF_STMT (preinc);
  if (gimple_code (phi) != GIMPLE_PHI)
    return false;

  for (i = 0; i < gimple_phi_num_args (phi); i++)
    if (gimple_phi_arg_def (phi, i) == lhs)
      return true;

  return false;
}

/* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
   known value for that SSA_NAME (or NULL if no value is known).

   Propagate values from CONST_AND_COPIES into the PHI nodes of the
   successors of BB.  */

static void
cprop_into_successor_phis (basic_block bb)
{
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      int indx;
      gimple_phi_iterator gsi;

      /* If this is an abnormal edge, then we do not want to copy propagate
         into the PHI alternative associated with this edge.  */
      if (e->flags & EDGE_ABNORMAL)
        continue;

      gsi = gsi_start_phis (e->dest);
      if (gsi_end_p (gsi))
        continue;

      /* We may have an equivalence associated with this edge.  While
         we can not propagate it into non-dominated blocks, we can
         propagate it into PHIs in non-dominated blocks.  */

      /* Push the unwind marker so we can reset the const and copies
         table back to its original state after processing this edge.  */
      const_and_copies_stack.safe_push (NULL_TREE);

      /* Extract and record any simple NAME = VALUE equivalences.

         Don't bother with [01] = COND equivalences, they're not useful
         here.  */
      struct edge_info *edge_info = (struct edge_info *) e->aux;
      if (edge_info)
        {
          tree lhs = edge_info->lhs;
          tree rhs = edge_info->rhs;

          if (lhs && TREE_CODE (lhs) == SSA_NAME)
            record_const_or_copy (lhs, rhs);
        }

      indx = e->dest_idx;
      for ( ; !gsi_end_p (gsi); gsi_next (&gsi))
        {
          tree new_val;
          use_operand_p orig_p;
          tree orig_val;
          gimple_phi phi = gsi.phi ();

          /* The alternative may be associated with a constant, so verify
             it is an SSA_NAME before doing anything with it.  */
          orig_p = gimple_phi_arg_imm_use_ptr (phi, indx);
          orig_val = get_use_from_ptr (orig_p);
          if (TREE_CODE (orig_val) != SSA_NAME)
            continue;

          /* If we have *ORIG_P in our constant/copy table, then replace
             ORIG_P with its value in our constant/copy table.  */
          new_val = SSA_NAME_VALUE (orig_val);
          if (new_val
              && new_val != orig_val
              && (TREE_CODE (new_val) == SSA_NAME
                  || is_gimple_min_invariant (new_val))
              && may_propagate_copy (orig_val, new_val))
            propagate_value (orig_p, new_val);
        }

      restore_vars_to_original_value ();
    }
}

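/* An (illustrative) example of this PHI propagation:

     bb1: if (x_1 == 0) goto bb2; else ...
     bb2: y_3 = PHI <x_1 (bb1), ...>

   The bb1->bb2 edge implies x_1 == 0, so the PHI argument contributed
   by that edge may be replaced with 0 even though bb2 itself is not
   dominated by the equivalence.  */
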
/* We have finished optimizing BB, record any information implied by
   taking a specific outgoing edge from BB.  */

static void
record_edge_info (basic_block bb)
{
  gimple_stmt_iterator gsi = gsi_last_bb (bb);
  struct edge_info *edge_info;

  if (! gsi_end_p (gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      location_t loc = gimple_location (stmt);

      if (gimple_code (stmt) == GIMPLE_SWITCH)
        {
          gimple_switch switch_stmt = as_a <gimple_switch> (stmt);
          tree index = gimple_switch_index (switch_stmt);

          if (TREE_CODE (index) == SSA_NAME)
            {
              int i;
              int n_labels = gimple_switch_num_labels (switch_stmt);
              tree *info = XCNEWVEC (tree, last_basic_block_for_fn (cfun));
              edge e;
              edge_iterator ei;

              for (i = 0; i < n_labels; i++)
                {
                  tree label = gimple_switch_label (switch_stmt, i);
                  basic_block target_bb = label_to_block (CASE_LABEL (label));
                  if (CASE_HIGH (label)
                      || !CASE_LOW (label)
                      || info[target_bb->index])
                    info[target_bb->index] = error_mark_node;
                  else
                    info[target_bb->index] = label;
                }

              FOR_EACH_EDGE (e, ei, bb->succs)
                {
                  basic_block target_bb = e->dest;
                  tree label = info[target_bb->index];

                  if (label != NULL && label != error_mark_node)
                    {
                      tree x = fold_convert_loc (loc, TREE_TYPE (index),
                                                 CASE_LOW (label));
                      edge_info = allocate_edge_info (e);
                      edge_info->lhs = index;
                      edge_info->rhs = x;
                    }
                }
              free (info);
            }
        }

      /* A COND_EXPR may create equivalences too.  */
      if (gimple_code (stmt) == GIMPLE_COND)
        {
          edge true_edge;
          edge false_edge;

          tree op0 = gimple_cond_lhs (stmt);
          tree op1 = gimple_cond_rhs (stmt);
          enum tree_code code = gimple_cond_code (stmt);

          extract_true_false_edges_from_block (bb, &true_edge, &false_edge);

          /* Special case comparing booleans against a constant as we
             know the value of OP0 on both arms of the branch.  i.e., we
             can record an equivalence for OP0 rather than COND.  */
          if ((code == EQ_EXPR || code == NE_EXPR)
              && TREE_CODE (op0) == SSA_NAME
              && TREE_CODE (TREE_TYPE (op0)) == BOOLEAN_TYPE
              && is_gimple_min_invariant (op1))
            {
              if (code == EQ_EXPR)
                {
                  edge_info = allocate_edge_info (true_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_false_node
                                    : boolean_true_node);

                  edge_info = allocate_edge_info (false_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_true_node
                                    : boolean_false_node);
                }
              else
                {
                  edge_info = allocate_edge_info (true_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_true_node
                                    : boolean_false_node);

                  edge_info = allocate_edge_info (false_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_false_node
                                    : boolean_true_node);
                }
            }
          else if (is_gimple_min_invariant (op0)
                   && (TREE_CODE (op1) == SSA_NAME
                       || is_gimple_min_invariant (op1)))
            {
              tree cond = build2 (code, boolean_type_node, op0, op1);
              tree inverted = invert_truthvalue_loc (loc, cond);
              bool can_infer_simple_equiv
                = !(HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (op0)))
                    && real_zerop (op0));
              struct edge_info *edge_info;

              edge_info = allocate_edge_info (true_edge);
              record_conditions (edge_info, cond, inverted);

              if (can_infer_simple_equiv && code == EQ_EXPR)
                {
                  edge_info->lhs = op1;
                  edge_info->rhs = op0;
                }

              edge_info = allocate_edge_info (false_edge);
              record_conditions (edge_info, inverted, cond);

              if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
                {
                  edge_info->lhs = op1;
                  edge_info->rhs = op0;
                }
            }

          else if (TREE_CODE (op0) == SSA_NAME
                   && (TREE_CODE (op1) == SSA_NAME
                       || is_gimple_min_invariant (op1)))
            {
              tree cond = build2 (code, boolean_type_node, op0, op1);
              tree inverted = invert_truthvalue_loc (loc, cond);
              bool can_infer_simple_equiv
                = !(HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (op1)))
                    && (TREE_CODE (op1) == SSA_NAME || real_zerop (op1)));
              struct edge_info *edge_info;

              edge_info = allocate_edge_info (true_edge);
              record_conditions (edge_info, cond, inverted);

              if (can_infer_simple_equiv && code == EQ_EXPR)
                {
                  edge_info->lhs = op0;
                  edge_info->rhs = op1;
                }

              edge_info = allocate_edge_info (false_edge);
              record_conditions (edge_info, inverted, cond);

              if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
                {
                  edge_info->lhs = op0;
                  edge_info->rhs = op1;
                }
            }
        }

      /* ??? TRUTH_NOT_EXPR can create an equivalence too.  */
    }
}

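/* For example, a block ending in the (illustrative) GIMPLE

     if (x_2 == 7) goto bb3; else goto bb4;

   attaches to the edge into bb3 the simple equivalence x_2 = 7
   together with cond_equivalences such as "x_2 <= 7 is true", while
   the edge into bb4 records "x_2 == 7 is false".  */
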
1953 void
1954 dom_opt_dom_walker::before_dom_children (basic_block bb)
1956 gimple_stmt_iterator gsi;
1958 if (dump_file && (dump_flags & TDF_DETAILS))
1959 fprintf (dump_file, "\n\nOptimizing block #%d\n\n", bb->index);
1961 /* Push a marker on the stacks of local information so that we know how
1962 far to unwind when we finalize this block. */
1963 avail_exprs_stack.safe_push (NULL);
1964 const_and_copies_stack.safe_push (NULL_TREE);
1966 record_equivalences_from_incoming_edge (bb);
1968 /* PHI nodes can create equivalences too. */
1969 record_equivalences_from_phis (bb);
1971 /* Create equivalences from redundant PHIs. PHIs are only truly
1972 redundant when they exist in the same block, so push another
1973 marker and unwind right afterwards. */
1974 avail_exprs_stack.safe_push (NULL);
1975 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1976 eliminate_redundant_computations (&gsi);
1977 remove_local_expressions_from_table ();
1979 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1980 optimize_stmt (bb, gsi);
1982 /* Now prepare to process dominated blocks. */
1983 record_edge_info (bb);
1984 cprop_into_successor_phis (bb);
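/* The marker discipline used above, reduced to a standalone sketch
   (hypothetical names; a plain array stands in for the vec-based
   stacks).  A NULL entry is pushed on block entry; on exit, entries
   are popped and undone until that marker is reached.  */

#include <stddef.h>

#define SKETCH_STACK_MAX 64

static const char *sketch_unwind_stack[SKETCH_STACK_MAX];
static size_t sketch_unwind_len;

static void
sketch_push (const char *entry)        /* NULL acts as the marker.  */
{
  if (sketch_unwind_len < SKETCH_STACK_MAX)
    sketch_unwind_stack[sketch_unwind_len++] = entry;
}

static void
sketch_unwind_to_marker (void (*undo) (const char *))
{
  while (sketch_unwind_len > 0)
    {
      const char *top = sketch_unwind_stack[--sketch_unwind_len];
      if (top == NULL)
        break;                         /* This block's marker.  */
      undo (top);
    }
}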
1987 /* We have finished processing the dominator children of BB; perform
1988 any finalization actions in preparation for leaving this node in
1989 the dominator tree. */
1991 void
1992 dom_opt_dom_walker::after_dom_children (basic_block bb)
1994 gimple last;
1996 /* If we have an outgoing edge to a block with multiple incoming and
1997 outgoing edges, then we may be able to thread the edge, i.e., we
1998 may be able to statically determine which of the outgoing edges
1999 will be traversed when the incoming edge from BB is traversed. */
2000 if (single_succ_p (bb)
2001 && (single_succ_edge (bb)->flags & EDGE_ABNORMAL) == 0
2002 && potentially_threadable_block (single_succ (bb)))
2004 thread_across_edge (single_succ_edge (bb));
2006 else if ((last = last_stmt (bb))
2007 && gimple_code (last) == GIMPLE_COND
2008 && EDGE_COUNT (bb->succs) == 2
2009 && (EDGE_SUCC (bb, 0)->flags & EDGE_ABNORMAL) == 0
2010 && (EDGE_SUCC (bb, 1)->flags & EDGE_ABNORMAL) == 0)
2012 edge true_edge, false_edge;
2014 extract_true_false_edges_from_block (bb, &true_edge, &false_edge);
2016 /* Only try to thread the edge if it reaches a target block with
2017 more than one predecessor and more than one successor. */
2018 if (potentially_threadable_block (true_edge->dest))
2019 thread_across_edge (true_edge);
2021 /* Similarly for the ELSE arm. */
2022 if (potentially_threadable_block (false_edge->dest))
2023 thread_across_edge (false_edge);
2027 /* These remove expressions local to BB from the tables. */
2028 remove_local_expressions_from_table ();
2029 restore_vars_to_original_value ();
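/* The threadability gate in miniature (a sketch only; GCC's
   potentially_threadable_block also examines the block's final
   statement).  An edge is worth threading only when its destination
   has several predecessors, so duplication can specialize it, and
   several successors, so there is a branch left to resolve.  */

#include <stdbool.h>

struct sketch_block { int n_preds; int n_succs; };

static bool
sketch_potentially_threadable (const struct sketch_block *dest)
{
  return dest->n_preds > 1 && dest->n_succs > 1;
}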
2032 /* Search for redundant computations in STMT. If any are found, then
2033 replace them with the variable holding the result of the computation.
2035 If safe, record this expression into the available expression hash
2036 table. */
2038 static void
2039 eliminate_redundant_computations (gimple_stmt_iterator* gsi)
2041 tree expr_type;
2042 tree cached_lhs;
2043 tree def;
2044 bool insert = true;
2045 bool assigns_var_p = false;
2047 gimple stmt = gsi_stmt (*gsi);
2049 if (gimple_code (stmt) == GIMPLE_PHI)
2050 def = gimple_phi_result (stmt);
2051 else
2052 def = gimple_get_lhs (stmt);
2054 /* Certain expressions on the RHS can be optimized away, but can not
2055 themselves be entered into the hash tables. */
2056 if (! def
2057 || TREE_CODE (def) != SSA_NAME
2058 || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (def)
2059 || gimple_vdef (stmt)
2060 /* Do not record equivalences for increments of ivs. This would create
2061 overlapping live ranges for a very questionable gain. */
2062 || simple_iv_increment_p (stmt))
2063 insert = false;
2065 /* Check if the expression has been computed before. */
2066 cached_lhs = lookup_avail_expr (stmt, insert);
2068 opt_stats.num_exprs_considered++;
2070 /* Get the type of the expression we are trying to optimize. */
2071 if (is_gimple_assign (stmt))
2073 expr_type = TREE_TYPE (gimple_assign_lhs (stmt));
2074 assigns_var_p = true;
2076 else if (gimple_code (stmt) == GIMPLE_COND)
2077 expr_type = boolean_type_node;
2078 else if (is_gimple_call (stmt))
2080 gcc_assert (gimple_call_lhs (stmt));
2081 expr_type = TREE_TYPE (gimple_call_lhs (stmt));
2082 assigns_var_p = true;
2084 else if (gimple_switch swtch_stmt = dyn_cast <gimple_switch> (stmt))
2085 expr_type = TREE_TYPE (gimple_switch_index (swtch_stmt));
2086 else if (gimple_code (stmt) == GIMPLE_PHI)
2087 /* We can't propagate into a phi, so the logic below doesn't apply.
2088 Instead record an equivalence between the cached LHS and the
2089 PHI result of this statement, provided they are in the same block.
2090 This should be sufficient to kill the redundant phi. */
2092 if (def && cached_lhs)
2093 record_const_or_copy (def, cached_lhs);
2094 return;
2096 else
2097 gcc_unreachable ();
2099 if (!cached_lhs)
2100 return;
2102 /* It is safe to ignore types here since we have already done
2103 type checking in the hashing and equality routines. In fact
2104 type checking here merely gets in the way of constant
2105 propagation. Also, make sure that it is safe to propagate
2106 CACHED_LHS into the expression in STMT. */
2107 if ((TREE_CODE (cached_lhs) != SSA_NAME
2108 && (assigns_var_p
2109 || useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs))))
2110 || may_propagate_copy_into_stmt (stmt, cached_lhs))
2112 gcc_checking_assert (TREE_CODE (cached_lhs) == SSA_NAME
2113 || is_gimple_min_invariant (cached_lhs));
2115 if (dump_file && (dump_flags & TDF_DETAILS))
2117 fprintf (dump_file, " Replaced redundant expr '");
2118 print_gimple_expr (dump_file, stmt, 0, dump_flags);
2119 fprintf (dump_file, "' with '");
2120 print_generic_expr (dump_file, cached_lhs, dump_flags);
2121 fprintf (dump_file, "'\n");
2124 opt_stats.num_re++;
2126 if (assigns_var_p
2127 && !useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs)))
2128 cached_lhs = fold_convert (expr_type, cached_lhs);
2130 propagate_tree_value_into_stmt (gsi, cached_lhs);
2132 /* Since it is always necessary to mark the result as modified,
2133 perhaps we should move this into propagate_tree_value_into_stmt
2134 itself. */
2135 gimple_set_modified (gsi_stmt (*gsi), true);
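/* The lookup-or-insert discipline above, as a standalone sketch with
   a toy open-addressed table (hypothetical names; operands are
   reduced to small integer ids, and the table is assumed never to
   fill).  On a hit the earlier result is reused; on a miss the new
   statement's LHS becomes the canonical copy.  */

struct sketch_expr { int code; int op0, op1; };

#define SKETCH_TABLE_SIZE 128
static struct { struct sketch_expr key; int lhs; int used; }
  sketch_expr_table[SKETCH_TABLE_SIZE];

static unsigned
sketch_expr_hash (const struct sketch_expr *e)
{
  return ((unsigned) e->code * 31u + (unsigned) e->op0) * 31u
         + (unsigned) e->op1;
}

/* Return the LHS of an earlier identical computation, or -1 after
   recording LHS as the canonical result of E.  */
static int
sketch_lookup_avail_expr (const struct sketch_expr *e, int lhs)
{
  unsigned i = sketch_expr_hash (e) % SKETCH_TABLE_SIZE;
  while (sketch_expr_table[i].used)
    {
      if (sketch_expr_table[i].key.code == e->code
          && sketch_expr_table[i].key.op0 == e->op0
          && sketch_expr_table[i].key.op1 == e->op1)
        return sketch_expr_table[i].lhs;   /* Redundant: reuse.  */
      i = (i + 1) % SKETCH_TABLE_SIZE;     /* Linear probing.  */
    }
  sketch_expr_table[i].key = *e;
  sketch_expr_table[i].lhs = lhs;
  sketch_expr_table[i].used = 1;
  return -1;
}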
2139 /* STMT, a GIMPLE_ASSIGN, may create certain equivalences, in either
2140 the available expressions table or the const_and_copies table.
2141 Detect and record those equivalences. */
2142 /* We handle only very simple copy equivalences here. The heavy
2143 lifting is done by eliminate_redundant_computations. */
2145 static void
2146 record_equivalences_from_stmt (gimple stmt, int may_optimize_p)
2148 tree lhs;
2149 enum tree_code lhs_code;
2151 gcc_assert (is_gimple_assign (stmt));
2153 lhs = gimple_assign_lhs (stmt);
2154 lhs_code = TREE_CODE (lhs);
2156 if (lhs_code == SSA_NAME
2157 && gimple_assign_single_p (stmt))
2159 tree rhs = gimple_assign_rhs1 (stmt);
2161 /* If the RHS of the assignment is a constant or another variable that
2162 may be propagated, register it in the CONST_AND_COPIES table. We
2163 do not need to record unwind data for this, since this is a true
2164 assignment and not an equivalence inferred from a comparison. All
2165 uses of this ssa name are dominated by this assignment, so unwinding
2166 just costs time and space. */
2167 if (may_optimize_p
2168 && (TREE_CODE (rhs) == SSA_NAME
2169 || is_gimple_min_invariant (rhs)))
2171 if (dump_file && (dump_flags & TDF_DETAILS))
2173 fprintf (dump_file, "==== ASGN ");
2174 print_generic_expr (dump_file, lhs, 0);
2175 fprintf (dump_file, " = ");
2176 print_generic_expr (dump_file, rhs, 0);
2177 fprintf (dump_file, "\n");
2180 set_ssa_name_value (lhs, rhs);
2184 /* A memory store, even an aliased store, creates a useful
2185 equivalence. By exchanging the LHS and RHS, creating suitable
2186 vops and recording the result in the available expression table,
2187 we may be able to expose more redundant loads. */
2188 if (!gimple_has_volatile_ops (stmt)
2189 && gimple_references_memory_p (stmt)
2190 && gimple_assign_single_p (stmt)
2191 && (TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME
2192 || is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
2193 && !is_gimple_reg (lhs))
2195 tree rhs = gimple_assign_rhs1 (stmt);
2196 gimple_assign new_stmt;
2198 /* Build a new statement with the RHS and LHS exchanged. */
2199 if (TREE_CODE (rhs) == SSA_NAME)
2201 /* NOTE tuples. The call to gimple_build_assign below replaced
2202 a call to build_gimple_modify_stmt, which did not set the
2203 SSA_NAME_DEF_STMT on the LHS of the assignment. Doing so
2204 may cause an SSA validation failure, as the LHS may be a
2205 default-initialized name and should have no definition. I'm
2206 a bit dubious of this, as the artificial statement that we
2207 generate here may in fact be ill-formed, but it is simply
2208 used as an internal device in this pass, and never becomes
2209 part of the CFG. */
2210 gimple defstmt = SSA_NAME_DEF_STMT (rhs);
2211 new_stmt = gimple_build_assign (rhs, lhs);
2212 SSA_NAME_DEF_STMT (rhs) = defstmt;
2214 else
2215 new_stmt = gimple_build_assign (rhs, lhs);
2217 gimple_set_vuse (new_stmt, gimple_vdef (stmt));
2219 /* Finally enter the statement into the available expression
2220 table. */
2221 lookup_avail_expr (new_stmt, true);
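/* The store-to-load trick above in a standalone sketch (hypothetical
   names).  A store defining memory state VDEF records that, in that
   state, ADDR holds VAL; a later load from ADDR under the same
   virtual use can then reuse VAL instead of re-reading memory --
   mirroring how the exchanged statement is given the store's VDEF as
   its VUSE before being entered in the table.  */

struct sketch_mem_equiv { int vuse; int addr; int val; };

#define SKETCH_MEM_MAX 32
static struct sketch_mem_equiv sketch_mem[SKETCH_MEM_MAX];
static int sketch_mem_len;

static void
sketch_record_store (int vdef, int addr, int val)
{
  if (sketch_mem_len < SKETCH_MEM_MAX)
    {
      struct sketch_mem_equiv e = { vdef, addr, val };
      sketch_mem[sketch_mem_len++] = e;
    }
}

/* Return the value known to be in *ADDR under memory state VUSE,
   or -1 when nothing is recorded.  */
static int
sketch_lookup_load (int vuse, int addr)
{
  for (int i = 0; i < sketch_mem_len; i++)
    if (sketch_mem[i].vuse == vuse && sketch_mem[i].addr == addr)
      return sketch_mem[i].val;
  return -1;
}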
2225 /* Replace *OP_P in STMT with any known equivalent value for *OP_P from
2226 CONST_AND_COPIES. */
2228 static void
2229 cprop_operand (gimple stmt, use_operand_p op_p)
2231 tree val;
2232 tree op = USE_FROM_PTR (op_p);
2234 /* If the operand has a known constant value or it is known to be a
2235 copy of some other variable, use the value or copy stored in
2236 CONST_AND_COPIES. */
2237 val = SSA_NAME_VALUE (op);
2238 if (val && val != op)
2240 /* Do not replace hard register operands in asm statements. */
2241 if (gimple_code (stmt) == GIMPLE_ASM
2242 && !may_propagate_copy_into_asm (op))
2243 return;
2245 /* Certain operands are not allowed to be copy propagated due
2246 to their interaction with exception handling and some GCC
2247 extensions. */
2248 if (!may_propagate_copy (op, val))
2249 return;
2251 /* Do not propagate copies into simple IV increment statements.
2252 See PR23821 for how this can disturb IV analysis. */
2253 if (TREE_CODE (val) != INTEGER_CST
2254 && simple_iv_increment_p (stmt))
2255 return;
2257 /* Dump details. */
2258 if (dump_file && (dump_flags & TDF_DETAILS))
2260 fprintf (dump_file, " Replaced '");
2261 print_generic_expr (dump_file, op, dump_flags);
2262 fprintf (dump_file, "' with %s '",
2263 (TREE_CODE (val) != SSA_NAME ? "constant" : "variable"));
2264 print_generic_expr (dump_file, val, dump_flags);
2265 fprintf (dump_file, "'\n");
2268 if (TREE_CODE (val) != SSA_NAME)
2269 opt_stats.num_const_prop++;
2270 else
2271 opt_stats.num_copy_prop++;
2273 propagate_value (op_p, val);
2275 /* And note that we modified this statement. This is now
2276 safe, even if we changed virtual operands since we will
2277 rescan the statement and rewrite its operands again. */
2278 gimple_set_modified (stmt, true);
2282 /* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
2283 known value for that SSA_NAME (or NULL if no value is known).
2285 Propagate values from CONST_AND_COPIES into the uses, vuses and
2286 vdef_ops of STMT. */
2288 static void
2289 cprop_into_stmt (gimple stmt)
2291 use_operand_p op_p;
2292 ssa_op_iter iter;
2294 FOR_EACH_SSA_USE_OPERAND (op_p, stmt, iter, SSA_OP_USE)
2295 cprop_operand (stmt, op_p);
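/* cprop_operand boiled down to a sketch (hypothetical names; SSA
   names reduced to integer versions).  Each version may have one
   recorded equivalent; a use is rewritten to that equivalent, and
   the caller is told the statement changed so it can re-fold it.  */

#define SKETCH_NUM_VERSIONS 256

/* sketch_ssa_value[v] is the version known equal to V, or -1.  A
   real implementation also allows constants here.  */
static int sketch_ssa_value[SKETCH_NUM_VERSIONS];

static void
sketch_cprop_init (void)
{
  for (int i = 0; i < SKETCH_NUM_VERSIONS; i++)
    sketch_ssa_value[i] = -1;          /* No value known yet.  */
}

static int
sketch_cprop_operand (int *op)
{
  int val = sketch_ssa_value[*op];
  if (val >= 0 && val != *op)
    {
      *op = val;                       /* Propagate the copy.  */
      return 1;                        /* Statement modified.  */
    }
  return 0;
}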
2298 /* Optimize the statement pointed to by iterator SI.
2300 We try to perform some simplistic global redundancy elimination and
2301 constant propagation:
2303 1- To detect global redundancy, we keep track of expressions that have
2304 been computed in this block and its dominators. If we find that the
2305 same expression is computed more than once, we eliminate repeated
2306 computations by using the target of the first one.
2308 2- Constant values and copy assignments. This is used to do very
2309 simplistic constant and copy propagation. When a constant or copy
2310 assignment is found, we map the value on the RHS of the assignment to
2311 the variable in the LHS in the CONST_AND_COPIES table. */
2313 static void
2314 optimize_stmt (basic_block bb, gimple_stmt_iterator si)
2316 gimple stmt, old_stmt;
2317 bool may_optimize_p;
2318 bool modified_p = false;
2320 old_stmt = stmt = gsi_stmt (si);
2322 if (dump_file && (dump_flags & TDF_DETAILS))
2324 fprintf (dump_file, "Optimizing statement ");
2325 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2328 if (gimple_code (stmt) == GIMPLE_COND)
2329 canonicalize_comparison (as_a <gimple_cond> (stmt));
2331 update_stmt_if_modified (stmt);
2332 opt_stats.num_stmts++;
2334 /* Const/copy propagate into USES, VUSES and the RHS of VDEFs. */
2335 cprop_into_stmt (stmt);
2337 /* If the statement has been modified with constant replacements,
2338 fold its RHS before checking for redundant computations. */
2339 if (gimple_modified_p (stmt))
2341 tree rhs = NULL;
2343 /* Try to fold the statement making sure that STMT is kept
2344 up to date. */
2345 if (fold_stmt (&si))
2347 stmt = gsi_stmt (si);
2348 gimple_set_modified (stmt, true);
2350 if (dump_file && (dump_flags & TDF_DETAILS))
2352 fprintf (dump_file, " Folded to: ");
2353 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2357 /* We only need to consider cases that can yield a gimple operand. */
2358 if (gimple_assign_single_p (stmt))
2359 rhs = gimple_assign_rhs1 (stmt);
2360 else if (gimple_code (stmt) == GIMPLE_GOTO)
2361 rhs = gimple_goto_dest (stmt);
2362 else if (gimple_switch swtch_stmt = dyn_cast <gimple_switch> (stmt))
2363 /* This should never be an ADDR_EXPR. */
2364 rhs = gimple_switch_index (swtch_stmt);
2366 if (rhs && TREE_CODE (rhs) == ADDR_EXPR)
2367 recompute_tree_invariant_for_addr_expr (rhs);
2369 /* Indicate that maybe_clean_or_replace_eh_stmt needs to be called,
2370 even if fold_stmt updated the stmt already and thus cleared
2371 gimple_modified_p flag on it. */
2372 modified_p = true;
2375 /* Check for redundant computations. Do this optimization only
2376 for side-effect-free assignments, calls with an LHS, conditionals and switches. */
2377 may_optimize_p = (!gimple_has_side_effects (stmt)
2378 && (is_gimple_assign (stmt)
2379 || (is_gimple_call (stmt)
2380 && gimple_call_lhs (stmt) != NULL_TREE)
2381 || gimple_code (stmt) == GIMPLE_COND
2382 || gimple_code (stmt) == GIMPLE_SWITCH));
2384 if (may_optimize_p)
2386 if (gimple_code (stmt) == GIMPLE_CALL)
2388 /* Resolve __builtin_constant_p. If it hasn't been
2389 folded to integer_one_node by now, it's fairly
2390 certain that the value simply isn't constant. */
2391 tree callee = gimple_call_fndecl (stmt);
2392 if (callee
2393 && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL
2394 && DECL_FUNCTION_CODE (callee) == BUILT_IN_CONSTANT_P)
2396 propagate_tree_value_into_stmt (&si, integer_zero_node);
2397 stmt = gsi_stmt (si);
2401 update_stmt_if_modified (stmt);
2402 eliminate_redundant_computations (&si);
2403 stmt = gsi_stmt (si);
2405 /* Perform simple redundant store elimination. */
2406 if (gimple_assign_single_p (stmt)
2407 && TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2409 tree lhs = gimple_assign_lhs (stmt);
2410 tree rhs = gimple_assign_rhs1 (stmt);
2411 tree cached_lhs;
2412 gimple_assign new_stmt;
2413 if (TREE_CODE (rhs) == SSA_NAME)
2415 tree tem = SSA_NAME_VALUE (rhs);
2416 if (tem)
2417 rhs = tem;
2419 /* Build a new statement with the RHS and LHS exchanged. */
2420 if (TREE_CODE (rhs) == SSA_NAME)
2422 gimple defstmt = SSA_NAME_DEF_STMT (rhs);
2423 new_stmt = gimple_build_assign (rhs, lhs);
2424 SSA_NAME_DEF_STMT (rhs) = defstmt;
2426 else
2427 new_stmt = gimple_build_assign (rhs, lhs);
2428 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
2429 cached_lhs = lookup_avail_expr (new_stmt, false);
2430 if (cached_lhs
2431 && rhs == cached_lhs)
2433 basic_block bb = gimple_bb (stmt);
2434 unlink_stmt_vdef (stmt);
2435 if (gsi_remove (&si, true))
2437 bitmap_set_bit (need_eh_cleanup, bb->index);
2438 if (dump_file && (dump_flags & TDF_DETAILS))
2439 fprintf (dump_file, " Flagged to clear EH edges.\n");
2441 release_defs (stmt);
2442 return;
2447 /* Record any additional equivalences created by this statement. */
2448 if (is_gimple_assign (stmt))
2449 record_equivalences_from_stmt (stmt, may_optimize_p);
2451 /* If STMT is a COND_EXPR and it was modified, then we may know
2452 where it goes. If that is the case, then mark the CFG as altered.
2454 This will cause us to later call remove_unreachable_blocks and
2455 cleanup_tree_cfg when it is safe to do so. It is not safe to
2456 clean things up here since removal of edges and such can trigger
2457 the removal of PHI nodes, which in turn can release SSA_NAMEs to
2458 the manager.
2460 That's all fine and good, except that once SSA_NAMEs are released
2461 to the manager, we must not call create_ssa_name until all references
2462 to released SSA_NAMEs have been eliminated.
2464 All references to the deleted SSA_NAMEs can not be eliminated until
2465 we remove unreachable blocks.
2467 We can not remove unreachable blocks until after we have completed
2468 any queued jump threading.
2470 We can not complete any queued jump threads until we have taken
2471 appropriate variables out of SSA form. Taking variables out of
2472 SSA form can call create_ssa_name and thus we lose.
2474 Ultimately I suspect we're going to need to change the interface
2475 into the SSA_NAME manager. */
2476 if (gimple_modified_p (stmt) || modified_p)
2478 tree val = NULL;
2480 update_stmt_if_modified (stmt);
2482 if (gimple_code (stmt) == GIMPLE_COND)
2483 val = fold_binary_loc (gimple_location (stmt),
2484 gimple_cond_code (stmt), boolean_type_node,
2485 gimple_cond_lhs (stmt), gimple_cond_rhs (stmt));
2486 else if (gimple_switch swtch_stmt = dyn_cast <gimple_switch> (stmt))
2487 val = gimple_switch_index (swtch_stmt);
2489 if (val && TREE_CODE (val) == INTEGER_CST && find_taken_edge (bb, val))
2490 cfg_altered = true;
2492 /* If we simplified a statement in such a way as to be shown that it
2493 cannot trap, update the eh information and the cfg to match. */
2494 if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt))
2496 bitmap_set_bit (need_eh_cleanup, bb->index);
2497 if (dump_file && (dump_flags & TDF_DETAILS))
2498 fprintf (dump_file, " Flagged to clear EH edges.\n");
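/* The redundant-store test above as a sketch: a store `*addr = val'
   can be deleted when looking up the mirrored load under the store's
   virtual use already yields VAL, i.e. memory provably holds that
   value already.  LOOKUP_LOAD is any table query of the shape
   sketched earlier (hypothetical names).  */

static int
sketch_store_is_redundant (int vuse, int addr, int val,
                           int (*lookup_load) (int vuse, int addr))
{
  int cached = lookup_load (vuse, addr);
  return cached >= 0 && cached == val;
}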
2503 /* Search for an existing instance of STMT in the AVAIL_EXPRS table.
2504 If found, return its LHS. Otherwise insert STMT in the table and
2505 return NULL_TREE.
2507 Also, when an expression is first inserted in the table, it is
2508 also added to AVAIL_EXPRS_STACK, so that it can be removed when
2509 we finish processing this block and its children. */
2511 static tree
2512 lookup_avail_expr (gimple stmt, bool insert)
2514 expr_hash_elt **slot;
2515 tree lhs;
2516 tree temp;
2517 struct expr_hash_elt element;
2519 /* Get LHS of phi, assignment, or call; else NULL_TREE. */
2520 if (gimple_code (stmt) == GIMPLE_PHI)
2521 lhs = gimple_phi_result (stmt);
2522 else
2523 lhs = gimple_get_lhs (stmt);
2525 initialize_hash_element (stmt, lhs, &element);
2527 if (dump_file && (dump_flags & TDF_DETAILS))
2529 fprintf (dump_file, "LKUP ");
2530 print_expr_hash_elt (dump_file, &element);
2533 /* Don't bother remembering constant assignments and copy operations.
2534 Constants and copy operations are handled by the constant/copy propagator
2535 in optimize_stmt. */
2536 if (element.expr.kind == EXPR_SINGLE
2537 && (TREE_CODE (element.expr.ops.single.rhs) == SSA_NAME
2538 || is_gimple_min_invariant (element.expr.ops.single.rhs)))
2539 return NULL_TREE;
2541 /* Finally try to find the expression in the main expression hash table. */
2542 slot = avail_exprs->find_slot (&element, (insert ? INSERT : NO_INSERT));
2543 if (slot == NULL)
2545 free_expr_hash_elt_contents (&element);
2546 return NULL_TREE;
2548 else if (*slot == NULL)
2550 struct expr_hash_elt *element2 = XNEW (struct expr_hash_elt);
2551 *element2 = element;
2552 element2->stamp = element2;
2553 *slot = element2;
2555 if (dump_file && (dump_flags & TDF_DETAILS))
2557 fprintf (dump_file, "2>>> ");
2558 print_expr_hash_elt (dump_file, element2);
2561 avail_exprs_stack.safe_push (element2);
2562 return NULL_TREE;
2564 else
2565 free_expr_hash_elt_contents (&element);
2567 /* Extract the LHS of the assignment so that it can be used as the current
2568 definition of another variable. */
2569 lhs = ((struct expr_hash_elt *)*slot)->lhs;
2571 /* See if the LHS appears in the CONST_AND_COPIES table. If it does, then
2572 use the value from the const_and_copies table. */
2573 if (TREE_CODE (lhs) == SSA_NAME)
2575 temp = SSA_NAME_VALUE (lhs);
2576 if (temp)
2577 lhs = temp;
2580 if (dump_file && (dump_flags & TDF_DETAILS))
2582 fprintf (dump_file, "FIND: ");
2583 print_generic_expr (dump_file, lhs, 0);
2584 fprintf (dump_file, "\n");
2587 return lhs;
2590 /* Hashing and equality functions for AVAIL_EXPRS. We compute a value number
2591 for expressions using the code of the expression and the SSA numbers of
2592 its operands. */
2594 static hashval_t
2595 avail_expr_hash (const void *p)
2597 gimple stmt = ((const struct expr_hash_elt *)p)->stmt;
2598 const struct hashable_expr *expr = &((const struct expr_hash_elt *)p)->expr;
2599 tree vuse;
2600 inchash::hash hstate;
2602 inchash::add_hashable_expr (expr, hstate);
2604 /* If the hash table entry is not associated with a statement, then we
2605 can just hash the expression and not worry about virtual operands
2606 and such. */
2607 if (!stmt)
2608 return hstate.end ();
2610 /* Add the SSA version number of the vuse operand. This is important
2611 because compound variables like arrays are not renamed in the
2612 operands. Rather, the rename is done on the virtual variable
2613 representing all the elements of the array. */
2614 if ((vuse = gimple_vuse (stmt)))
2615 inchash::add_expr (vuse, hstate);
2617 return hstate.end ();
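/* The hashing scheme above in a standalone sketch (an FNV-1a-style
   mix stands in for inchash::hash; names hypothetical).  The VUSE
   version is folded in only when the expression reads memory, so two
   loads of the same location get equal hashes only under the same
   memory state.  */

#include <stdint.h>

static uint32_t
sketch_hash_mix (uint32_t h, uint32_t v)
{
  return (h ^ v) * 16777619u;
}

static uint32_t
sketch_avail_expr_hash (int code, int op0, int op1, int vuse /* or -1 */)
{
  uint32_t h = 2166136261u;
  h = sketch_hash_mix (h, (uint32_t) code);
  h = sketch_hash_mix (h, (uint32_t) op0);
  h = sketch_hash_mix (h, (uint32_t) op1);
  if (vuse >= 0)
    h = sketch_hash_mix (h, (uint32_t) vuse);
  return h;
}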
2620 /* PHI-ONLY copy and constant propagation. This pass is meant to clean
2621 up degenerate PHIs created or exposed by jump threading. */
2623 /* Given a statement STMT, which is either a PHI node or an assignment,
2624 remove it from the IL. */
2626 static void
2627 remove_stmt_or_phi (gimple stmt)
2629 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
2631 if (gimple_code (stmt) == GIMPLE_PHI)
2632 remove_phi_node (&gsi, true);
2633 else
2635 gsi_remove (&gsi, true);
2636 release_defs (stmt);
2640 /* Given a statement STMT, which is either a PHI node or an assignment,
2641 return the "rhs" of the node. In the case of a non-degenerate
2642 PHI, NULL is returned. */
2644 static tree
2645 get_rhs_or_phi_arg (gimple stmt)
2647 if (gimple_code (stmt) == GIMPLE_PHI)
2648 return degenerate_phi_result (stmt);
2649 else if (gimple_assign_single_p (stmt))
2650 return gimple_assign_rhs1 (stmt);
2651 else
2652 gcc_unreachable ();
2656 /* Given a statement STMT, which is either a PHI node or an assignment,
2657 return the "lhs" of the node. */
2659 static tree
2660 get_lhs_or_phi_result (gimple stmt)
2662 if (gimple_code (stmt) == GIMPLE_PHI)
2663 return gimple_phi_result (stmt);
2664 else if (is_gimple_assign (stmt))
2665 return gimple_assign_lhs (stmt);
2666 else
2667 gcc_unreachable ();
2670 /* Propagate RHS into all uses of LHS (when possible).
2672 RHS and LHS are derived from STMT, which is passed in solely so
2673 that we can remove it if propagation is successful.
2675 When propagating into a PHI node or into a statement which turns
2676 into a trivial copy or constant initialization, set the
2677 appropriate bit in INTERESTING_NAMEs so that we will visit those
2678 nodes as well in an effort to pick up secondary optimization
2679 opportunities. */
2681 static void
2682 propagate_rhs_into_lhs (gimple stmt, tree lhs, tree rhs, bitmap interesting_names)
2684 /* First verify that propagation is valid. */
2685 if (may_propagate_copy (lhs, rhs))
2687 use_operand_p use_p;
2688 imm_use_iterator iter;
2689 gimple use_stmt;
2690 bool all = true;
2692 /* Dump details. */
2693 if (dump_file && (dump_flags & TDF_DETAILS))
2695 fprintf (dump_file, " Replacing '");
2696 print_generic_expr (dump_file, lhs, dump_flags);
2697 fprintf (dump_file, "' with %s '",
2698 (TREE_CODE (rhs) != SSA_NAME ? "constant" : "variable"));
2699 print_generic_expr (dump_file, rhs, dump_flags);
2700 fprintf (dump_file, "'\n");
2703 /* Walk over every use of LHS and try to replace the use with RHS.
2704 At this point the only reason why such a propagation would not
2705 be successful would be if the use occurs in an ASM_EXPR. */
2706 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
2708 /* Leave debug stmts alone. If we succeed in propagating
2709 all non-debug uses, we'll drop the DEF, and propagation
2710 into debug stmts will occur then. */
2711 if (gimple_debug_bind_p (use_stmt))
2712 continue;
2714 /* It's not always safe to propagate into an ASM_EXPR. */
2715 if (gimple_code (use_stmt) == GIMPLE_ASM
2716 && ! may_propagate_copy_into_asm (lhs))
2718 all = false;
2719 continue;
2722 /* It's not ok to propagate into the definition stmt of RHS.
2723 <bb 9>:
2724 # prephitmp.12_36 = PHI <g_67.1_6(9)>
2725 g_67.1_6 = prephitmp.12_36;
2726 goto <bb 9>;
2727 While this is strictly all dead code, we do not want to
2728 deal with it here. */
2729 if (TREE_CODE (rhs) == SSA_NAME
2730 && SSA_NAME_DEF_STMT (rhs) == use_stmt)
2732 all = false;
2733 continue;
2736 /* Dump details. */
2737 if (dump_file && (dump_flags & TDF_DETAILS))
2739 fprintf (dump_file, " Original statement:");
2740 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2743 /* Propagate the RHS into this use of the LHS. */
2744 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
2745 propagate_value (use_p, rhs);
2747 /* Special cases to avoid useless calls into the folding
2748 routines, operand scanning, etc.
2750 Propagation into a PHI may cause the PHI to become
2751 a degenerate, so mark the PHI as interesting. No other
2752 actions are necessary. */
2753 if (gimple_code (use_stmt) == GIMPLE_PHI)
2755 tree result;
2757 /* Dump details. */
2758 if (dump_file && (dump_flags & TDF_DETAILS))
2760 fprintf (dump_file, " Updated statement:");
2761 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2764 result = get_lhs_or_phi_result (use_stmt);
2765 bitmap_set_bit (interesting_names, SSA_NAME_VERSION (result));
2766 continue;
2769 /* From this point onward we are propagating into a
2770 real statement. Folding may (or may not) be possible,
2771 we may expose new operands, expose dead EH edges,
2772 etc. */
2773 /* NOTE tuples. In the tuples world, fold_stmt_inplace
2774 cannot fold a call that simplifies to a constant,
2775 because the GIMPLE_CALL must be replaced by a
2776 GIMPLE_ASSIGN, and there is no way to effect such a
2777 transformation in-place. We might want to consider
2778 using the more general fold_stmt here. */
2780 gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
2781 fold_stmt_inplace (&gsi);
2784 /* Sometimes propagation can expose new operands to the
2785 renamer. */
2786 update_stmt (use_stmt);
2788 /* Dump details. */
2789 if (dump_file && (dump_flags & TDF_DETAILS))
2791 fprintf (dump_file, " Updated statement:");
2792 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2795 /* If we replaced a variable index with a constant, then
2796 we would need to update the invariant flag for ADDR_EXPRs. */
2797 if (gimple_assign_single_p (use_stmt)
2798 && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ADDR_EXPR)
2799 recompute_tree_invariant_for_addr_expr
2800 (gimple_assign_rhs1 (use_stmt));
2802 /* If we cleaned up EH information from the statement,
2803 mark its containing block as needing EH cleanups. */
2804 if (maybe_clean_or_replace_eh_stmt (use_stmt, use_stmt))
2806 bitmap_set_bit (need_eh_cleanup, gimple_bb (use_stmt)->index);
2807 if (dump_file && (dump_flags & TDF_DETAILS))
2808 fprintf (dump_file, " Flagged to clear EH edges.\n");
2811 /* Propagation may expose new trivial copy/constant propagation
2812 opportunities. */
2813 if (gimple_assign_single_p (use_stmt)
2814 && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
2815 && (TREE_CODE (gimple_assign_rhs1 (use_stmt)) == SSA_NAME
2816 || is_gimple_min_invariant (gimple_assign_rhs1 (use_stmt))))
2818 tree result = get_lhs_or_phi_result (use_stmt);
2819 bitmap_set_bit (interesting_names, SSA_NAME_VERSION (result));
2822 /* Propagation into these nodes may make certain edges in
2823 the CFG unexecutable. We want to identify them, as PHI nodes
2824 at the destination of those unexecutable edges may become
2825 degenerate. */
2826 else if (gimple_code (use_stmt) == GIMPLE_COND
2827 || gimple_code (use_stmt) == GIMPLE_SWITCH
2828 || gimple_code (use_stmt) == GIMPLE_GOTO)
2830 tree val;
2832 if (gimple_code (use_stmt) == GIMPLE_COND)
2833 val = fold_binary_loc (gimple_location (use_stmt),
2834 gimple_cond_code (use_stmt),
2835 boolean_type_node,
2836 gimple_cond_lhs (use_stmt),
2837 gimple_cond_rhs (use_stmt));
2838 else if (gimple_code (use_stmt) == GIMPLE_SWITCH)
2839 val = gimple_switch_index (as_a <gimple_switch> (use_stmt));
2840 else
2841 val = gimple_goto_dest (use_stmt);
2843 if (val && is_gimple_min_invariant (val))
2845 basic_block bb = gimple_bb (use_stmt);
2846 edge te = find_taken_edge (bb, val);
2847 edge_iterator ei;
2848 edge e;
2849 gimple_stmt_iterator gsi;
2850 gimple_phi_iterator psi;
2852 /* Remove all outgoing edges except TE. */
2853 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei));)
2855 if (e != te)
2857 /* Mark all the PHI nodes at the destination of
2858 the unexecutable edge as interesting. */
2859 for (psi = gsi_start_phis (e->dest);
2860 !gsi_end_p (psi);
2861 gsi_next (&psi))
2863 gimple_phi phi = psi.phi ();
2865 tree result = gimple_phi_result (phi);
2866 int version = SSA_NAME_VERSION (result);
2868 bitmap_set_bit (interesting_names, version);
2871 te->probability += e->probability;
2873 te->count += e->count;
2874 remove_edge (e);
2875 cfg_altered = true;
2877 else
2878 ei_next (&ei);
2881 gsi = gsi_last_bb (gimple_bb (use_stmt));
2882 gsi_remove (&gsi, true);
2884 /* And fixup the flags on the single remaining edge. */
2885 te->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
2886 te->flags &= ~EDGE_ABNORMAL;
2887 te->flags |= EDGE_FALLTHRU;
2888 if (te->probability > REG_BR_PROB_BASE)
2889 te->probability = REG_BR_PROB_BASE;
2894 /* Ensure there is nothing else to do. */
2895 gcc_assert (!all || has_zero_uses (lhs));
2897 /* If we were able to propagate away all uses of LHS, then
2898 we can remove STMT. */
2899 if (all)
2900 remove_stmt_or_phi (stmt);
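/* The edge bookkeeping above as a sketch: when only the taken edge
   TE survives, it absorbs the probability and count of each removed
   edge E, with the probability clamped to the base (REG_BR_PROB_BASE
   in GCC; 10000 stands in here).  Hypothetical types.  */

#define SKETCH_PROB_BASE 10000

struct sketch_edge { int probability; long long count; };

static void
sketch_absorb_removed_edge (struct sketch_edge *te,
                            const struct sketch_edge *e)
{
  te->probability += e->probability;
  if (te->probability > SKETCH_PROB_BASE)
    te->probability = SKETCH_PROB_BASE;
  te->count += e->count;
}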
2904 /* STMT is either a PHI node (potentially a degenerate PHI node) or
2905 a statement that is a trivial copy or constant initialization.
2907 Attempt to eliminate STMT by propagating its RHS into all uses of
2908 its LHS. This may in turn set new bits in INTERESTING_NAMES
2909 for nodes we want to revisit later.
2911 All exit paths should clear INTERESTING_NAMES for the result
2912 of STMT. */
2914 static void
2915 eliminate_const_or_copy (gimple stmt, bitmap interesting_names)
2917 tree lhs = get_lhs_or_phi_result (stmt);
2918 tree rhs;
2919 int version = SSA_NAME_VERSION (lhs);
2921 /* If the LHS of this statement or PHI has no uses, then we can
2922 just eliminate it. This can occur if, for example, the PHI
2923 was created by block duplication due to threading and its only
2924 use was in the conditional at the end of the block which was
2925 deleted. */
2926 if (has_zero_uses (lhs))
2928 bitmap_clear_bit (interesting_names, version);
2929 remove_stmt_or_phi (stmt);
2930 return;
2933 /* Get the RHS of the assignment, or the PHI argument if the PHI is
2934 degenerate. */
2935 rhs = get_rhs_or_phi_arg (stmt);
2936 if (!rhs)
2938 bitmap_clear_bit (interesting_names, version);
2939 return;
2942 if (!virtual_operand_p (lhs))
2943 propagate_rhs_into_lhs (stmt, lhs, rhs, interesting_names);
2944 else
2946 gimple use_stmt;
2947 imm_use_iterator iter;
2948 use_operand_p use_p;
2949 /* For virtual operands we have to propagate into all uses as
2950 otherwise we will create overlapping live ranges. */
2951 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
2952 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
2953 SET_USE (use_p, rhs);
2954 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
2955 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs) = 1;
2956 remove_stmt_or_phi (stmt);
2959 /* Note that STMT may well have been deleted by now, so do
2960 not access it; instead use the saved version # to clear
2961 STMT's entry in the worklist. */
2962 bitmap_clear_bit (interesting_names, version);
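/* What "degenerate" means concretely, as a sketch in the spirit of
   degenerate_phi_result (hypothetical names; SSA names reduced to
   integer versions).  A PHI is degenerate when, ignoring arguments
   equal to its own result, every argument is the same value; the
   PHI is then just a copy of that value.  */

static int
sketch_degenerate_phi_result (const int *args, int nargs, int result)
{
  int val = -1;
  for (int i = 0; i < nargs; i++)
    {
      if (args[i] == result)
        continue;                      /* PHI feeding itself.  */
      if (val < 0)
        val = args[i];
      else if (args[i] != val)
        return -1;                     /* Two distinct values.  */
    }
  return val;
}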
2965 /* The first phase in degenerate PHI elimination.
2967 Eliminate the degenerate PHIs in BB, then recurse on the
2968 dominator children of BB. */
2970 static void
2971 eliminate_degenerate_phis_1 (basic_block bb, bitmap interesting_names)
2973 gimple_phi_iterator gsi;
2974 basic_block son;
2976 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2978 gimple_phi phi = gsi.phi ();
2980 eliminate_const_or_copy (phi, interesting_names);
2983 /* Recurse into the dominator children of BB. */
2984 for (son = first_dom_son (CDI_DOMINATORS, bb);
2985 son;
2986 son = next_dom_son (CDI_DOMINATORS, son))
2987 eliminate_degenerate_phis_1 (son, interesting_names);
2991 /* A very simple pass to eliminate degenerate PHI nodes from the
2992 IL. This is meant to be fast enough to be able to be run several
2993 times in the optimization pipeline.
2995 Certain optimizations, particularly those which duplicate blocks
2996 or remove edges from the CFG, can create or expose PHIs which are
2997 trivial copies or constant initializations.
2999 While we could pick up these optimizations in DOM or with the
3000 combination of copy-prop and CCP, those solutions are far too
3001 heavy-weight for our needs.
3003 This implementation has two phases so that we can efficiently
3004 eliminate the first order degenerate PHIs and second order
3005 degenerate PHIs.
3007 The first phase performs a dominator walk to identify and eliminate
3008 the vast majority of the degenerate PHIs. When a degenerate PHI
3009 is identified and eliminated any affected statements or PHIs
3010 are put on a worklist.
3012 The second phase eliminates degenerate PHIs and trivial copies
3013 or constant initializations using the worklist. This is how we
3014 pick up the secondary optimization opportunities with minimal
3015 cost. */
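/* The two-phase driver in miniature (a sketch: a bool array and a
   snapshot stand in for the bitmap and EXECUTE_IF_SET_IN_BITMAP;
   names hypothetical).  PROCESS must clear its own entry and may set
   others, which a later sweep picks up -- that is how second-order
   opportunities are found.  */

#include <stdbool.h>

#define SKETCH_N 128

static bool sketch_interesting[SKETCH_N];

static bool
sketch_any_interesting (void)
{
  for (int i = 0; i < SKETCH_N; i++)
    if (sketch_interesting[i])
      return true;
  return false;
}

static void
sketch_drain_worklist (void (*process) (int, bool *))
{
  while (sketch_any_interesting ())
    {
      /* Iterate over a snapshot, since PROCESS mutates the set.  */
      bool snapshot[SKETCH_N];
      for (int i = 0; i < SKETCH_N; i++)
        snapshot[i] = sketch_interesting[i];
      for (int i = 0; i < SKETCH_N; i++)
        if (snapshot[i])
          process (i, sketch_interesting);
    }
}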
3017 namespace {
3019 const pass_data pass_data_phi_only_cprop =
3021 GIMPLE_PASS, /* type */
3022 "phicprop", /* name */
3023 OPTGROUP_NONE, /* optinfo_flags */
3024 TV_TREE_PHI_CPROP, /* tv_id */
3025 ( PROP_cfg | PROP_ssa ), /* properties_required */
3026 0, /* properties_provided */
3027 0, /* properties_destroyed */
3028 0, /* todo_flags_start */
3029 ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
3032 class pass_phi_only_cprop : public gimple_opt_pass
3034 public:
3035 pass_phi_only_cprop (gcc::context *ctxt)
3036 : gimple_opt_pass (pass_data_phi_only_cprop, ctxt)
3039 /* opt_pass methods: */
3040 opt_pass * clone () { return new pass_phi_only_cprop (m_ctxt); }
3041 virtual bool gate (function *) { return flag_tree_dom != 0; }
3042 virtual unsigned int execute (function *);
3044 }; // class pass_phi_only_cprop
3046 unsigned int
3047 pass_phi_only_cprop::execute (function *fun)
3049 bitmap interesting_names;
3050 bitmap interesting_names1;
3052 /* Bitmap of blocks which need EH information updated. We cannot
3053 update it on the fly, as doing so invalidates the dominator tree. */
3054 need_eh_cleanup = BITMAP_ALLOC (NULL);
3056 /* INTERESTING_NAMES is effectively our worklist, indexed by
3057 SSA_NAME_VERSION.
3059 A set bit indicates that the statement or PHI node which
3060 defines the SSA_NAME should be (re)examined to determine if
3061 it has become a degenerate PHI or trivial const/copy propagation
3062 opportunity.
3064 Experiments have shown we generally get better compilation
3065 time behavior with bitmaps rather than sbitmaps. */
3066 interesting_names = BITMAP_ALLOC (NULL);
3067 interesting_names1 = BITMAP_ALLOC (NULL);
3069 calculate_dominance_info (CDI_DOMINATORS);
3070 cfg_altered = false;
3072 /* First phase. Eliminate degenerate PHIs via a dominator
3073 walk of the CFG.
3075 Experiments have indicated that we generally get better
3076 compile-time behavior by visiting blocks in the first
3077 phase in dominator order. Presumably this is because walking
3078 in dominator order leaves fewer PHIs for later examination
3079 by the worklist phase. */
3080 eliminate_degenerate_phis_1 (ENTRY_BLOCK_PTR_FOR_FN (fun),
3081 interesting_names);
3083 /* Second phase. Eliminate second order degenerate PHIs as well
3084 as trivial copies or constant initializations identified by
3085 the first phase or this phase. Basically we keep iterating
3086 until our set of INTERESTING_NAMEs is empty. */
3087 while (!bitmap_empty_p (interesting_names))
3089 unsigned int i;
3090 bitmap_iterator bi;
3092 /* EXECUTE_IF_SET_IN_BITMAP does not like its bitmap
3093 changed during the loop. Copy it to another bitmap and
3094 use that. */
3095 bitmap_copy (interesting_names1, interesting_names);
3097 EXECUTE_IF_SET_IN_BITMAP (interesting_names1, 0, i, bi)
3099 tree name = ssa_name (i);
3101 /* Ignore SSA_NAMEs that have been released because
3102 their defining statement was deleted (unreachable). */
3103 if (name)
3104 eliminate_const_or_copy (SSA_NAME_DEF_STMT (ssa_name (i)),
3105 interesting_names);
3109 if (cfg_altered)
3111 free_dominance_info (CDI_DOMINATORS);
3112 /* If we changed the CFG schedule loops for fixup by cfgcleanup. */
3113 loops_state_set (LOOPS_NEED_FIXUP);
3116 /* Propagation of const and copies may make some EH edges dead. Purge
3117 such edges from the CFG as needed. */
3118 if (!bitmap_empty_p (need_eh_cleanup))
3120 gimple_purge_all_dead_eh_edges (need_eh_cleanup);
3121 BITMAP_FREE (need_eh_cleanup);
3124 BITMAP_FREE (interesting_names);
3125 BITMAP_FREE (interesting_names1);
3126 return 0;
3129 } // anon namespace
3131 gimple_opt_pass *
3132 make_pass_phi_only_cprop (gcc::context *ctxt)
3134 return new pass_phi_only_cprop (ctxt);