/* SSA Dominator optimizations for trees
   Copyright (C) 2001-2014 Free Software Foundation, Inc.
   Contributed by Diego Novillo <dnovillo@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "hash-table.h"
#include "tm.h"
#include "tree.h"
#include "stor-layout.h"
#include "flags.h"
#include "tm_p.h"
#include "basic-block.h"
#include "cfgloop.h"
#include "function.h"
#include "gimple-pretty-print.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-fold.h"
#include "tree-eh.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-into-ssa.h"
#include "domwalk.h"
#include "tree-pass.h"
#include "tree-ssa-propagate.h"
#include "tree-ssa-threadupdate.h"
#include "langhooks.h"
#include "params.h"
#include "tree-ssa-threadedge.h"
#include "tree-ssa-dom.h"
/* This file implements optimizations on the dominator tree.  */

/* Representation of a "naked" right-hand-side expression, to be used
   in recording available expressions in the expression hash table.  */

enum expr_kind
{
  EXPR_SINGLE,
  EXPR_UNARY,
  EXPR_BINARY,
  EXPR_TERNARY,
  EXPR_CALL,
  EXPR_PHI
};

struct hashable_expr
{
  tree type;
  enum expr_kind kind;
  union {
    struct { tree rhs; } single;
    struct { enum tree_code op;  tree opnd; } unary;
    struct { enum tree_code op;  tree opnd0, opnd1; } binary;
    struct { enum tree_code op;  tree opnd0, opnd1, opnd2; } ternary;
    struct { gimple fn_from; bool pure; size_t nargs; tree *args; } call;
    struct { size_t nargs; tree *args; } phi;
  } ops;
};
/* Structure for recording known values of a conditional expression
   at the exits from its block.  */

typedef struct cond_equivalence_s
{
  struct hashable_expr cond;
  tree value;
} cond_equivalence;
/* Structure for recording edge equivalences as well as any pending
   edge redirections during the dominator optimizer.

   Computing and storing the edge equivalences instead of creating
   them on-demand can save significant amounts of time, particularly
   for pathological cases involving switch statements.

   These structures live for a single iteration of the dominator
   optimizer in the edge's AUX field.  At the end of an iteration we
   free each of these structures and update the AUX field to point
   to any requested redirection target (the code for updating the
   CFG and SSA graph for edge redirection expects redirection edge
   targets to be in the AUX field for each edge).  */

struct edge_info
{
  /* If this edge creates a simple equivalence, the LHS and RHS of
     the equivalence will be stored here.  */
  tree lhs;
  tree rhs;

  /* Traversing an edge may also indicate one or more particular conditions
     are true or false.  */
  vec<cond_equivalence> cond_equivalences;
};
/* Stack of available expressions in AVAIL_EXPRs.  Each block pushes any
   expressions it enters into the hash table along with a marker entry
   (null).  When we finish processing the block, we pop off entries and
   remove the expressions from the global hash table until we hit the
   marker.  */
typedef struct expr_hash_elt * expr_hash_elt_t;

static vec<expr_hash_elt_t> avail_exprs_stack;
/* Structure for entries in the expression hash table.  */

struct expr_hash_elt
{
  /* The value (lhs) of this expression.  */
  tree lhs;

  /* The expression (rhs) we want to record.  */
  struct hashable_expr expr;

  /* The stmt pointer if this element corresponds to a statement.  */
  gimple stmt;

  /* The hash value for RHS.  */
  hashval_t hash;

  /* A unique stamp, typically the address of the hash
     element itself, used in removing entries from the table.  */
  struct expr_hash_elt *stamp;
};
/* Hashtable helpers.  */

static bool hashable_expr_equal_p (const struct hashable_expr *,
                                   const struct hashable_expr *);
static void free_expr_hash_elt (void *);

struct expr_elt_hasher
{
  typedef expr_hash_elt value_type;
  typedef expr_hash_elt compare_type;
  static inline hashval_t hash (const value_type *);
  static inline bool equal (const value_type *, const compare_type *);
  static inline void remove (value_type *);
};
inline hashval_t
expr_elt_hasher::hash (const value_type *p)
{
  return p->hash;
}
inline bool
expr_elt_hasher::equal (const value_type *p1, const compare_type *p2)
{
  gimple stmt1 = p1->stmt;
  const struct hashable_expr *expr1 = &p1->expr;
  const struct expr_hash_elt *stamp1 = p1->stamp;
  gimple stmt2 = p2->stmt;
  const struct hashable_expr *expr2 = &p2->expr;
  const struct expr_hash_elt *stamp2 = p2->stamp;

  /* This case should apply only when removing entries from the table.  */
  if (stamp1 == stamp2)
    return true;

  /* FIXME tuples:
     We add stmts to a hash table and then modify them.  To detect the case
     that we modify a stmt and then search for it, we assume that the hash
     is always modified by that change.
     We have to fully check why this doesn't happen on trunk or rewrite
     this in a more reliable (and easier to understand) way.  */
  if (((const struct expr_hash_elt *)p1)->hash
      != ((const struct expr_hash_elt *)p2)->hash)
    return false;

  /* In case of a collision, both RHS have to be identical and have the
     same VUSE operands.  */
  if (hashable_expr_equal_p (expr1, expr2)
      && types_compatible_p (expr1->type, expr2->type))
    {
      /* Note that STMT1 and/or STMT2 may be NULL.  */
      return ((stmt1 ? gimple_vuse (stmt1) : NULL_TREE)
              == (stmt2 ? gimple_vuse (stmt2) : NULL_TREE));
    }

  return false;
}
/* Delete an expr_hash_elt and reclaim its storage.  */

inline void
expr_elt_hasher::remove (value_type *element)
{
  free_expr_hash_elt (element);
}
/* Hash table with expressions made available during the renaming process.
   When an assignment of the form X_i = EXPR is found, the statement is
   stored in this table.  If the same expression EXPR is later found on the
   RHS of another statement, it is replaced with X_i (thus performing
   global redundancy elimination).  Similarly, as we pass through conditionals
   we record the conditional itself as having either a true or false value
   in this table.  */
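
/* For example, after recording

     x_1 = a_2 + b_3;

   a later occurrence

     y_4 = a_2 + b_3;

   with the same virtual operands looks up the table and is rewritten
   into y_4 = x_1 (SSA names illustrative).  */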
static hash_table <expr_elt_hasher> avail_exprs;
/* Stack of dest,src pairs that need to be restored during finalization.

   A NULL entry is used to mark the end of pairs which need to be
   restored during finalization of this block.  */
static vec<tree> const_and_copies_stack;
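
/* The stack thus grows as PREV_VALUE/DEST pairs, e.g.

     ..., NULL_TREE, prev_of_x, x_1, prev_of_y, y_2

   and restore_vars_to_original_value pops pairs until it reaches the
   NULL_TREE marker pushed at block entry (names illustrative).  */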
/* Track whether or not we have changed the control flow graph.  */
static bool cfg_altered;

/* Bitmap of blocks that have had EH statements cleaned.  We should
   remove their dead edges eventually.  */
static bitmap need_eh_cleanup;
/* Statistics for dominator optimizations.  */
struct opt_stats_d
{
  long num_stmts;
  long num_exprs_considered;
  long num_re;
  long num_const_prop;
  long num_copy_prop;
};

static struct opt_stats_d opt_stats;
/* Local functions.  */
static void optimize_stmt (basic_block, gimple_stmt_iterator);
static tree lookup_avail_expr (gimple, bool);
static hashval_t avail_expr_hash (const void *);
static void htab_statistics (FILE *, hash_table <expr_elt_hasher>);
static void record_cond (cond_equivalence *);
static void record_const_or_copy (tree, tree);
static void record_equality (tree, tree);
static void record_equivalences_from_phis (basic_block);
static void record_equivalences_from_incoming_edge (basic_block);
static void eliminate_redundant_computations (gimple_stmt_iterator *);
static void record_equivalences_from_stmt (gimple, int);
static void remove_local_expressions_from_table (void);
static void restore_vars_to_original_value (void);
static edge single_incoming_edge_ignoring_loop_edges (basic_block);
/* Given a statement STMT, initialize the hash table element pointed to
   by ELEMENT.  */

static void
initialize_hash_element (gimple stmt, tree lhs,
                         struct expr_hash_elt *element)
{
  enum gimple_code code = gimple_code (stmt);
  struct hashable_expr *expr = &element->expr;

  if (code == GIMPLE_ASSIGN)
    {
      enum tree_code subcode = gimple_assign_rhs_code (stmt);

      switch (get_gimple_rhs_class (subcode))
        {
        case GIMPLE_SINGLE_RHS:
          expr->kind = EXPR_SINGLE;
          expr->type = TREE_TYPE (gimple_assign_rhs1 (stmt));
          expr->ops.single.rhs = gimple_assign_rhs1 (stmt);
          break;
        case GIMPLE_UNARY_RHS:
          expr->kind = EXPR_UNARY;
          expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
          expr->ops.unary.op = subcode;
          expr->ops.unary.opnd = gimple_assign_rhs1 (stmt);
          break;
        case GIMPLE_BINARY_RHS:
          expr->kind = EXPR_BINARY;
          expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
          expr->ops.binary.op = subcode;
          expr->ops.binary.opnd0 = gimple_assign_rhs1 (stmt);
          expr->ops.binary.opnd1 = gimple_assign_rhs2 (stmt);
          break;
        case GIMPLE_TERNARY_RHS:
          expr->kind = EXPR_TERNARY;
          expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
          expr->ops.ternary.op = subcode;
          expr->ops.ternary.opnd0 = gimple_assign_rhs1 (stmt);
          expr->ops.ternary.opnd1 = gimple_assign_rhs2 (stmt);
          expr->ops.ternary.opnd2 = gimple_assign_rhs3 (stmt);
          break;
        default:
          gcc_unreachable ();
        }
    }
  else if (code == GIMPLE_COND)
    {
      expr->type = boolean_type_node;
      expr->kind = EXPR_BINARY;
      expr->ops.binary.op = gimple_cond_code (stmt);
      expr->ops.binary.opnd0 = gimple_cond_lhs (stmt);
      expr->ops.binary.opnd1 = gimple_cond_rhs (stmt);
    }
  else if (code == GIMPLE_CALL)
    {
      size_t nargs = gimple_call_num_args (stmt);
      size_t i;

      gcc_assert (gimple_call_lhs (stmt));

      expr->type = TREE_TYPE (gimple_call_lhs (stmt));
      expr->kind = EXPR_CALL;
      expr->ops.call.fn_from = stmt;

      if (gimple_call_flags (stmt) & (ECF_CONST | ECF_PURE))
        expr->ops.call.pure = true;
      else
        expr->ops.call.pure = false;

      expr->ops.call.nargs = nargs;
      expr->ops.call.args = XCNEWVEC (tree, nargs);
      for (i = 0; i < nargs; i++)
        expr->ops.call.args[i] = gimple_call_arg (stmt, i);
    }
  else if (code == GIMPLE_SWITCH)
    {
      expr->type = TREE_TYPE (gimple_switch_index (stmt));
      expr->kind = EXPR_SINGLE;
      expr->ops.single.rhs = gimple_switch_index (stmt);
    }
  else if (code == GIMPLE_GOTO)
    {
      expr->type = TREE_TYPE (gimple_goto_dest (stmt));
      expr->kind = EXPR_SINGLE;
      expr->ops.single.rhs = gimple_goto_dest (stmt);
    }
  else if (code == GIMPLE_PHI)
    {
      size_t nargs = gimple_phi_num_args (stmt);
      size_t i;

      expr->type = TREE_TYPE (gimple_phi_result (stmt));
      expr->kind = EXPR_PHI;
      expr->ops.phi.nargs = nargs;
      expr->ops.phi.args = XCNEWVEC (tree, nargs);

      for (i = 0; i < nargs; i++)
        expr->ops.phi.args[i] = gimple_phi_arg_def (stmt, i);
    }
  else
    gcc_unreachable ();

  element->lhs = lhs;
  element->stmt = stmt;
  element->hash = avail_expr_hash (element);
  element->stamp = element;
}
/* Given a conditional expression COND as a tree, initialize
   a hashable_expr expression EXPR.  The conditional must be a
   comparison or logical negation.  A constant or a variable is
   not permitted.  */

static void
initialize_expr_from_cond (tree cond, struct hashable_expr *expr)
{
  expr->type = boolean_type_node;

  if (COMPARISON_CLASS_P (cond))
    {
      expr->kind = EXPR_BINARY;
      expr->ops.binary.op = TREE_CODE (cond);
      expr->ops.binary.opnd0 = TREE_OPERAND (cond, 0);
      expr->ops.binary.opnd1 = TREE_OPERAND (cond, 1);
    }
  else if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
    {
      expr->kind = EXPR_UNARY;
      expr->ops.unary.op = TRUTH_NOT_EXPR;
      expr->ops.unary.opnd = TREE_OPERAND (cond, 0);
    }
  else
    gcc_unreachable ();
}
/* Given a hashable_expr expression EXPR and an LHS,
   initialize the hash table element pointed to by ELEMENT.  */

static void
initialize_hash_element_from_expr (struct hashable_expr *expr,
                                   tree lhs,
                                   struct expr_hash_elt *element)
{
  element->expr = *expr;
  element->lhs = lhs;
  element->stmt = NULL;
  element->hash = avail_expr_hash (element);
  element->stamp = element;
}
/* Compare two hashable_expr structures for equivalence.  They are
   considered equivalent when the expressions they denote must
   necessarily be equal.  The logic is intended to follow that of
   operand_equal_p in fold-const.c.  */
static bool
hashable_expr_equal_p (const struct hashable_expr *expr0,
                       const struct hashable_expr *expr1)
{
  tree type0 = expr0->type;
  tree type1 = expr1->type;

  /* If either type is NULL, there is nothing to check.  */
  if ((type0 == NULL_TREE) ^ (type1 == NULL_TREE))
    return false;

  /* If both types don't have the same signedness, precision, and mode,
     then we can't consider them equal.  */
  if (type0 != type1
      && (TREE_CODE (type0) == ERROR_MARK
          || TREE_CODE (type1) == ERROR_MARK
          || TYPE_UNSIGNED (type0) != TYPE_UNSIGNED (type1)
          || TYPE_PRECISION (type0) != TYPE_PRECISION (type1)
          || TYPE_MODE (type0) != TYPE_MODE (type1)))
    return false;

  if (expr0->kind != expr1->kind)
    return false;

  switch (expr0->kind)
    {
    case EXPR_SINGLE:
      return operand_equal_p (expr0->ops.single.rhs,
                              expr1->ops.single.rhs, 0);

    case EXPR_UNARY:
      if (expr0->ops.unary.op != expr1->ops.unary.op)
        return false;

      if ((CONVERT_EXPR_CODE_P (expr0->ops.unary.op)
           || expr0->ops.unary.op == NON_LVALUE_EXPR)
          && TYPE_UNSIGNED (expr0->type) != TYPE_UNSIGNED (expr1->type))
        return false;

      return operand_equal_p (expr0->ops.unary.opnd,
                              expr1->ops.unary.opnd, 0);

    case EXPR_BINARY:
      if (expr0->ops.binary.op != expr1->ops.binary.op)
        return false;

      if (operand_equal_p (expr0->ops.binary.opnd0,
                           expr1->ops.binary.opnd0, 0)
          && operand_equal_p (expr0->ops.binary.opnd1,
                              expr1->ops.binary.opnd1, 0))
        return true;

      /* For commutative ops, allow the other order.  */
      return (commutative_tree_code (expr0->ops.binary.op)
              && operand_equal_p (expr0->ops.binary.opnd0,
                                  expr1->ops.binary.opnd1, 0)
              && operand_equal_p (expr0->ops.binary.opnd1,
                                  expr1->ops.binary.opnd0, 0));

    case EXPR_TERNARY:
      if (expr0->ops.ternary.op != expr1->ops.ternary.op
          || !operand_equal_p (expr0->ops.ternary.opnd2,
                               expr1->ops.ternary.opnd2, 0))
        return false;

      if (operand_equal_p (expr0->ops.ternary.opnd0,
                           expr1->ops.ternary.opnd0, 0)
          && operand_equal_p (expr0->ops.ternary.opnd1,
                              expr1->ops.ternary.opnd1, 0))
        return true;

      /* For commutative ops, allow the other order.  */
      return (commutative_ternary_tree_code (expr0->ops.ternary.op)
              && operand_equal_p (expr0->ops.ternary.opnd0,
                                  expr1->ops.ternary.opnd1, 0)
              && operand_equal_p (expr0->ops.ternary.opnd1,
                                  expr1->ops.ternary.opnd0, 0));

    case EXPR_CALL:
      {
        size_t i;

        /* If the calls are to different functions, then they
           clearly cannot be equal.  */
        if (!gimple_call_same_target_p (expr0->ops.call.fn_from,
                                        expr1->ops.call.fn_from))
          return false;

        if (! expr0->ops.call.pure)
          return false;

        if (expr0->ops.call.nargs != expr1->ops.call.nargs)
          return false;

        for (i = 0; i < expr0->ops.call.nargs; i++)
          if (! operand_equal_p (expr0->ops.call.args[i],
                                 expr1->ops.call.args[i], 0))
            return false;

        if (stmt_could_throw_p (expr0->ops.call.fn_from))
          {
            int lp0 = lookup_stmt_eh_lp (expr0->ops.call.fn_from);
            int lp1 = lookup_stmt_eh_lp (expr1->ops.call.fn_from);
            if ((lp0 > 0 || lp1 > 0) && lp0 != lp1)
              return false;
          }

        return true;
      }

    case EXPR_PHI:
      {
        size_t i;

        if (expr0->ops.phi.nargs != expr1->ops.phi.nargs)
          return false;

        for (i = 0; i < expr0->ops.phi.nargs; i++)
          if (! operand_equal_p (expr0->ops.phi.args[i],
                                 expr1->ops.phi.args[i], 0))
            return false;

        return true;
      }

    default:
      gcc_unreachable ();
    }
}
/* Generate a hash value for a pair of expressions.  This can be used
   iteratively by passing a previous result as the VAL argument.

   The same hash value is always returned for a given pair of expressions,
   regardless of the order in which they are presented.  This is useful in
   hashing the operands of commutative functions.  */
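
/* For example, hashing the operands of a_1 + b_2 and of b_2 + a_1
   yields the same value, because the two operand hashes are sorted
   before being mixed into VAL (SSA names illustrative).  */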
static hashval_t
iterative_hash_exprs_commutative (const_tree t1,
                                  const_tree t2, hashval_t val)
{
  hashval_t one = iterative_hash_expr (t1, 0);
  hashval_t two = iterative_hash_expr (t2, 0);
  hashval_t t;

  if (one > two)
    t = one, one = two, two = t;
  val = iterative_hash_hashval_t (one, val);
  val = iterative_hash_hashval_t (two, val);

  return val;
}
/* Compute a hash value for a hashable_expr value EXPR and a
   previously accumulated hash value VAL.  If two hashable_expr
   values compare equal with hashable_expr_equal_p, they must
   hash to the same value, given an identical value of VAL.
   The logic is intended to follow iterative_hash_expr in tree.c.  */

static hashval_t
iterative_hash_hashable_expr (const struct hashable_expr *expr, hashval_t val)
{
  switch (expr->kind)
    {
    case EXPR_SINGLE:
      val = iterative_hash_expr (expr->ops.single.rhs, val);
      break;

    case EXPR_UNARY:
      val = iterative_hash_object (expr->ops.unary.op, val);

      /* Make sure to include signedness in the hash computation.
         Don't hash the type, that can lead to having nodes which
         compare equal according to operand_equal_p, but which
         have different hash codes.  */
      if (CONVERT_EXPR_CODE_P (expr->ops.unary.op)
          || expr->ops.unary.op == NON_LVALUE_EXPR)
        val += TYPE_UNSIGNED (expr->type);

      val = iterative_hash_expr (expr->ops.unary.opnd, val);
      break;

    case EXPR_BINARY:
      val = iterative_hash_object (expr->ops.binary.op, val);
      if (commutative_tree_code (expr->ops.binary.op))
        val = iterative_hash_exprs_commutative (expr->ops.binary.opnd0,
                                                expr->ops.binary.opnd1, val);
      else
        {
          val = iterative_hash_expr (expr->ops.binary.opnd0, val);
          val = iterative_hash_expr (expr->ops.binary.opnd1, val);
        }
      break;

    case EXPR_TERNARY:
      val = iterative_hash_object (expr->ops.ternary.op, val);
      if (commutative_ternary_tree_code (expr->ops.ternary.op))
        val = iterative_hash_exprs_commutative (expr->ops.ternary.opnd0,
                                                expr->ops.ternary.opnd1, val);
      else
        {
          val = iterative_hash_expr (expr->ops.ternary.opnd0, val);
          val = iterative_hash_expr (expr->ops.ternary.opnd1, val);
        }
      val = iterative_hash_expr (expr->ops.ternary.opnd2, val);
      break;

    case EXPR_CALL:
      {
        size_t i;
        enum tree_code code = CALL_EXPR;
        gimple fn_from;

        val = iterative_hash_object (code, val);
        fn_from = expr->ops.call.fn_from;
        if (gimple_call_internal_p (fn_from))
          val = iterative_hash_hashval_t
            ((hashval_t) gimple_call_internal_fn (fn_from), val);
        else
          val = iterative_hash_expr (gimple_call_fn (fn_from), val);
        for (i = 0; i < expr->ops.call.nargs; i++)
          val = iterative_hash_expr (expr->ops.call.args[i], val);
      }
      break;

    case EXPR_PHI:
      {
        size_t i;

        for (i = 0; i < expr->ops.phi.nargs; i++)
          val = iterative_hash_expr (expr->ops.phi.args[i], val);
      }
      break;

    default:
      gcc_unreachable ();
    }

  return val;
}
/* Print a diagnostic dump of an expression hash table entry.  */

static void
print_expr_hash_elt (FILE * stream, const struct expr_hash_elt *element)
{
  if (element->stmt)
    fprintf (stream, "STMT ");
  else
    fprintf (stream, "COND ");

  if (element->lhs)
    {
      print_generic_expr (stream, element->lhs, 0);
      fprintf (stream, " = ");
    }

  switch (element->expr.kind)
    {
    case EXPR_SINGLE:
      print_generic_expr (stream, element->expr.ops.single.rhs, 0);
      break;

    case EXPR_UNARY:
      fprintf (stream, "%s ", get_tree_code_name (element->expr.ops.unary.op));
      print_generic_expr (stream, element->expr.ops.unary.opnd, 0);
      break;

    case EXPR_BINARY:
      print_generic_expr (stream, element->expr.ops.binary.opnd0, 0);
      fprintf (stream, " %s ", get_tree_code_name (element->expr.ops.binary.op));
      print_generic_expr (stream, element->expr.ops.binary.opnd1, 0);
      break;

    case EXPR_TERNARY:
      fprintf (stream, " %s <", get_tree_code_name (element->expr.ops.ternary.op));
      print_generic_expr (stream, element->expr.ops.ternary.opnd0, 0);
      fputs (", ", stream);
      print_generic_expr (stream, element->expr.ops.ternary.opnd1, 0);
      fputs (", ", stream);
      print_generic_expr (stream, element->expr.ops.ternary.opnd2, 0);
      fputs (">", stream);
      break;

    case EXPR_CALL:
      {
        size_t i;
        size_t nargs = element->expr.ops.call.nargs;
        gimple fn_from;

        fn_from = element->expr.ops.call.fn_from;
        if (gimple_call_internal_p (fn_from))
          fputs (internal_fn_name (gimple_call_internal_fn (fn_from)),
                 stream);
        else
          print_generic_expr (stream, gimple_call_fn (fn_from), 0);
        fprintf (stream, " (");
        for (i = 0; i < nargs; i++)
          {
            print_generic_expr (stream, element->expr.ops.call.args[i], 0);
            if (i + 1 < nargs)
              fprintf (stream, ", ");
          }
        fprintf (stream, ")");
      }
      break;

    case EXPR_PHI:
      {
        size_t i;
        size_t nargs = element->expr.ops.phi.nargs;

        fprintf (stream, "PHI <");
        for (i = 0; i < nargs; i++)
          {
            print_generic_expr (stream, element->expr.ops.phi.args[i], 0);
            if (i + 1 < nargs)
              fprintf (stream, ", ");
          }
        fprintf (stream, ">");
      }
      break;
    }
  fprintf (stream, "\n");

  if (element->stmt)
    {
      fprintf (stream, "          ");
      print_gimple_stmt (stream, element->stmt, 0, 0);
    }
}
/* Delete variable sized pieces of the expr_hash_elt ELEMENT.  */

static void
free_expr_hash_elt_contents (struct expr_hash_elt *element)
{
  if (element->expr.kind == EXPR_CALL)
    free (element->expr.ops.call.args);
  else if (element->expr.kind == EXPR_PHI)
    free (element->expr.ops.phi.args);
}

/* Delete an expr_hash_elt and reclaim its storage.  */

static void
free_expr_hash_elt (void *elt)
{
  struct expr_hash_elt *element = ((struct expr_hash_elt *)elt);

  free_expr_hash_elt_contents (element);

  free (element);
}
/* Allocate an EDGE_INFO for edge E and attach it to E.
   Return the new EDGE_INFO structure.  */

static struct edge_info *
allocate_edge_info (edge e)
{
  struct edge_info *edge_info;

  edge_info = XCNEW (struct edge_info);

  e->aux = edge_info;
  return edge_info;
}
/* Free all EDGE_INFO structures associated with edges in the CFG.
   If a particular edge can be threaded, copy the redirection
   target from the EDGE_INFO structure into the edge's AUX field
   as required by code to update the CFG and SSA graph for
   jump threading.  */

static void
free_all_edge_infos (void)
{
  basic_block bb;
  edge_iterator ei;
  edge e;

  FOR_EACH_BB_FN (bb, cfun)
    {
      FOR_EACH_EDGE (e, ei, bb->preds)
        {
          struct edge_info *edge_info = (struct edge_info *) e->aux;

          if (edge_info)
            {
              edge_info->cond_equivalences.release ();
              free (edge_info);
              e->aux = NULL;
            }
        }
    }
}
class dom_opt_dom_walker : public dom_walker
{
public:
  dom_opt_dom_walker (cdi_direction direction)
    : dom_walker (direction), m_dummy_cond (NULL) {}

  virtual void before_dom_children (basic_block);
  virtual void after_dom_children (basic_block);

private:
  void thread_across_edge (edge);

  gimple m_dummy_cond;
};
/* Jump threading, redundancy elimination and const/copy propagation.

   This pass may expose new symbols that need to be renamed into SSA.  For
   every new symbol exposed, its corresponding bit will be set in
   VARS_TO_RENAME.  */

namespace {

const pass_data pass_data_dominator =
{
  GIMPLE_PASS, /* type */
  "dom", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  true, /* has_execute */
  TV_TREE_SSA_DOMINATOR_OPTS, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
};

class pass_dominator : public gimple_opt_pass
{
public:
  pass_dominator (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_dominator, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_dominator (m_ctxt); }
  virtual bool gate (function *) { return flag_tree_dom != 0; }
  virtual unsigned int execute (function *);

}; // class pass_dominator
unsigned int
pass_dominator::execute (function *fun)
{
  memset (&opt_stats, 0, sizeof (opt_stats));

  /* Create our hash tables.  */
  avail_exprs.create (1024);
  avail_exprs_stack.create (20);
  const_and_copies_stack.create (20);
  need_eh_cleanup = BITMAP_ALLOC (NULL);

  calculate_dominance_info (CDI_DOMINATORS);
  cfg_altered = false;

  /* We need to know loop structures in order to avoid destroying them
     in jump threading.  Note that we still can e.g. thread through loop
     headers to an exit edge, or through loop header to the loop body, assuming
     that we update the loop info.

     TODO: We don't need to set LOOPS_HAVE_PREHEADERS generally, but due
     to several overly conservative bail-outs in jump threading, case
     gcc.dg/tree-ssa/pr21417.c can't be threaded if loop preheader is
     missing.  We should improve jump threading in the future; then
     LOOPS_HAVE_PREHEADERS won't be needed here.  */
  loop_optimizer_init (LOOPS_HAVE_PREHEADERS | LOOPS_HAVE_SIMPLE_LATCHES);

  /* Initialize the value-handle array.  */
  threadedge_initialize_values ();

  /* We need accurate information regarding back edges in the CFG
     for jump threading; this may include back edges that are not part of
     a single loop.  */
  mark_dfs_back_edges ();

  /* Recursively walk the dominator tree optimizing statements.  */
  dom_opt_dom_walker (CDI_DOMINATORS).walk (fun->cfg->x_entry_block_ptr);

  {
    gimple_stmt_iterator gsi;
    basic_block bb;
    FOR_EACH_BB_FN (bb, fun)
      {
        for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
          update_stmt_if_modified (gsi_stmt (gsi));
      }
  }

  /* If we exposed any new variables, go ahead and put them into
     SSA form now, before we handle jump threading.  This simplifies
     interactions between rewriting of _DECL nodes into SSA form
     and rewriting SSA_NAME nodes into SSA form after block
     duplication and CFG manipulation.  */
  update_ssa (TODO_update_ssa);

  free_all_edge_infos ();

  /* Thread jumps, creating duplicate blocks as needed.  */
  cfg_altered |= thread_through_all_blocks (first_pass_instance);

  if (cfg_altered)
    free_dominance_info (CDI_DOMINATORS);

  /* Removal of statements may make some EH edges dead.  Purge
     such edges from the CFG as needed.  */
  if (!bitmap_empty_p (need_eh_cleanup))
    {
      unsigned i;
      bitmap_iterator bi;

      /* Jump threading may have created forwarder blocks from blocks
         needing EH cleanup; the new successor of these blocks, which
         has inherited from the original block, needs the cleanup.
         Don't clear bits in the bitmap, as that can break the bitmap
         iterator.  */
      EXECUTE_IF_SET_IN_BITMAP (need_eh_cleanup, 0, i, bi)
        {
          basic_block bb = BASIC_BLOCK_FOR_FN (fun, i);
          if (bb == NULL)
            continue;
          while (single_succ_p (bb)
                 && (single_succ_edge (bb)->flags & EDGE_EH) == 0)
            bb = single_succ (bb);
          if (bb == EXIT_BLOCK_PTR_FOR_FN (fun))
            continue;
          if ((unsigned) bb->index != i)
            bitmap_set_bit (need_eh_cleanup, bb->index);
        }

      gimple_purge_all_dead_eh_edges (need_eh_cleanup);
      bitmap_clear (need_eh_cleanup);
    }

  statistics_counter_event (fun, "Redundant expressions eliminated",
                            opt_stats.num_re);
  statistics_counter_event (fun, "Constants propagated",
                            opt_stats.num_const_prop);
  statistics_counter_event (fun, "Copies propagated",
                            opt_stats.num_copy_prop);

  /* Debugging dumps.  */
  if (dump_file && (dump_flags & TDF_STATS))
    dump_dominator_optimization_stats (dump_file);

  loop_optimizer_finalize ();

  /* Delete our main hashtable.  */
  avail_exprs.dispose ();

  /* Free asserted bitmaps and stacks.  */
  BITMAP_FREE (need_eh_cleanup);

  avail_exprs_stack.release ();
  const_and_copies_stack.release ();

  /* Free the value-handle array.  */
  threadedge_finalize_values ();

  return 0;
}
} // anon namespace

gimple_opt_pass *
make_pass_dominator (gcc::context *ctxt)
{
  return new pass_dominator (ctxt);
}
/* Given a conditional statement CONDSTMT, convert the
   condition to a canonical form.  */

static void
canonicalize_comparison (gimple condstmt)
{
  tree op0;
  tree op1;
  enum tree_code code;

  gcc_assert (gimple_code (condstmt) == GIMPLE_COND);

  op0 = gimple_cond_lhs (condstmt);
  op1 = gimple_cond_rhs (condstmt);

  code = gimple_cond_code (condstmt);

  /* If it would be profitable to swap the operands, then do so to
     canonicalize the statement, enabling better optimization.

     By placing canonicalization of such expressions here we
     transparently keep statements in canonical form, even
     when the statement is modified.  */
  if (tree_swap_operands_p (op0, op1, false))
    {
      /* For relationals we need to swap the operands
         and change the code.  */
      if (code == LT_EXPR
          || code == GT_EXPR
          || code == LE_EXPR
          || code == GE_EXPR)
        {
          code = swap_tree_comparison (code);

          gimple_cond_set_code (condstmt, code);
          gimple_cond_set_lhs (condstmt, op1);
          gimple_cond_set_rhs (condstmt, op0);

          update_stmt (condstmt);
        }
    }
}
/* Initialize local stacks for this optimizer and record equivalences
   upon entry to BB.  Equivalences can come from the edge traversed to
   reach BB or they may come from PHI nodes at the start of BB.  */

/* Remove the expressions entered into the global hash table by the
   current block, stopping when the block's unwinding marker is
   reached on AVAIL_EXPRS_STACK.  */

static void
remove_local_expressions_from_table (void)
{
  /* Remove all the expressions made available in this block.  */
  while (avail_exprs_stack.length () > 0)
    {
      expr_hash_elt_t victim = avail_exprs_stack.pop ();
      expr_hash_elt **slot;

      if (victim == NULL)
        break;

      /* This must precede the actual removal from the hash table,
         as ELEMENT and the table entry may share a call argument
         vector which will be freed during removal.  */
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "<<<< ");
          print_expr_hash_elt (dump_file, victim);
        }

      slot = avail_exprs.find_slot_with_hash (victim, victim->hash, NO_INSERT);
      gcc_assert (slot && *slot == victim);
      avail_exprs.clear_slot (slot);
    }
}
/* Use the source/dest pairs in CONST_AND_COPIES_STACK to restore
   CONST_AND_COPIES to its original state, stopping when we hit a
   NULL marker.  */

static void
restore_vars_to_original_value (void)
{
  while (const_and_copies_stack.length () > 0)
    {
      tree prev_value, dest;

      dest = const_and_copies_stack.pop ();

      if (dest == NULL)
        break;

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "<<<< COPY ");
          print_generic_expr (dump_file, dest, 0);
          fprintf (dump_file, " = ");
          print_generic_expr (dump_file, SSA_NAME_VALUE (dest), 0);
          fprintf (dump_file, "\n");
        }

      prev_value = const_and_copies_stack.pop ();
      set_ssa_name_value (dest, prev_value);
    }
}
/* A trivial wrapper so that we can present the generic jump
   threading code with a simple API for simplifying statements.  */
static tree
simplify_stmt_for_jump_threading (gimple stmt,
                                  gimple within_stmt ATTRIBUTE_UNUSED)
{
  return lookup_avail_expr (stmt, false);
}
/* Record into the equivalence tables any equivalences implied by
   traversing edge E (which are cached in E->aux).

   Callers are responsible for managing the unwinding markers.  */
static void
record_temporary_equivalences (edge e)
{
  int i;
  struct edge_info *edge_info = (struct edge_info *) e->aux;

  /* If we have info associated with this edge, record it into
     our equivalence tables.  */
  if (edge_info)
    {
      cond_equivalence *eq;
      tree lhs = edge_info->lhs;
      tree rhs = edge_info->rhs;

      /* If we have a simple NAME = VALUE equivalence, record it.  */
      if (lhs && TREE_CODE (lhs) == SSA_NAME)
        record_const_or_copy (lhs, rhs);

      /* If we have 0 = COND or 1 = COND equivalences, record them
         into our expression hash tables.  */
      for (i = 0; edge_info->cond_equivalences.iterate (i, &eq); ++i)
        record_cond (eq);
    }
}
/* Wrapper for common code to attempt to thread an edge.  For example,
   it handles lazily building the dummy condition and the bookkeeping
   when jump threading is successful.  */

void
dom_opt_dom_walker::thread_across_edge (edge e)
{
  if (! m_dummy_cond)
    m_dummy_cond =
        gimple_build_cond (NE_EXPR,
                           integer_zero_node, integer_zero_node,
                           NULL, NULL);

  /* Push a marker on both stacks so we can unwind the tables back to their
     current state.  */
  avail_exprs_stack.safe_push (NULL);
  const_and_copies_stack.safe_push (NULL_TREE);

  /* Traversing E may result in equivalences we can utilize.  */
  record_temporary_equivalences (e);

  /* With all the edge equivalences in the tables, go ahead and attempt
     to thread through E->dest.  */
  ::thread_across_edge (m_dummy_cond, e, false,
                        &const_and_copies_stack,
                        simplify_stmt_for_jump_threading);

  /* And restore the various tables to their state before
     we threaded this edge.

     XXX The code in tree-ssa-threadedge.c will restore the state of
     the const_and_copies table.  We just have to restore the expression
     table.  */
  remove_local_expressions_from_table ();
}
/* PHI nodes can create equivalences too.

   Ignoring any alternatives which are the same as the result, if
   all the alternatives are equal, then the PHI node creates an
   equivalence.  */
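
/* For example, with the (illustrative) PHI

     x_3 = PHI <y_1, x_3, y_1>

   every alternative other than the result itself is y_1, so x_3 can
   be treated as a copy of y_1.  */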
static void
record_equivalences_from_phis (basic_block bb)
{
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);

      tree lhs = gimple_phi_result (phi);
      tree rhs = NULL;
      size_t i;

      for (i = 0; i < gimple_phi_num_args (phi); i++)
        {
          tree t = gimple_phi_arg_def (phi, i);

          /* Ignore alternatives which are the same as our LHS.  Since
             LHS is a PHI_RESULT, it is known to be an SSA_NAME, so we
             can simply compare pointers.  */
          if (lhs == t)
            continue;

          /* If we have not processed an alternative yet, then set
             RHS to this alternative.  */
          if (rhs == NULL)
            rhs = t;
          /* If we have processed an alternative (stored in RHS), then
             see if it is equal to this one.  If it isn't, then stop
             the search.  */
          else if (! operand_equal_for_phi_arg_p (rhs, t))
            break;
        }

      /* If we had no interesting alternatives, then all the RHS alternatives
         must have been the same as LHS.  */
      if (!rhs)
        rhs = lhs;

      /* If we managed to iterate through each PHI alternative without
         breaking out of the loop, then we have a PHI which may create
         a useful equivalence.  We do not need to record unwind data for
         this, since this is a true assignment and not an equivalence
         inferred from a comparison.  All uses of this ssa name are dominated
         by this assignment, so unwinding just costs time and space.  */
      if (i == gimple_phi_num_args (phi) && may_propagate_copy (lhs, rhs))
        set_ssa_name_value (lhs, rhs);
    }
}
/* Ignoring loop backedges, if BB has precisely one incoming edge then
   return that edge.  Otherwise return NULL.  */
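
/* For instance, a loop header with a preheader edge and a latch edge
   has two predecessors, but the latch edge is a backedge (its source
   is dominated by its destination), so the preheader edge is the one
   returned.  */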
static edge
single_incoming_edge_ignoring_loop_edges (basic_block bb)
{
  edge retval = NULL;
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      /* A loop back edge can be identified by the destination of
         the edge dominating the source of the edge.  */
      if (dominated_by_p (CDI_DOMINATORS, e->src, e->dest))
        continue;

      /* If we have already seen a non-loop edge, then we must have
         multiple incoming non-loop edges and thus we return NULL.  */
      if (retval)
        return NULL;

      /* This is the first non-loop incoming edge we have found.  Record
         it.  */
      retval = e;
    }

  return retval;
}
/* Record any equivalences created by the incoming edge to BB.  If BB
   has more than one incoming edge, then no equivalence is created.  */

static void
record_equivalences_from_incoming_edge (basic_block bb)
{
  edge e;
  basic_block parent;
  struct edge_info *edge_info;

  /* If our parent block ended with a control statement, then we may be
     able to record some equivalences based on which outgoing edge from
     the parent was followed.  */
  parent = get_immediate_dominator (CDI_DOMINATORS, bb);

  e = single_incoming_edge_ignoring_loop_edges (bb);

  /* If we had a single incoming edge from our parent block, then enter
     any data associated with the edge into our tables.  */
  if (e && e->src == parent)
    {
      unsigned int i;

      edge_info = (struct edge_info *) e->aux;

      if (edge_info)
        {
          tree lhs = edge_info->lhs;
          tree rhs = edge_info->rhs;
          cond_equivalence *eq;

          if (lhs)
            record_equality (lhs, rhs);

          /* If LHS is an SSA_NAME and RHS is a constant integer and LHS was
             set via a widening type conversion, then we may be able to record
             additional equivalences.  */
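          /* For example (names illustrative), given

               short s_2 = ...;
               int i_1 = (int) s_2;
               if (i_1 == 5) ...

             on the true arm we can also record s_2 == (short) 5,
             because 5 fits in the narrower type.  */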
          if (lhs
              && TREE_CODE (lhs) == SSA_NAME
              && is_gimple_constant (rhs)
              && TREE_CODE (rhs) == INTEGER_CST)
            {
              gimple defstmt = SSA_NAME_DEF_STMT (lhs);

              if (defstmt
                  && is_gimple_assign (defstmt)
                  && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (defstmt)))
                {
                  tree old_rhs = gimple_assign_rhs1 (defstmt);

                  /* If the conversion widens the original value and
                     the constant is in the range of the type of OLD_RHS,
                     then convert the constant and record the equivalence.

                     Note that int_fits_type_p does not check the precision
                     if the upper and lower bounds are OK.  */
                  if (INTEGRAL_TYPE_P (TREE_TYPE (old_rhs))
                      && (TYPE_PRECISION (TREE_TYPE (lhs))
                          > TYPE_PRECISION (TREE_TYPE (old_rhs)))
                      && int_fits_type_p (rhs, TREE_TYPE (old_rhs)))
                    {
                      tree newval = fold_convert (TREE_TYPE (old_rhs), rhs);
                      record_equality (old_rhs, newval);
                    }
                }
            }

          for (i = 0; edge_info->cond_equivalences.iterate (i, &eq); ++i)
            record_cond (eq);
        }
    }
}
/* Dump SSA statistics on FILE.  */

void
dump_dominator_optimization_stats (FILE *file)
{
  fprintf (file, "Total number of statements:                   %6ld\n\n",
           opt_stats.num_stmts);
  fprintf (file, "Exprs considered for dominator optimizations: %6ld\n",
           opt_stats.num_exprs_considered);

  fprintf (file, "\nHash table statistics:\n");

  fprintf (file, "    avail_exprs: ");
  htab_statistics (file, avail_exprs);
}
/* Dump SSA statistics on stderr.  */

DEBUG_FUNCTION void
debug_dominator_optimization_stats (void)
{
  dump_dominator_optimization_stats (stderr);
}
/* Dump statistics for the hash table HTAB.  */

static void
htab_statistics (FILE *file, hash_table <expr_elt_hasher> htab)
{
  fprintf (file, "size %ld, %ld elements, %f collision/search ratio\n",
           (long) htab.size (),
           (long) htab.elements (),
           htab.collisions ());
}
/* Enter condition equivalence into the expression hash table.
   This indicates that a conditional expression has a known
   boolean value.  */

static void
record_cond (cond_equivalence *p)
{
  struct expr_hash_elt *element = XCNEW (struct expr_hash_elt);
  expr_hash_elt **slot;

  initialize_hash_element_from_expr (&p->cond, p->value, element);

  slot = avail_exprs.find_slot_with_hash (element, element->hash, INSERT);
  if (*slot == NULL)
    {
      *slot = element;

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "1>>> ");
          print_expr_hash_elt (dump_file, element);
        }

      avail_exprs_stack.safe_push (element);
    }
  else
    free_expr_hash_elt (element);
}
/* Build a cond_equivalence record indicating that the comparison
   CODE holds between operands OP0 and OP1 and push it to *P.  */

static void
build_and_record_new_cond (enum tree_code code,
                           tree op0, tree op1,
                           vec<cond_equivalence> *p)
{
  cond_equivalence c;
  struct hashable_expr *cond = &c.cond;

  gcc_assert (TREE_CODE_CLASS (code) == tcc_comparison);

  cond->type = boolean_type_node;
  cond->kind = EXPR_BINARY;
  cond->ops.binary.op = code;
  cond->ops.binary.opnd0 = op0;
  cond->ops.binary.opnd1 = op1;

  c.value = boolean_true_node;
  p->safe_push (c);
}
/* Record that COND is true and INVERTED is false into the edge information
   structure.  Also record that any conditions dominated by COND are true
   as well.

   For example, if a < b is true, then a <= b must also be true.  */
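
/* Concretely, for integer operands, recording a < b as true also
   records a <= b and a != b as true; for floating-point operands it
   additionally records ORDERED (a, b) and a <> b (LTGT_EXPR), as the
   switch below enumerates.  */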
static void
record_conditions (struct edge_info *edge_info, tree cond, tree inverted)
{
  tree op0, op1;
  cond_equivalence c;

  if (!COMPARISON_CLASS_P (cond))
    return;

  op0 = TREE_OPERAND (cond, 0);
  op1 = TREE_OPERAND (cond, 1);

  switch (TREE_CODE (cond))
    {
    case LT_EXPR:
    case GT_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
        {
          build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
          build_and_record_new_cond (LTGT_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
        }

      build_and_record_new_cond ((TREE_CODE (cond) == LT_EXPR
                                  ? LE_EXPR : GE_EXPR),
                                 op0, op1, &edge_info->cond_equivalences);
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case GE_EXPR:
    case LE_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
        {
          build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
        }
      break;

    case EQ_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
        {
          build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
        }
      build_and_record_new_cond (LE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (GE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case UNORDERED_EXPR:
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNLE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNEQ_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNLT_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGT_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case UNLT_EXPR:
    case UNGT_EXPR:
      build_and_record_new_cond ((TREE_CODE (cond) == UNLT_EXPR
                                  ? UNLE_EXPR : UNGE_EXPR),
                                 op0, op1, &edge_info->cond_equivalences);
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case UNEQ_EXPR:
      build_and_record_new_cond (UNLE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case LTGT_EXPR:
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    default:
      break;
    }

  /* Now store the original true and false conditions into the first
     two slots.  */
  initialize_expr_from_cond (cond, &c.cond);
  c.value = boolean_true_node;
  edge_info->cond_equivalences.safe_push (c);

  /* It is possible for INVERTED to be the negation of a comparison,
     and not a valid RHS or GIMPLE_COND condition.  This happens because
     invert_truthvalue may return such an expression when asked to invert
     a floating-point comparison.  These comparisons are not assumed to
     obey the trichotomy law.  */
  initialize_expr_from_cond (inverted, &c.cond);
  c.value = boolean_false_node;
  edge_info->cond_equivalences.safe_push (c);
}
/* A helper function for record_const_or_copy and record_equality.
   Do the work of recording the value and undo info.  */

static void
record_const_or_copy_1 (tree x, tree y, tree prev_x)
{
  set_ssa_name_value (x, y);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "0>>> COPY ");
      print_generic_expr (dump_file, x, 0);
      fprintf (dump_file, " = ");
      print_generic_expr (dump_file, y, 0);
      fprintf (dump_file, "\n");
    }

  const_and_copies_stack.reserve (2);
  const_and_copies_stack.quick_push (prev_x);
  const_and_copies_stack.quick_push (x);
}
/* Return the loop depth of the basic block of the defining statement of X.
   This number should not be treated as absolutely correct because the loop
   information may not be completely up-to-date when dom runs.  However, it
   will be relatively correct, and as more passes are taught to keep loop info
   up to date, the result will become more and more accurate.  */

static int
loop_depth_of_name (tree x)
{
  gimple defstmt;
  basic_block defbb;

  /* If it's not an SSA_NAME, we have no clue where the definition is.  */
  if (TREE_CODE (x) != SSA_NAME)
    return 0;

  /* Otherwise return the loop depth of the defining statement's bb.
     Note that there may not actually be a bb for this statement, if the
     ssa_name is live on entry.  */
  defstmt = SSA_NAME_DEF_STMT (x);
  defbb = gimple_bb (defstmt);
  if (!defbb)
    return 0;

  return bb_loop_depth (defbb);
}
/* Record that X is equal to Y in const_and_copies.  Record undo
   information in the block-local vector.  */

static void
record_const_or_copy (tree x, tree y)
{
  tree prev_x = SSA_NAME_VALUE (x);

  gcc_assert (TREE_CODE (x) == SSA_NAME);

  if (TREE_CODE (y) == SSA_NAME)
    {
      tree tmp = SSA_NAME_VALUE (y);
      if (tmp)
        y = tmp;
    }

  record_const_or_copy_1 (x, y, prev_x);
}
/* Similarly, but assume that X and Y are the two operands of an EQ_EXPR.
   This constrains the cases in which we may treat this as assignment.  */
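
/* For example (names illustrative), on the true arm of

     if (x_1 == 7.0) ...

   we record x_1 = 7.0; but when signed zeros are honored (the IEEE
   default), a comparison against 0.0 is not recorded, since x_1
   could be -0.0.  The check at the end of this function enforces
   that restriction.  */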
static void
record_equality (tree x, tree y)
{
  tree prev_x = NULL, prev_y = NULL;

  if (TREE_CODE (x) == SSA_NAME)
    prev_x = SSA_NAME_VALUE (x);
  if (TREE_CODE (y) == SSA_NAME)
    prev_y = SSA_NAME_VALUE (y);

  /* If one of the previous values is invariant, or invariant in more loops
     (by depth), then use that.
     Otherwise it doesn't matter which value we choose, just so
     long as we canonicalize on one value.  */
  if (is_gimple_min_invariant (y))
    ;
  else if (is_gimple_min_invariant (x)
           || (loop_depth_of_name (x) <= loop_depth_of_name (y)))
    prev_x = x, x = y, y = prev_x, prev_x = prev_y;
  else if (prev_x && is_gimple_min_invariant (prev_x))
    x = y, y = prev_x, prev_x = prev_y;
  else if (prev_y)
    y = prev_y;

  /* After the swapping, we must have one SSA_NAME.  */
  if (TREE_CODE (x) != SSA_NAME)
    return;

  /* For IEEE, -0.0 == 0.0, so we don't necessarily know the sign of a
     variable compared against zero.  If we're honoring signed zeros,
     then we cannot record this value unless we know that the value is
     nonzero.  */
  if (HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (x)))
      && (TREE_CODE (y) != REAL_CST
          || REAL_VALUES_EQUAL (dconst0, TREE_REAL_CST (y))))
    return;

  record_const_or_copy_1 (x, y, prev_x);
}
/* Returns true when STMT is a simple iv increment.  It detects the
   following situation:

   i_1 = phi (..., i_2)
   i_2 = i_1 +/- ...  */

bool
simple_iv_increment_p (gimple stmt)
{
  enum tree_code code;
  tree lhs, preinc;
  gimple phi;
  size_t i;

  if (gimple_code (stmt) != GIMPLE_ASSIGN)
    return false;

  lhs = gimple_assign_lhs (stmt);
  if (TREE_CODE (lhs) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (code != PLUS_EXPR
      && code != MINUS_EXPR
      && code != POINTER_PLUS_EXPR)
    return false;

  preinc = gimple_assign_rhs1 (stmt);
  if (TREE_CODE (preinc) != SSA_NAME)
    return false;

  phi = SSA_NAME_DEF_STMT (preinc);
  if (gimple_code (phi) != GIMPLE_PHI)
    return false;

  for (i = 0; i < gimple_phi_num_args (phi); i++)
    if (gimple_phi_arg_def (phi, i) == lhs)
      return true;

  return false;
}
/* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
   known value for that SSA_NAME (or NULL if no value is known).

   Propagate values from CONST_AND_COPIES into the PHI nodes of the
   successors of BB.  */

static void
cprop_into_successor_phis (basic_block bb)
{
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      int indx;
      gimple_stmt_iterator gsi;

      /* If this is an abnormal edge, then we do not want to copy propagate
         into the PHI alternative associated with this edge.  */
      if (e->flags & EDGE_ABNORMAL)
        continue;

      gsi = gsi_start_phis (e->dest);
      if (gsi_end_p (gsi))
        continue;

      /* We may have an equivalence associated with this edge.  While
         we can not propagate it into non-dominated blocks, we can
         propagate it into PHIs in non-dominated blocks.  */

      /* Push the unwind marker so we can reset the const and copies
         table back to its original state after processing this edge.  */
      const_and_copies_stack.safe_push (NULL_TREE);

      /* Extract and record any simple NAME = VALUE equivalences.

         Don't bother with [01] = COND equivalences, they're not useful
         here.  */
      struct edge_info *edge_info = (struct edge_info *) e->aux;
      if (edge_info)
        {
          tree lhs = edge_info->lhs;
          tree rhs = edge_info->rhs;

          if (lhs && TREE_CODE (lhs) == SSA_NAME)
            record_const_or_copy (lhs, rhs);
        }

      indx = e->dest_idx;
      for ( ; !gsi_end_p (gsi); gsi_next (&gsi))
        {
          tree new_val;
          use_operand_p orig_p;
          tree orig_val;
          gimple phi = gsi_stmt (gsi);

          /* The alternative may be associated with a constant, so verify
             it is an SSA_NAME before doing anything with it.  */
          orig_p = gimple_phi_arg_imm_use_ptr (phi, indx);
          orig_val = get_use_from_ptr (orig_p);
          if (TREE_CODE (orig_val) != SSA_NAME)
            continue;

          /* If we have *ORIG_P in our constant/copy table, then replace
             ORIG_P with its value in our constant/copy table.  */
          new_val = SSA_NAME_VALUE (orig_val);
          if (new_val
              && new_val != orig_val
              && (TREE_CODE (new_val) == SSA_NAME
                  || is_gimple_min_invariant (new_val))
              && may_propagate_copy (orig_val, new_val))
            propagate_value (orig_p, new_val);
        }

      restore_vars_to_original_value ();
    }
}
/* We have finished optimizing BB, record any information implied by
   taking a specific outgoing edge from BB.  */

static void
record_edge_info (basic_block bb)
{
  gimple_stmt_iterator gsi = gsi_last_bb (bb);
  struct edge_info *edge_info;

  if (! gsi_end_p (gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      location_t loc = gimple_location (stmt);

      if (gimple_code (stmt) == GIMPLE_SWITCH)
        {
          tree index = gimple_switch_index (stmt);

          if (TREE_CODE (index) == SSA_NAME)
            {
              int i;
              int n_labels = gimple_switch_num_labels (stmt);
              tree *info = XCNEWVEC (tree, last_basic_block_for_fn (cfun));
              edge e;
              edge_iterator ei;

              for (i = 0; i < n_labels; i++)
                {
                  tree label = gimple_switch_label (stmt, i);
                  basic_block target_bb = label_to_block (CASE_LABEL (label));
                  if (CASE_HIGH (label)
                      || !CASE_LOW (label)
                      || info[target_bb->index])
                    info[target_bb->index] = error_mark_node;
                  else
                    info[target_bb->index] = label;
                }

              FOR_EACH_EDGE (e, ei, bb->succs)
                {
                  basic_block target_bb = e->dest;
                  tree label = info[target_bb->index];

                  if (label != NULL && label != error_mark_node)
                    {
                      tree x = fold_convert_loc (loc, TREE_TYPE (index),
                                                 CASE_LOW (label));
                      edge_info = allocate_edge_info (e);
                      edge_info->lhs = index;
                      edge_info->rhs = x;
                    }
                }
              free (info);
            }
        }

      /* A COND_EXPR may create equivalences too.  */
      if (gimple_code (stmt) == GIMPLE_COND)
        {
          edge true_edge;
          edge false_edge;

          tree op0 = gimple_cond_lhs (stmt);
          tree op1 = gimple_cond_rhs (stmt);
          enum tree_code code = gimple_cond_code (stmt);

          extract_true_false_edges_from_block (bb, &true_edge, &false_edge);

          /* Special case comparing booleans against a constant as we
             know the value of OP0 on both arms of the branch.  i.e., we
             can record an equivalence for OP0 rather than COND.  */
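          /* For example (SSA names illustrative), for

               if (flag_1 != 0) ...

             we record flag_1 == true on the true edge and
             flag_1 == false on the false edge, rather than recording
             a value for the comparison itself.  */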
          if ((code == EQ_EXPR || code == NE_EXPR)
              && TREE_CODE (op0) == SSA_NAME
              && TREE_CODE (TREE_TYPE (op0)) == BOOLEAN_TYPE
              && is_gimple_min_invariant (op1))
            {
              if (code == EQ_EXPR)
                {
                  edge_info = allocate_edge_info (true_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_false_node
                                    : boolean_true_node);

                  edge_info = allocate_edge_info (false_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_true_node
                                    : boolean_false_node);
                }
              else
                {
                  edge_info = allocate_edge_info (true_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_true_node
                                    : boolean_false_node);

                  edge_info = allocate_edge_info (false_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_false_node
                                    : boolean_true_node);
                }
            }
          else if (is_gimple_min_invariant (op0)
                   && (TREE_CODE (op1) == SSA_NAME
                       || is_gimple_min_invariant (op1)))
            {
              tree cond = build2 (code, boolean_type_node, op0, op1);
              tree inverted = invert_truthvalue_loc (loc, cond);
              bool can_infer_simple_equiv
                = !(HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (op0)))
                    && real_zerop (op0));
              struct edge_info *edge_info;

              edge_info = allocate_edge_info (true_edge);
              record_conditions (edge_info, cond, inverted);

              if (can_infer_simple_equiv && code == EQ_EXPR)
                {
                  edge_info->lhs = op1;
                  edge_info->rhs = op0;
                }

              edge_info = allocate_edge_info (false_edge);
              record_conditions (edge_info, inverted, cond);

              if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
                {
                  edge_info->lhs = op1;
                  edge_info->rhs = op0;
                }
            }
          else if (TREE_CODE (op0) == SSA_NAME
                   && (TREE_CODE (op1) == SSA_NAME
                       || is_gimple_min_invariant (op1)))
            {
              tree cond = build2 (code, boolean_type_node, op0, op1);
              tree inverted = invert_truthvalue_loc (loc, cond);
              bool can_infer_simple_equiv
                = !(HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (op1)))
                    && (TREE_CODE (op1) == SSA_NAME || real_zerop (op1)));
              struct edge_info *edge_info;

              edge_info = allocate_edge_info (true_edge);
              record_conditions (edge_info, cond, inverted);

              if (can_infer_simple_equiv && code == EQ_EXPR)
                {
                  edge_info->lhs = op0;
                  edge_info->rhs = op1;
                }

              edge_info = allocate_edge_info (false_edge);
              record_conditions (edge_info, inverted, cond);

              if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
                {
                  edge_info->lhs = op0;
                  edge_info->rhs = op1;
                }
            }

          /* ??? TRUTH_NOT_EXPR can create an equivalence too.  */
        }
    }
}
1949 void
1950 dom_opt_dom_walker::before_dom_children (basic_block bb)
1952 gimple_stmt_iterator gsi;
1954 if (dump_file && (dump_flags & TDF_DETAILS))
1955 fprintf (dump_file, "\n\nOptimizing block #%d\n\n", bb->index);
1957 /* Push a marker on the stacks of local information so that we know how
1958 far to unwind when we finalize this block. */
1959 avail_exprs_stack.safe_push (NULL);
1960 const_and_copies_stack.safe_push (NULL_TREE);
1962 record_equivalences_from_incoming_edge (bb);
1964 /* PHI nodes can create equivalences too. */
1965 record_equivalences_from_phis (bb);
1967 /* Create equivalences from redundant PHIs. PHIs are only truly
1968 redundant when they exist in the same block, so push another
1969 marker and unwind right afterwards. */
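/* A sketch of the same-block case (hypothetical names):
     x_3 = PHI <a_1(2), b_2(4)>
     y_5 = PHI <a_1(2), b_2(4)>
   With identical arguments along identical edges, y_5 is redundant
   with x_3, and its uses can be replaced by x_3. */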
1970 avail_exprs_stack.safe_push (NULL);
1971 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1972 eliminate_redundant_computations (&gsi);
1973 remove_local_expressions_from_table ();
1975 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1976 optimize_stmt (bb, gsi);
1978 /* Now prepare to process dominated blocks. */
1979 record_edge_info (bb);
1980 cprop_into_successor_phis (bb);
1983 /* We have finished processing the dominator children of BB; perform
1984 any finalization actions in preparation for leaving this node in
1985 the dominator tree. */
1987 void
1988 dom_opt_dom_walker::after_dom_children (basic_block bb)
1990 gimple last;
1992 /* If we have an outgoing edge to a block with multiple incoming and
1993 outgoing edges, then we may be able to thread the edge, i.e., we
1994 may be able to statically determine which of the outgoing edges
1995 will be traversed when the incoming edge from BB is traversed. */
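/* For instance (a hypothetical CFG): if BB's single successor ends in
   "if (x_1 != 0)" and BB's incoming equivalences establish x_1 == 5,
   the conditional's outcome on this path is known, so the path can be
   threaded straight through to the taken edge. */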
1996 if (single_succ_p (bb)
1997 && (single_succ_edge (bb)->flags & EDGE_ABNORMAL) == 0
1998 && potentially_threadable_block (single_succ (bb)))
2000 thread_across_edge (single_succ_edge (bb));
2002 else if ((last = last_stmt (bb))
2003 && gimple_code (last) == GIMPLE_COND
2004 && EDGE_COUNT (bb->succs) == 2
2005 && (EDGE_SUCC (bb, 0)->flags & EDGE_ABNORMAL) == 0
2006 && (EDGE_SUCC (bb, 1)->flags & EDGE_ABNORMAL) == 0)
2008 edge true_edge, false_edge;
2010 extract_true_false_edges_from_block (bb, &true_edge, &false_edge);
2012 /* Only try to thread the edge if it reaches a target block with
2013 more than one predecessor and more than one successor. */
2014 if (potentially_threadable_block (true_edge->dest))
2015 thread_across_edge (true_edge);
2017 /* Similarly for the ELSE arm. */
2018 if (potentially_threadable_block (false_edge->dest))
2019 thread_across_edge (false_edge);
2023 /* These remove expressions local to BB from the tables. */
2024 remove_local_expressions_from_table ();
2025 restore_vars_to_original_value ();
2028 /* Search for redundant computations in STMT. If any are found, then
2029 replace them with the variable holding the result of the computation.
2031 If safe, record this expression into the available expression hash
2032 table. */
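/* A minimal sketch (hypothetical SSA names):
     a_1 = b_2 + c_3;
     ...
     d_4 = b_2 + c_3;
   The second addition hits the hash table entry created by the first,
   so the statement is rewritten to the copy d_4 = a_1. */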
2034 static void
2035 eliminate_redundant_computations (gimple_stmt_iterator* gsi)
2037 tree expr_type;
2038 tree cached_lhs;
2039 tree def;
2040 bool insert = true;
2041 bool assigns_var_p = false;
2043 gimple stmt = gsi_stmt (*gsi);
2045 if (gimple_code (stmt) == GIMPLE_PHI)
2046 def = gimple_phi_result (stmt);
2047 else
2048 def = gimple_get_lhs (stmt);
2050 /* Certain expressions on the RHS can be optimized away, but cannot
2051 themselves be entered into the hash tables. */
2052 if (! def
2053 || TREE_CODE (def) != SSA_NAME
2054 || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (def)
2055 || gimple_vdef (stmt)
2056 /* Do not record equivalences for increments of ivs. This would create
2057 overlapping live ranges for a very questionable gain. */
2058 || simple_iv_increment_p (stmt))
2059 insert = false;
2061 /* Check if the expression has been computed before. */
2062 cached_lhs = lookup_avail_expr (stmt, insert);
2064 opt_stats.num_exprs_considered++;
2066 /* Get the type of the expression we are trying to optimize. */
2067 if (is_gimple_assign (stmt))
2069 expr_type = TREE_TYPE (gimple_assign_lhs (stmt));
2070 assigns_var_p = true;
2072 else if (gimple_code (stmt) == GIMPLE_COND)
2073 expr_type = boolean_type_node;
2074 else if (is_gimple_call (stmt))
2076 gcc_assert (gimple_call_lhs (stmt));
2077 expr_type = TREE_TYPE (gimple_call_lhs (stmt));
2078 assigns_var_p = true;
2080 else if (gimple_code (stmt) == GIMPLE_SWITCH)
2081 expr_type = TREE_TYPE (gimple_switch_index (stmt));
2082 else if (gimple_code (stmt) == GIMPLE_PHI)
2083 /* We can't propagate into a phi, so the logic below doesn't apply.
2084 Instead record an equivalence between the cached LHS and the
2085 PHI result of this statement, provided they are in the same block.
2086 This should be sufficient to kill the redundant phi. */
2088 if (def && cached_lhs)
2089 record_const_or_copy (def, cached_lhs);
2090 return;
2092 else
2093 gcc_unreachable ();
2095 if (!cached_lhs)
2096 return;
2098 /* It is safe to ignore types here since we have already done
2099 type checking in the hashing and equality routines. In fact
2100 type checking here merely gets in the way of constant
2101 propagation. Also, make sure that it is safe to propagate
2102 CACHED_LHS into the expression in STMT. */
2103 if ((TREE_CODE (cached_lhs) != SSA_NAME
2104 && (assigns_var_p
2105 || useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs))))
2106 || may_propagate_copy_into_stmt (stmt, cached_lhs))
2108 gcc_checking_assert (TREE_CODE (cached_lhs) == SSA_NAME
2109 || is_gimple_min_invariant (cached_lhs));
2111 if (dump_file && (dump_flags & TDF_DETAILS))
2113 fprintf (dump_file, " Replaced redundant expr '");
2114 print_gimple_expr (dump_file, stmt, 0, dump_flags);
2115 fprintf (dump_file, "' with '");
2116 print_generic_expr (dump_file, cached_lhs, dump_flags);
2117 fprintf (dump_file, "'\n");
2120 opt_stats.num_re++;
2122 if (assigns_var_p
2123 && !useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs)))
2124 cached_lhs = fold_convert (expr_type, cached_lhs);
2126 propagate_tree_value_into_stmt (gsi, cached_lhs);
2128 /* Since it is always necessary to mark the result as modified,
2129 perhaps we should move this into propagate_tree_value_into_stmt
2130 itself. */
2131 gimple_set_modified (gsi_stmt (*gsi), true);
2135 /* STMT, a GIMPLE_ASSIGN, may create certain equivalences, in either
2136 the available expressions table or the const_and_copies table.
2137 Detect and record those equivalences. */
2138 /* We handle only very simple copy equivalences here. The heavy
2139 lifting is done by eliminate_redundant_computations. */
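/* E.g. (hypothetical): for "x_1 = y_2" we record y_2 as the current
   value of x_1 in CONST_AND_COPIES; the store case below additionally
   enters a reversed copy into the available expression table. */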
2141 static void
2142 record_equivalences_from_stmt (gimple stmt, int may_optimize_p)
2144 tree lhs;
2145 enum tree_code lhs_code;
2147 gcc_assert (is_gimple_assign (stmt));
2149 lhs = gimple_assign_lhs (stmt);
2150 lhs_code = TREE_CODE (lhs);
2152 if (lhs_code == SSA_NAME
2153 && gimple_assign_single_p (stmt))
2155 tree rhs = gimple_assign_rhs1 (stmt);
2157 /* If the RHS of the assignment is a constant or another variable that
2158 may be propagated, register it in the CONST_AND_COPIES table. We
2159 do not need to record unwind data for this, since this is a true
2160 assignment and not an equivalence inferred from a comparison. All
2161 uses of this ssa name are dominated by this assignment, so unwinding
2162 just costs time and space. */
2163 if (may_optimize_p
2164 && (TREE_CODE (rhs) == SSA_NAME
2165 || is_gimple_min_invariant (rhs)))
2167 if (dump_file && (dump_flags & TDF_DETAILS))
2169 fprintf (dump_file, "==== ASGN ");
2170 print_generic_expr (dump_file, lhs, 0);
2171 fprintf (dump_file, " = ");
2172 print_generic_expr (dump_file, rhs, 0);
2173 fprintf (dump_file, "\n");
2176 set_ssa_name_value (lhs, rhs);
2180 /* A memory store, even an aliased store, creates a useful
2181 equivalence. By exchanging the LHS and RHS, creating suitable
2182 vops and recording the result in the available expression table,
2183 we may be able to expose more redundant loads. */
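/* For example (hypothetical names): after the store "*p_1 = x_2" we
   enter the artificial statement "x_2 = *p_1" into the table, so a
   later load "y_3 = *p_1" with the same virtual use can be replaced
   by "y_3 = x_2". */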
2184 if (!gimple_has_volatile_ops (stmt)
2185 && gimple_references_memory_p (stmt)
2186 && gimple_assign_single_p (stmt)
2187 && (TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME
2188 || is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
2189 && !is_gimple_reg (lhs))
2191 tree rhs = gimple_assign_rhs1 (stmt);
2192 gimple new_stmt;
2194 /* Build a new statement with the RHS and LHS exchanged. */
2195 if (TREE_CODE (rhs) == SSA_NAME)
2197 /* NOTE tuples. The call to gimple_build_assign below replaced
2198 a call to build_gimple_modify_stmt, which did not set the
2199 SSA_NAME_DEF_STMT on the LHS of the assignment. Doing so
2200 may cause an SSA validation failure, as the LHS may be a
2201 default-initialized name and should have no definition. I'm
2202 a bit dubious of this, as the artificial statement that we
2203 generate here may in fact be ill-formed, but it is simply
2204 used as an internal device in this pass, and never becomes
2205 part of the CFG. */
2206 gimple defstmt = SSA_NAME_DEF_STMT (rhs);
2207 new_stmt = gimple_build_assign (rhs, lhs);
2208 SSA_NAME_DEF_STMT (rhs) = defstmt;
2210 else
2211 new_stmt = gimple_build_assign (rhs, lhs);
2213 gimple_set_vuse (new_stmt, gimple_vdef (stmt));
2215 /* Finally enter the statement into the available expression
2216 table. */
2217 lookup_avail_expr (new_stmt, true);
2221 /* Replace *OP_P in STMT with any known equivalent value for *OP_P from
2222 CONST_AND_COPIES. */
2224 static void
2225 cprop_operand (gimple stmt, use_operand_p op_p)
2227 tree val;
2228 tree op = USE_FROM_PTR (op_p);
2230 /* If the operand has a known constant value or it is known to be a
2231 copy of some other variable, use the value or copy stored in
2232 CONST_AND_COPIES. */
2233 val = SSA_NAME_VALUE (op);
2234 if (val && val != op)
2236 /* Do not replace hard register operands in asm statements. */
2237 if (gimple_code (stmt) == GIMPLE_ASM
2238 && !may_propagate_copy_into_asm (op))
2239 return;
2241 /* Certain operands are not allowed to be copy propagated due
2242 to their interaction with exception handling and some GCC
2243 extensions. */
2244 if (!may_propagate_copy (op, val))
2245 return;
2247 /* Do not propagate addresses that point to volatiles into memory
2248 stmts without volatile operands. */
2249 if (POINTER_TYPE_P (TREE_TYPE (val))
2250 && TYPE_VOLATILE (TREE_TYPE (TREE_TYPE (val)))
2251 && gimple_has_mem_ops (stmt)
2252 && !gimple_has_volatile_ops (stmt))
2253 return;
2255 /* Do not propagate copies if the propagated value is at a deeper loop
2256 depth than the propagatee. Otherwise, this may move loop variant
2257 variables outside of their loops and prevent coalescing
2258 opportunities. If the value was loop invariant, it will be hoisted
2259 by LICM and exposed for copy propagation. */
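/* E.g. (hypothetical): if VAL is defined inside a loop and OP's
   definition sits outside it, the replacement could keep VAL live
   across the loop boundary and defeat coalescing, so we refuse it. */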
2260 if (loop_depth_of_name (val) > loop_depth_of_name (op))
2261 return;
2263 /* Do not propagate copies into simple IV increment statements.
2264 See PR23821 for how this can disturb IV analysis. */
2265 if (TREE_CODE (val) != INTEGER_CST
2266 && simple_iv_increment_p (stmt))
2267 return;
2269 /* Dump details. */
2270 if (dump_file && (dump_flags & TDF_DETAILS))
2272 fprintf (dump_file, " Replaced '");
2273 print_generic_expr (dump_file, op, dump_flags);
2274 fprintf (dump_file, "' with %s '",
2275 (TREE_CODE (val) != SSA_NAME ? "constant" : "variable"));
2276 print_generic_expr (dump_file, val, dump_flags);
2277 fprintf (dump_file, "'\n");
2280 if (TREE_CODE (val) != SSA_NAME)
2281 opt_stats.num_const_prop++;
2282 else
2283 opt_stats.num_copy_prop++;
2285 propagate_value (op_p, val);
2287 /* And note that we modified this statement. This is now
2288 safe, even if we changed virtual operands, since we will
2289 rescan the statement and rewrite its operands again. */
2290 gimple_set_modified (stmt, true);
2294 /* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
2295 known value for that SSA_NAME (or NULL if no value is known).
2297 Propagate values from CONST_AND_COPIES into the uses, vuses and
2298 vdef_ops of STMT. */
2300 static void
2301 cprop_into_stmt (gimple stmt)
2303 use_operand_p op_p;
2304 ssa_op_iter iter;
2306 FOR_EACH_SSA_USE_OPERAND (op_p, stmt, iter, SSA_OP_USE)
2307 cprop_operand (stmt, op_p);
2310 /* Optimize the statement pointed to by iterator SI.
2312 We try to perform some simplistic global redundancy elimination and
2313 constant propagation:
2315 1- To detect global redundancy, we keep track of expressions that have
2316 been computed in this block and its dominators. If we find that the
2317 same expression is computed more than once, we eliminate repeated
2318 computations by using the target of the first one.
2320 2- Constant values and copy assignments. This is used to do very
2321 simplistic constant and copy propagation. When a constant or copy
2322 assignment is found, we map the value on the RHS of the assignment to
2323 the variable in the LHS in the CONST_AND_COPIES table. */
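/* A combined sketch (hypothetical names):
     x_1 = 4;           <- recorded as x_1 == 4, per item 2
     y_2 = a_3 + x_1;   <- constant propagation folds this to a_3 + 4
     z_4 = a_3 + 4;     <- redundant with y_2's computation, per item 1
   so the last statement becomes the copy z_4 = y_2. */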
2325 static void
2326 optimize_stmt (basic_block bb, gimple_stmt_iterator si)
2328 gimple stmt, old_stmt;
2329 bool may_optimize_p;
2330 bool modified_p = false;
2332 old_stmt = stmt = gsi_stmt (si);
2334 if (dump_file && (dump_flags & TDF_DETAILS))
2336 fprintf (dump_file, "Optimizing statement ");
2337 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2340 if (gimple_code (stmt) == GIMPLE_COND)
2341 canonicalize_comparison (stmt);
2343 update_stmt_if_modified (stmt);
2344 opt_stats.num_stmts++;
2346 /* Const/copy propagate into USES, VUSES and the RHS of VDEFs. */
2347 cprop_into_stmt (stmt);
2349 /* If the statement has been modified with constant replacements,
2350 fold its RHS before checking for redundant computations. */
2351 if (gimple_modified_p (stmt))
2353 tree rhs = NULL;
2355 /* Try to fold the statement making sure that STMT is kept
2356 up to date. */
2357 if (fold_stmt (&si))
2359 stmt = gsi_stmt (si);
2360 gimple_set_modified (stmt, true);
2362 if (dump_file && (dump_flags & TDF_DETAILS))
2364 fprintf (dump_file, " Folded to: ");
2365 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2369 /* We only need to consider cases that can yield a gimple operand. */
2370 if (gimple_assign_single_p (stmt))
2371 rhs = gimple_assign_rhs1 (stmt);
2372 else if (gimple_code (stmt) == GIMPLE_GOTO)
2373 rhs = gimple_goto_dest (stmt);
2374 else if (gimple_code (stmt) == GIMPLE_SWITCH)
2375 /* This should never be an ADDR_EXPR. */
2376 rhs = gimple_switch_index (stmt);
2378 if (rhs && TREE_CODE (rhs) == ADDR_EXPR)
2379 recompute_tree_invariant_for_addr_expr (rhs);
2381 /* Indicate that maybe_clean_or_replace_eh_stmt needs to be called,
2382 even if fold_stmt updated the stmt already and thus cleared
2383 gimple_modified_p flag on it. */
2384 modified_p = true;
2387 /* Check for redundant computations. Do this optimization only for
2388 side-effect-free assignments, calls with an LHS, conditionals and switches. */
2389 may_optimize_p = (!gimple_has_side_effects (stmt)
2390 && (is_gimple_assign (stmt)
2391 || (is_gimple_call (stmt)
2392 && gimple_call_lhs (stmt) != NULL_TREE)
2393 || gimple_code (stmt) == GIMPLE_COND
2394 || gimple_code (stmt) == GIMPLE_SWITCH));
2396 if (may_optimize_p)
2398 if (gimple_code (stmt) == GIMPLE_CALL)
2400 /* Resolve __builtin_constant_p. If it hasn't been
2401 folded to integer_one_node by now, it's fairly
2402 certain that the value simply isn't constant. */
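/* E.g. (hypothetical): for "b_1 = __builtin_constant_p (n_2)" where
   n_2 was never shown to be constant, we substitute 0 so branches
   guarded by b_1 can be simplified during this walk. */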
2403 tree callee = gimple_call_fndecl (stmt);
2404 if (callee
2405 && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL
2406 && DECL_FUNCTION_CODE (callee) == BUILT_IN_CONSTANT_P)
2408 propagate_tree_value_into_stmt (&si, integer_zero_node);
2409 stmt = gsi_stmt (si);
2413 update_stmt_if_modified (stmt);
2414 eliminate_redundant_computations (&si);
2415 stmt = gsi_stmt (si);
2417 /* Perform simple redundant store elimination. */
2418 if (gimple_assign_single_p (stmt)
2419 && TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2421 tree lhs = gimple_assign_lhs (stmt);
2422 tree rhs = gimple_assign_rhs1 (stmt);
2423 tree cached_lhs;
2424 gimple new_stmt;
2425 if (TREE_CODE (rhs) == SSA_NAME)
2427 tree tem = SSA_NAME_VALUE (rhs);
2428 if (tem)
2429 rhs = tem;
2431 /* Build a new statement with the RHS and LHS exchanged. */
2432 if (TREE_CODE (rhs) == SSA_NAME)
2434 gimple defstmt = SSA_NAME_DEF_STMT (rhs);
2435 new_stmt = gimple_build_assign (rhs, lhs);
2436 SSA_NAME_DEF_STMT (rhs) = defstmt;
2438 else
2439 new_stmt = gimple_build_assign (rhs, lhs);
2440 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
2441 cached_lhs = lookup_avail_expr (new_stmt, false);
2442 if (cached_lhs
2443 && rhs == cached_lhs)
2445 basic_block bb = gimple_bb (stmt);
2446 unlink_stmt_vdef (stmt);
2447 if (gsi_remove (&si, true))
2449 bitmap_set_bit (need_eh_cleanup, bb->index);
2450 if (dump_file && (dump_flags & TDF_DETAILS))
2451 fprintf (dump_file, " Flagged to clear EH edges.\n");
2453 release_defs (stmt);
2454 return;
2459 /* Record any additional equivalences created by this statement. */
2460 if (is_gimple_assign (stmt))
2461 record_equivalences_from_stmt (stmt, may_optimize_p);
2463 /* If STMT is a COND_EXPR and it was modified, then we may know
2464 where it goes. If that is the case, then mark the CFG as altered.
2466 This will cause us to later call remove_unreachable_blocks and
2467 cleanup_tree_cfg when it is safe to do so. It is not safe to
2468 clean things up here since removal of edges and such can trigger
2469 the removal of PHI nodes, which in turn can release SSA_NAMEs to
2470 the manager.
2472 That's all fine and good, except that once SSA_NAMEs are released
2473 to the manager, we must not call create_ssa_name until all references
2474 to released SSA_NAMEs have been eliminated.
2476 All references to the deleted SSA_NAMEs cannot be eliminated until
2477 we remove unreachable blocks.
2479 We cannot remove unreachable blocks until after we have completed
2480 any queued jump threading.
2482 We cannot complete any queued jump threads until we have taken
2483 appropriate variables out of SSA form. Taking variables out of
2484 SSA form can call create_ssa_name and thus we lose.
2486 Ultimately I suspect we're going to need to change the interface
2487 into the SSA_NAME manager. */
2488 if (gimple_modified_p (stmt) || modified_p)
2490 tree val = NULL;
2492 update_stmt_if_modified (stmt);
2494 if (gimple_code (stmt) == GIMPLE_COND)
2495 val = fold_binary_loc (gimple_location (stmt),
2496 gimple_cond_code (stmt), boolean_type_node,
2497 gimple_cond_lhs (stmt), gimple_cond_rhs (stmt));
2498 else if (gimple_code (stmt) == GIMPLE_SWITCH)
2499 val = gimple_switch_index (stmt);
2501 if (val && TREE_CODE (val) == INTEGER_CST && find_taken_edge (bb, val))
2502 cfg_altered = true;
2504 /* If we simplified a statement in such a way as to be shown that it
2505 cannot trap, update the eh information and the cfg to match. */
2506 if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt))
2508 bitmap_set_bit (need_eh_cleanup, bb->index);
2509 if (dump_file && (dump_flags & TDF_DETAILS))
2510 fprintf (dump_file, " Flagged to clear EH edges.\n");
2515 /* Search for an existing instance of STMT in the AVAIL_EXPRS table.
2516 If found, return its LHS. Otherwise insert STMT in the table and
2517 return NULL_TREE.
2519 Also, when an expression is first inserted in the table, it is
2520 also added to AVAIL_EXPRS_STACK, so that it can be removed when
2521 we finish processing this block and its children. */
2523 static tree
2524 lookup_avail_expr (gimple stmt, bool insert)
2526 expr_hash_elt **slot;
2527 tree lhs;
2528 tree temp;
2529 struct expr_hash_elt element;
2531 /* Get LHS of phi, assignment, or call; else NULL_TREE. */
2532 if (gimple_code (stmt) == GIMPLE_PHI)
2533 lhs = gimple_phi_result (stmt);
2534 else
2535 lhs = gimple_get_lhs (stmt);
2537 initialize_hash_element (stmt, lhs, &element);
2539 if (dump_file && (dump_flags & TDF_DETAILS))
2541 fprintf (dump_file, "LKUP ");
2542 print_expr_hash_elt (dump_file, &element);
2545 /* Don't bother remembering constant assignments and copy operations.
2546 Constants and copy operations are handled by the constant/copy propagator
2547 in optimize_stmt. */
2548 if (element.expr.kind == EXPR_SINGLE
2549 && (TREE_CODE (element.expr.ops.single.rhs) == SSA_NAME
2550 || is_gimple_min_invariant (element.expr.ops.single.rhs)))
2551 return NULL_TREE;
2553 /* Finally try to find the expression in the main expression hash table. */
2554 slot = avail_exprs.find_slot_with_hash (&element, element.hash,
2555 (insert ? INSERT : NO_INSERT));
2556 if (slot == NULL)
2558 free_expr_hash_elt_contents (&element);
2559 return NULL_TREE;
2561 else if (*slot == NULL)
2563 struct expr_hash_elt *element2 = XNEW (struct expr_hash_elt);
2564 *element2 = element;
2565 element2->stamp = element2;
2566 *slot = element2;
2568 if (dump_file && (dump_flags & TDF_DETAILS))
2570 fprintf (dump_file, "2>>> ");
2571 print_expr_hash_elt (dump_file, element2);
2574 avail_exprs_stack.safe_push (element2);
2575 return NULL_TREE;
2577 else
2578 free_expr_hash_elt_contents (&element);
2580 /* Extract the LHS of the assignment so that it can be used as the current
2581 definition of another variable. */
2582 lhs = ((struct expr_hash_elt *)*slot)->lhs;
2584 /* See if the LHS appears in the CONST_AND_COPIES table. If it does, then
2585 use the value from the const_and_copies table. */
2586 if (TREE_CODE (lhs) == SSA_NAME)
2588 temp = SSA_NAME_VALUE (lhs);
2589 if (temp)
2590 lhs = temp;
2593 if (dump_file && (dump_flags & TDF_DETAILS))
2595 fprintf (dump_file, "FIND: ");
2596 print_generic_expr (dump_file, lhs, 0);
2597 fprintf (dump_file, "\n");
2600 return lhs;
2603 /* Hashing and equality functions for AVAIL_EXPRS. We compute a value number
2604 for expressions using the code of the expression and the SSA numbers of
2605 its operands. */
2607 static hashval_t
2608 avail_expr_hash (const void *p)
2610 gimple stmt = ((const struct expr_hash_elt *)p)->stmt;
2611 const struct hashable_expr *expr = &((const struct expr_hash_elt *)p)->expr;
2612 tree vuse;
2613 hashval_t val = 0;
2615 val = iterative_hash_hashable_expr (expr, val);
2617 /* If the hash table entry is not associated with a statement, then we
2618 can just hash the expression and not worry about virtual operands
2619 and such. */
2620 if (!stmt)
2621 return val;
2623 /* Add the SSA version number of the vuse operand. This is important
2624 because compound variables like arrays are not renamed in the
2625 operands. Rather, the rename is done on the virtual variable
2626 representing all the elements of the array. */
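/* E.g. (hypothetical): two loads of a[i_1] are congruent only while
   they share the same VUSE; hashing the vuse prevents loads separated
   by an intervening store to the array from matching each other. */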
2627 if ((vuse = gimple_vuse (stmt)))
2628 val = iterative_hash_expr (vuse, val);
2630 return val;
2633 /* PHI-ONLY copy and constant propagation. This pass is meant to clean
2634 up degenerate PHIs created by or exposed by jump threading. */
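/* A degenerate PHI is one whose arguments all carry the same value,
   e.g. (hypothetical) "x_3 = PHI <y_1(2), y_1(4)>", which is just a
   roundabout spelling of the copy "x_3 = y_1". */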
2636 /* Given a statement STMT, which is either a PHI node or an assignment,
2637 remove it from the IL. */
2639 static void
2640 remove_stmt_or_phi (gimple stmt)
2642 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
2644 if (gimple_code (stmt) == GIMPLE_PHI)
2645 remove_phi_node (&gsi, true);
2646 else
2648 gsi_remove (&gsi, true);
2649 release_defs (stmt);
2653 /* Given a statement STMT, which is either a PHI node or an assignment,
2654 return the "rhs" of the node. In the case of a non-degenerate
2655 PHI, NULL is returned. */
2657 static tree
2658 get_rhs_or_phi_arg (gimple stmt)
2660 if (gimple_code (stmt) == GIMPLE_PHI)
2661 return degenerate_phi_result (stmt);
2662 else if (gimple_assign_single_p (stmt))
2663 return gimple_assign_rhs1 (stmt);
2664 else
2665 gcc_unreachable ();
2669 /* Given a statement STMT, which is either a PHI node or an assignment,
2670 return the "lhs" of the node. */
2672 static tree
2673 get_lhs_or_phi_result (gimple stmt)
2675 if (gimple_code (stmt) == GIMPLE_PHI)
2676 return gimple_phi_result (stmt);
2677 else if (is_gimple_assign (stmt))
2678 return gimple_assign_lhs (stmt);
2679 else
2680 gcc_unreachable ();
2683 /* Propagate RHS into all uses of LHS (when possible).
2685 RHS and LHS are derived from STMT, which is passed in solely so
2686 that we can remove it if propagation is successful.
2688 When propagating into a PHI node or into a statement which turns
2689 into a trivial copy or constant initialization, set the
2690 appropriate bit in INTERESTING_NAMEs so that we will visit those
2691 nodes as well in an effort to pick up secondary optimization
2692 opportunities. */
2694 static void
2695 propagate_rhs_into_lhs (gimple stmt, tree lhs, tree rhs, bitmap interesting_names)
2697 /* First verify that propagation is valid and isn't going to move a
2698 loop variant variable outside its loop. */
2699 if (! SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs)
2700 && (TREE_CODE (rhs) != SSA_NAME
2701 || ! SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs))
2702 && may_propagate_copy (lhs, rhs)
2703 && loop_depth_of_name (lhs) >= loop_depth_of_name (rhs))
2705 use_operand_p use_p;
2706 imm_use_iterator iter;
2707 gimple use_stmt;
2708 bool all = true;
2710 /* Dump details. */
2711 if (dump_file && (dump_flags & TDF_DETAILS))
2713 fprintf (dump_file, " Replacing '");
2714 print_generic_expr (dump_file, lhs, dump_flags);
2715 fprintf (dump_file, "' with %s '",
2716 (TREE_CODE (rhs) != SSA_NAME ? "constant" : "variable"));
2717 print_generic_expr (dump_file, rhs, dump_flags);
2718 fprintf (dump_file, "'\n");
2721 /* Walk over every use of LHS and try to replace the use with RHS.
2722 At this point the only reason why such a propagation would not
2723 be successful would be if the use occurs in an ASM_EXPR. */
2724 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
2726 /* Leave debug stmts alone. If we succeed in propagating
2727 all non-debug uses, we'll drop the DEF, and propagation
2728 into debug stmts will occur then. */
2729 if (gimple_debug_bind_p (use_stmt))
2730 continue;
2732 /* It's not always safe to propagate into an ASM_EXPR. */
2733 if (gimple_code (use_stmt) == GIMPLE_ASM
2734 && ! may_propagate_copy_into_asm (lhs))
2736 all = false;
2737 continue;
2740 /* It's not ok to propagate into the definition stmt of RHS.
2741 <bb 9>:
2742 # prephitmp.12_36 = PHI <g_67.1_6(9)>
2743 g_67.1_6 = prephitmp.12_36;
2744 goto <bb 9>;
2745 While this is strictly dead code, we do not want to
2746 deal with it here. */
2747 if (TREE_CODE (rhs) == SSA_NAME
2748 && SSA_NAME_DEF_STMT (rhs) == use_stmt)
2750 all = false;
2751 continue;
2754 /* Dump details. */
2755 if (dump_file && (dump_flags & TDF_DETAILS))
2757 fprintf (dump_file, " Original statement:");
2758 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2761 /* Propagate the RHS into this use of the LHS. */
2762 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
2763 propagate_value (use_p, rhs);
2765 /* Special cases to avoid useless calls into the folding
2766 routines, operand scanning, etc.
2768 Propagation into a PHI may cause the PHI to become
2769 degenerate, so mark the PHI as interesting. No other
2770 actions are necessary. */
2771 if (gimple_code (use_stmt) == GIMPLE_PHI)
2773 tree result;
2775 /* Dump details. */
2776 if (dump_file && (dump_flags & TDF_DETAILS))
2778 fprintf (dump_file, " Updated statement:");
2779 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2782 result = get_lhs_or_phi_result (use_stmt);
2783 bitmap_set_bit (interesting_names, SSA_NAME_VERSION (result));
2784 continue;
2787 /* From this point onward we are propagating into a
2788 real statement. Folding may (or may not) be possible,
2789 we may expose new operands, expose dead EH edges,
2790 etc. */
2791 /* NOTE tuples. In the tuples world, fold_stmt_inplace
2792 cannot fold a call that simplifies to a constant,
2793 because the GIMPLE_CALL must be replaced by a
2794 GIMPLE_ASSIGN, and there is no way to effect such a
2795 transformation in-place. We might want to consider
2796 using the more general fold_stmt here. */
2798 gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
2799 fold_stmt_inplace (&gsi);
2802 /* Sometimes propagation can expose new operands to the
2803 renamer. */
2804 update_stmt (use_stmt);
2806 /* Dump details. */
2807 if (dump_file && (dump_flags & TDF_DETAILS))
2809 fprintf (dump_file, " Updated statement:");
2810 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2813 /* If we replaced a variable index with a constant, then
2814 we would need to update the invariant flag for ADDR_EXPRs. */
2815 if (gimple_assign_single_p (use_stmt)
2816 && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ADDR_EXPR)
2817 recompute_tree_invariant_for_addr_expr
2818 (gimple_assign_rhs1 (use_stmt));
2820 /* If we cleaned up EH information from the statement,
2821 mark its containing block as needing EH cleanups. */
2822 if (maybe_clean_or_replace_eh_stmt (use_stmt, use_stmt))
2824 bitmap_set_bit (need_eh_cleanup, gimple_bb (use_stmt)->index);
2825 if (dump_file && (dump_flags & TDF_DETAILS))
2826 fprintf (dump_file, " Flagged to clear EH edges.\n");
2829 /* Propagation may expose new trivial copy/constant propagation
2830 opportunities. */
2831 if (gimple_assign_single_p (use_stmt)
2832 && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
2833 && (TREE_CODE (gimple_assign_rhs1 (use_stmt)) == SSA_NAME
2834 || is_gimple_min_invariant (gimple_assign_rhs1 (use_stmt))))
2836 tree result = get_lhs_or_phi_result (use_stmt);
2837 bitmap_set_bit (interesting_names, SSA_NAME_VERSION (result));
2840 /* Propagation into these nodes may make certain edges in
2841 the CFG unexecutable. We want to identify them, as PHI nodes
2842 at the destination of those unexecutable edges may become
2843 degenerate. */
2844 else if (gimple_code (use_stmt) == GIMPLE_COND
2845 || gimple_code (use_stmt) == GIMPLE_SWITCH
2846 || gimple_code (use_stmt) == GIMPLE_GOTO)
2848 tree val;
2850 if (gimple_code (use_stmt) == GIMPLE_COND)
2851 val = fold_binary_loc (gimple_location (use_stmt),
2852 gimple_cond_code (use_stmt),
2853 boolean_type_node,
2854 gimple_cond_lhs (use_stmt),
2855 gimple_cond_rhs (use_stmt));
2856 else if (gimple_code (use_stmt) == GIMPLE_SWITCH)
2857 val = gimple_switch_index (use_stmt);
2858 else
2859 val = gimple_goto_dest (use_stmt);
2861 if (val && is_gimple_min_invariant (val))
2863 basic_block bb = gimple_bb (use_stmt);
2864 edge te = find_taken_edge (bb, val);
2865 edge_iterator ei;
2866 edge e;
2867 gimple_stmt_iterator gsi, psi;
2869 /* Remove all outgoing edges except TE. */
2870 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei));)
2872 if (e != te)
2874 /* Mark all the PHI nodes at the destination of
2875 the unexecutable edge as interesting. */
2876 for (psi = gsi_start_phis (e->dest);
2877 !gsi_end_p (psi);
2878 gsi_next (&psi))
2880 gimple phi = gsi_stmt (psi);
2882 tree result = gimple_phi_result (phi);
2883 int version = SSA_NAME_VERSION (result);
2885 bitmap_set_bit (interesting_names, version);
2888 te->probability += e->probability;
2890 te->count += e->count;
2891 remove_edge (e);
2892 cfg_altered = true;
2894 else
2895 ei_next (&ei);
2898 gsi = gsi_last_bb (gimple_bb (use_stmt));
2899 gsi_remove (&gsi, true);
2901 /* And fixup the flags on the single remaining edge. */
2902 te->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
2903 te->flags &= ~EDGE_ABNORMAL;
2904 te->flags |= EDGE_FALLTHRU;
2905 if (te->probability > REG_BR_PROB_BASE)
2906 te->probability = REG_BR_PROB_BASE;
2911 /* Ensure there is nothing else to do. */
2912 gcc_assert (!all || has_zero_uses (lhs));
2914 /* If we were able to propagate away all uses of LHS, then
2915 we can remove STMT. */
2916 if (all)
2917 remove_stmt_or_phi (stmt);
2921 /* STMT is either a PHI node (potentially a degenerate PHI node) or
2922 a statement that is a trivial copy or constant initialization.
2924 Attempt to eliminate STMT by propagating its RHS into all uses of
2925 its LHS. This may in turn set new bits in INTERESTING_NAMES
2926 for nodes we want to revisit later.
2928 All exit paths should clear INTERESTING_NAMES for the result
2929 of STMT. */
2931 static void
2932 eliminate_const_or_copy (gimple stmt, bitmap interesting_names)
2934 tree lhs = get_lhs_or_phi_result (stmt);
2935 tree rhs;
2936 int version = SSA_NAME_VERSION (lhs);
2938 /* If the LHS of this statement or PHI has no uses, then we can
2939 just eliminate it. This can occur if, for example, the PHI
2940 was created by block duplication due to threading and its only
2941 use was in the conditional at the end of the block which was
2942 deleted. */
2943 if (has_zero_uses (lhs))
2945 bitmap_clear_bit (interesting_names, version);
2946 remove_stmt_or_phi (stmt);
2947 return;
2950 /* Get the RHS of the assignment or PHI node if the PHI is a
2951 degenerate. */
2952 rhs = get_rhs_or_phi_arg (stmt);
2953 if (!rhs)
2955 bitmap_clear_bit (interesting_names, version);
2956 return;
2959 if (!virtual_operand_p (lhs))
2960 propagate_rhs_into_lhs (stmt, lhs, rhs, interesting_names);
2961 else
2963 gimple use_stmt;
2964 imm_use_iterator iter;
2965 use_operand_p use_p;
2966 /* For virtual operands we have to propagate into all uses, as
2967 otherwise we will create overlapping live ranges. */
2968 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
2969 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
2970 SET_USE (use_p, rhs);
2971 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
2972 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs) = 1;
2973 remove_stmt_or_phi (stmt);
2976 /* Note that STMT may well have been deleted by now, so do
2977 not access it; instead use the saved version # to clear
2978 its result's entry in the worklist. */
2979 bitmap_clear_bit (interesting_names, version);
2982 /* The first phase in degenerate PHI elimination.
2984 Eliminate the degenerate PHIs in BB, then recurse on the
2985 dominator children of BB. */
2987 static void
2988 eliminate_degenerate_phis_1 (basic_block bb, bitmap interesting_names)
2990 gimple_stmt_iterator gsi;
2991 basic_block son;
2993 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2995 gimple phi = gsi_stmt (gsi);
2997 eliminate_const_or_copy (phi, interesting_names);
3000 /* Recurse into the dominator children of BB. */
3001 for (son = first_dom_son (CDI_DOMINATORS, bb);
3002 son;
3003 son = next_dom_son (CDI_DOMINATORS, son))
3004 eliminate_degenerate_phis_1 (son, interesting_names);
3008 /* A very simple pass to eliminate degenerate PHI nodes from the
3009 IL. This is meant to be fast enough to be able to be run several
3010 times in the optimization pipeline.
3012 Certain optimizations, particularly those which duplicate blocks
3013 or remove edges from the CFG, can create or expose PHIs which are
3014 trivial copies or constant initializations.
3016 While we could pick up these optimizations in DOM or with the
3017 combination of copy-prop and CCP, those solutions are far too
3018 heavy-weight for our needs.
3020 This implementation has two phases so that we can efficiently
3021 eliminate the first order degenerate PHIs and second order
3022 degenerate PHIs.
3024 The first phase performs a dominator walk to identify and eliminate
3025 the vast majority of the degenerate PHIs. When a degenerate PHI
3026 is identified and eliminated any affected statements or PHIs
3027 are put on a worklist.
3029 The second phase eliminates degenerate PHIs and trivial copies
3030 or constant initializations using the worklist. This is how we
3031 pick up the secondary optimization opportunities with minimal
3032 cost. */
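/* A sketch of a second order case (hypothetical names):
     x_2 = PHI <y_1(3), y_1(4)>   <- first order, eliminated as y_1
     z_5 = PHI <x_2(6), y_1(7)>   <- degenerate only once x_2 is gone
   The worklist lets the second phase catch z_5 without another full
   dominator walk. */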
3034 namespace {
3036 const pass_data pass_data_phi_only_cprop =
3038 GIMPLE_PASS, /* type */
3039 "phicprop", /* name */
3040 OPTGROUP_NONE, /* optinfo_flags */
3041 true, /* has_execute */
3042 TV_TREE_PHI_CPROP, /* tv_id */
3043 ( PROP_cfg | PROP_ssa ), /* properties_required */
3044 0, /* properties_provided */
3045 0, /* properties_destroyed */
3046 0, /* todo_flags_start */
3047 ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
3050 class pass_phi_only_cprop : public gimple_opt_pass
3052 public:
3053 pass_phi_only_cprop (gcc::context *ctxt)
3054 : gimple_opt_pass (pass_data_phi_only_cprop, ctxt)
3057 /* opt_pass methods: */
3058 opt_pass * clone () { return new pass_phi_only_cprop (m_ctxt); }
3059 virtual bool gate (function *) { return flag_tree_dom != 0; }
3060 virtual unsigned int execute (function *);
3062 }; // class pass_phi_only_cprop
3064 unsigned int
3065 pass_phi_only_cprop::execute (function *fun)
3067 bitmap interesting_names;
3068 bitmap interesting_names1;
3070 /* Bitmap of blocks which need EH information updated. We cannot
3071 update it on-the-fly as doing so invalidates the dominator tree. */
3072 need_eh_cleanup = BITMAP_ALLOC (NULL);
3074 /* INTERESTING_NAMES is effectively our worklist, indexed by
3075 SSA_NAME_VERSION.
3077 A set bit indicates that the statement or PHI node which
3078 defines the SSA_NAME should be (re)examined to determine if
3079 it has become a degenerate PHI or trivial const/copy propagation
3080 opportunity.
3082 Experiments have shown we generally get better compilation
3083 time behavior with bitmaps rather than sbitmaps. */
3084 interesting_names = BITMAP_ALLOC (NULL);
3085 interesting_names1 = BITMAP_ALLOC (NULL);
3087 calculate_dominance_info (CDI_DOMINATORS);
3088 cfg_altered = false;
3090 /* First phase. Eliminate degenerate PHIs via a dominator
3091 walk of the CFG.
3093 Experiments have indicated that we generally get better
3094 compile-time behavior by visiting blocks in the first
3095 phase in dominator order. Presumably this is because walking
3096 in dominator order leaves fewer PHIs for later examination
3097 by the worklist phase. */
3098 eliminate_degenerate_phis_1 (ENTRY_BLOCK_PTR_FOR_FN (fun),
3099 interesting_names);
3101 /* Second phase. Eliminate second order degenerate PHIs as well
3102 as trivial copies or constant initializations identified by
3103 the first phase or this phase. Basically we keep iterating
3104 until our set of INTERESTING_NAMEs is empty. */
3105 while (!bitmap_empty_p (interesting_names))
3107 unsigned int i;
3108 bitmap_iterator bi;
3110 /* EXECUTE_IF_SET_IN_BITMAP does not like its bitmap
3111 changed during the loop. Copy it to another bitmap and
3112 use that. */
3113 bitmap_copy (interesting_names1, interesting_names);
3115 EXECUTE_IF_SET_IN_BITMAP (interesting_names1, 0, i, bi)
3117 tree name = ssa_name (i);
3119 /* Ignore SSA_NAMEs that have been released because
3120 their defining statement was deleted (unreachable). */
3121 if (name)
3122 eliminate_const_or_copy (SSA_NAME_DEF_STMT (ssa_name (i)),
3123 interesting_names);
3127 if (cfg_altered)
3129 free_dominance_info (CDI_DOMINATORS);
3130 /* If we changed the CFG schedule loops for fixup by cfgcleanup. */
3131 if (current_loops)
3132 loops_state_set (LOOPS_NEED_FIXUP);
3135 /* Propagation of const and copies may make some EH edges dead. Purge
3136 such edges from the CFG as needed. */
3137 if (!bitmap_empty_p (need_eh_cleanup))
3139 gimple_purge_all_dead_eh_edges (need_eh_cleanup);
3140 BITMAP_FREE (need_eh_cleanup);
3143 BITMAP_FREE (interesting_names);
3144 BITMAP_FREE (interesting_names1);
3145 return 0;
3148 } // anon namespace
3150 gimple_opt_pass *
3151 make_pass_phi_only_cprop (gcc::context *ctxt)
3153 return new pass_phi_only_cprop (ctxt);