/* SSA Dominator optimizations for trees
   Copyright (C) 2001-2014 Free Software Foundation, Inc.
   Contributed by Diego Novillo <dnovillo@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "hash-table.h"
#include "tm.h"
#include "tree.h"
#include "stor-layout.h"
#include "flags.h"
#include "tm_p.h"
#include "basic-block.h"
#include "cfgloop.h"
#include "function.h"
#include "gimple-pretty-print.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-fold.h"
#include "tree-eh.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-into-ssa.h"
#include "domwalk.h"
#include "tree-pass.h"
#include "tree-ssa-propagate.h"
#include "tree-ssa-threadupdate.h"
#include "langhooks.h"
#include "params.h"
#include "tree-ssa-threadedge.h"
#include "tree-ssa-dom.h"
/* This file implements optimizations on the dominator tree.  */

/* Representation of a "naked" right-hand-side expression, to be used
   in recording available expressions in the expression hash table.  */
enum expr_kind
{
  EXPR_SINGLE,
  EXPR_UNARY,
  EXPR_BINARY,
  EXPR_TERNARY,
  EXPR_CALL,
  EXPR_PHI
};
struct hashable_expr
{
  tree type;
  enum expr_kind kind;
  union {
    struct { tree rhs; } single;
    struct { enum tree_code op;  tree opnd; } unary;
    struct { enum tree_code op;  tree opnd0, opnd1; } binary;
    struct { enum tree_code op;  tree opnd0, opnd1, opnd2; } ternary;
    struct { gimple fn_from; bool pure; size_t nargs; tree *args; } call;
    struct { size_t nargs; tree *args; } phi;
  } ops;
};
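
/* As an illustrative sketch (hypothetical SSA names), the assignment

     x_1 = y_2 + z_3;

   is recorded with kind == EXPR_BINARY, ops.binary.op == PLUS_EXPR and
   ops.binary.opnd0/opnd1 pointing at y_2 and z_3; see
   initialize_hash_element below for the actual mapping.  */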
/* Structure for recording known values of a conditional expression
   at the exits from its block.  */

typedef struct cond_equivalence_s
{
  struct hashable_expr cond;
  tree value;
} cond_equivalence;
/* Structure for recording edge equivalences as well as any pending
   edge redirections during the dominator optimizer.

   Computing and storing the edge equivalences instead of creating
   them on-demand can save significant amounts of time, particularly
   for pathological cases involving switch statements.

   These structures live for a single iteration of the dominator
   optimizer in the edge's AUX field.  At the end of an iteration we
   free each of these structures and update the AUX field to point
   to any requested redirection target (the code for updating the
   CFG and SSA graph for edge redirection expects redirection edge
   targets to be in the AUX field for each edge).  */
struct edge_info
{
  /* If this edge creates a simple equivalence, the LHS and RHS of
     the equivalence will be stored here.  */
  tree lhs;
  tree rhs;

  /* Traversing an edge may also indicate one or more particular conditions
     are true or false.  */
  vec<cond_equivalence> cond_equivalences;
};
/* Stack of available expressions in AVAIL_EXPRs.  Each block pushes any
   expressions it enters into the hash table along with a marker entry
   (null).  When we finish processing the block, we pop off entries and
   remove the expressions from the global hash table until we hit the
   marker.  */
typedef struct expr_hash_elt * expr_hash_elt_t;

static vec<expr_hash_elt_t> avail_exprs_stack;
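
/* A sketch of the marker discipline (markers shown as "--"), assuming
   block B2 is a dominator-tree child of B1:

     push --        enter B1
     push expr1     expr1 made available in B1
     push --        enter B2
     push expr2     expr2 made available in B2
     pop to --      leave B2; expr2 removed from the hash table
     pop to --      leave B1; expr1 removed from the hash table  */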
/* Structure for entries in the expression hash table.  */

struct expr_hash_elt
{
  /* The value (lhs) of this expression.  */
  tree lhs;

  /* The expression (rhs) we want to record.  */
  struct hashable_expr expr;

  /* The stmt pointer if this element corresponds to a statement.  */
  gimple stmt;

  /* The hash value for RHS.  */
  hashval_t hash;

  /* A unique stamp, typically the address of the hash
     element itself, used in removing entries from the table.  */
  struct expr_hash_elt *stamp;
};
/* Hashtable helpers.  */

static bool hashable_expr_equal_p (const struct hashable_expr *,
                                   const struct hashable_expr *);
static void free_expr_hash_elt (void *);

struct expr_elt_hasher
{
  typedef expr_hash_elt value_type;
  typedef expr_hash_elt compare_type;
  static inline hashval_t hash (const value_type *);
  static inline bool equal (const value_type *, const compare_type *);
  static inline void remove (value_type *);
};
inline hashval_t
expr_elt_hasher::hash (const value_type *p)
{
  return p->hash;
}
inline bool
expr_elt_hasher::equal (const value_type *p1, const compare_type *p2)
{
  gimple stmt1 = p1->stmt;
  const struct hashable_expr *expr1 = &p1->expr;
  const struct expr_hash_elt *stamp1 = p1->stamp;
  gimple stmt2 = p2->stmt;
  const struct hashable_expr *expr2 = &p2->expr;
  const struct expr_hash_elt *stamp2 = p2->stamp;

  /* This case should apply only when removing entries from the table.  */
  if (stamp1 == stamp2)
    return true;

  /* FIXME tuples:
     We add stmts to a hash table and then modify them.  To detect the case
     that we modify a stmt and then search for it, we assume that the hash
     is always modified by that change.
     We have to fully check why this doesn't happen on trunk or rewrite
     this in a more reliable (and easier to understand) way.  */
  if (((const struct expr_hash_elt *)p1)->hash
      != ((const struct expr_hash_elt *)p2)->hash)
    return false;

  /* In case of a collision, both RHS have to be identical and have the
     same VUSE operands.  */
  if (hashable_expr_equal_p (expr1, expr2)
      && types_compatible_p (expr1->type, expr2->type))
    {
      /* Note that STMT1 and/or STMT2 may be NULL.  */
      return ((stmt1 ? gimple_vuse (stmt1) : NULL_TREE)
              == (stmt2 ? gimple_vuse (stmt2) : NULL_TREE));
    }

  return false;
}
/* Delete an expr_hash_elt and reclaim its storage.  */

inline void
expr_elt_hasher::remove (value_type *element)
{
  free_expr_hash_elt (element);
}
/* Hash table with expressions made available during the renaming process.
   When an assignment of the form X_i = EXPR is found, the statement is
   stored in this table.  If the same expression EXPR is later found on the
   RHS of another statement, it is replaced with X_i (thus performing
   global redundancy elimination).  Similarly as we pass through conditionals
   we record the conditional itself as having either a true or false value
   in this table.  */
static hash_table <expr_elt_hasher> avail_exprs;
/* Stack of dest,src pairs that need to be restored during finalization.

   A NULL entry is used to mark the end of pairs which need to be
   restored during finalization of this block.  */
static vec<tree> const_and_copies_stack;
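
/* A sketch of the stack layout with hypothetical names (top of stack
   at the right):

     NULL_TREE  prev_1  dest_1  prev_2  dest_2

   restore_vars_to_original_value pops a DEST, then its PREV value, and
   resets SSA_NAME_VALUE (DEST) to PREV, until it reaches the NULL_TREE
   marker.  */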
/* Track whether or not we have changed the control flow graph.  */
static bool cfg_altered;

/* Bitmap of blocks that have had EH statements cleaned.  We should
   remove their dead edges eventually.  */
static bitmap need_eh_cleanup;
/* Statistics for dominator optimizations.  */
struct opt_stats_d
{
  long num_stmts;
  long num_exprs_considered;
  long num_re;
  long num_const_prop;
  long num_copy_prop;
};

static struct opt_stats_d opt_stats;
/* Local functions.  */
static void optimize_stmt (basic_block, gimple_stmt_iterator);
static tree lookup_avail_expr (gimple, bool);
static hashval_t avail_expr_hash (const void *);
static void htab_statistics (FILE *, hash_table <expr_elt_hasher>);
static void record_cond (cond_equivalence *);
static void record_const_or_copy (tree, tree);
static void record_equality (tree, tree);
static void record_equivalences_from_phis (basic_block);
static void record_equivalences_from_incoming_edge (basic_block);
static void eliminate_redundant_computations (gimple_stmt_iterator *);
static void record_equivalences_from_stmt (gimple, int);
static void remove_local_expressions_from_table (void);
static void restore_vars_to_original_value (void);
static edge single_incoming_edge_ignoring_loop_edges (basic_block);
/* Given a statement STMT, initialize the hash table element pointed to
   by ELEMENT.  */

static void
initialize_hash_element (gimple stmt, tree lhs,
                         struct expr_hash_elt *element)
{
  enum gimple_code code = gimple_code (stmt);
  struct hashable_expr *expr = &element->expr;

  if (code == GIMPLE_ASSIGN)
    {
      enum tree_code subcode = gimple_assign_rhs_code (stmt);

      switch (get_gimple_rhs_class (subcode))
        {
        case GIMPLE_SINGLE_RHS:
          expr->kind = EXPR_SINGLE;
          expr->type = TREE_TYPE (gimple_assign_rhs1 (stmt));
          expr->ops.single.rhs = gimple_assign_rhs1 (stmt);
          break;
        case GIMPLE_UNARY_RHS:
          expr->kind = EXPR_UNARY;
          expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
          expr->ops.unary.op = subcode;
          expr->ops.unary.opnd = gimple_assign_rhs1 (stmt);
          break;
        case GIMPLE_BINARY_RHS:
          expr->kind = EXPR_BINARY;
          expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
          expr->ops.binary.op = subcode;
          expr->ops.binary.opnd0 = gimple_assign_rhs1 (stmt);
          expr->ops.binary.opnd1 = gimple_assign_rhs2 (stmt);
          break;
        case GIMPLE_TERNARY_RHS:
          expr->kind = EXPR_TERNARY;
          expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
          expr->ops.ternary.op = subcode;
          expr->ops.ternary.opnd0 = gimple_assign_rhs1 (stmt);
          expr->ops.ternary.opnd1 = gimple_assign_rhs2 (stmt);
          expr->ops.ternary.opnd2 = gimple_assign_rhs3 (stmt);
          break;
        default:
          gcc_unreachable ();
        }
    }
  else if (code == GIMPLE_COND)
    {
      expr->type = boolean_type_node;
      expr->kind = EXPR_BINARY;
      expr->ops.binary.op = gimple_cond_code (stmt);
      expr->ops.binary.opnd0 = gimple_cond_lhs (stmt);
      expr->ops.binary.opnd1 = gimple_cond_rhs (stmt);
    }
  else if (code == GIMPLE_CALL)
    {
      size_t nargs = gimple_call_num_args (stmt);
      size_t i;

      gcc_assert (gimple_call_lhs (stmt));

      expr->type = TREE_TYPE (gimple_call_lhs (stmt));
      expr->kind = EXPR_CALL;
      expr->ops.call.fn_from = stmt;

      if (gimple_call_flags (stmt) & (ECF_CONST | ECF_PURE))
        expr->ops.call.pure = true;
      else
        expr->ops.call.pure = false;

      expr->ops.call.nargs = nargs;
      expr->ops.call.args = XCNEWVEC (tree, nargs);
      for (i = 0; i < nargs; i++)
        expr->ops.call.args[i] = gimple_call_arg (stmt, i);
    }
  else if (code == GIMPLE_SWITCH)
    {
      expr->type = TREE_TYPE (gimple_switch_index (stmt));
      expr->kind = EXPR_SINGLE;
      expr->ops.single.rhs = gimple_switch_index (stmt);
    }
  else if (code == GIMPLE_GOTO)
    {
      expr->type = TREE_TYPE (gimple_goto_dest (stmt));
      expr->kind = EXPR_SINGLE;
      expr->ops.single.rhs = gimple_goto_dest (stmt);
    }
  else if (code == GIMPLE_PHI)
    {
      size_t nargs = gimple_phi_num_args (stmt);
      size_t i;

      expr->type = TREE_TYPE (gimple_phi_result (stmt));
      expr->kind = EXPR_PHI;
      expr->ops.phi.nargs = nargs;
      expr->ops.phi.args = XCNEWVEC (tree, nargs);

      for (i = 0; i < nargs; i++)
        expr->ops.phi.args[i] = gimple_phi_arg_def (stmt, i);
    }
  else
    gcc_unreachable ();

  element->lhs = lhs;
  element->stmt = stmt;
  element->hash = avail_expr_hash (element);
  element->stamp = element;
}
/* Given a conditional expression COND as a tree, initialize
   a hashable_expr expression EXPR.  The conditional must be a
   comparison or logical negation.  A constant or a variable is
   not permitted.  */

static void
initialize_expr_from_cond (tree cond, struct hashable_expr *expr)
{
  expr->type = boolean_type_node;

  if (COMPARISON_CLASS_P (cond))
    {
      expr->kind = EXPR_BINARY;
      expr->ops.binary.op = TREE_CODE (cond);
      expr->ops.binary.opnd0 = TREE_OPERAND (cond, 0);
      expr->ops.binary.opnd1 = TREE_OPERAND (cond, 1);
    }
  else if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
    {
      expr->kind = EXPR_UNARY;
      expr->ops.unary.op = TRUTH_NOT_EXPR;
      expr->ops.unary.opnd = TREE_OPERAND (cond, 0);
    }
  else
    gcc_unreachable ();
}
/* Given a hashable_expr expression EXPR and an LHS,
   initialize the hash table element pointed to by ELEMENT.  */

static void
initialize_hash_element_from_expr (struct hashable_expr *expr,
                                   tree lhs,
                                   struct expr_hash_elt *element)
{
  element->expr = *expr;
  element->lhs = lhs;
  element->stmt = NULL;
  element->hash = avail_expr_hash (element);
  element->stamp = element;
}
/* Compare two hashable_expr structures for equivalence.  They are
   considered equivalent when the expressions they denote must
   necessarily be equal.  The logic is intended to follow that of
   operand_equal_p in fold-const.c.  */

static bool
hashable_expr_equal_p (const struct hashable_expr *expr0,
                       const struct hashable_expr *expr1)
{
  tree type0 = expr0->type;
  tree type1 = expr1->type;

  /* If either type is NULL, there is nothing to check.  */
  if ((type0 == NULL_TREE) ^ (type1 == NULL_TREE))
    return false;

  /* If both types don't have the same signedness, precision, and mode,
     then we can't consider them equal.  */
  if (type0 != type1
      && (TREE_CODE (type0) == ERROR_MARK
          || TREE_CODE (type1) == ERROR_MARK
          || TYPE_UNSIGNED (type0) != TYPE_UNSIGNED (type1)
          || TYPE_PRECISION (type0) != TYPE_PRECISION (type1)
          || TYPE_MODE (type0) != TYPE_MODE (type1)))
    return false;

  if (expr0->kind != expr1->kind)
    return false;

  switch (expr0->kind)
    {
    case EXPR_SINGLE:
      return operand_equal_p (expr0->ops.single.rhs,
                              expr1->ops.single.rhs, 0);

    case EXPR_UNARY:
      if (expr0->ops.unary.op != expr1->ops.unary.op)
        return false;

      if ((CONVERT_EXPR_CODE_P (expr0->ops.unary.op)
           || expr0->ops.unary.op == NON_LVALUE_EXPR)
          && TYPE_UNSIGNED (expr0->type) != TYPE_UNSIGNED (expr1->type))
        return false;

      return operand_equal_p (expr0->ops.unary.opnd,
                              expr1->ops.unary.opnd, 0);

    case EXPR_BINARY:
      if (expr0->ops.binary.op != expr1->ops.binary.op)
        return false;

      if (operand_equal_p (expr0->ops.binary.opnd0,
                           expr1->ops.binary.opnd0, 0)
          && operand_equal_p (expr0->ops.binary.opnd1,
                              expr1->ops.binary.opnd1, 0))
        return true;

      /* For commutative ops, allow the other order.  */
      return (commutative_tree_code (expr0->ops.binary.op)
              && operand_equal_p (expr0->ops.binary.opnd0,
                                  expr1->ops.binary.opnd1, 0)
              && operand_equal_p (expr0->ops.binary.opnd1,
                                  expr1->ops.binary.opnd0, 0));

    case EXPR_TERNARY:
      if (expr0->ops.ternary.op != expr1->ops.ternary.op
          || !operand_equal_p (expr0->ops.ternary.opnd2,
                               expr1->ops.ternary.opnd2, 0))
        return false;

      if (operand_equal_p (expr0->ops.ternary.opnd0,
                           expr1->ops.ternary.opnd0, 0)
          && operand_equal_p (expr0->ops.ternary.opnd1,
                              expr1->ops.ternary.opnd1, 0))
        return true;

      /* For commutative ops, allow the other order.  */
      return (commutative_ternary_tree_code (expr0->ops.ternary.op)
              && operand_equal_p (expr0->ops.ternary.opnd0,
                                  expr1->ops.ternary.opnd1, 0)
              && operand_equal_p (expr0->ops.ternary.opnd1,
                                  expr1->ops.ternary.opnd0, 0));

    case EXPR_CALL:
      {
        size_t i;

        /* If the calls are to different functions, then they
           clearly cannot be equal.  */
        if (!gimple_call_same_target_p (expr0->ops.call.fn_from,
                                        expr1->ops.call.fn_from))
          return false;

        if (! expr0->ops.call.pure)
          return false;

        if (expr0->ops.call.nargs != expr1->ops.call.nargs)
          return false;

        for (i = 0; i < expr0->ops.call.nargs; i++)
          if (! operand_equal_p (expr0->ops.call.args[i],
                                 expr1->ops.call.args[i], 0))
            return false;

        return true;
      }

    case EXPR_PHI:
      {
        size_t i;

        if (expr0->ops.phi.nargs != expr1->ops.phi.nargs)
          return false;

        for (i = 0; i < expr0->ops.phi.nargs; i++)
          if (! operand_equal_p (expr0->ops.phi.args[i],
                                 expr1->ops.phi.args[i], 0))
            return false;

        return true;
      }

    default:
      gcc_unreachable ();
    }
}
/* Generate a hash value for a pair of expressions.  This can be used
   iteratively by passing a previous result as the VAL argument.

   The same hash value is always returned for a given pair of expressions,
   regardless of the order in which they are presented.  This is useful in
   hashing the operands of commutative functions.  */

static hashval_t
iterative_hash_exprs_commutative (const_tree t1,
                                  const_tree t2, hashval_t val)
{
  hashval_t one = iterative_hash_expr (t1, 0);
  hashval_t two = iterative_hash_expr (t2, 0);
  hashval_t t;

  if (one > two)
    t = one, one = two, two = t;
  val = iterative_hash_hashval_t (one, val);
  val = iterative_hash_hashval_t (two, val);

  return val;
}
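
/* For example, the hypothetical expressions a_1 + b_2 and b_2 + a_1
   hash identically through this routine, matching the
   commutative-operand acceptance in hashable_expr_equal_p above.  */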
/* Compute a hash value for a hashable_expr value EXPR and a
   previously accumulated hash value VAL.  If two hashable_expr
   values compare equal with hashable_expr_equal_p, they must
   hash to the same value, given an identical value of VAL.
   The logic is intended to follow iterative_hash_expr in tree.c.  */

static hashval_t
iterative_hash_hashable_expr (const struct hashable_expr *expr, hashval_t val)
{
  switch (expr->kind)
    {
    case EXPR_SINGLE:
      val = iterative_hash_expr (expr->ops.single.rhs, val);
      break;

    case EXPR_UNARY:
      val = iterative_hash_object (expr->ops.unary.op, val);

      /* Make sure to include signedness in the hash computation.
         Don't hash the type, that can lead to having nodes which
         compare equal according to operand_equal_p, but which
         have different hash codes.  */
      if (CONVERT_EXPR_CODE_P (expr->ops.unary.op)
          || expr->ops.unary.op == NON_LVALUE_EXPR)
        val += TYPE_UNSIGNED (expr->type);

      val = iterative_hash_expr (expr->ops.unary.opnd, val);
      break;

    case EXPR_BINARY:
      val = iterative_hash_object (expr->ops.binary.op, val);
      if (commutative_tree_code (expr->ops.binary.op))
        val = iterative_hash_exprs_commutative (expr->ops.binary.opnd0,
                                                expr->ops.binary.opnd1, val);
      else
        {
          val = iterative_hash_expr (expr->ops.binary.opnd0, val);
          val = iterative_hash_expr (expr->ops.binary.opnd1, val);
        }
      break;

    case EXPR_TERNARY:
      val = iterative_hash_object (expr->ops.ternary.op, val);
      if (commutative_ternary_tree_code (expr->ops.ternary.op))
        val = iterative_hash_exprs_commutative (expr->ops.ternary.opnd0,
                                                expr->ops.ternary.opnd1, val);
      else
        {
          val = iterative_hash_expr (expr->ops.ternary.opnd0, val);
          val = iterative_hash_expr (expr->ops.ternary.opnd1, val);
        }
      val = iterative_hash_expr (expr->ops.ternary.opnd2, val);
      break;

    case EXPR_CALL:
      {
        size_t i;
        enum tree_code code = CALL_EXPR;
        gimple fn_from;

        val = iterative_hash_object (code, val);
        fn_from = expr->ops.call.fn_from;
        if (gimple_call_internal_p (fn_from))
          val = iterative_hash_hashval_t
            ((hashval_t) gimple_call_internal_fn (fn_from), val);
        else
          val = iterative_hash_expr (gimple_call_fn (fn_from), val);
        for (i = 0; i < expr->ops.call.nargs; i++)
          val = iterative_hash_expr (expr->ops.call.args[i], val);
      }
      break;

    case EXPR_PHI:
      {
        size_t i;

        for (i = 0; i < expr->ops.phi.nargs; i++)
          val = iterative_hash_expr (expr->ops.phi.args[i], val);
      }
      break;

    default:
      gcc_unreachable ();
    }

  return val;
}
/* Print a diagnostic dump of an expression hash table entry.  */

static void
print_expr_hash_elt (FILE * stream, const struct expr_hash_elt *element)
{
  if (element->stmt)
    fprintf (stream, "STMT ");
  else
    fprintf (stream, "COND ");

  if (element->lhs)
    {
      print_generic_expr (stream, element->lhs, 0);
      fprintf (stream, " = ");
    }

  switch (element->expr.kind)
    {
    case EXPR_SINGLE:
      print_generic_expr (stream, element->expr.ops.single.rhs, 0);
      break;

    case EXPR_UNARY:
      fprintf (stream, "%s ", get_tree_code_name (element->expr.ops.unary.op));
      print_generic_expr (stream, element->expr.ops.unary.opnd, 0);
      break;

    case EXPR_BINARY:
      print_generic_expr (stream, element->expr.ops.binary.opnd0, 0);
      fprintf (stream, " %s ", get_tree_code_name (element->expr.ops.binary.op));
      print_generic_expr (stream, element->expr.ops.binary.opnd1, 0);
      break;

    case EXPR_TERNARY:
      fprintf (stream, " %s <", get_tree_code_name (element->expr.ops.ternary.op));
      print_generic_expr (stream, element->expr.ops.ternary.opnd0, 0);
      fputs (", ", stream);
      print_generic_expr (stream, element->expr.ops.ternary.opnd1, 0);
      fputs (", ", stream);
      print_generic_expr (stream, element->expr.ops.ternary.opnd2, 0);
      fputs (">", stream);
      break;

    case EXPR_CALL:
      {
        size_t i;
        size_t nargs = element->expr.ops.call.nargs;
        gimple fn_from;

        fn_from = element->expr.ops.call.fn_from;
        if (gimple_call_internal_p (fn_from))
          fputs (internal_fn_name (gimple_call_internal_fn (fn_from)),
                 stream);
        else
          print_generic_expr (stream, gimple_call_fn (fn_from), 0);
        fprintf (stream, " (");
        for (i = 0; i < nargs; i++)
          {
            print_generic_expr (stream, element->expr.ops.call.args[i], 0);
            if (i + 1 < nargs)
              fprintf (stream, ", ");
          }
        fprintf (stream, ")");
      }
      break;

    case EXPR_PHI:
      {
        size_t i;
        size_t nargs = element->expr.ops.phi.nargs;

        fprintf (stream, "PHI <");
        for (i = 0; i < nargs; i++)
          {
            print_generic_expr (stream, element->expr.ops.phi.args[i], 0);
            if (i + 1 < nargs)
              fprintf (stream, ", ");
          }
        fprintf (stream, ">");
      }
      break;
    }
  fprintf (stream, "\n");

  if (element->stmt)
    {
      fprintf (stream, "          ");
      print_gimple_stmt (stream, element->stmt, 0, 0);
    }
}
/* Delete variable sized pieces of the expr_hash_elt ELEMENT.  */

static void
free_expr_hash_elt_contents (struct expr_hash_elt *element)
{
  if (element->expr.kind == EXPR_CALL)
    free (element->expr.ops.call.args);
  else if (element->expr.kind == EXPR_PHI)
    free (element->expr.ops.phi.args);
}
/* Delete an expr_hash_elt and reclaim its storage.  */

static void
free_expr_hash_elt (void *elt)
{
  struct expr_hash_elt *element = ((struct expr_hash_elt *)elt);
  free_expr_hash_elt_contents (element);
  free (element);
}
/* Allocate an EDGE_INFO for edge E and attach it to E.
   Return the new EDGE_INFO structure.  */

static struct edge_info *
allocate_edge_info (edge e)
{
  struct edge_info *edge_info;

  edge_info = XCNEW (struct edge_info);

  e->aux = edge_info;
  return edge_info;
}
/* Free all EDGE_INFO structures associated with edges in the CFG.
   If a particular edge can be threaded, copy the redirection
   target from the EDGE_INFO structure into the edge's AUX field
   as required by code to update the CFG and SSA graph for
   jump threading.  */

static void
free_all_edge_infos (void)
{
  basic_block bb;
  edge_iterator ei;
  edge e;

  FOR_EACH_BB_FN (bb, cfun)
    {
      FOR_EACH_EDGE (e, ei, bb->preds)
        {
          struct edge_info *edge_info = (struct edge_info *) e->aux;

          if (edge_info)
            {
              edge_info->cond_equivalences.release ();
              free (edge_info);
              e->aux = NULL;
            }
        }
    }
}
class dom_opt_dom_walker : public dom_walker
{
public:
  dom_opt_dom_walker (cdi_direction direction)
    : dom_walker (direction), m_dummy_cond (NULL) {}

  virtual void before_dom_children (basic_block);
  virtual void after_dom_children (basic_block);

private:
  void thread_across_edge (edge);

  gimple m_dummy_cond;
};
/* Jump threading, redundancy elimination and const/copy propagation.

   This pass may expose new symbols that need to be renamed into SSA.  For
   every new symbol exposed, its corresponding bit will be set in
   VARS_TO_RENAME.  */

namespace {

const pass_data pass_data_dominator =
{
  GIMPLE_PASS, /* type */
  "dom", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  true, /* has_execute */
  TV_TREE_SSA_DOMINATOR_OPTS, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_cleanup_cfg | TODO_update_ssa
    | TODO_verify_ssa
    | TODO_verify_flow ), /* todo_flags_finish */
};
class pass_dominator : public gimple_opt_pass
{
public:
  pass_dominator (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_dominator, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_dominator (m_ctxt); }
  virtual bool gate (function *) { return flag_tree_dom != 0; }
  virtual unsigned int execute (function *);

}; // class pass_dominator
unsigned int
pass_dominator::execute (function *fun)
{
  memset (&opt_stats, 0, sizeof (opt_stats));

  /* Create our hash tables.  */
  avail_exprs.create (1024);
  avail_exprs_stack.create (20);
  const_and_copies_stack.create (20);
  need_eh_cleanup = BITMAP_ALLOC (NULL);

  calculate_dominance_info (CDI_DOMINATORS);
  cfg_altered = false;

  /* We need to know loop structures in order to avoid destroying them
     in jump threading.  Note that we still can e.g. thread through loop
     headers to an exit edge, or through loop header to the loop body, assuming
     that we update the loop info.

     TODO: We don't need to set LOOPS_HAVE_PREHEADERS generally, but due
     to several overly conservative bail-outs in jump threading, case
     gcc.dg/tree-ssa/pr21417.c can't be threaded if loop preheader is
     missing.  We should improve jump threading in the future; then
     LOOPS_HAVE_PREHEADERS won't be needed here.  */
  loop_optimizer_init (LOOPS_HAVE_PREHEADERS | LOOPS_HAVE_SIMPLE_LATCHES);

  /* Initialize the value-handle array.  */
  threadedge_initialize_values ();

  /* We need accurate information regarding back edges in the CFG
     for jump threading; this may include back edges that are not part of
     a single loop.  */
  mark_dfs_back_edges ();

  /* Recursively walk the dominator tree optimizing statements.  */
  dom_opt_dom_walker (CDI_DOMINATORS).walk (fun->cfg->x_entry_block_ptr);

  {
    gimple_stmt_iterator gsi;
    basic_block bb;
    FOR_EACH_BB_FN (bb, fun)
      {
        for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
          update_stmt_if_modified (gsi_stmt (gsi));
      }
  }

  /* If we exposed any new variables, go ahead and put them into
     SSA form now, before we handle jump threading.  This simplifies
     interactions between rewriting of _DECL nodes into SSA form
     and rewriting SSA_NAME nodes into SSA form after block
     duplication and CFG manipulation.  */
  update_ssa (TODO_update_ssa);

  free_all_edge_infos ();

  /* Thread jumps, creating duplicate blocks as needed.  */
  cfg_altered |= thread_through_all_blocks (first_pass_instance);

  if (cfg_altered)
    free_dominance_info (CDI_DOMINATORS);

  /* Removal of statements may make some EH edges dead.  Purge
     such edges from the CFG as needed.  */
  if (!bitmap_empty_p (need_eh_cleanup))
    {
      unsigned i;
      bitmap_iterator bi;

      /* Jump threading may have created forwarder blocks from blocks
         needing EH cleanup; the new successor of these blocks, which
         has inherited from the original block, needs the cleanup.
         Don't clear bits in the bitmap, as that can break the bitmap
         iterator.  */
      EXECUTE_IF_SET_IN_BITMAP (need_eh_cleanup, 0, i, bi)
        {
          basic_block bb = BASIC_BLOCK_FOR_FN (fun, i);
          if (bb == NULL)
            continue;
          while (single_succ_p (bb)
                 && (single_succ_edge (bb)->flags & EDGE_EH) == 0)
            bb = single_succ (bb);
          if (bb == EXIT_BLOCK_PTR_FOR_FN (fun))
            continue;
          if ((unsigned) bb->index != i)
            bitmap_set_bit (need_eh_cleanup, bb->index);
        }

      gimple_purge_all_dead_eh_edges (need_eh_cleanup);
      bitmap_clear (need_eh_cleanup);
    }

  statistics_counter_event (fun, "Redundant expressions eliminated",
                            opt_stats.num_re);
  statistics_counter_event (fun, "Constants propagated",
                            opt_stats.num_const_prop);
  statistics_counter_event (fun, "Copies propagated",
                            opt_stats.num_copy_prop);

  /* Debugging dumps.  */
  if (dump_file && (dump_flags & TDF_STATS))
    dump_dominator_optimization_stats (dump_file);

  loop_optimizer_finalize ();

  /* Delete our main hashtable.  */
  avail_exprs.dispose ();

  /* Free asserted bitmaps and stacks.  */
  BITMAP_FREE (need_eh_cleanup);

  avail_exprs_stack.release ();
  const_and_copies_stack.release ();

  /* Free the value-handle array.  */
  threadedge_finalize_values ();

  return 0;
}
} // anon namespace

gimple_opt_pass *
make_pass_dominator (gcc::context *ctxt)
{
  return new pass_dominator (ctxt);
}
/* Given a conditional statement CONDSTMT, convert the
   condition to a canonical form.  */

static void
canonicalize_comparison (gimple condstmt)
{
  tree op0;
  tree op1;
  enum tree_code code;

  gcc_assert (gimple_code (condstmt) == GIMPLE_COND);

  op0 = gimple_cond_lhs (condstmt);
  op1 = gimple_cond_rhs (condstmt);

  code = gimple_cond_code (condstmt);

  /* If it would be profitable to swap the operands, then do so to
     canonicalize the statement, enabling better optimization.

     By placing canonicalization of such expressions here we
     transparently keep statements in canonical form, even
     when the statement is modified.  */
  if (tree_swap_operands_p (op0, op1, false))
    {
      /* For relationals we need to swap the operands
         and change the code.  */
      if (code == LT_EXPR
          || code == GT_EXPR
          || code == LE_EXPR
          || code == GE_EXPR)
        {
          code = swap_tree_comparison (code);

          gimple_cond_set_code (condstmt, code);
          gimple_cond_set_lhs (condstmt, op1);
          gimple_cond_set_rhs (condstmt, op0);

          update_stmt (condstmt);
        }
    }
}
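
/* A hypothetical example: the conditional

     if (5 < a_1)

   is rewritten as  if (a_1 > 5)  by swapping the operands and applying
   swap_tree_comparison to the comparison code.  */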
/* Initialize local stacks for this optimizer and record equivalences
   upon entry to BB.  Equivalences can come from the edge traversed to
   reach BB or they may come from PHI nodes at the start of BB.  */

/* Remove the expressions recorded in the current block from the global
   hash table, stopping when the block's NULL marker is reached.  */
static void
remove_local_expressions_from_table (void)
{
  /* Remove all the expressions made available in this block.  */
  while (avail_exprs_stack.length () > 0)
    {
      expr_hash_elt_t victim = avail_exprs_stack.pop ();
      expr_hash_elt **slot;

      if (victim == NULL)
        break;

      /* This must precede the actual removal from the hash table,
         as ELEMENT and the table entry may share a call argument
         vector which will be freed during removal.  */
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "<<<< ");
          print_expr_hash_elt (dump_file, victim);
        }

      slot = avail_exprs.find_slot_with_hash (victim, victim->hash, NO_INSERT);
      gcc_assert (slot && *slot == victim);
      avail_exprs.clear_slot (slot);
    }
}
/* Use the source/dest pairs in CONST_AND_COPIES_STACK to restore
   CONST_AND_COPIES to its original state, stopping when we hit a
   NULL marker.  */

static void
restore_vars_to_original_value (void)
{
  while (const_and_copies_stack.length () > 0)
    {
      tree prev_value, dest;

      dest = const_and_copies_stack.pop ();

      if (dest == NULL)
        break;

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "<<<< COPY ");
          print_generic_expr (dump_file, dest, 0);
          fprintf (dump_file, " = ");
          print_generic_expr (dump_file, SSA_NAME_VALUE (dest), 0);
          fprintf (dump_file, "\n");
        }

      prev_value = const_and_copies_stack.pop ();
      set_ssa_name_value (dest, prev_value);
    }
}
/* A trivial wrapper so that we can present the generic jump
   threading code with a simple API for simplifying statements.  */
static tree
simplify_stmt_for_jump_threading (gimple stmt,
                                  gimple within_stmt ATTRIBUTE_UNUSED)
{
  return lookup_avail_expr (stmt, false);
}
/* Record into the equivalence tables any equivalences implied by
   traversing edge E (which are cached in E->aux).

   Callers are responsible for managing the unwinding markers.  */
static void
record_temporary_equivalences (edge e)
{
  int i;
  struct edge_info *edge_info = (struct edge_info *) e->aux;

  /* If we have info associated with this edge, record it into
     our equivalence tables.  */
  if (edge_info)
    {
      cond_equivalence *eq;
      tree lhs = edge_info->lhs;
      tree rhs = edge_info->rhs;

      /* If we have a simple NAME = VALUE equivalence, record it.  */
      if (lhs && TREE_CODE (lhs) == SSA_NAME)
        record_const_or_copy (lhs, rhs);

      /* If we have 0 = COND or 1 = COND equivalences, record them
         into our expression hash tables.  */
      for (i = 0; edge_info->cond_equivalences.iterate (i, &eq); ++i)
        record_cond (eq);
    }
}
/* Wrapper for common code to attempt to thread an edge.  For example,
   it handles lazily building the dummy condition and the bookkeeping
   when jump threading is successful.  */

void
dom_opt_dom_walker::thread_across_edge (edge e)
{
  if (! m_dummy_cond)
    m_dummy_cond =
        gimple_build_cond (NE_EXPR,
                           integer_zero_node, integer_zero_node,
                           NULL, NULL);

  /* Push a marker on both stacks so we can unwind the tables back to their
     current state.  */
  avail_exprs_stack.safe_push (NULL);
  const_and_copies_stack.safe_push (NULL_TREE);

  /* Traversing E may result in equivalences we can utilize.  */
  record_temporary_equivalences (e);

  /* With all the edge equivalences in the tables, go ahead and attempt
     to thread through E->dest.  */
  ::thread_across_edge (m_dummy_cond, e, false,
                        &const_and_copies_stack,
                        simplify_stmt_for_jump_threading);

  /* And restore the various tables to their state before
     we threaded this edge.

     XXX The code in tree-ssa-threadedge.c will restore the state of
     the const_and_copies table.  We just have to restore the expression
     table.  */
  remove_local_expressions_from_table ();
}
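
/* A sketch of a successful thread (hypothetical CFG): if traversing E
   records a_1 == 0 and E->dest ends in  if (a_1 == 0) , the condition
   is statically resolvable, so a jump thread from E through E->dest to
   the known taken edge is registered for thread_through_all_blocks.  */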
/* PHI nodes can create equivalences too.

   Ignoring any alternatives which are the same as the result, if
   all the alternatives are equal, then the PHI node creates an
   equivalence.  */

static void
record_equivalences_from_phis (basic_block bb)
{
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);

      tree lhs = gimple_phi_result (phi);
      tree rhs = NULL;
      size_t i;

      for (i = 0; i < gimple_phi_num_args (phi); i++)
        {
          tree t = gimple_phi_arg_def (phi, i);

          /* Ignore alternatives which are the same as our LHS.  Since
             LHS is a PHI_RESULT, it is known to be a SSA_NAME, so we
             can simply compare pointers.  */
          if (lhs == t)
            continue;

          /* If we have not processed an alternative yet, then set
             RHS to this alternative.  */
          if (rhs == NULL)
            rhs = t;
          /* If we have processed an alternative (stored in RHS), then
             see if it is equal to this one.  If it isn't, then stop
             the search.  */
          else if (! operand_equal_for_phi_arg_p (rhs, t))
            break;
        }

      /* If we had no interesting alternatives, then all the RHS alternatives
         must have been the same as LHS.  */
      if (!rhs)
        rhs = lhs;

      /* If we managed to iterate through each PHI alternative without
         breaking out of the loop, then we have a PHI which may create
         a useful equivalence.  We do not need to record unwind data for
         this, since this is a true assignment and not an equivalence
         inferred from a comparison.  All uses of this ssa name are dominated
         by this assignment, so unwinding just costs time and space.  */
      if (i == gimple_phi_num_args (phi) && may_propagate_copy (lhs, rhs))
        set_ssa_name_value (lhs, rhs);
    }
}
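
/* A hypothetical example of such an equivalence:

     x_3 = PHI <y_2(2), y_2(3), x_3(4)>

   Every alternative other than the result itself is y_2, so x_3 must
   equal y_2, and SSA_NAME_VALUE (x_3) is set to y_2.  */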
/* Ignoring loop backedges, if BB has precisely one incoming edge then
   return that edge.  Otherwise return NULL.  */
static edge
single_incoming_edge_ignoring_loop_edges (basic_block bb)
{
  edge retval = NULL;
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      /* A loop back edge can be identified by the destination of
         the edge dominating the source of the edge.  */
      if (dominated_by_p (CDI_DOMINATORS, e->src, e->dest))
        continue;

      /* If we have already seen a non-loop edge, then we must have
         multiple incoming non-loop edges and thus we return NULL.  */
      if (retval)
        return NULL;

      /* This is the first non-loop incoming edge we have found.  Record
         it.  */
      retval = e;
    }

  return retval;
}
/* Record any equivalences created by the incoming edge to BB.  If BB
   has more than one incoming edge, then no equivalence is created.  */

static void
record_equivalences_from_incoming_edge (basic_block bb)
{
  edge e;
  basic_block parent;
  struct edge_info *edge_info;

  /* If our parent block ended with a control statement, then we may be
     able to record some equivalences based on which outgoing edge from
     the parent was followed.  */
  parent = get_immediate_dominator (CDI_DOMINATORS, bb);

  e = single_incoming_edge_ignoring_loop_edges (bb);

  /* If we had a single incoming edge from our parent block, then enter
     any data associated with the edge into our tables.  */
  if (e && e->src == parent)
    {
      unsigned int i;

      edge_info = (struct edge_info *) e->aux;

      if (edge_info)
        {
          tree lhs = edge_info->lhs;
          tree rhs = edge_info->rhs;
          cond_equivalence *eq;

          if (lhs)
            record_equality (lhs, rhs);

          /* If LHS is an SSA_NAME and RHS is a constant integer and LHS was
             set via a widening type conversion, then we may be able to record
             additional equivalences.  */
          if (lhs
              && TREE_CODE (lhs) == SSA_NAME
              && is_gimple_constant (rhs)
              && TREE_CODE (rhs) == INTEGER_CST)
            {
              gimple defstmt = SSA_NAME_DEF_STMT (lhs);

              if (defstmt
                  && is_gimple_assign (defstmt)
                  && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (defstmt)))
                {
                  tree old_rhs = gimple_assign_rhs1 (defstmt);

                  /* If the conversion widens the original value and
                     the constant is in the range of the type of OLD_RHS,
                     then convert the constant and record the equivalence.

                     Note that int_fits_type_p does not check the precision
                     if the upper and lower bounds are OK.  */
                  if (INTEGRAL_TYPE_P (TREE_TYPE (old_rhs))
                      && (TYPE_PRECISION (TREE_TYPE (lhs))
                          > TYPE_PRECISION (TREE_TYPE (old_rhs)))
                      && int_fits_type_p (rhs, TREE_TYPE (old_rhs)))
                    {
                      tree newval = fold_convert (TREE_TYPE (old_rhs), rhs);
                      record_equality (old_rhs, newval);
                    }
                }
            }

          for (i = 0; edge_info->cond_equivalences.iterate (i, &eq); ++i)
            record_cond (eq);
        }
    }
}
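
/* A sketch of the widening-conversion case above (hypothetical names):

     c_2 = ...;           <- a char value
     x_1 = (int) c_2;
     if (x_1 == 5) ...

   On the true edge we record x_1 == 5, and since 5 fits in the type
   of c_2, we also record c_2 == 5.  */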
/* Dump SSA statistics on FILE.  */

void
dump_dominator_optimization_stats (FILE *file)
{
  fprintf (file, "Total number of statements:                   %6ld\n\n",
           opt_stats.num_stmts);
  fprintf (file, "Exprs considered for dominator optimizations: %6ld\n",
           opt_stats.num_exprs_considered);

  fprintf (file, "\nHash table statistics:\n");

  fprintf (file, "    avail_exprs: ");
  htab_statistics (file, avail_exprs);
}
/* Dump SSA statistics on stderr.  */

DEBUG_FUNCTION void
debug_dominator_optimization_stats (void)
{
  dump_dominator_optimization_stats (stderr);
}
/* Dump statistics for the hash table HTAB.  */

static void
htab_statistics (FILE *file, hash_table <expr_elt_hasher> htab)
{
  fprintf (file, "size %ld, %ld elements, %f collision/search ratio\n",
           (long) htab.size (),
           (long) htab.elements (),
           htab.collisions ());
}
/* Enter condition equivalence into the expression hash table.
   This indicates that a conditional expression has a known
   boolean value.  */

static void
record_cond (cond_equivalence *p)
{
  struct expr_hash_elt *element = XCNEW (struct expr_hash_elt);
  expr_hash_elt **slot;

  initialize_hash_element_from_expr (&p->cond, p->value, element);

  slot = avail_exprs.find_slot_with_hash (element, element->hash, INSERT);
  if (*slot == NULL)
    {
      *slot = element;

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "1>>> ");
          print_expr_hash_elt (dump_file, element);
        }

      avail_exprs_stack.safe_push (element);
    }
  else
    free_expr_hash_elt (element);
}
/* Build a cond_equivalence record indicating that the comparison
   CODE holds between operands OP0 and OP1 and push it to **P.  */

static void
build_and_record_new_cond (enum tree_code code,
                           tree op0, tree op1,
                           vec<cond_equivalence> *p)
{
  cond_equivalence c;
  struct hashable_expr *cond = &c.cond;

  gcc_assert (TREE_CODE_CLASS (code) == tcc_comparison);

  cond->type = boolean_type_node;
  cond->kind = EXPR_BINARY;
  cond->ops.binary.op = code;
  cond->ops.binary.opnd0 = op0;
  cond->ops.binary.opnd1 = op1;

  c.value = boolean_true_node;
  p->safe_push (c);
}
/* Record that COND is true and INVERTED is false into the edge information
   structure.  Also record that any conditions dominated by COND are true
   as well.

   For example, if a < b is true, then a <= b must also be true.  */

static void
record_conditions (struct edge_info *edge_info, tree cond, tree inverted)
{
  tree op0, op1;
  cond_equivalence c;

  if (!COMPARISON_CLASS_P (cond))
    return;

  op0 = TREE_OPERAND (cond, 0);
  op1 = TREE_OPERAND (cond, 1);

  switch (TREE_CODE (cond))
    {
    case LT_EXPR:
    case GT_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
        {
          build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
          build_and_record_new_cond (LTGT_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
        }

      build_and_record_new_cond ((TREE_CODE (cond) == LT_EXPR
                                  ? LE_EXPR : GE_EXPR),
                                 op0, op1, &edge_info->cond_equivalences);
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case GE_EXPR:
    case LE_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
        {
          build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
        }
      break;

    case EQ_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
        {
          build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
        }
      build_and_record_new_cond (LE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (GE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case UNORDERED_EXPR:
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNLE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNEQ_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNLT_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGT_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case UNLT_EXPR:
    case UNGT_EXPR:
      build_and_record_new_cond ((TREE_CODE (cond) == UNLT_EXPR
                                  ? UNLE_EXPR : UNGE_EXPR),
                                 op0, op1, &edge_info->cond_equivalences);
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case UNEQ_EXPR:
      build_and_record_new_cond (UNLE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case LTGT_EXPR:
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    default:
      break;
    }

  /* Now store the original true and false conditions into the first
     two slots.  */
  initialize_expr_from_cond (cond, &c.cond);
  c.value = boolean_true_node;
  edge_info->cond_equivalences.safe_push (c);

  /* It is possible for INVERTED to be the negation of a comparison,
     and not a valid RHS or GIMPLE_COND condition.  This happens because
     invert_truthvalue may return such an expression when asked to invert
     a floating-point comparison.  These comparisons are not assumed to
     obey the trichotomy law.  */
  initialize_expr_from_cond (inverted, &c.cond);
  c.value = boolean_false_node;
  edge_info->cond_equivalences.safe_push (c);
}
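
/* For a hypothetical integer comparison a_1 < b_2, the true edge
   receives the records

     a_1 <= b_2  == true
     a_1 != b_2  == true
     a_1 <  b_2  == true
     a_1 >= b_2  == false   (the inverted condition)

   while floating-point operands would additionally record ORDERED and
   LTGT conditions as shown in the switch above.  */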
/* A helper function for record_const_or_copy and record_equality.
   Do the work of recording the value and undo info.  */

static void
record_const_or_copy_1 (tree x, tree y, tree prev_x)
{
  set_ssa_name_value (x, y);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "0>>> COPY ");
      print_generic_expr (dump_file, x, 0);
      fprintf (dump_file, " = ");
      print_generic_expr (dump_file, y, 0);
      fprintf (dump_file, "\n");
    }

  const_and_copies_stack.reserve (2);
  const_and_copies_stack.quick_push (prev_x);
  const_and_copies_stack.quick_push (x);
}
/* Return the loop depth of the basic block of the defining statement of X.
   This number should not be treated as absolutely correct because the loop
   information may not be completely up-to-date when dom runs.  However, it
   will be relatively correct, and as more passes are taught to keep loop info
   up to date, the result will become more and more accurate.  */

static int
loop_depth_of_name (tree x)
{
  gimple defstmt;
  basic_block defbb;

  /* If it's not an SSA_NAME, we have no clue where the definition is.  */
  if (TREE_CODE (x) != SSA_NAME)
    return 0;

  /* Otherwise return the loop depth of the defining statement's bb.
     Note that there may not actually be a bb for this statement, if the
     ssa_name is live on entry.  */
  defstmt = SSA_NAME_DEF_STMT (x);
  defbb = gimple_bb (defstmt);
  if (!defbb)
    return 0;

  return bb_loop_depth (defbb);
}
/* Record that X is equal to Y in const_and_copies.  Record undo
   information in the block-local vector.  */

static void
record_const_or_copy (tree x, tree y)
{
  tree prev_x = SSA_NAME_VALUE (x);

  gcc_assert (TREE_CODE (x) == SSA_NAME);

  if (TREE_CODE (y) == SSA_NAME)
    {
      tree tmp = SSA_NAME_VALUE (y);
      if (tmp)
        y = tmp;
    }

  record_const_or_copy_1 (x, y, prev_x);
}
/* Similarly, but assume that X and Y are the two operands of an EQ_EXPR.
   This constrains the cases in which we may treat this as assignment.  */

static void
record_equality (tree x, tree y)
{
  tree prev_x = NULL, prev_y = NULL;

  if (TREE_CODE (x) == SSA_NAME)
    prev_x = SSA_NAME_VALUE (x);
  if (TREE_CODE (y) == SSA_NAME)
    prev_y = SSA_NAME_VALUE (y);

  /* If one of the previous values is invariant, or invariant in more loops
     (by depth), then use that.
     Otherwise it doesn't matter which value we choose, just so
     long as we canonicalize on one value.  */
  if (is_gimple_min_invariant (y))
    ;
  else if (is_gimple_min_invariant (x)
           || (loop_depth_of_name (x) <= loop_depth_of_name (y)))
    prev_x = x, x = y, y = prev_x, prev_x = prev_y;
  else if (prev_x && is_gimple_min_invariant (prev_x))
    x = y, y = prev_x, prev_x = prev_y;
  else if (prev_y)
    y = prev_y;

  /* After the swapping, we must have one SSA_NAME.  */
  if (TREE_CODE (x) != SSA_NAME)
    return;

  /* For IEEE, -0.0 == 0.0, so we don't necessarily know the sign of a
     variable compared against zero.  If we're honoring signed zeros,
     then we cannot record this value unless we know that the value is
     nonzero.  */
  if (HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (x)))
      && (TREE_CODE (y) != REAL_CST
          || REAL_VALUES_EQUAL (dconst0, TREE_REAL_CST (y))))
    return;

  record_const_or_copy_1 (x, y, prev_x);
}
/* Returns true when STMT is a simple iv increment.  It detects the
   following situation:

   i_1 = phi (..., i_2)
   i_2 = i_1 +/- ...  */

bool
simple_iv_increment_p (gimple stmt)
{
  enum tree_code code;
  tree lhs, preinc;
  gimple phi;
  size_t i;

  if (gimple_code (stmt) != GIMPLE_ASSIGN)
    return false;

  lhs = gimple_assign_lhs (stmt);
  if (TREE_CODE (lhs) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (code != PLUS_EXPR
      && code != MINUS_EXPR
      && code != POINTER_PLUS_EXPR)
    return false;

  preinc = gimple_assign_rhs1 (stmt);
  if (TREE_CODE (preinc) != SSA_NAME)
    return false;

  phi = SSA_NAME_DEF_STMT (preinc);
  if (gimple_code (phi) != GIMPLE_PHI)
    return false;

  for (i = 0; i < gimple_phi_num_args (phi); i++)
    if (gimple_phi_arg_def (phi, i) == lhs)
      return true;

  return false;
}
/* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
   known value for that SSA_NAME (or NULL if no value is known).

   Propagate values from CONST_AND_COPIES into the PHI nodes of the
   successors of BB.  */

static void
cprop_into_successor_phis (basic_block bb)
{
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      int indx;
      gimple_stmt_iterator gsi;

      /* If this is an abnormal edge, then we do not want to copy propagate
         into the PHI alternative associated with this edge.  */
      if (e->flags & EDGE_ABNORMAL)
        continue;

      gsi = gsi_start_phis (e->dest);
      if (gsi_end_p (gsi))
        continue;

      /* We may have an equivalence associated with this edge.  While
         we can not propagate it into non-dominated blocks, we can
         propagate it into PHIs in non-dominated blocks.  */

      /* Push the unwind marker so we can reset the const and copies
         table back to its original state after processing this edge.  */
      const_and_copies_stack.safe_push (NULL_TREE);

      /* Extract and record any simple NAME = VALUE equivalences.

         Don't bother with [01] = COND equivalences, they're not useful
         here.  */
      struct edge_info *edge_info = (struct edge_info *) e->aux;
      if (edge_info)
        {
          tree lhs = edge_info->lhs;
          tree rhs = edge_info->rhs;

          if (lhs && TREE_CODE (lhs) == SSA_NAME)
            record_const_or_copy (lhs, rhs);
        }

      indx = e->dest_idx;
      for ( ; !gsi_end_p (gsi); gsi_next (&gsi))
        {
          tree new_val;
          use_operand_p orig_p;
          tree orig_val;
          gimple phi = gsi_stmt (gsi);

          /* The alternative may be associated with a constant, so verify
             it is an SSA_NAME before doing anything with it.  */
          orig_p = gimple_phi_arg_imm_use_ptr (phi, indx);
          orig_val = get_use_from_ptr (orig_p);
          if (TREE_CODE (orig_val) != SSA_NAME)
            continue;

          /* If we have *ORIG_P in our constant/copy table, then replace
             ORIG_P with its value in our constant/copy table.  */
          new_val = SSA_NAME_VALUE (orig_val);
          if (new_val
              && new_val != orig_val
              && (TREE_CODE (new_val) == SSA_NAME
                  || is_gimple_min_invariant (new_val))
              && may_propagate_copy (orig_val, new_val))
            propagate_value (orig_p, new_val);
        }

      restore_vars_to_original_value ();
    }
}
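
/* A hypothetical example: if SSA_NAME_VALUE (a_1) is the constant 5
   and a successor block contains

     x_4 = PHI <a_1(E), ...>

   then the alternative on edge E is replaced, yielding

     x_4 = PHI <5(E), ...>  */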
/* We have finished optimizing BB, record any information implied by
   taking a specific outgoing edge from BB.  */

static void
record_edge_info (basic_block bb)
{
  gimple_stmt_iterator gsi = gsi_last_bb (bb);
  struct edge_info *edge_info;

  if (! gsi_end_p (gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      location_t loc = gimple_location (stmt);

      if (gimple_code (stmt) == GIMPLE_SWITCH)
        {
          tree index = gimple_switch_index (stmt);

          if (TREE_CODE (index) == SSA_NAME)
            {
              int i;
              int n_labels = gimple_switch_num_labels (stmt);
              tree *info = XCNEWVEC (tree, last_basic_block_for_fn (cfun));
              edge e;
              edge_iterator ei;

              for (i = 0; i < n_labels; i++)
                {
                  tree label = gimple_switch_label (stmt, i);
                  basic_block target_bb = label_to_block (CASE_LABEL (label));
                  if (CASE_HIGH (label)
                      || !CASE_LOW (label)
                      || info[target_bb->index])
                    info[target_bb->index] = error_mark_node;
                  else
                    info[target_bb->index] = label;
                }

              FOR_EACH_EDGE (e, ei, bb->succs)
                {
                  basic_block target_bb = e->dest;
                  tree label = info[target_bb->index];

                  if (label != NULL && label != error_mark_node)
                    {
                      tree x = fold_convert_loc (loc, TREE_TYPE (index),
                                                 CASE_LOW (label));
                      edge_info = allocate_edge_info (e);
                      edge_info->lhs = index;
                      edge_info->rhs = x;
                    }
                }
              free (info);
            }
        }

      /* A COND_EXPR may create equivalences too.  */
      if (gimple_code (stmt) == GIMPLE_COND)
        {
          edge true_edge;
          edge false_edge;

          tree op0 = gimple_cond_lhs (stmt);
          tree op1 = gimple_cond_rhs (stmt);
          enum tree_code code = gimple_cond_code (stmt);

          extract_true_false_edges_from_block (bb, &true_edge, &false_edge);

          /* Special case comparing booleans against a constant as we
             know the value of OP0 on both arms of the branch.  i.e., we
             can record an equivalence for OP0 rather than COND.  */
          if ((code == EQ_EXPR || code == NE_EXPR)
              && TREE_CODE (op0) == SSA_NAME
              && TREE_CODE (TREE_TYPE (op0)) == BOOLEAN_TYPE
              && is_gimple_min_invariant (op1))
            {
              if (code == EQ_EXPR)
                {
                  edge_info = allocate_edge_info (true_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_false_node
                                    : boolean_true_node);

                  edge_info = allocate_edge_info (false_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_true_node
                                    : boolean_false_node);
                }
              else
                {
                  edge_info = allocate_edge_info (true_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_true_node
                                    : boolean_false_node);

                  edge_info = allocate_edge_info (false_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_false_node
                                    : boolean_true_node);
                }
            }
          else if (is_gimple_min_invariant (op0)
                   && (TREE_CODE (op1) == SSA_NAME
                       || is_gimple_min_invariant (op1)))
            {
              tree cond = build2 (code, boolean_type_node, op0, op1);
              tree inverted = invert_truthvalue_loc (loc, cond);
              bool can_infer_simple_equiv
                = !(HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (op0)))
                    && real_zerop (op0));
              struct edge_info *edge_info;

              edge_info = allocate_edge_info (true_edge);
              record_conditions (edge_info, cond, inverted);

              if (can_infer_simple_equiv && code == EQ_EXPR)
                {
                  edge_info->lhs = op1;
                  edge_info->rhs = op0;
                }

              edge_info = allocate_edge_info (false_edge);
              record_conditions (edge_info, inverted, cond);

              if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
                {
                  edge_info->lhs = op1;
                  edge_info->rhs = op0;
                }
            }
          else if (TREE_CODE (op0) == SSA_NAME
                   && (TREE_CODE (op1) == SSA_NAME
                       || is_gimple_min_invariant (op1)))
            {
              tree cond = build2 (code, boolean_type_node, op0, op1);
              tree inverted = invert_truthvalue_loc (loc, cond);
              bool can_infer_simple_equiv
                = !(HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (op1)))
                    && (TREE_CODE (op1) == SSA_NAME || real_zerop (op1)));
              struct edge_info *edge_info;

              edge_info = allocate_edge_info (true_edge);
              record_conditions (edge_info, cond, inverted);

              if (can_infer_simple_equiv && code == EQ_EXPR)
                {
                  edge_info->lhs = op0;
                  edge_info->rhs = op1;
                }

              edge_info = allocate_edge_info (false_edge);
              record_conditions (edge_info, inverted, cond);

              if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
                {
                  edge_info->lhs = op0;
                  edge_info->rhs = op1;
                }
            }
        }

      /* ??? TRUTH_NOT_EXPR can create an equivalence too.  */
    }
}
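
/* A sketch of the boolean special case above (hypothetical names): for

     if (b_1 == 0)

   the true edge records b_1 = false and the false edge records
   b_1 = true, an equivalence for B_1 itself rather than for the
   condition.  */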
1943 void
1944 dom_opt_dom_walker::before_dom_children (basic_block bb)
1946 gimple_stmt_iterator gsi;
1948 if (dump_file && (dump_flags & TDF_DETAILS))
1949 fprintf (dump_file, "\n\nOptimizing block #%d\n\n", bb->index);
1951 /* Push a marker on the stacks of local information so that we know how
1952 far to unwind when we finalize this block. */
1953 avail_exprs_stack.safe_push (NULL);
1954 const_and_copies_stack.safe_push (NULL_TREE);
1956 record_equivalences_from_incoming_edge (bb);
1958 /* PHI nodes can create equivalences too. */
1959 record_equivalences_from_phis (bb);
1961 /* Create equivalences from redundant PHIs. PHIs are only truly
1962 redundant when they exist in the same block, so push another
1963 marker and unwind right afterwards. */
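/* A sketch with hypothetical SSA names: two PHIs in the same block

     x_1 = PHI <a_2 (e1), b_3 (e2)>
     y_4 = PHI <a_2 (e1), b_3 (e2)>

   compute the same value, so the second is redundant and its uses
   may be replaced by x_1.  */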
1964 avail_exprs_stack.safe_push (NULL);
1965 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1966 eliminate_redundant_computations (&gsi);
1967 remove_local_expressions_from_table ();
1969 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1970 optimize_stmt (bb, gsi);
1972 /* Now prepare to process dominated blocks. */
1973 record_edge_info (bb);
1974 cprop_into_successor_phis (bb);
1977 /* We have finished processing the dominator children of BB, perform
1978 any finalization actions in preparation for leaving this node in
1979 the dominator tree. */
1981 void
1982 dom_opt_dom_walker::after_dom_children (basic_block bb)
1984 gimple last;
1986 /* If we have an outgoing edge to a block with multiple incoming and
1987 outgoing edges, then we may be able to thread the edge, i.e., we
1988 may be able to statically determine which of the outgoing edges
1989 will be traversed when the incoming edge from BB is traversed. */
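/* A hypothetical sketch: if this block established x_1 = 0 and the
   single successor ends in "if (x_1 == 0)", we can statically
   determine that its true edge will be taken and request that the
   edge be threaded.  */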
1990 if (single_succ_p (bb)
1991 && (single_succ_edge (bb)->flags & EDGE_ABNORMAL) == 0
1992 && potentially_threadable_block (single_succ (bb)))
1994 thread_across_edge (single_succ_edge (bb));
1996 else if ((last = last_stmt (bb))
1997 && gimple_code (last) == GIMPLE_COND
1998 && EDGE_COUNT (bb->succs) == 2
1999 && (EDGE_SUCC (bb, 0)->flags & EDGE_ABNORMAL) == 0
2000 && (EDGE_SUCC (bb, 1)->flags & EDGE_ABNORMAL) == 0)
2002 edge true_edge, false_edge;
2004 extract_true_false_edges_from_block (bb, &true_edge, &false_edge);
2006 /* Only try to thread the edge if it reaches a target block with
2007 more than one predecessor and more than one successor. */
2008 if (potentially_threadable_block (true_edge->dest))
2009 thread_across_edge (true_edge);
2011 /* Similarly for the ELSE arm. */
2012 if (potentially_threadable_block (false_edge->dest))
2013 thread_across_edge (false_edge);
2017 /* These remove expressions local to BB from the tables. */
2018 remove_local_expressions_from_table ();
2019 restore_vars_to_original_value ();
2022 /* Search for redundant computations in STMT. If any are found, then
2023 replace them with the variable holding the result of the computation.
2025 If safe, record this expression into the available expression hash
2026 table. */
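/* For instance (an illustrative sketch with hypothetical SSA names):

     a_1 = b_2 + c_3;
     ...
     d_4 = b_2 + c_3;

   The second computation is redundant; once the first expression is
   in the hash table, the second may be replaced by d_4 = a_1.  */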
2028 static void
2029 eliminate_redundant_computations (gimple_stmt_iterator* gsi)
2031 tree expr_type;
2032 tree cached_lhs;
2033 tree def;
2034 bool insert = true;
2035 bool assigns_var_p = false;
2037 gimple stmt = gsi_stmt (*gsi);
2039 if (gimple_code (stmt) == GIMPLE_PHI)
2040 def = gimple_phi_result (stmt);
2041 else
2042 def = gimple_get_lhs (stmt);
2044 /* Certain expressions on the RHS can be optimized away, but cannot
2045 themselves be entered into the hash tables. */
2046 if (! def
2047 || TREE_CODE (def) != SSA_NAME
2048 || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (def)
2049 || gimple_vdef (stmt)
2050 /* Do not record equivalences for increments of ivs. This would create
2051 overlapping live ranges for a very questionable gain. */
2052 || simple_iv_increment_p (stmt))
2053 insert = false;
2055 /* Check if the expression has been computed before. */
2056 cached_lhs = lookup_avail_expr (stmt, insert);
2058 opt_stats.num_exprs_considered++;
2060 /* Get the type of the expression we are trying to optimize. */
2061 if (is_gimple_assign (stmt))
2063 expr_type = TREE_TYPE (gimple_assign_lhs (stmt));
2064 assigns_var_p = true;
2066 else if (gimple_code (stmt) == GIMPLE_COND)
2067 expr_type = boolean_type_node;
2068 else if (is_gimple_call (stmt))
2070 gcc_assert (gimple_call_lhs (stmt));
2071 expr_type = TREE_TYPE (gimple_call_lhs (stmt));
2072 assigns_var_p = true;
2074 else if (gimple_code (stmt) == GIMPLE_SWITCH)
2075 expr_type = TREE_TYPE (gimple_switch_index (stmt));
2076 else if (gimple_code (stmt) == GIMPLE_PHI)
2077 /* We can't propagate into a phi, so the logic below doesn't apply.
2078 Instead record an equivalence between the cached LHS and the
2079 PHI result of this statement, provided they are in the same block.
2080 This should be sufficient to kill the redundant phi. */
2082 if (def && cached_lhs)
2083 record_const_or_copy (def, cached_lhs);
2084 return;
2086 else
2087 gcc_unreachable ();
2089 if (!cached_lhs)
2090 return;
2092 /* It is safe to ignore types here since we have already done
2093 type checking in the hashing and equality routines. In fact
2094 type checking here merely gets in the way of constant
2095 propagation. Also, make sure that it is safe to propagate
2096 CACHED_LHS into the expression in STMT. */
2097 if ((TREE_CODE (cached_lhs) != SSA_NAME
2098 && (assigns_var_p
2099 || useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs))))
2100 || may_propagate_copy_into_stmt (stmt, cached_lhs))
2102 gcc_checking_assert (TREE_CODE (cached_lhs) == SSA_NAME
2103 || is_gimple_min_invariant (cached_lhs));
2105 if (dump_file && (dump_flags & TDF_DETAILS))
2107 fprintf (dump_file, " Replaced redundant expr '");
2108 print_gimple_expr (dump_file, stmt, 0, dump_flags);
2109 fprintf (dump_file, "' with '");
2110 print_generic_expr (dump_file, cached_lhs, dump_flags);
2111 fprintf (dump_file, "'\n");
2114 opt_stats.num_re++;
2116 if (assigns_var_p
2117 && !useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs)))
2118 cached_lhs = fold_convert (expr_type, cached_lhs);
2120 propagate_tree_value_into_stmt (gsi, cached_lhs);
2122 /* Since it is always necessary to mark the result as modified,
2123 perhaps we should move this into propagate_tree_value_into_stmt
2124 itself. */
2125 gimple_set_modified (gsi_stmt (*gsi), true);
2129 /* STMT, a GIMPLE_ASSIGN, may create certain equivalences, in either
2130 the available expressions table or the const_and_copies table.
2131 Detect and record those equivalences. */
2132 /* We handle only very simple copy equivalences here. The heavy
2133 lifting is done by eliminate_redundant_computations. */
2135 static void
2136 record_equivalences_from_stmt (gimple stmt, int may_optimize_p)
2138 tree lhs;
2139 enum tree_code lhs_code;
2141 gcc_assert (is_gimple_assign (stmt));
2143 lhs = gimple_assign_lhs (stmt);
2144 lhs_code = TREE_CODE (lhs);
2146 if (lhs_code == SSA_NAME
2147 && gimple_assign_single_p (stmt))
2149 tree rhs = gimple_assign_rhs1 (stmt);
2151 /* If the RHS of the assignment is a constant or another variable that
2152 may be propagated, register it in the CONST_AND_COPIES table. We
2153 do not need to record unwind data for this, since this is a true
2154 assignment and not an equivalence inferred from a comparison. All
2155 uses of this ssa name are dominated by this assignment, so unwinding
2156 just costs time and space. */
2157 if (may_optimize_p
2158 && (TREE_CODE (rhs) == SSA_NAME
2159 || is_gimple_min_invariant (rhs)))
2161 if (dump_file && (dump_flags & TDF_DETAILS))
2163 fprintf (dump_file, "==== ASGN ");
2164 print_generic_expr (dump_file, lhs, 0);
2165 fprintf (dump_file, " = ");
2166 print_generic_expr (dump_file, rhs, 0);
2167 fprintf (dump_file, "\n");
2170 set_ssa_name_value (lhs, rhs);
2174 /* A memory store, even an aliased store, creates a useful
2175 equivalence. By exchanging the LHS and RHS, creating suitable
2176 vops and recording the result in the available expression table,
2177 we may be able to expose more redundant loads. */
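/* A sketch of the idea, with hypothetical names:

     *p_1 = x_2;      the store
     ...
     y_3 = *p_1;      a later load

   Recording the artificial statement "x_2 = *p_1" in the table
   allows the later load to be found and rewritten as y_3 = x_2.  */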
2178 if (!gimple_has_volatile_ops (stmt)
2179 && gimple_references_memory_p (stmt)
2180 && gimple_assign_single_p (stmt)
2181 && (TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME
2182 || is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
2183 && !is_gimple_reg (lhs))
2185 tree rhs = gimple_assign_rhs1 (stmt);
2186 gimple new_stmt;
2188 /* Build a new statement with the RHS and LHS exchanged. */
2189 if (TREE_CODE (rhs) == SSA_NAME)
2191 /* NOTE tuples. The call to gimple_build_assign below replaced
2192 a call to build_gimple_modify_stmt, which did not set the
2193 SSA_NAME_DEF_STMT on the LHS of the assignment. Doing so
2194 may cause an SSA validation failure, as the LHS may be a
2195 default-initialized name and should have no definition. I'm
2196 a bit dubious of this, as the artificial statement that we
2197 generate here may in fact be ill-formed, but it is simply
2198 used as an internal device in this pass, and never becomes
2199 part of the CFG. */
2200 gimple defstmt = SSA_NAME_DEF_STMT (rhs);
2201 new_stmt = gimple_build_assign (rhs, lhs);
2202 SSA_NAME_DEF_STMT (rhs) = defstmt;
2204 else
2205 new_stmt = gimple_build_assign (rhs, lhs);
2207 gimple_set_vuse (new_stmt, gimple_vdef (stmt));
2209 /* Finally enter the statement into the available expression
2210 table. */
2211 lookup_avail_expr (new_stmt, true);
2215 /* Replace *OP_P in STMT with any known equivalent value for *OP_P from
2216 CONST_AND_COPIES. */
2218 static void
2219 cprop_operand (gimple stmt, use_operand_p op_p)
2221 tree val;
2222 tree op = USE_FROM_PTR (op_p);
2224 /* If the operand has a known constant value or it is known to be a
2225 copy of some other variable, use the value or copy stored in
2226 CONST_AND_COPIES. */
2227 val = SSA_NAME_VALUE (op);
2228 if (val && val != op)
2230 /* Do not replace hard register operands in asm statements. */
2231 if (gimple_code (stmt) == GIMPLE_ASM
2232 && !may_propagate_copy_into_asm (op))
2233 return;
2235 /* Certain operands are not allowed to be copy propagated due
2236 to their interaction with exception handling and some GCC
2237 extensions. */
2238 if (!may_propagate_copy (op, val))
2239 return;
2241 /* Do not propagate addresses that point to volatiles into memory
2242 stmts without volatile operands. */
2243 if (POINTER_TYPE_P (TREE_TYPE (val))
2244 && TYPE_VOLATILE (TREE_TYPE (TREE_TYPE (val)))
2245 && gimple_has_mem_ops (stmt)
2246 && !gimple_has_volatile_ops (stmt))
2247 return;
2249 /* Do not propagate copies if the propagated value is at a deeper loop
2250 depth than the propagatee. Otherwise, this may move loop variant
2251 variables outside of their loops and prevent coalescing
2252 opportunities. If the value was loop invariant, it will be hoisted
2253 by LICM and exposed for copy propagation. */
2254 if (loop_depth_of_name (val) > loop_depth_of_name (op))
2255 return;
2257 /* Do not propagate copies into simple IV increment statements.
2258 See PR23821 for how this can disturb IV analysis. */
2259 if (TREE_CODE (val) != INTEGER_CST
2260 && simple_iv_increment_p (stmt))
2261 return;
2263 /* Dump details. */
2264 if (dump_file && (dump_flags & TDF_DETAILS))
2266 fprintf (dump_file, " Replaced '");
2267 print_generic_expr (dump_file, op, dump_flags);
2268 fprintf (dump_file, "' with %s '",
2269 (TREE_CODE (val) != SSA_NAME ? "constant" : "variable"));
2270 print_generic_expr (dump_file, val, dump_flags);
2271 fprintf (dump_file, "'\n");
2274 if (TREE_CODE (val) != SSA_NAME)
2275 opt_stats.num_const_prop++;
2276 else
2277 opt_stats.num_copy_prop++;
2279 propagate_value (op_p, val);
2281 /* And note that we modified this statement. This is now
2282 safe, even if we changed virtual operands since we will
2283 rescan the statement and rewrite its operands again. */
2284 gimple_set_modified (stmt, true);
2288 /* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
2289 known value for that SSA_NAME (or NULL if no value is known).
2291 Propagate values from CONST_AND_COPIES into the uses, vuses and
2292 vdef_ops of STMT. */
2294 static void
2295 cprop_into_stmt (gimple stmt)
2297 use_operand_p op_p;
2298 ssa_op_iter iter;
2300 FOR_EACH_SSA_USE_OPERAND (op_p, stmt, iter, SSA_OP_USE)
2301 cprop_operand (stmt, op_p);
2304 /* Optimize the statement pointed to by iterator SI.
2306 We try to perform some simplistic global redundancy elimination and
2307 constant propagation:
2309 1- To detect global redundancy, we keep track of expressions that have
2310 been computed in this block and its dominators. If we find that the
2311 same expression is computed more than once, we eliminate repeated
2312 computations by using the target of the first one.
2314 2- Constant values and copy assignments. This is used to do very
2315 simplistic constant and copy propagation. When a constant or copy
2316 assignment is found, we map the value on the RHS of the assignment to
2317 the variable in the LHS in the CONST_AND_COPIES table. */
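/* For example (an illustrative sketch with hypothetical SSA names):

     x_1 = 4;         (2) records x_1 -> 4 in CONST_AND_COPIES
     y_2 = x_1 + 1;   cprop yields y_2 = 4 + 1, folded to y_2 = 5

   while (1) replaces the recomputation of an expression already in
   the hash table with the SSA name that holds its value.  */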
2319 static void
2320 optimize_stmt (basic_block bb, gimple_stmt_iterator si)
2322 gimple stmt, old_stmt;
2323 bool may_optimize_p;
2324 bool modified_p = false;
2326 old_stmt = stmt = gsi_stmt (si);
2328 if (dump_file && (dump_flags & TDF_DETAILS))
2330 fprintf (dump_file, "Optimizing statement ");
2331 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2334 if (gimple_code (stmt) == GIMPLE_COND)
2335 canonicalize_comparison (stmt);
2337 update_stmt_if_modified (stmt);
2338 opt_stats.num_stmts++;
2340 /* Const/copy propagate into USES, VUSES and the RHS of VDEFs. */
2341 cprop_into_stmt (stmt);
2343 /* If the statement has been modified with constant replacements,
2344 fold its RHS before checking for redundant computations. */
2345 if (gimple_modified_p (stmt))
2347 tree rhs = NULL;
2349 /* Try to fold the statement making sure that STMT is kept
2350 up to date. */
2351 if (fold_stmt (&si))
2353 stmt = gsi_stmt (si);
2354 gimple_set_modified (stmt, true);
2356 if (dump_file && (dump_flags & TDF_DETAILS))
2358 fprintf (dump_file, " Folded to: ");
2359 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2363 /* We only need to consider cases that can yield a gimple operand. */
2364 if (gimple_assign_single_p (stmt))
2365 rhs = gimple_assign_rhs1 (stmt);
2366 else if (gimple_code (stmt) == GIMPLE_GOTO)
2367 rhs = gimple_goto_dest (stmt);
2368 else if (gimple_code (stmt) == GIMPLE_SWITCH)
2369 /* This should never be an ADDR_EXPR. */
2370 rhs = gimple_switch_index (stmt);
2372 if (rhs && TREE_CODE (rhs) == ADDR_EXPR)
2373 recompute_tree_invariant_for_addr_expr (rhs);
2375 /* Indicate that maybe_clean_or_replace_eh_stmt needs to be called,
2376 even if fold_stmt updated the stmt already and thus cleared
2377 gimple_modified_p flag on it. */
2378 modified_p = true;
2381 /* Check for redundant computations. Do this optimization only for
2382 side-effect-free assignments, calls with an LHS, conditionals and switches. */
2383 may_optimize_p = (!gimple_has_side_effects (stmt)
2384 && (is_gimple_assign (stmt)
2385 || (is_gimple_call (stmt)
2386 && gimple_call_lhs (stmt) != NULL_TREE)
2387 || gimple_code (stmt) == GIMPLE_COND
2388 || gimple_code (stmt) == GIMPLE_SWITCH));
2390 if (may_optimize_p)
2392 if (gimple_code (stmt) == GIMPLE_CALL)
2394 /* Resolve __builtin_constant_p. If it hasn't been
2395 folded to integer_one_node by now, it's fairly
2396 certain that the value simply isn't constant. */
2397 tree callee = gimple_call_fndecl (stmt);
2398 if (callee
2399 && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL
2400 && DECL_FUNCTION_CODE (callee) == BUILT_IN_CONSTANT_P)
2402 propagate_tree_value_into_stmt (&si, integer_zero_node);
2403 stmt = gsi_stmt (si);
2407 update_stmt_if_modified (stmt);
2408 eliminate_redundant_computations (&si);
2409 stmt = gsi_stmt (si);
2411 /* Perform simple redundant store elimination. */
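/* A sketch, with hypothetical names:

     x_1 = *p_2;
     ...
     *p_2 = x_1;

   The store writes back the value just loaded from the same
   location, so it can be removed.  */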
2412 if (gimple_assign_single_p (stmt)
2413 && TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2415 tree lhs = gimple_assign_lhs (stmt);
2416 tree rhs = gimple_assign_rhs1 (stmt);
2417 tree cached_lhs;
2418 gimple new_stmt;
2419 if (TREE_CODE (rhs) == SSA_NAME)
2421 tree tem = SSA_NAME_VALUE (rhs);
2422 if (tem)
2423 rhs = tem;
2425 /* Build a new statement with the RHS and LHS exchanged. */
2426 if (TREE_CODE (rhs) == SSA_NAME)
2428 gimple defstmt = SSA_NAME_DEF_STMT (rhs);
2429 new_stmt = gimple_build_assign (rhs, lhs);
2430 SSA_NAME_DEF_STMT (rhs) = defstmt;
2432 else
2433 new_stmt = gimple_build_assign (rhs, lhs);
2434 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
2435 cached_lhs = lookup_avail_expr (new_stmt, false);
2436 if (cached_lhs
2437 && rhs == cached_lhs)
2439 basic_block bb = gimple_bb (stmt);
2440 unlink_stmt_vdef (stmt);
2441 if (gsi_remove (&si, true))
2443 bitmap_set_bit (need_eh_cleanup, bb->index);
2444 if (dump_file && (dump_flags & TDF_DETAILS))
2445 fprintf (dump_file, " Flagged to clear EH edges.\n");
2447 release_defs (stmt);
2448 return;
2453 /* Record any additional equivalences created by this statement. */
2454 if (is_gimple_assign (stmt))
2455 record_equivalences_from_stmt (stmt, may_optimize_p);
2457 /* If STMT is a COND_EXPR and it was modified, then we may know
2458 where it goes. If that is the case, then mark the CFG as altered.
2460 This will cause us to later call remove_unreachable_blocks and
2461 cleanup_tree_cfg when it is safe to do so. It is not safe to
2462 clean things up here since removal of edges and such can trigger
2463 the removal of PHI nodes, which in turn can release SSA_NAMEs to
2464 the manager.
2466 That's all fine and good, except that once SSA_NAMEs are released
2467 to the manager, we must not call create_ssa_name until all references
2468 to released SSA_NAMEs have been eliminated.
2470 All references to the deleted SSA_NAMEs cannot be eliminated until
2471 we remove unreachable blocks.
2473 We cannot remove unreachable blocks until after we have completed
2474 any queued jump threading.
2476 We cannot complete any queued jump threads until we have taken
2477 appropriate variables out of SSA form. Taking variables out of
2478 SSA form can call create_ssa_name and thus we lose.
2480 Ultimately I suspect we're going to need to change the interface
2481 into the SSA_NAME manager. */
2482 if (gimple_modified_p (stmt) || modified_p)
2484 tree val = NULL;
2486 update_stmt_if_modified (stmt);
2488 if (gimple_code (stmt) == GIMPLE_COND)
2489 val = fold_binary_loc (gimple_location (stmt),
2490 gimple_cond_code (stmt), boolean_type_node,
2491 gimple_cond_lhs (stmt), gimple_cond_rhs (stmt));
2492 else if (gimple_code (stmt) == GIMPLE_SWITCH)
2493 val = gimple_switch_index (stmt);
2495 if (val && TREE_CODE (val) == INTEGER_CST && find_taken_edge (bb, val))
2496 cfg_altered = true;
2498 /* If we simplified a statement in such a way as to be shown that it
2499 cannot trap, update the eh information and the cfg to match. */
2500 if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt))
2502 bitmap_set_bit (need_eh_cleanup, bb->index);
2503 if (dump_file && (dump_flags & TDF_DETAILS))
2504 fprintf (dump_file, " Flagged to clear EH edges.\n");
2509 /* Search for an existing instance of STMT in the AVAIL_EXPRS table.
2510 If found, return its LHS. Otherwise insert STMT in the table and
2511 return NULL_TREE.
2513 Also, when an expression is first inserted in the table, it is
2514 added to AVAIL_EXPRS_STACK, so that it can be removed when
2515 we finish processing this block and its children. */
2517 static tree
2518 lookup_avail_expr (gimple stmt, bool insert)
2520 expr_hash_elt **slot;
2521 tree lhs;
2522 tree temp;
2523 struct expr_hash_elt element;
2525 /* Get LHS of phi, assignment, or call; else NULL_TREE. */
2526 if (gimple_code (stmt) == GIMPLE_PHI)
2527 lhs = gimple_phi_result (stmt);
2528 else
2529 lhs = gimple_get_lhs (stmt);
2531 initialize_hash_element (stmt, lhs, &element);
2533 if (dump_file && (dump_flags & TDF_DETAILS))
2535 fprintf (dump_file, "LKUP ");
2536 print_expr_hash_elt (dump_file, &element);
2539 /* Don't bother remembering constant assignments and copy operations.
2540 Constants and copy operations are handled by the constant/copy propagator
2541 in optimize_stmt. */
2542 if (element.expr.kind == EXPR_SINGLE
2543 && (TREE_CODE (element.expr.ops.single.rhs) == SSA_NAME
2544 || is_gimple_min_invariant (element.expr.ops.single.rhs)))
2545 return NULL_TREE;
2547 /* Finally try to find the expression in the main expression hash table. */
2548 slot = avail_exprs.find_slot_with_hash (&element, element.hash,
2549 (insert ? INSERT : NO_INSERT));
2550 if (slot == NULL)
2552 free_expr_hash_elt_contents (&element);
2553 return NULL_TREE;
2555 else if (*slot == NULL)
2557 struct expr_hash_elt *element2 = XNEW (struct expr_hash_elt);
2558 *element2 = element;
2559 element2->stamp = element2;
2560 *slot = element2;
2562 if (dump_file && (dump_flags & TDF_DETAILS))
2564 fprintf (dump_file, "2>>> ");
2565 print_expr_hash_elt (dump_file, element2);
2568 avail_exprs_stack.safe_push (element2);
2569 return NULL_TREE;
2571 else
2572 free_expr_hash_elt_contents (&element);
2574 /* Extract the LHS of the assignment so that it can be used as the current
2575 definition of another variable. */
2576 lhs = ((struct expr_hash_elt *)*slot)->lhs;
2578 /* See if the LHS appears in the CONST_AND_COPIES table. If it does, then
2579 use the value from the const_and_copies table. */
2580 if (TREE_CODE (lhs) == SSA_NAME)
2582 temp = SSA_NAME_VALUE (lhs);
2583 if (temp)
2584 lhs = temp;
2587 if (dump_file && (dump_flags & TDF_DETAILS))
2589 fprintf (dump_file, "FIND: ");
2590 print_generic_expr (dump_file, lhs, 0);
2591 fprintf (dump_file, "\n");
2594 return lhs;
2597 /* Hashing and equality functions for AVAIL_EXPRS. We compute a value number
2598 for expressions using the code of the expression and the SSA numbers of
2599 its operands. */
2601 static hashval_t
2602 avail_expr_hash (const void *p)
2604 gimple stmt = ((const struct expr_hash_elt *)p)->stmt;
2605 const struct hashable_expr *expr = &((const struct expr_hash_elt *)p)->expr;
2606 tree vuse;
2607 hashval_t val = 0;
2609 val = iterative_hash_hashable_expr (expr, val);
2611 /* If the hash table entry is not associated with a statement, then we
2612 can just hash the expression and not worry about virtual operands
2613 and such. */
2614 if (!stmt)
2615 return val;
2617 /* Add the SSA version numbers of the vuse operand. This is important
2618 because compound variables like arrays are not renamed in the
2619 operands. Rather, the rename is done on the virtual variable
2620 representing all the elements of the array. */
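/* An illustrative sketch: two loads of a[i_1] hash equal only when
   they see the same virtual use, say .MEM_7; an intervening store
   creates a new virtual name, so the loads hash differently and are
   not treated as redundant.  */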
2621 if ((vuse = gimple_vuse (stmt)))
2622 val = iterative_hash_expr (vuse, val);
2624 return val;
2627 /* PHI-ONLY copy and constant propagation. This pass is meant to clean
2628 up degenerate PHIs created by or exposed by jump threading. */
2630 /* Given a statement STMT, which is either a PHI node or an assignment,
2631 remove it from the IL. */
2633 static void
2634 remove_stmt_or_phi (gimple stmt)
2636 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
2638 if (gimple_code (stmt) == GIMPLE_PHI)
2639 remove_phi_node (&gsi, true);
2640 else
2642 gsi_remove (&gsi, true);
2643 release_defs (stmt);
2647 /* Given a statement STMT, which is either a PHI node or an assignment,
2648 return the "rhs" of the node. In the case of a non-degenerate
2649 PHI, NULL is returned. */
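/* For instance (a sketch with hypothetical SSA names): for the
   degenerate PHI

     x_1 = PHI <y_2 (e1), y_2 (e2)>

   the result is y_2, while for x_1 = PHI <y_2 (e1), z_3 (e2)> the
   result is NULL.  */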
2651 static tree
2652 get_rhs_or_phi_arg (gimple stmt)
2654 if (gimple_code (stmt) == GIMPLE_PHI)
2655 return degenerate_phi_result (stmt);
2656 else if (gimple_assign_single_p (stmt))
2657 return gimple_assign_rhs1 (stmt);
2658 else
2659 gcc_unreachable ();
2663 /* Given a statement STMT, which is either a PHI node or an assignment,
2664 return the "lhs" of the node. */
2666 static tree
2667 get_lhs_or_phi_result (gimple stmt)
2669 if (gimple_code (stmt) == GIMPLE_PHI)
2670 return gimple_phi_result (stmt);
2671 else if (is_gimple_assign (stmt))
2672 return gimple_assign_lhs (stmt);
2673 else
2674 gcc_unreachable ();
2677 /* Propagate RHS into all uses of LHS (when possible).
2679 RHS and LHS are derived from STMT, which is passed in solely so
2680 that we can remove it if propagation is successful.
2682 When propagating into a PHI node or into a statement which turns
2683 into a trivial copy or constant initialization, set the
2684 appropriate bit in INTERESTING_NAMEs so that we will visit those
2685 nodes as well in an effort to pick up secondary optimization
2686 opportunities. */
2688 static void
2689 propagate_rhs_into_lhs (gimple stmt, tree lhs, tree rhs, bitmap interesting_names)
2691 /* First verify that propagation is valid and isn't going to move a
2692 loop variant variable outside its loop. */
2693 if (! SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs)
2694 && (TREE_CODE (rhs) != SSA_NAME
2695 || ! SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs))
2696 && may_propagate_copy (lhs, rhs)
2697 && loop_depth_of_name (lhs) >= loop_depth_of_name (rhs))
2699 use_operand_p use_p;
2700 imm_use_iterator iter;
2701 gimple use_stmt;
2702 bool all = true;
2704 /* Dump details. */
2705 if (dump_file && (dump_flags & TDF_DETAILS))
2707 fprintf (dump_file, " Replacing '");
2708 print_generic_expr (dump_file, lhs, dump_flags);
2709 fprintf (dump_file, "' with %s '",
2710 (TREE_CODE (rhs) != SSA_NAME ? "constant" : "variable"));
2711 print_generic_expr (dump_file, rhs, dump_flags);
2712 fprintf (dump_file, "'\n");
2715 /* Walk over every use of LHS and try to replace the use with RHS.
2716 At this point the only reason why such a propagation would not
2717 be successful would be if the use occurs in an ASM_EXPR. */
2718 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
2720 /* Leave debug stmts alone. If we succeed in propagating
2721 all non-debug uses, we'll drop the DEF, and propagation
2722 into debug stmts will occur then. */
2723 if (gimple_debug_bind_p (use_stmt))
2724 continue;
2726 /* It's not always safe to propagate into an ASM_EXPR. */
2727 if (gimple_code (use_stmt) == GIMPLE_ASM
2728 && ! may_propagate_copy_into_asm (lhs))
2730 all = false;
2731 continue;
2734 /* It's not ok to propagate into the definition stmt of RHS.
2735 <bb 9>:
2736 # prephitmp.12_36 = PHI <g_67.1_6(9)>
2737 g_67.1_6 = prephitmp.12_36;
2738 goto <bb 9>;
2739 While this is strictly all dead code, we do not want to
2740 deal with it here. */
2741 if (TREE_CODE (rhs) == SSA_NAME
2742 && SSA_NAME_DEF_STMT (rhs) == use_stmt)
2744 all = false;
2745 continue;
2748 /* Dump details. */
2749 if (dump_file && (dump_flags & TDF_DETAILS))
2751 fprintf (dump_file, " Original statement:");
2752 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2755 /* Propagate the RHS into this use of the LHS. */
2756 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
2757 propagate_value (use_p, rhs);
2759 /* Special cases to avoid useless calls into the folding
2760 routines, operand scanning, etc.
2762 Propagation into a PHI may cause the PHI to become
2763 a degenerate, so mark the PHI as interesting. No other
2764 actions are necessary. */
2765 if (gimple_code (use_stmt) == GIMPLE_PHI)
2767 tree result;
2769 /* Dump details. */
2770 if (dump_file && (dump_flags & TDF_DETAILS))
2772 fprintf (dump_file, " Updated statement:");
2773 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2776 result = get_lhs_or_phi_result (use_stmt);
2777 bitmap_set_bit (interesting_names, SSA_NAME_VERSION (result));
2778 continue;
2781 /* From this point onward we are propagating into a
2782 real statement. Folding may (or may not) be possible,
2783 we may expose new operands, expose dead EH edges,
2784 etc. */
2785 /* NOTE tuples. In the tuples world, fold_stmt_inplace
2786 cannot fold a call that simplifies to a constant,
2787 because the GIMPLE_CALL must be replaced by a
2788 GIMPLE_ASSIGN, and there is no way to effect such a
2789 transformation in-place. We might want to consider
2790 using the more general fold_stmt here. */
2792 gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
2793 fold_stmt_inplace (&gsi);
2796 /* Sometimes propagation can expose new operands to the
2797 renamer. */
2798 update_stmt (use_stmt);
2800 /* Dump details. */
2801 if (dump_file && (dump_flags & TDF_DETAILS))
2803 fprintf (dump_file, " Updated statement:");
2804 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2807 /* If we replaced a variable index with a constant, then
2808 we would need to update the invariant flag for ADDR_EXPRs. */
2809 if (gimple_assign_single_p (use_stmt)
2810 && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ADDR_EXPR)
2811 recompute_tree_invariant_for_addr_expr
2812 (gimple_assign_rhs1 (use_stmt));
2814 /* If we cleaned up EH information from the statement,
2815 mark its containing block as needing EH cleanups. */
2816 if (maybe_clean_or_replace_eh_stmt (use_stmt, use_stmt))
2818 bitmap_set_bit (need_eh_cleanup, gimple_bb (use_stmt)->index);
2819 if (dump_file && (dump_flags & TDF_DETAILS))
2820 fprintf (dump_file, " Flagged to clear EH edges.\n");
2823 /* Propagation may expose new trivial copy/constant propagation
2824 opportunities. */
2825 if (gimple_assign_single_p (use_stmt)
2826 && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
2827 && (TREE_CODE (gimple_assign_rhs1 (use_stmt)) == SSA_NAME
2828 || is_gimple_min_invariant (gimple_assign_rhs1 (use_stmt))))
2830 tree result = get_lhs_or_phi_result (use_stmt);
2831 bitmap_set_bit (interesting_names, SSA_NAME_VERSION (result));
2834 /* Propagation into these nodes may make certain edges in
2835 the CFG unexecutable. We want to identify them, as PHI nodes
2836 at the destinations of those unexecutable edges may become
2837 degenerate. */
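/* A hypothetical sketch: after propagating 0 into

     if (x_1 != 0) goto <bb A>; else goto <bb B>;

   only the edge to <bb B> remains executable. The edge to <bb A>
   is removed below, and PHI nodes in <bb A> lose an argument and
   may become degenerate.  */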
2838 else if (gimple_code (use_stmt) == GIMPLE_COND
2839 || gimple_code (use_stmt) == GIMPLE_SWITCH
2840 || gimple_code (use_stmt) == GIMPLE_GOTO)
2842 tree val;
2844 if (gimple_code (use_stmt) == GIMPLE_COND)
2845 val = fold_binary_loc (gimple_location (use_stmt),
2846 gimple_cond_code (use_stmt),
2847 boolean_type_node,
2848 gimple_cond_lhs (use_stmt),
2849 gimple_cond_rhs (use_stmt));
2850 else if (gimple_code (use_stmt) == GIMPLE_SWITCH)
2851 val = gimple_switch_index (use_stmt);
2852 else
2853 val = gimple_goto_dest (use_stmt);
2855 if (val && is_gimple_min_invariant (val))
2857 basic_block bb = gimple_bb (use_stmt);
2858 edge te = find_taken_edge (bb, val);
2859 edge_iterator ei;
2860 edge e;
2861 gimple_stmt_iterator gsi, psi;
2863 /* Remove all outgoing edges except TE. */
2864 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei));)
2866 if (e != te)
2868 /* Mark all the PHI nodes at the destination of
2869 the unexecutable edge as interesting. */
2870 for (psi = gsi_start_phis (e->dest);
2871 !gsi_end_p (psi);
2872 gsi_next (&psi))
2874 gimple phi = gsi_stmt (psi);
2876 tree result = gimple_phi_result (phi);
2877 int version = SSA_NAME_VERSION (result);
2879 bitmap_set_bit (interesting_names, version);
2882 te->probability += e->probability;
2884 te->count += e->count;
2885 remove_edge (e);
2886 cfg_altered = true;
2888 else
2889 ei_next (&ei);
2892 gsi = gsi_last_bb (gimple_bb (use_stmt));
2893 gsi_remove (&gsi, true);
2895 /* And fixup the flags on the single remaining edge. */
2896 te->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
2897 te->flags &= ~EDGE_ABNORMAL;
2898 te->flags |= EDGE_FALLTHRU;
2899 if (te->probability > REG_BR_PROB_BASE)
2900 te->probability = REG_BR_PROB_BASE;
2905 /* Ensure there is nothing else to do. */
2906 gcc_assert (!all || has_zero_uses (lhs));
2908 /* If we were able to propagate away all uses of LHS, then
2909 we can remove STMT. */
2910 if (all)
2911 remove_stmt_or_phi (stmt);
2915 /* STMT is either a PHI node (potentially a degenerate PHI node) or
2916 a statement that is a trivial copy or constant initialization.
2918 Attempt to eliminate STMT by propagating its RHS into all uses of
2919 its LHS. This may in turn set new bits in INTERESTING_NAMES
2920 for nodes we want to revisit later.
2922 All exit paths should clear INTERESTING_NAMES for the result
2923 of STMT. */
2925 static void
2926 eliminate_const_or_copy (gimple stmt, bitmap interesting_names)
2928 tree lhs = get_lhs_or_phi_result (stmt);
2929 tree rhs;
2930 int version = SSA_NAME_VERSION (lhs);
2932 /* If the LHS of this statement or PHI has no uses, then we can
2933 just eliminate it. This can occur if, for example, the PHI
2934 was created by block duplication due to threading and its only
2935 use was in the conditional at the end of the block which was
2936 deleted. */
2937 if (has_zero_uses (lhs))
2939 bitmap_clear_bit (interesting_names, version);
2940 remove_stmt_or_phi (stmt);
2941 return;
2944 /* Get the RHS of the assignment or PHI node if the PHI is a
2945 degenerate. */
2946 rhs = get_rhs_or_phi_arg (stmt);
2947 if (!rhs)
2949 bitmap_clear_bit (interesting_names, version);
2950 return;
2953 if (!virtual_operand_p (lhs))
2954 propagate_rhs_into_lhs (stmt, lhs, rhs, interesting_names);
2955 else
2957 gimple use_stmt;
2958 imm_use_iterator iter;
2959 use_operand_p use_p;
2960 /* For virtual operands we have to propagate into all uses, as
2961 otherwise we would create overlapping live ranges. */
2962 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
2963 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
2964 SET_USE (use_p, rhs);
2965 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
2966 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs) = 1;
2967 remove_stmt_or_phi (stmt);
2970 /* Note that STMT may well have been deleted by now, so do
2971 not access it; instead use the saved version # to clear
2972 STMT's entry in the worklist. */
2973 bitmap_clear_bit (interesting_names, version);
2976 /* The first phase in degenerate PHI elimination.
2978 Eliminate the degenerate PHIs in BB, then recurse on the
2979 dominator children of BB. */
2981 static void
2982 eliminate_degenerate_phis_1 (basic_block bb, bitmap interesting_names)
2984 gimple_stmt_iterator gsi;
2985 basic_block son;
2987 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2989 gimple phi = gsi_stmt (gsi);
2991 eliminate_const_or_copy (phi, interesting_names);
2994 /* Recurse into the dominator children of BB. */
2995 for (son = first_dom_son (CDI_DOMINATORS, bb);
2996 son;
2997 son = next_dom_son (CDI_DOMINATORS, son))
2998 eliminate_degenerate_phis_1 (son, interesting_names);
3002 /* A very simple pass to eliminate degenerate PHI nodes from the
3003 IL. This is meant to be fast enough to be able to be run several
3004 times in the optimization pipeline.
3006 Certain optimizations, particularly those which duplicate blocks
3007 or remove edges from the CFG can create or expose PHIs which are
3008 trivial copies or constant initializations.
3010 While we could pick up these optimizations in DOM or with the
3011 combination of copy-prop and CCP, those solutions are far too
3012 heavy-weight for our needs.
3014 This implementation has two phases so that we can efficiently
3015 eliminate the first order degenerate PHIs and second order
3016 degenerate PHIs.
3018 The first phase performs a dominator walk to identify and eliminate
3019 the vast majority of the degenerate PHIs. When a degenerate PHI
3020 is identified and eliminated any affected statements or PHIs
3021 are put on a worklist.
3023 The second phase eliminates degenerate PHIs and trivial copies
3024 or constant initializations using the worklist. This is how we
3025 pick up the secondary optimization opportunities with minimal
3026 cost. */
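/* An illustrative sketch of the two orders, with hypothetical names:

     x_1 = PHI <y_2 (e1), y_2 (e2)>   first order: all args equal
     z_3 = PHI <x_1 (e3), y_2 (e4)>   becomes degenerate once x_1
                                      is replaced by y_2

   The first phase eliminates x_1 and marks z_3 as interesting; the
   worklist phase then eliminates z_3.  */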
3028 namespace {
3030 const pass_data pass_data_phi_only_cprop =
3032 GIMPLE_PASS, /* type */
3033 "phicprop", /* name */
3034 OPTGROUP_NONE, /* optinfo_flags */
3035 true, /* has_execute */
3036 TV_TREE_PHI_CPROP, /* tv_id */
3037 ( PROP_cfg | PROP_ssa ), /* properties_required */
3038 0, /* properties_provided */
3039 0, /* properties_destroyed */
3040 0, /* todo_flags_start */
3041 ( TODO_cleanup_cfg | TODO_verify_ssa
3042 | TODO_verify_stmts
3043 | TODO_update_ssa ), /* todo_flags_finish */
3046 class pass_phi_only_cprop : public gimple_opt_pass
3048 public:
3049 pass_phi_only_cprop (gcc::context *ctxt)
3050 : gimple_opt_pass (pass_data_phi_only_cprop, ctxt)
3053 /* opt_pass methods: */
3054 opt_pass * clone () { return new pass_phi_only_cprop (m_ctxt); }
3055 virtual bool gate (function *) { return flag_tree_dom != 0; }
3056 virtual unsigned int execute (function *);
3058 }; // class pass_phi_only_cprop
3060 unsigned int
3061 pass_phi_only_cprop::execute (function *fun)
3063 bitmap interesting_names;
3064 bitmap interesting_names1;
3066 /* Bitmap of blocks which need EH information updated. We cannot
3067 update it on-the-fly as doing so invalidates the dominator tree. */
3068 need_eh_cleanup = BITMAP_ALLOC (NULL);
3070 /* INTERESTING_NAMES is effectively our worklist, indexed by
3071 SSA_NAME_VERSION.
3073 A set bit indicates that the statement or PHI node which
3074 defines the SSA_NAME should be (re)examined to determine if
3075 it has become a degenerate PHI or trivial const/copy propagation
3076 opportunity.
3078 Experiments have shown we generally get better compilation
3079 time behavior with bitmaps rather than sbitmaps. */
3080 interesting_names = BITMAP_ALLOC (NULL);
3081 interesting_names1 = BITMAP_ALLOC (NULL);
3083 calculate_dominance_info (CDI_DOMINATORS);
3084 cfg_altered = false;
3086 /* First phase. Eliminate degenerate PHIs via a dominator
3087 walk of the CFG.
3089 Experiments have indicated that we generally get better
3090 compile-time behavior by visiting blocks in the first
3091 phase in dominator order. Presumably this is because walking
3092 in dominator order leaves fewer PHIs for later examination
3093 by the worklist phase. */
3094 eliminate_degenerate_phis_1 (ENTRY_BLOCK_PTR_FOR_FN (fun),
3095 interesting_names);
3097 /* Second phase. Eliminate second order degenerate PHIs as well
3098 as trivial copies or constant initializations identified by
3099 the first phase or this phase. Basically we keep iterating
3100 until our set of INTERESTING_NAMEs is empty. */
3101 while (!bitmap_empty_p (interesting_names))
3103 unsigned int i;
3104 bitmap_iterator bi;
3106 /* EXECUTE_IF_SET_IN_BITMAP does not like its bitmap
3107 changed during the loop. Copy it to another bitmap and
3108 use that. */
3109 bitmap_copy (interesting_names1, interesting_names);
3111 EXECUTE_IF_SET_IN_BITMAP (interesting_names1, 0, i, bi)
3113 tree name = ssa_name (i);
3115 /* Ignore SSA_NAMEs that have been released because
3116 their defining statement was deleted (unreachable). */
3117 if (name)
3118 eliminate_const_or_copy (SSA_NAME_DEF_STMT (ssa_name (i)),
3119 interesting_names);
3123 if (cfg_altered)
3125 free_dominance_info (CDI_DOMINATORS);
3126 /* If we changed the CFG schedule loops for fixup by cfgcleanup. */
3127 if (current_loops)
3128 loops_state_set (LOOPS_NEED_FIXUP);
3131 /* Propagation of const and copies may make some EH edges dead. Purge
3132 such edges from the CFG as needed. */
3133 if (!bitmap_empty_p (need_eh_cleanup))
3135 gimple_purge_all_dead_eh_edges (need_eh_cleanup);
3136 BITMAP_FREE (need_eh_cleanup);
3139 BITMAP_FREE (interesting_names);
3140 BITMAP_FREE (interesting_names1);
3141 return 0;
3144 } // anon namespace
3146 gimple_opt_pass *
3147 make_pass_phi_only_cprop (gcc::context *ctxt)
3149 return new pass_phi_only_cprop (ctxt);