/* SSA Dominator optimizations for trees
   Copyright (C) 2001-2014 Free Software Foundation, Inc.
   Contributed by Diego Novillo <dnovillo@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "hash-table.h"
#include "tm.h"
#include "tree.h"
#include "stor-layout.h"
#include "flags.h"
#include "tm_p.h"
#include "basic-block.h"
#include "cfgloop.h"
#include "inchash.h"
#include "hashtab.h"
#include "hash-set.h"
#include "vec.h"
#include "machmode.h"
#include "hard-reg-set.h"
#include "input.h"
#include "function.h"
#include "gimple-pretty-print.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-fold.h"
#include "tree-eh.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-into-ssa.h"
#include "domwalk.h"
#include "tree-pass.h"
#include "tree-ssa-propagate.h"
#include "tree-ssa-threadupdate.h"
#include "langhooks.h"
#include "params.h"
#include "tree-ssa-threadedge.h"
#include "tree-ssa-dom.h"
#include "inchash.h"
/* This file implements optimizations on the dominator tree.  */

/* Representation of a "naked" right-hand-side expression, to be used
   in recording available expressions in the expression hash table.  */

enum expr_kind
{
  EXPR_SINGLE,
  EXPR_UNARY,
  EXPR_BINARY,
  EXPR_TERNARY,
  EXPR_CALL,
  EXPR_PHI
};

struct hashable_expr
{
  tree type;
  enum expr_kind kind;
  union {
    struct { tree rhs; } single;
    struct { enum tree_code op; tree opnd; } unary;
    struct { enum tree_code op; tree opnd0, opnd1; } binary;
    struct { enum tree_code op; tree opnd0, opnd1, opnd2; } ternary;
    struct { gimple fn_from; bool pure; size_t nargs; tree *args; } call;
    struct { size_t nargs; tree *args; } phi;
  } ops;
};
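
/* As a sketch of how statements map onto this structure (the kinds
   above are assigned by initialize_hash_element below): an assignment
   such as x_3 = a_1 + b_2 becomes an EXPR_BINARY with ops.binary.op
   == PLUS_EXPR and opnd0/opnd1 set to a_1 and b_2, while a call such
   as y_4 = foo (a_1) becomes an EXPR_CALL whose args vector holds the
   single argument a_1.  */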
/* Structure for recording known values of a conditional expression
   at the exits from its block.  */

typedef struct cond_equivalence_s
{
  struct hashable_expr cond;
  tree value;
} cond_equivalence;
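
/* For example, after the true edge of "if (a_1 < b_2)" is taken,
   record_conditions below pushes a cond_equivalence whose COND is the
   EXPR_BINARY a_1 < b_2 and whose VALUE is boolean_true_node, along
   with derived conditions such as a_1 <= b_2.  */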
/* Structure for recording edge equivalences as well as any pending
   edge redirections during the dominator optimizer.

   Computing and storing the edge equivalences instead of creating
   them on-demand can save significant amounts of time, particularly
   for pathological cases involving switch statements.

   These structures live for a single iteration of the dominator
   optimizer in the edge's AUX field.  At the end of an iteration we
   free each of these structures and update the AUX field to point
   to any requested redirection target (the code for updating the
   CFG and SSA graph for edge redirection expects redirection edge
   targets to be in the AUX field for each edge).  */

struct edge_info
{
  /* If this edge creates a simple equivalence, the LHS and RHS of
     the equivalence will be stored here.  */
  tree lhs;
  tree rhs;

  /* Traversing an edge may also indicate one or more particular conditions
     are true or false.  */
  vec<cond_equivalence> cond_equivalences;
};
/* Stack of available expressions in AVAIL_EXPRs.  Each block pushes any
   expressions it enters into the hash table along with a marker entry
   (null).  When we finish processing the block, we pop off entries and
   remove the expressions from the global hash table until we hit the
   marker.  */
typedef struct expr_hash_elt * expr_hash_elt_t;

static vec<expr_hash_elt_t> avail_exprs_stack;

/* Structure for entries in the expression hash table.  */

struct expr_hash_elt
{
  /* The value (lhs) of this expression.  */
  tree lhs;

  /* The expression (rhs) we want to record.  */
  struct hashable_expr expr;

  /* The stmt pointer if this element corresponds to a statement.  */
  gimple stmt;

  /* The hash value for RHS.  */
  hashval_t hash;

  /* A unique stamp, typically the address of the hash
     element itself, used in removing entries from the table.  */
  struct expr_hash_elt *stamp;
};
/* Hashtable helpers.  */

static bool hashable_expr_equal_p (const struct hashable_expr *,
                                   const struct hashable_expr *);
static void free_expr_hash_elt (void *);

struct expr_elt_hasher
{
  typedef expr_hash_elt *value_type;
  typedef expr_hash_elt *compare_type;
  typedef int store_values_directly;
  static inline hashval_t hash (const value_type &);
  static inline bool equal (const value_type &, const compare_type &);
  static inline void remove (value_type &);
};

inline hashval_t
expr_elt_hasher::hash (const value_type &p)
{
  return p->hash;
}

inline bool
expr_elt_hasher::equal (const value_type &p1, const compare_type &p2)
{
  gimple stmt1 = p1->stmt;
  const struct hashable_expr *expr1 = &p1->expr;
  const struct expr_hash_elt *stamp1 = p1->stamp;
  gimple stmt2 = p2->stmt;
  const struct hashable_expr *expr2 = &p2->expr;
  const struct expr_hash_elt *stamp2 = p2->stamp;

  /* This case should apply only when removing entries from the table.  */
  if (stamp1 == stamp2)
    return true;

  /* FIXME tuples:
     We add stmts to a hash table and then modify them.  To detect the case
     that we modify a stmt and then search for it, we assume that the hash
     is always modified by that change.
     We have to fully check why this doesn't happen on trunk or rewrite
     this in a more reliable (and easier to understand) way.  */
  if (((const struct expr_hash_elt *)p1)->hash
      != ((const struct expr_hash_elt *)p2)->hash)
    return false;

  /* In case of a collision, both RHS have to be identical and have the
     same VUSE operands.  */
  if (hashable_expr_equal_p (expr1, expr2)
      && types_compatible_p (expr1->type, expr2->type))
    {
      /* Note that STMT1 and/or STMT2 may be NULL.  */
      return ((stmt1 ? gimple_vuse (stmt1) : NULL_TREE)
              == (stmt2 ? gimple_vuse (stmt2) : NULL_TREE));
    }

  return false;
}

/* Delete an expr_hash_elt and reclaim its storage.  */

inline void
expr_elt_hasher::remove (value_type &element)
{
  free_expr_hash_elt (element);
}
/* Hash table with expressions made available during the renaming process.
   When an assignment of the form X_i = EXPR is found, the statement is
   stored in this table.  If the same expression EXPR is later found on the
   RHS of another statement, it is replaced with X_i (thus performing
   global redundancy elimination).  Similarly as we pass through conditionals
   we record the conditional itself as having either a true or false value
   in this table.  */
static hash_table<expr_elt_hasher> *avail_exprs;

/* Stack of dest,src pairs that need to be restored during finalization.

   A NULL entry is used to mark the end of pairs which need to be
   restored during finalization of this block.  */
static vec<tree> const_and_copies_stack;

/* Track whether or not we have changed the control flow graph.  */
static bool cfg_altered;

/* Bitmap of blocks that have had EH statements cleaned.  We should
   remove their dead edges eventually.  */
static bitmap need_eh_cleanup;

/* Statistics for dominator optimizations.  */
struct opt_stats_d
{
  long num_stmts;
  long num_exprs_considered;
  long num_re;
  long num_const_prop;
  long num_copy_prop;
};

static struct opt_stats_d opt_stats;
/* Local functions.  */
static void optimize_stmt (basic_block, gimple_stmt_iterator);
static tree lookup_avail_expr (gimple, bool);
static hashval_t avail_expr_hash (const void *);
static void htab_statistics (FILE *,
                             const hash_table<expr_elt_hasher> &);
static void record_cond (cond_equivalence *);
static void record_const_or_copy (tree, tree);
static void record_equality (tree, tree);
static void record_equivalences_from_phis (basic_block);
static void record_equivalences_from_incoming_edge (basic_block);
static void eliminate_redundant_computations (gimple_stmt_iterator *);
static void record_equivalences_from_stmt (gimple, int);
static void remove_local_expressions_from_table (void);
static void restore_vars_to_original_value (void);
static edge single_incoming_edge_ignoring_loop_edges (basic_block);
/* Given a statement STMT, initialize the hash table element pointed to
   by ELEMENT.  */

static void
initialize_hash_element (gimple stmt, tree lhs,
                         struct expr_hash_elt *element)
{
  enum gimple_code code = gimple_code (stmt);
  struct hashable_expr *expr = &element->expr;

  if (code == GIMPLE_ASSIGN)
    {
      enum tree_code subcode = gimple_assign_rhs_code (stmt);

      switch (get_gimple_rhs_class (subcode))
        {
        case GIMPLE_SINGLE_RHS:
          expr->kind = EXPR_SINGLE;
          expr->type = TREE_TYPE (gimple_assign_rhs1 (stmt));
          expr->ops.single.rhs = gimple_assign_rhs1 (stmt);
          break;
        case GIMPLE_UNARY_RHS:
          expr->kind = EXPR_UNARY;
          expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
          expr->ops.unary.op = subcode;
          expr->ops.unary.opnd = gimple_assign_rhs1 (stmt);
          break;
        case GIMPLE_BINARY_RHS:
          expr->kind = EXPR_BINARY;
          expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
          expr->ops.binary.op = subcode;
          expr->ops.binary.opnd0 = gimple_assign_rhs1 (stmt);
          expr->ops.binary.opnd1 = gimple_assign_rhs2 (stmt);
          break;
        case GIMPLE_TERNARY_RHS:
          expr->kind = EXPR_TERNARY;
          expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
          expr->ops.ternary.op = subcode;
          expr->ops.ternary.opnd0 = gimple_assign_rhs1 (stmt);
          expr->ops.ternary.opnd1 = gimple_assign_rhs2 (stmt);
          expr->ops.ternary.opnd2 = gimple_assign_rhs3 (stmt);
          break;
        default:
          gcc_unreachable ();
        }
    }
  else if (code == GIMPLE_COND)
    {
      expr->type = boolean_type_node;
      expr->kind = EXPR_BINARY;
      expr->ops.binary.op = gimple_cond_code (stmt);
      expr->ops.binary.opnd0 = gimple_cond_lhs (stmt);
      expr->ops.binary.opnd1 = gimple_cond_rhs (stmt);
    }
  else if (code == GIMPLE_CALL)
    {
      size_t nargs = gimple_call_num_args (stmt);
      size_t i;

      gcc_assert (gimple_call_lhs (stmt));

      expr->type = TREE_TYPE (gimple_call_lhs (stmt));
      expr->kind = EXPR_CALL;
      expr->ops.call.fn_from = stmt;

      if (gimple_call_flags (stmt) & (ECF_CONST | ECF_PURE))
        expr->ops.call.pure = true;
      else
        expr->ops.call.pure = false;

      expr->ops.call.nargs = nargs;
      expr->ops.call.args = XCNEWVEC (tree, nargs);
      for (i = 0; i < nargs; i++)
        expr->ops.call.args[i] = gimple_call_arg (stmt, i);
    }
  else if (code == GIMPLE_SWITCH)
    {
      expr->type = TREE_TYPE (gimple_switch_index (stmt));
      expr->kind = EXPR_SINGLE;
      expr->ops.single.rhs = gimple_switch_index (stmt);
    }
  else if (code == GIMPLE_GOTO)
    {
      expr->type = TREE_TYPE (gimple_goto_dest (stmt));
      expr->kind = EXPR_SINGLE;
      expr->ops.single.rhs = gimple_goto_dest (stmt);
    }
  else if (code == GIMPLE_PHI)
    {
      size_t nargs = gimple_phi_num_args (stmt);
      size_t i;

      expr->type = TREE_TYPE (gimple_phi_result (stmt));
      expr->kind = EXPR_PHI;
      expr->ops.phi.nargs = nargs;
      expr->ops.phi.args = XCNEWVEC (tree, nargs);

      for (i = 0; i < nargs; i++)
        expr->ops.phi.args[i] = gimple_phi_arg_def (stmt, i);
    }
  else
    gcc_unreachable ();

  element->lhs = lhs;
  element->stmt = stmt;
  element->hash = avail_expr_hash (element);
  element->stamp = element;
}
/* Given a conditional expression COND as a tree, initialize
   a hashable_expr expression EXPR.  The conditional must be a
   comparison or logical negation.  A constant or a variable is
   not permitted.  */

static void
initialize_expr_from_cond (tree cond, struct hashable_expr *expr)
{
  expr->type = boolean_type_node;

  if (COMPARISON_CLASS_P (cond))
    {
      expr->kind = EXPR_BINARY;
      expr->ops.binary.op = TREE_CODE (cond);
      expr->ops.binary.opnd0 = TREE_OPERAND (cond, 0);
      expr->ops.binary.opnd1 = TREE_OPERAND (cond, 1);
    }
  else if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
    {
      expr->kind = EXPR_UNARY;
      expr->ops.unary.op = TRUTH_NOT_EXPR;
      expr->ops.unary.opnd = TREE_OPERAND (cond, 0);
    }
  else
    gcc_unreachable ();
}
/* Given a hashable_expr expression EXPR and an LHS,
   initialize the hash table element pointed to by ELEMENT.  */

static void
initialize_hash_element_from_expr (struct hashable_expr *expr,
                                   tree lhs,
                                   struct expr_hash_elt *element)
{
  element->expr = *expr;
  element->lhs = lhs;
  element->stmt = NULL;
  element->hash = avail_expr_hash (element);
  element->stamp = element;
}
/* Compare two hashable_expr structures for equivalence.  They are
   considered equivalent when the expressions they denote must
   necessarily be equal.  The logic is intended to follow that of
   operand_equal_p in fold-const.c  */

static bool
hashable_expr_equal_p (const struct hashable_expr *expr0,
                       const struct hashable_expr *expr1)
{
  tree type0 = expr0->type;
  tree type1 = expr1->type;

  /* If either type is NULL, there is nothing to check.  */
  if ((type0 == NULL_TREE) ^ (type1 == NULL_TREE))
    return false;

  /* If both types don't have the same signedness, precision, and mode,
     then we can't consider them equal.  */
  if (type0 != type1
      && (TREE_CODE (type0) == ERROR_MARK
          || TREE_CODE (type1) == ERROR_MARK
          || TYPE_UNSIGNED (type0) != TYPE_UNSIGNED (type1)
          || TYPE_PRECISION (type0) != TYPE_PRECISION (type1)
          || TYPE_MODE (type0) != TYPE_MODE (type1)))
    return false;

  if (expr0->kind != expr1->kind)
    return false;

  switch (expr0->kind)
    {
    case EXPR_SINGLE:
      return operand_equal_p (expr0->ops.single.rhs,
                              expr1->ops.single.rhs, 0);

    case EXPR_UNARY:
      if (expr0->ops.unary.op != expr1->ops.unary.op)
        return false;

      if ((CONVERT_EXPR_CODE_P (expr0->ops.unary.op)
           || expr0->ops.unary.op == NON_LVALUE_EXPR)
          && TYPE_UNSIGNED (expr0->type) != TYPE_UNSIGNED (expr1->type))
        return false;

      return operand_equal_p (expr0->ops.unary.opnd,
                              expr1->ops.unary.opnd, 0);

    case EXPR_BINARY:
      if (expr0->ops.binary.op != expr1->ops.binary.op)
        return false;

      if (operand_equal_p (expr0->ops.binary.opnd0,
                           expr1->ops.binary.opnd0, 0)
          && operand_equal_p (expr0->ops.binary.opnd1,
                              expr1->ops.binary.opnd1, 0))
        return true;

      /* For commutative ops, allow the other order.  */
      return (commutative_tree_code (expr0->ops.binary.op)
              && operand_equal_p (expr0->ops.binary.opnd0,
                                  expr1->ops.binary.opnd1, 0)
              && operand_equal_p (expr0->ops.binary.opnd1,
                                  expr1->ops.binary.opnd0, 0));

    case EXPR_TERNARY:
      if (expr0->ops.ternary.op != expr1->ops.ternary.op
          || !operand_equal_p (expr0->ops.ternary.opnd2,
                               expr1->ops.ternary.opnd2, 0))
        return false;

      if (operand_equal_p (expr0->ops.ternary.opnd0,
                           expr1->ops.ternary.opnd0, 0)
          && operand_equal_p (expr0->ops.ternary.opnd1,
                              expr1->ops.ternary.opnd1, 0))
        return true;

      /* For commutative ops, allow the other order.  */
      return (commutative_ternary_tree_code (expr0->ops.ternary.op)
              && operand_equal_p (expr0->ops.ternary.opnd0,
                                  expr1->ops.ternary.opnd1, 0)
              && operand_equal_p (expr0->ops.ternary.opnd1,
                                  expr1->ops.ternary.opnd0, 0));

    case EXPR_CALL:
      {
        size_t i;

        /* If the calls are to different functions, then they
           clearly cannot be equal.  */
        if (!gimple_call_same_target_p (expr0->ops.call.fn_from,
                                        expr1->ops.call.fn_from))
          return false;

        if (! expr0->ops.call.pure)
          return false;

        if (expr0->ops.call.nargs != expr1->ops.call.nargs)
          return false;

        for (i = 0; i < expr0->ops.call.nargs; i++)
          if (! operand_equal_p (expr0->ops.call.args[i],
                                 expr1->ops.call.args[i], 0))
            return false;

        if (stmt_could_throw_p (expr0->ops.call.fn_from))
          {
            int lp0 = lookup_stmt_eh_lp (expr0->ops.call.fn_from);
            int lp1 = lookup_stmt_eh_lp (expr1->ops.call.fn_from);
            if ((lp0 > 0 || lp1 > 0) && lp0 != lp1)
              return false;
          }

        return true;
      }

    case EXPR_PHI:
      {
        size_t i;

        if (expr0->ops.phi.nargs != expr1->ops.phi.nargs)
          return false;

        for (i = 0; i < expr0->ops.phi.nargs; i++)
          if (! operand_equal_p (expr0->ops.phi.args[i],
                                 expr1->ops.phi.args[i], 0))
            return false;

        return true;
      }

    default:
      gcc_unreachable ();
    }
}
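
/* Note the commutativity handling above: a_1 + b_2 and b_2 + a_1
   compare equal here, so the hash computation below must assign them
   the same hash value as well (see add_expr_commutative).  */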
/* Generate a hash value for a pair of expressions.  This can be used
   iteratively by passing a previous result in HSTATE.

   The same hash value is always returned for a given pair of expressions,
   regardless of the order in which they are presented.  This is useful in
   hashing the operands of commutative functions.  */

namespace inchash
{

static void
add_expr_commutative (const_tree t1, const_tree t2, hash &hstate)
{
  hash one, two;

  inchash::add_expr (t1, one);
  inchash::add_expr (t2, two);
  hstate.add_commutative (one, two);
}

/* Compute a hash value for a hashable_expr value EXPR and a
   previously accumulated hash value VAL.  If two hashable_expr
   values compare equal with hashable_expr_equal_p, they must
   hash to the same value, given an identical value of VAL.
   The logic is intended to follow inchash::add_expr in tree.c.  */

static void
add_hashable_expr (const struct hashable_expr *expr, hash &hstate)
{
  switch (expr->kind)
    {
    case EXPR_SINGLE:
      inchash::add_expr (expr->ops.single.rhs, hstate);
      break;

    case EXPR_UNARY:
      hstate.add_object (expr->ops.unary.op);

      /* Make sure to include signedness in the hash computation.
         Don't hash the type, that can lead to having nodes which
         compare equal according to operand_equal_p, but which
         have different hash codes.  */
      if (CONVERT_EXPR_CODE_P (expr->ops.unary.op)
          || expr->ops.unary.op == NON_LVALUE_EXPR)
        hstate.add_int (TYPE_UNSIGNED (expr->type));

      inchash::add_expr (expr->ops.unary.opnd, hstate);
      break;

    case EXPR_BINARY:
      hstate.add_object (expr->ops.binary.op);
      if (commutative_tree_code (expr->ops.binary.op))
        inchash::add_expr_commutative (expr->ops.binary.opnd0,
                                       expr->ops.binary.opnd1, hstate);
      else
        {
          inchash::add_expr (expr->ops.binary.opnd0, hstate);
          inchash::add_expr (expr->ops.binary.opnd1, hstate);
        }
      break;

    case EXPR_TERNARY:
      hstate.add_object (expr->ops.ternary.op);
      if (commutative_ternary_tree_code (expr->ops.ternary.op))
        inchash::add_expr_commutative (expr->ops.ternary.opnd0,
                                       expr->ops.ternary.opnd1, hstate);
      else
        {
          inchash::add_expr (expr->ops.ternary.opnd0, hstate);
          inchash::add_expr (expr->ops.ternary.opnd1, hstate);
        }
      inchash::add_expr (expr->ops.ternary.opnd2, hstate);
      break;

    case EXPR_CALL:
      {
        size_t i;
        enum tree_code code = CALL_EXPR;
        gimple fn_from;

        hstate.add_object (code);
        fn_from = expr->ops.call.fn_from;
        if (gimple_call_internal_p (fn_from))
          hstate.merge_hash ((hashval_t) gimple_call_internal_fn (fn_from));
        else
          inchash::add_expr (gimple_call_fn (fn_from), hstate);
        for (i = 0; i < expr->ops.call.nargs; i++)
          inchash::add_expr (expr->ops.call.args[i], hstate);
      }
      break;

    case EXPR_PHI:
      {
        size_t i;

        for (i = 0; i < expr->ops.phi.nargs; i++)
          inchash::add_expr (expr->ops.phi.args[i], hstate);
      }
      break;

    default:
      gcc_unreachable ();
    }
}

}
/* Print a diagnostic dump of an expression hash table entry.  */

static void
print_expr_hash_elt (FILE * stream, const struct expr_hash_elt *element)
{
  if (element->stmt)
    fprintf (stream, "STMT ");
  else
    fprintf (stream, "COND ");

  if (element->lhs)
    {
      print_generic_expr (stream, element->lhs, 0);
      fprintf (stream, " = ");
    }

  switch (element->expr.kind)
    {
    case EXPR_SINGLE:
      print_generic_expr (stream, element->expr.ops.single.rhs, 0);
      break;

    case EXPR_UNARY:
      fprintf (stream, "%s ", get_tree_code_name (element->expr.ops.unary.op));
      print_generic_expr (stream, element->expr.ops.unary.opnd, 0);
      break;

    case EXPR_BINARY:
      print_generic_expr (stream, element->expr.ops.binary.opnd0, 0);
      fprintf (stream, " %s ", get_tree_code_name (element->expr.ops.binary.op));
      print_generic_expr (stream, element->expr.ops.binary.opnd1, 0);
      break;

    case EXPR_TERNARY:
      fprintf (stream, " %s <", get_tree_code_name (element->expr.ops.ternary.op));
      print_generic_expr (stream, element->expr.ops.ternary.opnd0, 0);
      fputs (", ", stream);
      print_generic_expr (stream, element->expr.ops.ternary.opnd1, 0);
      fputs (", ", stream);
      print_generic_expr (stream, element->expr.ops.ternary.opnd2, 0);
      fputs (">", stream);
      break;

    case EXPR_CALL:
      {
        size_t i;
        size_t nargs = element->expr.ops.call.nargs;
        gimple fn_from;

        fn_from = element->expr.ops.call.fn_from;
        if (gimple_call_internal_p (fn_from))
          fputs (internal_fn_name (gimple_call_internal_fn (fn_from)),
                 stream);
        else
          print_generic_expr (stream, gimple_call_fn (fn_from), 0);
        fprintf (stream, " (");
        for (i = 0; i < nargs; i++)
          {
            print_generic_expr (stream, element->expr.ops.call.args[i], 0);
            if (i + 1 < nargs)
              fprintf (stream, ", ");
          }
        fprintf (stream, ")");
      }
      break;

    case EXPR_PHI:
      {
        size_t i;
        size_t nargs = element->expr.ops.phi.nargs;

        fprintf (stream, "PHI <");
        for (i = 0; i < nargs; i++)
          {
            print_generic_expr (stream, element->expr.ops.phi.args[i], 0);
            if (i + 1 < nargs)
              fprintf (stream, ", ");
          }
        fprintf (stream, ">");
      }
      break;
    }
  fprintf (stream, "\n");

  if (element->stmt)
    {
      fprintf (stream, "          ");
      print_gimple_stmt (stream, element->stmt, 0, 0);
    }
}
/* Delete variable sized pieces of the expr_hash_elt ELEMENT.  */

static void
free_expr_hash_elt_contents (struct expr_hash_elt *element)
{
  if (element->expr.kind == EXPR_CALL)
    free (element->expr.ops.call.args);
  else if (element->expr.kind == EXPR_PHI)
    free (element->expr.ops.phi.args);
}

/* Delete an expr_hash_elt and reclaim its storage.  */

static void
free_expr_hash_elt (void *elt)
{
  struct expr_hash_elt *element = ((struct expr_hash_elt *)elt);

  free_expr_hash_elt_contents (element);

  free (element);
}
/* Allocate an EDGE_INFO for edge E and attach it to E.
   Return the new EDGE_INFO structure.  */

static struct edge_info *
allocate_edge_info (edge e)
{
  struct edge_info *edge_info;

  edge_info = XCNEW (struct edge_info);

  e->aux = edge_info;
  return edge_info;
}

/* Free all EDGE_INFO structures associated with edges in the CFG.
   If a particular edge can be threaded, copy the redirection
   target from the EDGE_INFO structure into the edge's AUX field
   as required by code to update the CFG and SSA graph for
   jump threading.  */

static void
free_all_edge_infos (void)
{
  basic_block bb;
  edge_iterator ei;
  edge e;

  FOR_EACH_BB_FN (bb, cfun)
    {
      FOR_EACH_EDGE (e, ei, bb->preds)
        {
          struct edge_info *edge_info = (struct edge_info *) e->aux;

          if (edge_info)
            {
              edge_info->cond_equivalences.release ();
              free (edge_info);
              e->aux = NULL;
            }
        }
    }
}
class dom_opt_dom_walker : public dom_walker
{
public:
  dom_opt_dom_walker (cdi_direction direction)
    : dom_walker (direction), m_dummy_cond (NULL) {}

  virtual void before_dom_children (basic_block);
  virtual void after_dom_children (basic_block);

private:
  void thread_across_edge (edge);

  gimple m_dummy_cond;
};
/* Jump threading, redundancy elimination and const/copy propagation.

   This pass may expose new symbols that need to be renamed into SSA.  For
   every new symbol exposed, its corresponding bit will be set in
   VARS_TO_RENAME.  */

namespace {

const pass_data pass_data_dominator =
{
  GIMPLE_PASS, /* type */
  "dom", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_SSA_DOMINATOR_OPTS, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
};

class pass_dominator : public gimple_opt_pass
{
public:
  pass_dominator (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_dominator, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_dominator (m_ctxt); }
  virtual bool gate (function *) { return flag_tree_dom != 0; }
  virtual unsigned int execute (function *);

}; // class pass_dominator
unsigned int
pass_dominator::execute (function *fun)
{
  memset (&opt_stats, 0, sizeof (opt_stats));

  /* Create our hash tables.  */
  avail_exprs = new hash_table<expr_elt_hasher> (1024);
  avail_exprs_stack.create (20);
  const_and_copies_stack.create (20);
  need_eh_cleanup = BITMAP_ALLOC (NULL);

  calculate_dominance_info (CDI_DOMINATORS);
  cfg_altered = false;

  /* We need to know loop structures in order to avoid destroying them
     in jump threading.  Note that we still can e.g. thread through loop
     headers to an exit edge, or through loop header to the loop body, assuming
     that we update the loop info.

     TODO: We don't need to set LOOPS_HAVE_PREHEADERS generally, but due
     to several overly conservative bail-outs in jump threading, case
     gcc.dg/tree-ssa/pr21417.c can't be threaded if loop preheader is
     missing.  We should improve jump threading in the future; then
     LOOPS_HAVE_PREHEADERS won't be needed here.  */
  loop_optimizer_init (LOOPS_HAVE_PREHEADERS | LOOPS_HAVE_SIMPLE_LATCHES);

  /* Initialize the value-handle array.  */
  threadedge_initialize_values ();

  /* We need accurate information regarding back edges in the CFG
     for jump threading; this may include back edges that are not part of
     a single loop.  */
  mark_dfs_back_edges ();

  /* Recursively walk the dominator tree optimizing statements.  */
  dom_opt_dom_walker (CDI_DOMINATORS).walk (fun->cfg->x_entry_block_ptr);

  {
    gimple_stmt_iterator gsi;
    basic_block bb;
    FOR_EACH_BB_FN (bb, fun)
      {
        for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
          update_stmt_if_modified (gsi_stmt (gsi));
      }
  }

  /* If we exposed any new variables, go ahead and put them into
     SSA form now, before we handle jump threading.  This simplifies
     interactions between rewriting of _DECL nodes into SSA form
     and rewriting SSA_NAME nodes into SSA form after block
     duplication and CFG manipulation.  */
  update_ssa (TODO_update_ssa);

  free_all_edge_infos ();

  /* Thread jumps, creating duplicate blocks as needed.  */
  cfg_altered |= thread_through_all_blocks (first_pass_instance);

  if (cfg_altered)
    free_dominance_info (CDI_DOMINATORS);

  /* Removal of statements may make some EH edges dead.  Purge
     such edges from the CFG as needed.  */
  if (!bitmap_empty_p (need_eh_cleanup))
    {
      unsigned i;
      bitmap_iterator bi;

      /* Jump threading may have created forwarder blocks from blocks
         needing EH cleanup; the new successor of these blocks, which
         has inherited from the original block, needs the cleanup.
         Don't clear bits in the bitmap, as that can break the bitmap
         iterator.  */
      EXECUTE_IF_SET_IN_BITMAP (need_eh_cleanup, 0, i, bi)
        {
          basic_block bb = BASIC_BLOCK_FOR_FN (fun, i);
          if (bb == NULL)
            continue;
          while (single_succ_p (bb)
                 && (single_succ_edge (bb)->flags & EDGE_EH) == 0)
            bb = single_succ (bb);
          if (bb == EXIT_BLOCK_PTR_FOR_FN (fun))
            continue;
          if ((unsigned) bb->index != i)
            bitmap_set_bit (need_eh_cleanup, bb->index);
        }

      gimple_purge_all_dead_eh_edges (need_eh_cleanup);
      bitmap_clear (need_eh_cleanup);
    }

  statistics_counter_event (fun, "Redundant expressions eliminated",
                            opt_stats.num_re);
  statistics_counter_event (fun, "Constants propagated",
                            opt_stats.num_const_prop);
  statistics_counter_event (fun, "Copies propagated",
                            opt_stats.num_copy_prop);

  /* Debugging dumps.  */
  if (dump_file && (dump_flags & TDF_STATS))
    dump_dominator_optimization_stats (dump_file);

  loop_optimizer_finalize ();

  /* Delete our main hashtable.  */
  delete avail_exprs;
  avail_exprs = NULL;

  /* Free asserted bitmaps and stacks.  */
  BITMAP_FREE (need_eh_cleanup);

  avail_exprs_stack.release ();
  const_and_copies_stack.release ();

  /* Free the value-handle array.  */
  threadedge_finalize_values ();

  return 0;
}

} // anon namespace

gimple_opt_pass *
make_pass_dominator (gcc::context *ctxt)
{
  return new pass_dominator (ctxt);
}
/* Given a conditional statement CONDSTMT, convert the
   condition to a canonical form.  */

static void
canonicalize_comparison (gimple condstmt)
{
  tree op0;
  tree op1;
  enum tree_code code;

  gcc_assert (gimple_code (condstmt) == GIMPLE_COND);

  op0 = gimple_cond_lhs (condstmt);
  op1 = gimple_cond_rhs (condstmt);

  code = gimple_cond_code (condstmt);

  /* If it would be profitable to swap the operands, then do so to
     canonicalize the statement, enabling better optimization.

     By placing canonicalization of such expressions here we
     transparently keep statements in canonical form, even
     when the statement is modified.  */
  if (tree_swap_operands_p (op0, op1, false))
    {
      /* For relationals we need to swap the operands
         and change the code.  */
      if (code == LT_EXPR
          || code == GT_EXPR
          || code == LE_EXPR
          || code == GE_EXPR)
        {
          code = swap_tree_comparison (code);

          gimple_cond_set_code (condstmt, code);
          gimple_cond_set_lhs (condstmt, op1);
          gimple_cond_set_rhs (condstmt, op0);

          update_stmt (condstmt);
        }
    }
}
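
/* For example, a condition written as "if (5 > x_1)" is rewritten
   here as "if (x_1 < 5)": the operands are swapped and GT_EXPR is
   replaced by its swapped variant via swap_tree_comparison.  */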
/* Initialize local stacks for this optimizer and record equivalences
   upon entry to BB.  Equivalences can come from the edge traversed to
   reach BB or they may come from PHI nodes at the start of BB.  */

/* Remove all the expressions in LOCALS from TABLE, stopping when there are
   LIMIT entries left in LOCALs.  */

static void
remove_local_expressions_from_table (void)
{
  /* Remove all the expressions made available in this block.  */
  while (avail_exprs_stack.length () > 0)
    {
      expr_hash_elt_t victim = avail_exprs_stack.pop ();
      expr_hash_elt **slot;

      if (victim == NULL)
        break;

      /* This must precede the actual removal from the hash table,
         as ELEMENT and the table entry may share a call argument
         vector which will be freed during removal.  */
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "<<<< ");
          print_expr_hash_elt (dump_file, victim);
        }

      slot = avail_exprs->find_slot (victim, NO_INSERT);
      gcc_assert (slot && *slot == victim);
      avail_exprs->clear_slot (slot);
    }
}
/* Use the source/dest pairs in CONST_AND_COPIES_STACK to restore
   CONST_AND_COPIES to its original state, stopping when we hit a
   NULL marker.  */

static void
restore_vars_to_original_value (void)
{
  while (const_and_copies_stack.length () > 0)
    {
      tree prev_value, dest;

      dest = const_and_copies_stack.pop ();

      if (dest == NULL)
        break;

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "<<<< COPY ");
          print_generic_expr (dump_file, dest, 0);
          fprintf (dump_file, " = ");
          print_generic_expr (dump_file, SSA_NAME_VALUE (dest), 0);
          fprintf (dump_file, "\n");
        }

      prev_value = const_and_copies_stack.pop ();
      set_ssa_name_value (dest, prev_value);
    }
}
/* A trivial wrapper so that we can present the generic jump
   threading code with a simple API for simplifying statements.  */
static tree
simplify_stmt_for_jump_threading (gimple stmt,
                                  gimple within_stmt ATTRIBUTE_UNUSED)
{
  return lookup_avail_expr (stmt, false);
}
/* Record into the equivalence tables any equivalences implied by
   traversing edge E (which are cached in E->aux).

   Callers are responsible for managing the unwinding markers.  */
static void
record_temporary_equivalences (edge e)
{
  int i;
  struct edge_info *edge_info = (struct edge_info *) e->aux;

  /* If we have info associated with this edge, record it into
     our equivalence tables.  */
  if (edge_info)
    {
      cond_equivalence *eq;
      tree lhs = edge_info->lhs;
      tree rhs = edge_info->rhs;

      /* If we have a simple NAME = VALUE equivalence, record it.  */
      if (lhs && TREE_CODE (lhs) == SSA_NAME)
        record_const_or_copy (lhs, rhs);

      /* If we have 0 = COND or 1 = COND equivalences, record them
         into our expression hash tables.  */
      for (i = 0; edge_info->cond_equivalences.iterate (i, &eq); ++i)
        record_cond (eq);
    }
}
/* Wrapper for common code to attempt to thread an edge.  For example,
   it handles lazily building the dummy condition and the bookkeeping
   when jump threading is successful.  */

void
dom_opt_dom_walker::thread_across_edge (edge e)
{
  if (! m_dummy_cond)
    m_dummy_cond =
        gimple_build_cond (NE_EXPR,
                           integer_zero_node, integer_zero_node,
                           NULL, NULL);

  /* Push a marker on both stacks so we can unwind the tables back to their
     current state.  */
  avail_exprs_stack.safe_push (NULL);
  const_and_copies_stack.safe_push (NULL_TREE);

  /* Traversing E may result in equivalences we can utilize.  */
  record_temporary_equivalences (e);

  /* With all the edge equivalences in the tables, go ahead and attempt
     to thread through E->dest.  */
  ::thread_across_edge (m_dummy_cond, e, false,
                        &const_and_copies_stack,
                        simplify_stmt_for_jump_threading);

  /* And restore the various tables to their state before
     we threaded this edge.

     XXX The code in tree-ssa-threadedge.c will restore the state of
     the const_and_copies table.  We just have to restore the expression
     table.  */
  remove_local_expressions_from_table ();
}
/* PHI nodes can create equivalences too.

   Ignoring any alternatives which are the same as the result, if
   all the alternatives are equal, then the PHI node creates an
   equivalence.  */

static void
record_equivalences_from_phis (basic_block bb)
{
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);

      tree lhs = gimple_phi_result (phi);
      tree rhs = NULL;
      size_t i;

      for (i = 0; i < gimple_phi_num_args (phi); i++)
        {
          tree t = gimple_phi_arg_def (phi, i);

          /* Ignore alternatives which are the same as our LHS.  Since
             LHS is a PHI_RESULT, it is known to be a SSA_NAME, so we
             can simply compare pointers.  */
          if (lhs == t)
            continue;

          /* If we have not processed an alternative yet, then set
             RHS to this alternative.  */
          if (rhs == NULL)
            rhs = t;
          /* If we have processed an alternative (stored in RHS), then
             see if it is equal to this one.  If it isn't, then stop
             the search.  */
          else if (! operand_equal_for_phi_arg_p (rhs, t))
            break;
        }

      /* If we had no interesting alternatives, then all the RHS alternatives
         must have been the same as LHS.  */
      if (!rhs)
        rhs = lhs;

      /* If we managed to iterate through each PHI alternative without
         breaking out of the loop, then we have a PHI which may create
         a useful equivalence.  We do not need to record unwind data for
         this, since this is a true assignment and not an equivalence
         inferred from a comparison.  All uses of this ssa name are dominated
         by this assignment, so unwinding just costs time and space.  */
      if (i == gimple_phi_num_args (phi)
          && may_propagate_copy (lhs, rhs))
        set_ssa_name_value (lhs, rhs);
    }
}
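
/* For example, the PHI x_2 = PHI <y_3(2), y_3(3), x_2(4)> creates the
   equivalence x_2 == y_3: the self-referencing alternative is ignored
   and the remaining alternatives are all y_3.  */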
/* Ignoring loop backedges, if BB has precisely one incoming edge then
   return that edge.  Otherwise return NULL.  */
static edge
single_incoming_edge_ignoring_loop_edges (basic_block bb)
{
  edge retval = NULL;
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      /* A loop back edge can be identified by the destination of
         the edge dominating the source of the edge.  */
      if (dominated_by_p (CDI_DOMINATORS, e->src, e->dest))
        continue;

      /* If we have already seen a non-loop edge, then we must have
         multiple incoming non-loop edges and thus we return NULL.  */
      if (retval)
        return NULL;

      /* This is the first non-loop incoming edge we have found.  Record
         it.  */
      retval = e;
    }

  return retval;
}
/* Record any equivalences created by the incoming edge to BB.  If BB
   has more than one incoming edge, then no equivalence is created.  */

static void
record_equivalences_from_incoming_edge (basic_block bb)
{
  edge e;
  basic_block parent;
  struct edge_info *edge_info;

  /* If our parent block ended with a control statement, then we may be
     able to record some equivalences based on which outgoing edge from
     the parent was followed.  */
  parent = get_immediate_dominator (CDI_DOMINATORS, bb);

  e = single_incoming_edge_ignoring_loop_edges (bb);

  /* If we had a single incoming edge from our parent block, then enter
     any data associated with the edge into our tables.  */
  if (e && e->src == parent)
    {
      unsigned int i;

      edge_info = (struct edge_info *) e->aux;

      if (edge_info)
        {
          tree lhs = edge_info->lhs;
          tree rhs = edge_info->rhs;
          cond_equivalence *eq;

          if (lhs)
            record_equality (lhs, rhs);

          /* If LHS is an SSA_NAME and RHS is a constant integer and LHS was
             set via a widening type conversion, then we may be able to record
             additional equivalences.  */
          if (lhs
              && TREE_CODE (lhs) == SSA_NAME
              && is_gimple_constant (rhs)
              && TREE_CODE (rhs) == INTEGER_CST)
            {
              gimple defstmt = SSA_NAME_DEF_STMT (lhs);

              if (defstmt
                  && is_gimple_assign (defstmt)
                  && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (defstmt)))
                {
                  tree old_rhs = gimple_assign_rhs1 (defstmt);

                  /* If the conversion widens the original value and
                     the constant is in the range of the type of OLD_RHS,
                     then convert the constant and record the equivalence.

                     Note that int_fits_type_p does not check the precision
                     if the upper and lower bounds are OK.  */
                  if (INTEGRAL_TYPE_P (TREE_TYPE (old_rhs))
                      && (TYPE_PRECISION (TREE_TYPE (lhs))
                          > TYPE_PRECISION (TREE_TYPE (old_rhs)))
                      && int_fits_type_p (rhs, TREE_TYPE (old_rhs)))
                    {
                      tree newval = fold_convert (TREE_TYPE (old_rhs), rhs);
                      record_equality (old_rhs, newval);
                    }
                }
            }

          for (i = 0; edge_info->cond_equivalences.iterate (i, &eq); ++i)
            record_cond (eq);
        }
    }
}
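
/* As an example of the widening case above: if c_1 is a char,
   lhs_2 was created by lhs_2 = (int) c_1, and the traversed edge
   tells us lhs_2 == 5, then 5 fits in char and we may also record
   c_1 == 5.  */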
/* Dump SSA statistics on FILE.  */

void
dump_dominator_optimization_stats (FILE *file)
{
  fprintf (file, "Total number of statements:                   %6ld\n\n",
           opt_stats.num_stmts);
  fprintf (file, "Exprs considered for dominator optimizations: %6ld\n",
           opt_stats.num_exprs_considered);

  fprintf (file, "\nHash table statistics:\n");

  fprintf (file, "    avail_exprs: ");
  htab_statistics (file, *avail_exprs);
}

/* Dump SSA statistics on stderr.  */

DEBUG_FUNCTION void
debug_dominator_optimization_stats (void)
{
  dump_dominator_optimization_stats (stderr);
}

/* Dump statistics for the hash table HTAB.  */

static void
htab_statistics (FILE *file, const hash_table<expr_elt_hasher> &htab)
{
  fprintf (file, "size %ld, %ld elements, %f collision/search ratio\n",
           (long) htab.size (),
           (long) htab.elements (),
           htab.collisions ());
}
/* Enter condition equivalence into the expression hash table.
   This indicates that a conditional expression has a known
   boolean value.  */

static void
record_cond (cond_equivalence *p)
{
  struct expr_hash_elt *element = XCNEW (struct expr_hash_elt);
  expr_hash_elt **slot;

  initialize_hash_element_from_expr (&p->cond, p->value, element);

  slot = avail_exprs->find_slot_with_hash (element, element->hash, INSERT);
  if (*slot == NULL)
    {
      *slot = element;

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "1>>> ");
          print_expr_hash_elt (dump_file, element);
        }

      avail_exprs_stack.safe_push (element);
    }
  else
    free_expr_hash_elt (element);
}
/* Build a cond_equivalence record indicating that the comparison
   CODE holds between operands OP0 and OP1 and push it onto *P.  */

static void
build_and_record_new_cond (enum tree_code code,
                           tree op0, tree op1,
                           vec<cond_equivalence> *p)
{
  cond_equivalence c;
  struct hashable_expr *cond = &c.cond;

  gcc_assert (TREE_CODE_CLASS (code) == tcc_comparison);

  cond->type = boolean_type_node;
  cond->kind = EXPR_BINARY;
  cond->ops.binary.op = code;
  cond->ops.binary.opnd0 = op0;
  cond->ops.binary.opnd1 = op1;

  c.value = boolean_true_node;
  p->safe_push (c);
}
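
/* A typical use, from record_conditions below: upon seeing a_1 < b_2
   be true, a caller invokes
     build_and_record_new_cond (LE_EXPR, op0, op1,
                                &edge_info->cond_equivalences);
   to record that a_1 <= b_2 is also known to be true on that edge.  */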
/* Record that COND is true and INVERTED is false into the edge information
   structure.  Also record that any conditions dominated by COND are true
   as well.

   For example, if a < b is true, then a <= b must also be true.  */

static void
record_conditions (struct edge_info *edge_info, tree cond, tree inverted)
{
  tree op0, op1;
  cond_equivalence c;

  if (!COMPARISON_CLASS_P (cond))
    return;

  op0 = TREE_OPERAND (cond, 0);
  op1 = TREE_OPERAND (cond, 1);

  switch (TREE_CODE (cond))
    {
    case LT_EXPR:
    case GT_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
        {
          build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
          build_and_record_new_cond (LTGT_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
        }

      build_and_record_new_cond ((TREE_CODE (cond) == LT_EXPR
                                  ? LE_EXPR : GE_EXPR),
                                 op0, op1, &edge_info->cond_equivalences);
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case GE_EXPR:
    case LE_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
        {
          build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
        }
      break;

    case EQ_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
        {
          build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
        }
      build_and_record_new_cond (LE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (GE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case UNORDERED_EXPR:
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNLE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNEQ_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNLT_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGT_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case UNLT_EXPR:
    case UNGT_EXPR:
      build_and_record_new_cond ((TREE_CODE (cond) == UNLT_EXPR
                                  ? UNLE_EXPR : UNGE_EXPR),
                                 op0, op1, &edge_info->cond_equivalences);
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case UNEQ_EXPR:
      build_and_record_new_cond (UNLE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case LTGT_EXPR:
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    default:
      break;
    }

  /* Now store the original true and false conditions into the first
     two slots.  */
  initialize_expr_from_cond (cond, &c.cond);
  c.value = boolean_true_node;
  edge_info->cond_equivalences.safe_push (c);

  /* It is possible for INVERTED to be the negation of a comparison,
     and not a valid RHS or GIMPLE_COND condition.  This happens because
     invert_truthvalue may return such an expression when asked to invert
     a floating-point comparison.  These comparisons are not assumed to
     obey the trichotomy law.  */
  initialize_expr_from_cond (inverted, &c.cond);
  c.value = boolean_false_node;
  edge_info->cond_equivalences.safe_push (c);
}
/* A helper function for record_const_or_copy and record_equality.
   Do the work of recording the value and undo info.  */

static void
record_const_or_copy_1 (tree x, tree y, tree prev_x)
{
  set_ssa_name_value (x, y);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "0>>> COPY ");
      print_generic_expr (dump_file, x, 0);
      fprintf (dump_file, " = ");
      print_generic_expr (dump_file, y, 0);
      fprintf (dump_file, "\n");
    }

  const_and_copies_stack.reserve (2);
  const_and_copies_stack.quick_push (prev_x);
  const_and_copies_stack.quick_push (x);
}

/* Record that X is equal to Y in const_and_copies.  Record undo
   information in the block-local vector.  */

static void
record_const_or_copy (tree x, tree y)
{
  tree prev_x = SSA_NAME_VALUE (x);

  gcc_assert (TREE_CODE (x) == SSA_NAME);

  if (TREE_CODE (y) == SSA_NAME)
    {
      tree tmp = SSA_NAME_VALUE (y);
      if (tmp)
        y = tmp;
    }

  record_const_or_copy_1 (x, y, prev_x);
}
/* Return the loop depth of the basic block of the defining statement of X.
   This number should not be treated as absolutely correct because the loop
   information may not be completely up-to-date when dom runs.  However, it
   will be relatively correct, and as more passes are taught to keep loop info
   up to date, the result will become more and more accurate.  */

static int
loop_depth_of_name (tree x)
{
  gimple defstmt;
  basic_block defbb;

  /* If it's not an SSA_NAME, we have no clue where the definition is.  */
  if (TREE_CODE (x) != SSA_NAME)
    return 0;

  /* Otherwise return the loop depth of the defining statement's bb.
     Note that there may not actually be a bb for this statement, if the
     ssa_name is live on entry.  */
  defstmt = SSA_NAME_DEF_STMT (x);
  defbb = gimple_bb (defstmt);
  if (!defbb)
    return 0;

  return bb_loop_depth (defbb);
}
/* Similarly, but assume that X and Y are the two operands of an EQ_EXPR.
   This constrains the cases in which we may treat this as assignment.  */

static void
record_equality (tree x, tree y)
{
  tree prev_x = NULL, prev_y = NULL;

  if (TREE_CODE (x) == SSA_NAME)
    prev_x = SSA_NAME_VALUE (x);
  if (TREE_CODE (y) == SSA_NAME)
    prev_y = SSA_NAME_VALUE (y);

  /* If one of the previous values is invariant, or invariant in more loops
     (by depth), then use that.
     Otherwise it doesn't matter which value we choose, just so
     long as we canonicalize on one value.  */
  if (is_gimple_min_invariant (y))
    ;
  else if (is_gimple_min_invariant (x)
           /* ???  When threading over backedges the following is important
              for correctness.  See PR61757.  */
           || (loop_depth_of_name (x) <= loop_depth_of_name (y)))
    prev_x = x, x = y, y = prev_x, prev_x = prev_y;
  else if (prev_x && is_gimple_min_invariant (prev_x))
    x = y, y = prev_x, prev_x = prev_y;
  else if (prev_y)
    y = prev_y;

  /* After the swapping, we must have one SSA_NAME.  */
  if (TREE_CODE (x) != SSA_NAME)
    return;

  /* For IEEE, -0.0 == 0.0, so we don't necessarily know the sign of a
     variable compared against zero.  If we're honoring signed zeros,
     then we cannot record this value unless we know that the value is
     nonzero.  */
  if (HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (x)))
      && (TREE_CODE (y) != REAL_CST
          || REAL_VALUES_EQUAL (dconst0, TREE_REAL_CST (y))))
    return;

  record_const_or_copy_1 (x, y, prev_x);
}
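
/* For example, given the condition "x_1 == 7", record_equality
   canonicalizes so the invariant ends up on the right and records
   x_1 -> 7 in const_and_copies; for "x_1 == y_2" one name simply
   becomes a copy of the other.  */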
/* Returns true when STMT is a simple iv increment.  It detects the
   following situation:

   i_1 = phi (..., i_2)
   i_2 = i_1 +/- ...  */

bool
simple_iv_increment_p (gimple stmt)
{
  enum tree_code code;
  tree lhs, preinc;
  gimple phi;
  size_t i;

  if (gimple_code (stmt) != GIMPLE_ASSIGN)
    return false;

  lhs = gimple_assign_lhs (stmt);
  if (TREE_CODE (lhs) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (code != PLUS_EXPR
      && code != MINUS_EXPR
      && code != POINTER_PLUS_EXPR)
    return false;

  preinc = gimple_assign_rhs1 (stmt);
  if (TREE_CODE (preinc) != SSA_NAME)
    return false;

  phi = SSA_NAME_DEF_STMT (preinc);
  if (gimple_code (phi) != GIMPLE_PHI)
    return false;

  for (i = 0; i < gimple_phi_num_args (phi); i++)
    if (gimple_phi_arg_def (phi, i) == lhs)
      return true;

  return false;
}
/* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
   known value for that SSA_NAME (or NULL if no value is known).

   Propagate values from CONST_AND_COPIES into the PHI nodes of the
   successors of BB.  */

static void
cprop_into_successor_phis (basic_block bb)
{
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      int indx;
      gimple_stmt_iterator gsi;

      /* If this is an abnormal edge, then we do not want to copy propagate
         into the PHI alternative associated with this edge.  */
      if (e->flags & EDGE_ABNORMAL)
        continue;

      gsi = gsi_start_phis (e->dest);
      if (gsi_end_p (gsi))
        continue;

      /* We may have an equivalence associated with this edge.  While
         we cannot propagate it into non-dominated blocks, we can
         propagate it into PHIs in non-dominated blocks.  */

      /* Push the unwind marker so we can reset the const and copies
         table back to its original state after processing this edge.  */
      const_and_copies_stack.safe_push (NULL_TREE);

      /* Extract and record any simple NAME = VALUE equivalences.

         Don't bother with [01] = COND equivalences, they're not useful
         here.  */
      struct edge_info *edge_info = (struct edge_info *) e->aux;
      if (edge_info)
        {
          tree lhs = edge_info->lhs;
          tree rhs = edge_info->rhs;

          if (lhs && TREE_CODE (lhs) == SSA_NAME)
            record_const_or_copy (lhs, rhs);
        }

      indx = e->dest_idx;
      for ( ; !gsi_end_p (gsi); gsi_next (&gsi))
        {
          tree new_val;
          use_operand_p orig_p;
          tree orig_val;
          gimple phi = gsi_stmt (gsi);

          /* The alternative may be associated with a constant, so verify
             it is an SSA_NAME before doing anything with it.  */
          orig_p = gimple_phi_arg_imm_use_ptr (phi, indx);
          orig_val = get_use_from_ptr (orig_p);
          if (TREE_CODE (orig_val) != SSA_NAME)
            continue;

          /* If we have *ORIG_P in our constant/copy table, then replace
             ORIG_P with its value in our constant/copy table.  */
          new_val = SSA_NAME_VALUE (orig_val);
          if (new_val
              && new_val != orig_val
              && (TREE_CODE (new_val) == SSA_NAME
                  || is_gimple_min_invariant (new_val))
              && may_propagate_copy (orig_val, new_val))
            propagate_value (orig_p, new_val);
        }

      restore_vars_to_original_value ();
    }
}
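
/* For instance, if the edge E records the equivalence x_1 = 0 and a
   PHI in E->dest has argument x_1 for E, the argument is rewritten to
   0 even though E->dest is not dominated by this block.  */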
/* We have finished optimizing BB, record any information implied by
   taking a specific outgoing edge from BB.  */

static void
record_edge_info (basic_block bb)
{
  gimple_stmt_iterator gsi = gsi_last_bb (bb);
  struct edge_info *edge_info;

  if (! gsi_end_p (gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      location_t loc = gimple_location (stmt);

      if (gimple_code (stmt) == GIMPLE_SWITCH)
        {
          tree index = gimple_switch_index (stmt);

          if (TREE_CODE (index) == SSA_NAME)
            {
              int i;
              int n_labels = gimple_switch_num_labels (stmt);
              tree *info = XCNEWVEC (tree, last_basic_block_for_fn (cfun));
              edge e;
              edge_iterator ei;

              for (i = 0; i < n_labels; i++)
                {
                  tree label = gimple_switch_label (stmt, i);
                  basic_block target_bb = label_to_block (CASE_LABEL (label));
                  if (CASE_HIGH (label)
                      || !CASE_LOW (label)
                      || info[target_bb->index])
                    info[target_bb->index] = error_mark_node;
                  else
                    info[target_bb->index] = label;
                }

              FOR_EACH_EDGE (e, ei, bb->succs)
                {
                  basic_block target_bb = e->dest;
                  tree label = info[target_bb->index];

                  if (label != NULL && label != error_mark_node)
                    {
                      tree x = fold_convert_loc (loc, TREE_TYPE (index),
                                                 CASE_LOW (label));
                      edge_info = allocate_edge_info (e);
                      edge_info->lhs = index;
                      edge_info->rhs = x;
                    }
                }
              free (info);
            }
        }

      /* A COND_EXPR may create equivalences too.  */
      if (gimple_code (stmt) == GIMPLE_COND)
        {
          edge true_edge;
          edge false_edge;

          tree op0 = gimple_cond_lhs (stmt);
          tree op1 = gimple_cond_rhs (stmt);
          enum tree_code code = gimple_cond_code (stmt);

          extract_true_false_edges_from_block (bb, &true_edge, &false_edge);

          /* Special case comparing booleans against a constant as we
             know the value of OP0 on both arms of the branch.  i.e., we
             can record an equivalence for OP0 rather than COND.  */
          if ((code == EQ_EXPR || code == NE_EXPR)
              && TREE_CODE (op0) == SSA_NAME
              && TREE_CODE (TREE_TYPE (op0)) == BOOLEAN_TYPE
              && is_gimple_min_invariant (op1))
            {
              if (code == EQ_EXPR)
                {
                  edge_info = allocate_edge_info (true_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_false_node
                                    : boolean_true_node);

                  edge_info = allocate_edge_info (false_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_true_node
                                    : boolean_false_node);
                }
              else
                {
                  edge_info = allocate_edge_info (true_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_true_node
                                    : boolean_false_node);

                  edge_info = allocate_edge_info (false_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_false_node
                                    : boolean_true_node);
                }
            }
          else if (is_gimple_min_invariant (op0)
                   && (TREE_CODE (op1) == SSA_NAME
                       || is_gimple_min_invariant (op1)))
            {
              tree cond = build2 (code, boolean_type_node, op0, op1);
              tree inverted = invert_truthvalue_loc (loc, cond);
              bool can_infer_simple_equiv
                = !(HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (op0)))
                    && real_zerop (op0));
              struct edge_info *edge_info;

              edge_info = allocate_edge_info (true_edge);
              record_conditions (edge_info, cond, inverted);

              if (can_infer_simple_equiv && code == EQ_EXPR)
                {
                  edge_info->lhs = op1;
                  edge_info->rhs = op0;
                }

              edge_info = allocate_edge_info (false_edge);
              record_conditions (edge_info, inverted, cond);

              if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
                {
                  edge_info->lhs = op1;
                  edge_info->rhs = op0;
                }
            }

          else if (TREE_CODE (op0) == SSA_NAME
                   && (TREE_CODE (op1) == SSA_NAME
                       || is_gimple_min_invariant (op1)))
            {
              tree cond = build2 (code, boolean_type_node, op0, op1);
              tree inverted = invert_truthvalue_loc (loc, cond);
              bool can_infer_simple_equiv
                = !(HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (op1)))
                    && (TREE_CODE (op1) == SSA_NAME || real_zerop (op1)));
              struct edge_info *edge_info;

              edge_info = allocate_edge_info (true_edge);
              record_conditions (edge_info, cond, inverted);

              if (can_infer_simple_equiv && code == EQ_EXPR)
                {
                  edge_info->lhs = op0;
                  edge_info->rhs = op1;
                }

              edge_info = allocate_edge_info (false_edge);
              record_conditions (edge_info, inverted, cond);

              if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
                {
                  edge_info->lhs = op0;
                  edge_info->rhs = op1;
                }
            }
        }

      /* ??? TRUTH_NOT_EXPR can create an equivalence too.  */
    }
}
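
/* An example of the boolean special case above: for "if (x_1 == 0)"
   with boolean x_1, the true edge records x_1 = false and the false
   edge records x_1 = true, which is stronger than only recording the
   value of the condition itself.  */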
void
dom_opt_dom_walker::before_dom_children (basic_block bb)
{
  gimple_stmt_iterator gsi;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "\n\nOptimizing block #%d\n\n", bb->index);

  /* Push a marker on the stacks of local information so that we know how
     far to unwind when we finalize this block.  */
  avail_exprs_stack.safe_push (NULL);
  const_and_copies_stack.safe_push (NULL_TREE);

  record_equivalences_from_incoming_edge (bb);

  /* PHI nodes can create equivalences too.  */
  record_equivalences_from_phis (bb);

  /* Create equivalences from redundant PHIs.  PHIs are only truly
     redundant when they exist in the same block, so push another
     marker and unwind right afterwards.  */
  avail_exprs_stack.safe_push (NULL);
  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    eliminate_redundant_computations (&gsi);
  remove_local_expressions_from_table ();

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    optimize_stmt (bb, gsi);

  /* Now prepare to process dominated blocks.  */
  record_edge_info (bb);
  cprop_into_successor_phis (bb);
}

/* We have finished processing the dominator children of BB, perform
   any finalization actions in preparation for leaving this node in
   the dominator tree.  */

void
dom_opt_dom_walker::after_dom_children (basic_block bb)
{
  gimple last;

  /* If we have an outgoing edge to a block with multiple incoming and
     outgoing edges, then we may be able to thread the edge, i.e., we
     may be able to statically determine which of the outgoing edges
     will be traversed when the incoming edge from BB is traversed.  */
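  /* As an illustrative (hypothetical) example: if BB's lone successor
     begins with "if (a_1 > 10)" and the equivalences recorded along
     BB's incoming edge tell us a_1 == 5, then the jump threader can
     route execution from BB directly to the false arm of that test.  */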
  if (single_succ_p (bb)
      && (single_succ_edge (bb)->flags & EDGE_ABNORMAL) == 0
      && potentially_threadable_block (single_succ (bb)))
    {
      thread_across_edge (single_succ_edge (bb));
    }
  else if ((last = last_stmt (bb))
           && gimple_code (last) == GIMPLE_COND
           && EDGE_COUNT (bb->succs) == 2
           && (EDGE_SUCC (bb, 0)->flags & EDGE_ABNORMAL) == 0
           && (EDGE_SUCC (bb, 1)->flags & EDGE_ABNORMAL) == 0)
    {
      edge true_edge, false_edge;

      extract_true_false_edges_from_block (bb, &true_edge, &false_edge);

      /* Only try to thread the edge if it reaches a target block with
         more than one predecessor and more than one successor.  */
      if (potentially_threadable_block (true_edge->dest))
        thread_across_edge (true_edge);

      /* Similarly for the ELSE arm.  */
      if (potentially_threadable_block (false_edge->dest))
        thread_across_edge (false_edge);
    }

  /* These remove expressions local to BB from the tables.  */
  remove_local_expressions_from_table ();
  restore_vars_to_original_value ();
}

/* Search for redundant computations in STMT.  If any are found, then
   replace them with the variable holding the result of the computation.

   If safe, record this expression into the available expression hash
   table.  */
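
/* As a sketch of what this does (SSA names hypothetical):

     a_1 = b_2 + c_3;
     ...
     d_4 = b_2 + c_3;

   The second computation hashes to the same available-expression entry
   as the first, so it can be replaced by a use of a_1.  */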

static void
eliminate_redundant_computations (gimple_stmt_iterator* gsi)
{
  tree expr_type;
  tree cached_lhs;
  tree def;
  bool insert = true;
  bool assigns_var_p = false;

  gimple stmt = gsi_stmt (*gsi);

  if (gimple_code (stmt) == GIMPLE_PHI)
    def = gimple_phi_result (stmt);
  else
    def = gimple_get_lhs (stmt);

  /* Certain expressions on the RHS can be optimized away, but can not
     themselves be entered into the hash tables.  */
  if (! def
      || TREE_CODE (def) != SSA_NAME
      || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (def)
      || gimple_vdef (stmt)
      /* Do not record equivalences for increments of ivs.  This would create
         overlapping live ranges for a very questionable gain.  */
      || simple_iv_increment_p (stmt))
    insert = false;

  /* Check if the expression has been computed before.  */
  cached_lhs = lookup_avail_expr (stmt, insert);

  opt_stats.num_exprs_considered++;

  /* Get the type of the expression we are trying to optimize.  */
  if (is_gimple_assign (stmt))
    {
      expr_type = TREE_TYPE (gimple_assign_lhs (stmt));
      assigns_var_p = true;
    }
  else if (gimple_code (stmt) == GIMPLE_COND)
    expr_type = boolean_type_node;
  else if (is_gimple_call (stmt))
    {
      gcc_assert (gimple_call_lhs (stmt));
      expr_type = TREE_TYPE (gimple_call_lhs (stmt));
      assigns_var_p = true;
    }
  else if (gimple_code (stmt) == GIMPLE_SWITCH)
    expr_type = TREE_TYPE (gimple_switch_index (stmt));
  else if (gimple_code (stmt) == GIMPLE_PHI)
    /* We can't propagate into a phi, so the logic below doesn't apply.
       Instead record an equivalence between the cached LHS and the
       PHI result of this statement, provided they are in the same block.
       This should be sufficient to kill the redundant phi.  */
    {
      if (def && cached_lhs)
        record_const_or_copy (def, cached_lhs);
      return;
    }
  else
    gcc_unreachable ();

  if (!cached_lhs)
    return;

  /* It is safe to ignore types here since we have already done
     type checking in the hashing and equality routines.  In fact
     type checking here merely gets in the way of constant
     propagation.  Also, make sure that it is safe to propagate
     CACHED_LHS into the expression in STMT.  */
  if ((TREE_CODE (cached_lhs) != SSA_NAME
       && (assigns_var_p
           || useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs))))
      || may_propagate_copy_into_stmt (stmt, cached_lhs))
    {
      gcc_checking_assert (TREE_CODE (cached_lhs) == SSA_NAME
                           || is_gimple_min_invariant (cached_lhs));

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "  Replaced redundant expr '");
          print_gimple_expr (dump_file, stmt, 0, dump_flags);
          fprintf (dump_file, "' with '");
          print_generic_expr (dump_file, cached_lhs, dump_flags);
          fprintf (dump_file, "'\n");
        }

      opt_stats.num_re++;

      if (assigns_var_p
          && !useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs)))
        cached_lhs = fold_convert (expr_type, cached_lhs);

      propagate_tree_value_into_stmt (gsi, cached_lhs);

      /* Since it is always necessary to mark the result as modified,
         perhaps we should move this into propagate_tree_value_into_stmt
         itself.  */
      gimple_set_modified (gsi_stmt (*gsi), true);
    }
}

/* STMT, a GIMPLE_ASSIGN, may create certain equivalences, in either
   the available expressions table or the const_and_copies table.
   Detect and record those equivalences.  */
/* We handle only very simple copy equivalences here.  The heavy
   lifting is done by eliminate_redundant_computations.  */

static void
record_equivalences_from_stmt (gimple stmt, int may_optimize_p)
{
  tree lhs;
  enum tree_code lhs_code;

  gcc_assert (is_gimple_assign (stmt));

  lhs = gimple_assign_lhs (stmt);
  lhs_code = TREE_CODE (lhs);

  if (lhs_code == SSA_NAME
      && gimple_assign_single_p (stmt))
    {
      tree rhs = gimple_assign_rhs1 (stmt);

      /* If the RHS of the assignment is a constant or another variable that
         may be propagated, register it in the CONST_AND_COPIES table.  We
         do not need to record unwind data for this, since this is a true
         assignment and not an equivalence inferred from a comparison.  All
         uses of this ssa name are dominated by this assignment, so unwinding
         just costs time and space.  */
      if (may_optimize_p
          && (TREE_CODE (rhs) == SSA_NAME
              || is_gimple_min_invariant (rhs)))
        {
          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "==== ASGN ");
              print_generic_expr (dump_file, lhs, 0);
              fprintf (dump_file, " = ");
              print_generic_expr (dump_file, rhs, 0);
              fprintf (dump_file, "\n");
            }

          set_ssa_name_value (lhs, rhs);
        }
    }

  /* A memory store, even an aliased store, creates a useful
     equivalence.  By exchanging the LHS and RHS, creating suitable
     vops and recording the result in the available expression table,
     we may be able to expose more redundant loads.  */
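  /* E.g., after the store "*p_1 = x_2;" (names illustrative) we enter
     "x_2 = *p_1" into the table, so a later load "y_3 = *p_1" with the
     same virtual operand can be rewritten to reuse x_2.  */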
  if (!gimple_has_volatile_ops (stmt)
      && gimple_references_memory_p (stmt)
      && gimple_assign_single_p (stmt)
      && (TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME
          || is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
      && !is_gimple_reg (lhs))
    {
      tree rhs = gimple_assign_rhs1 (stmt);
      gimple new_stmt;

      /* Build a new statement with the RHS and LHS exchanged.  */
      if (TREE_CODE (rhs) == SSA_NAME)
        {
          /* NOTE tuples.  The call to gimple_build_assign below replaced
             a call to build_gimple_modify_stmt, which did not set the
             SSA_NAME_DEF_STMT on the LHS of the assignment.  Doing so
             may cause an SSA validation failure, as the LHS may be a
             default-initialized name and should have no definition.  I'm
             a bit dubious of this, as the artificial statement that we
             generate here may in fact be ill-formed, but it is simply
             used as an internal device in this pass, and never becomes
             part of the CFG.  */
          gimple defstmt = SSA_NAME_DEF_STMT (rhs);
          new_stmt = gimple_build_assign (rhs, lhs);
          SSA_NAME_DEF_STMT (rhs) = defstmt;
        }
      else
        new_stmt = gimple_build_assign (rhs, lhs);

      gimple_set_vuse (new_stmt, gimple_vdef (stmt));

      /* Finally enter the statement into the available expression
         table.  */
      lookup_avail_expr (new_stmt, true);
    }
}

/* Replace *OP_P in STMT with any known equivalent value for *OP_P from
   CONST_AND_COPIES.  */

static void
cprop_operand (gimple stmt, use_operand_p op_p)
{
  tree val;
  tree op = USE_FROM_PTR (op_p);

  /* If the operand has a known constant value or it is known to be a
     copy of some other variable, use the value or copy stored in
     CONST_AND_COPIES.  */
  val = SSA_NAME_VALUE (op);
  if (val && val != op)
    {
      /* Do not replace hard register operands in asm statements.  */
      if (gimple_code (stmt) == GIMPLE_ASM
          && !may_propagate_copy_into_asm (op))
        return;

      /* Certain operands are not allowed to be copy propagated due
         to their interaction with exception handling and some GCC
         extensions.  */
      if (!may_propagate_copy (op, val))
        return;

      /* Do not propagate copies into simple IV increment statements.
         See PR23821 for how this can disturb IV analysis.  */
      if (TREE_CODE (val) != INTEGER_CST
          && simple_iv_increment_p (stmt))
        return;

      /* Dump details.  */
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "  Replaced '");
          print_generic_expr (dump_file, op, dump_flags);
          fprintf (dump_file, "' with %s '",
                   (TREE_CODE (val) != SSA_NAME ? "constant" : "variable"));
          print_generic_expr (dump_file, val, dump_flags);
          fprintf (dump_file, "'\n");
        }

      if (TREE_CODE (val) != SSA_NAME)
        opt_stats.num_const_prop++;
      else
        opt_stats.num_copy_prop++;

      propagate_value (op_p, val);

      /* And note that we modified this statement.  This is now
         safe, even if we changed virtual operands since we will
         rescan the statement and rewrite its operands again.  */
      gimple_set_modified (stmt, true);
    }
}

/* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
   known value for that SSA_NAME (or NULL if no value is known).

   Propagate values from CONST_AND_COPIES into the uses, vuses and
   vdef_ops of STMT.  */

static void
cprop_into_stmt (gimple stmt)
{
  use_operand_p op_p;
  ssa_op_iter iter;

  FOR_EACH_SSA_USE_OPERAND (op_p, stmt, iter, SSA_OP_USE)
    cprop_operand (stmt, op_p);
}

/* Optimize the statement pointed to by iterator SI.

   We try to perform some simplistic global redundancy elimination and
   constant propagation:

   1- To detect global redundancy, we keep track of expressions that have
      been computed in this block and its dominators.  If we find that the
      same expression is computed more than once, we eliminate repeated
      computations by using the target of the first one.

   2- Constant values and copy assignments.  This is used to do very
      simplistic constant and copy propagation.  When a constant or copy
      assignment is found, we map the value on the RHS of the assignment to
      the variable in the LHS in the CONST_AND_COPIES table.  */
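
/* For instance (SSA names illustrative), given

     x_1 = 4;
     y_2 = x_1 + 5;

   const/copy propagation rewrites the second statement to
   "y_2 = 4 + 5", which folding then reduces to "y_2 = 9".  */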

static void
optimize_stmt (basic_block bb, gimple_stmt_iterator si)
{
  gimple stmt, old_stmt;
  bool may_optimize_p;
  bool modified_p = false;

  old_stmt = stmt = gsi_stmt (si);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Optimizing statement ");
      print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
    }

  if (gimple_code (stmt) == GIMPLE_COND)
    canonicalize_comparison (stmt);

  update_stmt_if_modified (stmt);
  opt_stats.num_stmts++;

  /* Const/copy propagate into USES, VUSES and the RHS of VDEFs.  */
  cprop_into_stmt (stmt);

  /* If the statement has been modified with constant replacements,
     fold its RHS before checking for redundant computations.  */
  if (gimple_modified_p (stmt))
    {
      tree rhs = NULL;

      /* Try to fold the statement making sure that STMT is kept
         up to date.  */
      if (fold_stmt (&si))
        {
          stmt = gsi_stmt (si);
          gimple_set_modified (stmt, true);

          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "  Folded to: ");
              print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
            }
        }

      /* We only need to consider cases that can yield a gimple operand.  */
      if (gimple_assign_single_p (stmt))
        rhs = gimple_assign_rhs1 (stmt);
      else if (gimple_code (stmt) == GIMPLE_GOTO)
        rhs = gimple_goto_dest (stmt);
      else if (gimple_code (stmt) == GIMPLE_SWITCH)
        /* This should never be an ADDR_EXPR.  */
        rhs = gimple_switch_index (stmt);

      if (rhs && TREE_CODE (rhs) == ADDR_EXPR)
        recompute_tree_invariant_for_addr_expr (rhs);

      /* Indicate that maybe_clean_or_replace_eh_stmt needs to be called,
         even if fold_stmt updated the stmt already and thus cleared
         gimple_modified_p flag on it.  */
      modified_p = true;
    }

  /* Check for redundant computations.  Do this optimization only
     for assignments that have no volatile ops and conditionals.  */
  may_optimize_p = (!gimple_has_side_effects (stmt)
                    && (is_gimple_assign (stmt)
                        || (is_gimple_call (stmt)
                            && gimple_call_lhs (stmt) != NULL_TREE)
                        || gimple_code (stmt) == GIMPLE_COND
                        || gimple_code (stmt) == GIMPLE_SWITCH));

  if (may_optimize_p)
    {
      if (gimple_code (stmt) == GIMPLE_CALL)
        {
          /* Resolve __builtin_constant_p.  If it hasn't been
             folded to integer_one_node by now, it's fairly
             certain that the value simply isn't constant.  */
          tree callee = gimple_call_fndecl (stmt);
          if (callee
              && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL
              && DECL_FUNCTION_CODE (callee) == BUILT_IN_CONSTANT_P)
            {
              propagate_tree_value_into_stmt (&si, integer_zero_node);
              stmt = gsi_stmt (si);
            }
        }

      update_stmt_if_modified (stmt);
      eliminate_redundant_computations (&si);
      stmt = gsi_stmt (si);

      /* Perform simple redundant store elimination.  */
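      /* That is, a store such as "*p_1 = x_2;" (names illustrative) is
         dead when the expression table shows that x_2 is exactly the
         value most recently loaded from or stored to *p_1.  */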
      if (gimple_assign_single_p (stmt)
          && TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
        {
          tree lhs = gimple_assign_lhs (stmt);
          tree rhs = gimple_assign_rhs1 (stmt);
          tree cached_lhs;
          gimple new_stmt;
          if (TREE_CODE (rhs) == SSA_NAME)
            {
              tree tem = SSA_NAME_VALUE (rhs);
              if (tem)
                rhs = tem;
            }
          /* Build a new statement with the RHS and LHS exchanged.  */
          if (TREE_CODE (rhs) == SSA_NAME)
            {
              gimple defstmt = SSA_NAME_DEF_STMT (rhs);
              new_stmt = gimple_build_assign (rhs, lhs);
              SSA_NAME_DEF_STMT (rhs) = defstmt;
            }
          else
            new_stmt = gimple_build_assign (rhs, lhs);
          gimple_set_vuse (new_stmt, gimple_vuse (stmt));
          cached_lhs = lookup_avail_expr (new_stmt, false);
          if (cached_lhs
              && rhs == cached_lhs)
            {
              basic_block bb = gimple_bb (stmt);
              unlink_stmt_vdef (stmt);
              if (gsi_remove (&si, true))
                {
                  bitmap_set_bit (need_eh_cleanup, bb->index);
                  if (dump_file && (dump_flags & TDF_DETAILS))
                    fprintf (dump_file, "  Flagged to clear EH edges.\n");
                }
              release_defs (stmt);
              return;
            }
        }
    }

  /* Record any additional equivalences created by this statement.  */
  if (is_gimple_assign (stmt))
    record_equivalences_from_stmt (stmt, may_optimize_p);

  /* If STMT is a COND_EXPR and it was modified, then we may know
     where it goes.  If that is the case, then mark the CFG as altered.

     This will cause us to later call remove_unreachable_blocks and
     cleanup_tree_cfg when it is safe to do so.  It is not safe to
     clean things up here since removal of edges and such can trigger
     the removal of PHI nodes, which in turn can release SSA_NAMEs to
     the manager.

     That's all fine and good, except that once SSA_NAMEs are released
     to the manager, we must not call create_ssa_name until all references
     to released SSA_NAMEs have been eliminated.

     All references to the deleted SSA_NAMEs can not be eliminated until
     we remove unreachable blocks.

     We can not remove unreachable blocks until after we have completed
     any queued jump threading.

     We can not complete any queued jump threads until we have taken
     appropriate variables out of SSA form.  Taking variables out of
     SSA form can call create_ssa_name and thus we lose.

     Ultimately I suspect we're going to need to change the interface
     into the SSA_NAME manager.  */
  if (gimple_modified_p (stmt) || modified_p)
    {
      tree val = NULL;

      update_stmt_if_modified (stmt);

      if (gimple_code (stmt) == GIMPLE_COND)
        val = fold_binary_loc (gimple_location (stmt),
                               gimple_cond_code (stmt), boolean_type_node,
                               gimple_cond_lhs (stmt), gimple_cond_rhs (stmt));
      else if (gimple_code (stmt) == GIMPLE_SWITCH)
        val = gimple_switch_index (stmt);

      if (val && TREE_CODE (val) == INTEGER_CST && find_taken_edge (bb, val))
        cfg_altered = true;

      /* If we simplified a statement in such a way as to be shown that it
         cannot trap, update the eh information and the cfg to match.  */
      if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt))
        {
          bitmap_set_bit (need_eh_cleanup, bb->index);
          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file, "  Flagged to clear EH edges.\n");
        }
    }
}

/* Search for an existing instance of STMT in the AVAIL_EXPRS table.
   If found, return its LHS.  Otherwise insert STMT in the table and
   return NULL_TREE.

   Also, when an expression is first inserted in the table, it is
   added to AVAIL_EXPRS_STACK, so that it can be removed when we
   finish processing this block and its children.  */
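
/* So, for example, the first lookup of "b_2 + c_3" with INSERT set
   records the expression and returns NULL_TREE; a later lookup of an
   equal expression returns the recorded LHS (names illustrative).  */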

static tree
lookup_avail_expr (gimple stmt, bool insert)
{
  expr_hash_elt **slot;
  tree lhs;
  tree temp;
  struct expr_hash_elt element;

  /* Get LHS of phi, assignment, or call; else NULL_TREE.  */
  if (gimple_code (stmt) == GIMPLE_PHI)
    lhs = gimple_phi_result (stmt);
  else
    lhs = gimple_get_lhs (stmt);

  initialize_hash_element (stmt, lhs, &element);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "LKUP ");
      print_expr_hash_elt (dump_file, &element);
    }

  /* Don't bother remembering constant assignments and copy operations.
     Constants and copy operations are handled by the constant/copy propagator
     in optimize_stmt.  */
  if (element.expr.kind == EXPR_SINGLE
      && (TREE_CODE (element.expr.ops.single.rhs) == SSA_NAME
          || is_gimple_min_invariant (element.expr.ops.single.rhs)))
    return NULL_TREE;

  /* Finally try to find the expression in the main expression hash table.  */
  slot = avail_exprs->find_slot (&element, (insert ? INSERT : NO_INSERT));
  if (slot == NULL)
    {
      free_expr_hash_elt_contents (&element);
      return NULL_TREE;
    }
  else if (*slot == NULL)
    {
      struct expr_hash_elt *element2 = XNEW (struct expr_hash_elt);
      *element2 = element;
      element2->stamp = element2;
      *slot = element2;

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "2>>> ");
          print_expr_hash_elt (dump_file, element2);
        }

      avail_exprs_stack.safe_push (element2);
      return NULL_TREE;
    }
  else
    free_expr_hash_elt_contents (&element);

  /* Extract the LHS of the assignment so that it can be used as the current
     definition of another variable.  */
  lhs = ((struct expr_hash_elt *)*slot)->lhs;

  /* See if the LHS appears in the CONST_AND_COPIES table.  If it does, then
     use the value from the const_and_copies table.  */
  if (TREE_CODE (lhs) == SSA_NAME)
    {
      temp = SSA_NAME_VALUE (lhs);
      if (temp)
        lhs = temp;
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "FIND: ");
      print_generic_expr (dump_file, lhs, 0);
      fprintf (dump_file, "\n");
    }

  return lhs;
}

/* Hashing and equality functions for AVAIL_EXPRS.  We compute a value number
   for expressions using the code of the expression and the SSA numbers of
   its operands.  */

static hashval_t
avail_expr_hash (const void *p)
{
  gimple stmt = ((const struct expr_hash_elt *)p)->stmt;
  const struct hashable_expr *expr = &((const struct expr_hash_elt *)p)->expr;
  tree vuse;
  inchash::hash hstate;

  inchash::add_hashable_expr (expr, hstate);

  /* If the hash table entry is not associated with a statement, then we
     can just hash the expression and not worry about virtual operands
     and such.  */
  if (!stmt)
    return hstate.end ();

  /* Add the SSA version numbers of the vuse operand.  This is important
     because compound variables like arrays are not renamed in the
     operands.  Rather, the rename is done on the virtual variable
     representing all the elements of the array.  */
  if ((vuse = gimple_vuse (stmt)))
    inchash::add_expr (vuse, hstate);

  return hstate.end ();
}

/* PHI-ONLY copy and constant propagation.  This pass is meant to clean
   up degenerate PHIs created by or exposed by jump threading.  */
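
/* A degenerate PHI is one whose arguments all have the same value,
   e.g. (names illustrative)

     x_2 = PHI <x_1(3), x_1(4)>;

   which is equivalent to the trivial copy "x_2 = x_1;".  */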

/* Given a statement STMT, which is either a PHI node or an assignment,
   remove it from the IL.  */

static void
remove_stmt_or_phi (gimple stmt)
{
  gimple_stmt_iterator gsi = gsi_for_stmt (stmt);

  if (gimple_code (stmt) == GIMPLE_PHI)
    remove_phi_node (&gsi, true);
  else
    {
      gsi_remove (&gsi, true);
      release_defs (stmt);
    }
}

/* Given a statement STMT, which is either a PHI node or an assignment,
   return the "rhs" of the node; in the case of a non-degenerate
   PHI, NULL is returned.  */

static tree
get_rhs_or_phi_arg (gimple stmt)
{
  if (gimple_code (stmt) == GIMPLE_PHI)
    return degenerate_phi_result (stmt);
  else if (gimple_assign_single_p (stmt))
    return gimple_assign_rhs1 (stmt);
  else
    gcc_unreachable ();
}

/* Given a statement STMT, which is either a PHI node or an assignment,
   return the "lhs" of the node.  */

static tree
get_lhs_or_phi_result (gimple stmt)
{
  if (gimple_code (stmt) == GIMPLE_PHI)
    return gimple_phi_result (stmt);
  else if (is_gimple_assign (stmt))
    return gimple_assign_lhs (stmt);
  else
    gcc_unreachable ();
}

/* Propagate RHS into all uses of LHS (when possible).

   RHS and LHS are derived from STMT, which is passed in solely so
   that we can remove it if propagation is successful.

   When propagating into a PHI node or into a statement which turns
   into a trivial copy or constant initialization, set the
   appropriate bit in INTERESTING_NAMEs so that we will visit those
   nodes as well in an effort to pick up secondary optimization
   opportunities.  */
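
/* For instance, propagating the constant 5 for x_1 may turn
   "y_2 = x_1;" into "y_2 = 5;" (names illustrative), itself a trivial
   constant initialization, so y_2's version is added to
   INTERESTING_NAMES for another round.  */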

static void
propagate_rhs_into_lhs (gimple stmt, tree lhs, tree rhs, bitmap interesting_names)
{
  /* First verify that propagation is valid.  */
  if (may_propagate_copy (lhs, rhs))
    {
      use_operand_p use_p;
      imm_use_iterator iter;
      gimple use_stmt;
      bool all = true;

      /* Dump details.  */
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "  Replacing '");
          print_generic_expr (dump_file, lhs, dump_flags);
          fprintf (dump_file, "' with %s '",
                   (TREE_CODE (rhs) != SSA_NAME ? "constant" : "variable"));
          print_generic_expr (dump_file, rhs, dump_flags);
          fprintf (dump_file, "'\n");
        }

      /* Walk over every use of LHS and try to replace the use with RHS.
         At this point the only reason why such a propagation would not
         be successful would be if the use occurs in an ASM_EXPR.  */
      FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
        {
          /* Leave debug stmts alone.  If we succeed in propagating
             all non-debug uses, we'll drop the DEF, and propagation
             into debug stmts will occur then.  */
          if (gimple_debug_bind_p (use_stmt))
            continue;

          /* It's not always safe to propagate into an ASM_EXPR.  */
          if (gimple_code (use_stmt) == GIMPLE_ASM
              && ! may_propagate_copy_into_asm (lhs))
            {
              all = false;
              continue;
            }

          /* It's not ok to propagate into the definition stmt of RHS.
                <bb 9>:
                  # prephitmp.12_36 = PHI <g_67.1_6(9)>
                  g_67.1_6 = prephitmp.12_36;
                  goto <bb 9>;
             While this is strictly all dead code we do not want to
             deal with this here.  */
          if (TREE_CODE (rhs) == SSA_NAME
              && SSA_NAME_DEF_STMT (rhs) == use_stmt)
            {
              all = false;
              continue;
            }

          /* Dump details.  */
          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "    Original statement:");
              print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
            }

          /* Propagate the RHS into this use of the LHS.  */
          FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
            propagate_value (use_p, rhs);

          /* Special cases to avoid useless calls into the folding
             routines, operand scanning, etc.

             Propagation into a PHI may cause the PHI to become
             a degenerate, so mark the PHI as interesting.  No other
             actions are necessary.  */
          if (gimple_code (use_stmt) == GIMPLE_PHI)
            {
              tree result;

              /* Dump details.  */
              if (dump_file && (dump_flags & TDF_DETAILS))
                {
                  fprintf (dump_file, "    Updated statement:");
                  print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
                }

              result = get_lhs_or_phi_result (use_stmt);
              bitmap_set_bit (interesting_names, SSA_NAME_VERSION (result));
              continue;
            }

          /* From this point onward we are propagating into a
             real statement.  Folding may (or may not) be possible,
             we may expose new operands, expose dead EH edges,
             etc.  */
          /* NOTE tuples.  In the tuples world, fold_stmt_inplace
             cannot fold a call that simplifies to a constant,
             because the GIMPLE_CALL must be replaced by a
             GIMPLE_ASSIGN, and there is no way to effect such a
             transformation in-place.  We might want to consider
             using the more general fold_stmt here.  */
            {
              gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
              fold_stmt_inplace (&gsi);
            }

          /* Sometimes propagation can expose new operands to the
             renamer.  */
          update_stmt (use_stmt);

          /* Dump details.  */
          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "    Updated statement:");
              print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
            }

          /* If we replaced a variable index with a constant, then
             we would need to update the invariant flag for ADDR_EXPRs.  */
          if (gimple_assign_single_p (use_stmt)
              && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ADDR_EXPR)
            recompute_tree_invariant_for_addr_expr
                (gimple_assign_rhs1 (use_stmt));

          /* If we cleaned up EH information from the statement,
             mark its containing block as needing EH cleanups.  */
          if (maybe_clean_or_replace_eh_stmt (use_stmt, use_stmt))
            {
              bitmap_set_bit (need_eh_cleanup, gimple_bb (use_stmt)->index);
              if (dump_file && (dump_flags & TDF_DETAILS))
                fprintf (dump_file, "  Flagged to clear EH edges.\n");
            }

          /* Propagation may expose new trivial copy/constant propagation
             opportunities.  */
          if (gimple_assign_single_p (use_stmt)
              && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
              && (TREE_CODE (gimple_assign_rhs1 (use_stmt)) == SSA_NAME
                  || is_gimple_min_invariant (gimple_assign_rhs1 (use_stmt))))
            {
              tree result = get_lhs_or_phi_result (use_stmt);
              bitmap_set_bit (interesting_names, SSA_NAME_VERSION (result));
            }

          /* Propagation into these nodes may make certain edges in
             the CFG unexecutable.  We want to identify them, as PHI
             nodes at the destination of those unexecutable edges may
             become degenerate.  */
          else if (gimple_code (use_stmt) == GIMPLE_COND
                   || gimple_code (use_stmt) == GIMPLE_SWITCH
                   || gimple_code (use_stmt) == GIMPLE_GOTO)
            {
              tree val;

              if (gimple_code (use_stmt) == GIMPLE_COND)
                val = fold_binary_loc (gimple_location (use_stmt),
                                       gimple_cond_code (use_stmt),
                                       boolean_type_node,
                                       gimple_cond_lhs (use_stmt),
                                       gimple_cond_rhs (use_stmt));
              else if (gimple_code (use_stmt) == GIMPLE_SWITCH)
                val = gimple_switch_index (use_stmt);
              else
                val = gimple_goto_dest (use_stmt);

              if (val && is_gimple_min_invariant (val))
                {
                  basic_block bb = gimple_bb (use_stmt);
                  edge te = find_taken_edge (bb, val);
                  edge_iterator ei;
                  edge e;
                  gimple_stmt_iterator gsi, psi;

                  /* Remove all outgoing edges except TE.  */
                  for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei));)
                    {
                      if (e != te)
                        {
                          /* Mark all the PHI nodes at the destination of
                             the unexecutable edge as interesting.  */
                          for (psi = gsi_start_phis (e->dest);
                               !gsi_end_p (psi);
                               gsi_next (&psi))
                            {
                              gimple phi = gsi_stmt (psi);

                              tree result = gimple_phi_result (phi);
                              int version = SSA_NAME_VERSION (result);

                              bitmap_set_bit (interesting_names, version);
                            }

                          te->probability += e->probability;

                          te->count += e->count;
                          remove_edge (e);
                          cfg_altered = true;
                        }
                      else
                        ei_next (&ei);
                    }

                  gsi = gsi_last_bb (gimple_bb (use_stmt));
                  gsi_remove (&gsi, true);

                  /* And fixup the flags on the single remaining edge.  */
                  te->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
                  te->flags &= ~EDGE_ABNORMAL;
                  te->flags |= EDGE_FALLTHRU;
                  if (te->probability > REG_BR_PROB_BASE)
                    te->probability = REG_BR_PROB_BASE;
                }
            }
        }

      /* Ensure there is nothing else to do.  */
      gcc_assert (!all || has_zero_uses (lhs));

      /* If we were able to propagate away all uses of LHS, then
         we can remove STMT.  */
      if (all)
        remove_stmt_or_phi (stmt);
    }
}

/* STMT is either a PHI node (potentially a degenerate PHI node) or
   a statement that is a trivial copy or constant initialization.

   Attempt to eliminate STMT by propagating its RHS into all uses of
   its LHS.  This may in turn set new bits in INTERESTING_NAMES
   for nodes we want to revisit later.

   All exit paths should clear INTERESTING_NAMES for the result
   of STMT.  */

static void
eliminate_const_or_copy (gimple stmt, bitmap interesting_names)
{
  tree lhs = get_lhs_or_phi_result (stmt);
  tree rhs;
  int version = SSA_NAME_VERSION (lhs);

  /* If the LHS of this statement or PHI has no uses, then we can
     just eliminate it.  This can occur if, for example, the PHI
     was created by block duplication due to threading and its only
     use was in the conditional at the end of the block which was
     deleted.  */
  if (has_zero_uses (lhs))
    {
      bitmap_clear_bit (interesting_names, version);
      remove_stmt_or_phi (stmt);
      return;
    }

  /* Get the RHS of the assignment or PHI node if the PHI is a
     degenerate.  */
  rhs = get_rhs_or_phi_arg (stmt);
  if (!rhs)
    {
      bitmap_clear_bit (interesting_names, version);
      return;
    }

  if (!virtual_operand_p (lhs))
    propagate_rhs_into_lhs (stmt, lhs, rhs, interesting_names);
  else
    {
      gimple use_stmt;
      imm_use_iterator iter;
      use_operand_p use_p;
      /* For virtual operands we have to propagate into all uses as
         otherwise we will create overlapping life-ranges.  */
      FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
        FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
          SET_USE (use_p, rhs);
      if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
        SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs) = 1;
      remove_stmt_or_phi (stmt);
    }

  /* Note that STMT may well have been deleted by now, so do
     not access it; instead use the saved version # to clear
     STMT's entry in the worklist.  */
  bitmap_clear_bit (interesting_names, version);
}

/* The first phase in degenerate PHI elimination.

   Eliminate the degenerate PHIs in BB, then recurse on the
   dominator children of BB.  */

static void
eliminate_degenerate_phis_1 (basic_block bb, bitmap interesting_names)
{
  gimple_stmt_iterator gsi;
  basic_block son;

  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);

      eliminate_const_or_copy (phi, interesting_names);
    }

  /* Recurse into the dominator children of BB.  */
  for (son = first_dom_son (CDI_DOMINATORS, bb);
       son;
       son = next_dom_son (CDI_DOMINATORS, son))
    eliminate_degenerate_phis_1 (son, interesting_names);
}

/* A very simple pass to eliminate degenerate PHI nodes from the
   IL.  This is meant to be fast enough to be able to be run several
   times in the optimization pipeline.

   Certain optimizations, particularly those which duplicate blocks
   or remove edges from the CFG can create or expose PHIs which are
   trivial copies or constant initializations.

   While we could pick up these optimizations in DOM or with the
   combination of copy-prop and CCP, those solutions are far too
   heavy-weight for our needs.

   This implementation has two phases so that we can efficiently
   eliminate the first order degenerate PHIs and second order
   degenerate PHIs.

   The first phase performs a dominator walk to identify and eliminate
   the vast majority of the degenerate PHIs.  When a degenerate PHI
   is identified and eliminated any affected statements or PHIs
   are put on a worklist.

   The second phase eliminates degenerate PHIs and trivial copies
   or constant initializations using the worklist.  This is how we
   pick up the secondary optimization opportunities with minimal
   cost.  */
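
/* As an illustration (names hypothetical), eliminating the first order
   degenerate PHI

     x_2 = PHI <7(2), 7(3)>;

   replaces all uses of x_2 with 7, which may turn a downstream PHI
   such as "y_4 = PHI <x_2(4), 7(5)>" into a second order degenerate
   PHI that the worklist phase then picks up.  */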

namespace {

const pass_data pass_data_phi_only_cprop =
{
  GIMPLE_PASS, /* type */
  "phicprop", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_PHI_CPROP, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
};

class pass_phi_only_cprop : public gimple_opt_pass
{
public:
  pass_phi_only_cprop (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_phi_only_cprop, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_phi_only_cprop (m_ctxt); }
  virtual bool gate (function *) { return flag_tree_dom != 0; }
  virtual unsigned int execute (function *);

}; // class pass_phi_only_cprop

unsigned int
pass_phi_only_cprop::execute (function *fun)
{
  bitmap interesting_names;
  bitmap interesting_names1;

  /* Bitmap of blocks which need EH information updated.  We can not
     update it on-the-fly as doing so invalidates the dominator tree.  */
  need_eh_cleanup = BITMAP_ALLOC (NULL);

  /* INTERESTING_NAMES is effectively our worklist, indexed by
     SSA_NAME_VERSION.

     A set bit indicates that the statement or PHI node which
     defines the SSA_NAME should be (re)examined to determine if
     it has become a degenerate PHI or trivial const/copy propagation
     opportunity.

     Experiments have shown we generally get better compilation
     time behavior with bitmaps rather than sbitmaps.  */
  interesting_names = BITMAP_ALLOC (NULL);
  interesting_names1 = BITMAP_ALLOC (NULL);

  calculate_dominance_info (CDI_DOMINATORS);
  cfg_altered = false;

  /* First phase.  Eliminate degenerate PHIs via a dominator
     walk of the CFG.

     Experiments have indicated that we generally get better
     compile-time behavior by visiting blocks in the first
     phase in dominator order.  Presumably this is because walking
     in dominator order leaves fewer PHIs for later examination
     by the worklist phase.  */
  eliminate_degenerate_phis_1 (ENTRY_BLOCK_PTR_FOR_FN (fun),
                               interesting_names);

  /* Second phase.  Eliminate second order degenerate PHIs as well
     as trivial copies or constant initializations identified by
     the first phase or this phase.  Basically we keep iterating
     until our set of INTERESTING_NAMEs is empty.  */
  while (!bitmap_empty_p (interesting_names))
    {
      unsigned int i;
      bitmap_iterator bi;

      /* EXECUTE_IF_SET_IN_BITMAP does not like its bitmap
         changed during the loop.  Copy it to another bitmap and
         use that.  */
      bitmap_copy (interesting_names1, interesting_names);

      EXECUTE_IF_SET_IN_BITMAP (interesting_names1, 0, i, bi)
        {
          tree name = ssa_name (i);

          /* Ignore SSA_NAMEs that have been released because
             their defining statement was deleted (unreachable).  */
          if (name)
            eliminate_const_or_copy (SSA_NAME_DEF_STMT (ssa_name (i)),
                                     interesting_names);
        }
    }

  if (cfg_altered)
    {
      free_dominance_info (CDI_DOMINATORS);
      /* If we changed the CFG schedule loops for fixup by cfgcleanup.  */
      loops_state_set (LOOPS_NEED_FIXUP);
    }

  /* Propagation of const and copies may make some EH edges dead.  Purge
     such edges from the CFG as needed.  */
  if (!bitmap_empty_p (need_eh_cleanup))
    {
      gimple_purge_all_dead_eh_edges (need_eh_cleanup);
      BITMAP_FREE (need_eh_cleanup);
    }

  BITMAP_FREE (interesting_names);
  BITMAP_FREE (interesting_names1);
  return 0;
}

} // anon namespace

gimple_opt_pass *
make_pass_phi_only_cprop (gcc::context *ctxt)
{
  return new pass_phi_only_cprop (ctxt);
}