gcc/tree-ssa-dom.c
/* SSA Dominator optimizations for trees
   Copyright (C) 2001-2014 Free Software Foundation, Inc.
   Contributed by Diego Novillo <dnovillo@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "hash-table.h"
#include "tm.h"
#include "tree.h"
#include "stor-layout.h"
#include "flags.h"
#include "tm_p.h"
#include "predict.h"
#include "vec.h"
#include "hashtab.h"
#include "hash-set.h"
#include "machmode.h"
#include "hard-reg-set.h"
#include "input.h"
#include "function.h"
#include "dominance.h"
#include "cfg.h"
#include "cfganal.h"
#include "basic-block.h"
#include "cfgloop.h"
#include "inchash.h"
#include "gimple-pretty-print.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-fold.h"
#include "tree-eh.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-into-ssa.h"
#include "domwalk.h"
#include "tree-pass.h"
#include "tree-ssa-propagate.h"
#include "tree-ssa-threadupdate.h"
#include "langhooks.h"
#include "params.h"
#include "tree-ssa-threadedge.h"
#include "tree-ssa-dom.h"
#include "gimplify.h"
/* This file implements optimizations on the dominator tree.  */

/* Representation of a "naked" right-hand-side expression, to be used
   in recording available expressions in the expression hash table.  */

enum expr_kind
{
  EXPR_SINGLE,
  EXPR_UNARY,
  EXPR_BINARY,
  EXPR_TERNARY,
  EXPR_CALL,
  EXPR_PHI
};

struct hashable_expr
{
  tree type;
  enum expr_kind kind;
  union {
    struct { tree rhs; } single;
    struct { enum tree_code op;  tree opnd; } unary;
    struct { enum tree_code op;  tree opnd0, opnd1; } binary;
    struct { enum tree_code op;  tree opnd0, opnd1, opnd2; } ternary;
    struct { gcall *fn_from; bool pure; size_t nargs; tree *args; } call;
    struct { size_t nargs; tree *args; } phi;
  } ops;
};
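
/* For example (illustrative, not compiler output): the assignment
   "x_1 = a_2 + b_3" is represented with kind EXPR_BINARY, type
   TREE_TYPE (x_1), ops.binary.op PLUS_EXPR and operands a_2 and b_3.  */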
/* Structure for recording known values of a conditional expression
   at the exits from its block.  */

typedef struct cond_equivalence_s
{
  struct hashable_expr cond;
  tree value;
} cond_equivalence;
/* Structure for recording edge equivalences as well as any pending
   edge redirections during the dominator optimizer.

   Computing and storing the edge equivalences instead of creating
   them on-demand can save significant amounts of time, particularly
   for pathological cases involving switch statements.

   These structures live for a single iteration of the dominator
   optimizer in the edge's AUX field.  At the end of an iteration we
   free each of these structures and update the AUX field to point
   to any requested redirection target (the code for updating the
   CFG and SSA graph for edge redirection expects redirection edge
   targets to be in the AUX field for each edge).  */

struct edge_info
{
  /* If this edge creates a simple equivalence, the LHS and RHS of
     the equivalence will be stored here.  */
  tree lhs;
  tree rhs;

  /* Traversing an edge may also indicate one or more particular conditions
     are true or false.  */
  vec<cond_equivalence> cond_equivalences;
};
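
/* As an illustration: for "if (a_1 == 5)", the true edge would carry
   LHS = a_1, RHS = 5, plus cond_equivalences recording that "a_1 == 5"
   is true and "a_1 != 5" is false along that edge.  */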
/* Stack of available expressions in AVAIL_EXPRs.  Each block pushes any
   expressions it enters into the hash table along with a marker entry
   (null).  When we finish processing the block, we pop off entries and
   remove the expressions from the global hash table until we hit the
   marker.  */
typedef struct expr_hash_elt * expr_hash_elt_t;

static vec<std::pair<expr_hash_elt_t, expr_hash_elt_t> > avail_exprs_stack;
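
/* Illustrative sketch: entering a block B1 pushes a marker, then B1's
   expressions; entering a dominated block B2 pushes another marker,
   then B2's expressions.  Unwinding pops until the innermost marker,
   undoing exactly the insertions made while walking B2.  */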
/* Structure for entries in the expression hash table.  */

struct expr_hash_elt
{
  /* The value (lhs) of this expression.  */
  tree lhs;

  /* The expression (rhs) we want to record.  */
  struct hashable_expr expr;

  /* The virtual operand associated with the nearest dominating stmt
     loading from or storing to expr.  */
  tree vop;

  /* The hash value for RHS.  */
  hashval_t hash;

  /* A unique stamp, typically the address of the hash
     element itself, used in removing entries from the table.  */
  struct expr_hash_elt *stamp;
};
/* Hashtable helpers.  */

static bool hashable_expr_equal_p (const struct hashable_expr *,
                                   const struct hashable_expr *);
static void free_expr_hash_elt (void *);

struct expr_elt_hasher
{
  typedef expr_hash_elt *value_type;
  typedef expr_hash_elt *compare_type;
  typedef int store_values_directly;
  static inline hashval_t hash (const value_type &);
  static inline bool equal (const value_type &, const compare_type &);
  static inline void remove (value_type &);
};
inline hashval_t
expr_elt_hasher::hash (const value_type &p)
{
  return p->hash;
}
inline bool
expr_elt_hasher::equal (const value_type &p1, const compare_type &p2)
{
  const struct hashable_expr *expr1 = &p1->expr;
  const struct expr_hash_elt *stamp1 = p1->stamp;
  const struct hashable_expr *expr2 = &p2->expr;
  const struct expr_hash_elt *stamp2 = p2->stamp;

  /* This case should apply only when removing entries from the table.  */
  if (stamp1 == stamp2)
    return true;

  if (p1->hash != p2->hash)
    return false;

  /* In case of a collision, both RHS have to be identical and have the
     same VUSE operands.  */
  if (hashable_expr_equal_p (expr1, expr2)
      && types_compatible_p (expr1->type, expr2->type))
    return true;

  return false;
}
/* Delete an expr_hash_elt and reclaim its storage.  */

inline void
expr_elt_hasher::remove (value_type &element)
{
  free_expr_hash_elt (element);
}
/* Hash table with expressions made available during the renaming process.
   When an assignment of the form X_i = EXPR is found, the statement is
   stored in this table.  If the same expression EXPR is later found on the
   RHS of another statement, it is replaced with X_i (thus performing
   global redundancy elimination).  Similarly, as we pass through
   conditionals we record the conditional itself as having either a true
   or false value in this table.  */
static hash_table<expr_elt_hasher> *avail_exprs;
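
/* Illustrative example (not from the sources): given

     x_1 = a_2 * b_3;
     ...
     y_4 = a_2 * b_3;

   the first statement enters this table keyed on its RHS, and the
   second statement's RHS lookup hits, so it is rewritten into the
   copy "y_4 = x_1".  */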
/* Stack of dest,src pairs that need to be restored during finalization.

   A NULL entry is used to mark the end of pairs which need to be
   restored during finalization of this block.  */
static vec<tree> const_and_copies_stack;
/* Track whether or not we have changed the control flow graph.  */
static bool cfg_altered;

/* Bitmap of blocks that have had EH statements cleaned.  We should
   remove their dead edges eventually.  */
static bitmap need_eh_cleanup;
/* Statistics for dominator optimizations.  */
struct opt_stats_d
{
  long num_stmts;
  long num_exprs_considered;
  long num_re;
  long num_const_prop;
  long num_copy_prop;
};

static struct opt_stats_d opt_stats;
/* Local functions.  */
static void optimize_stmt (basic_block, gimple_stmt_iterator);
static tree lookup_avail_expr (gimple, bool);
static hashval_t avail_expr_hash (const void *);
static void htab_statistics (FILE *,
                             const hash_table<expr_elt_hasher> &);
static void record_cond (cond_equivalence *);
static void record_const_or_copy (tree, tree);
static void record_equality (tree, tree);
static void record_equivalences_from_phis (basic_block);
static void record_equivalences_from_incoming_edge (basic_block);
static void eliminate_redundant_computations (gimple_stmt_iterator *);
static void record_equivalences_from_stmt (gimple, int);
static void remove_local_expressions_from_table (void);
static void restore_vars_to_original_value (void);
static edge single_incoming_edge_ignoring_loop_edges (basic_block);
/* Given a statement STMT, initialize the hash table element pointed to
   by ELEMENT.  */

static void
initialize_hash_element (gimple stmt, tree lhs,
                         struct expr_hash_elt *element)
{
  enum gimple_code code = gimple_code (stmt);
  struct hashable_expr *expr = &element->expr;

  if (code == GIMPLE_ASSIGN)
    {
      enum tree_code subcode = gimple_assign_rhs_code (stmt);

      switch (get_gimple_rhs_class (subcode))
        {
        case GIMPLE_SINGLE_RHS:
          expr->kind = EXPR_SINGLE;
          expr->type = TREE_TYPE (gimple_assign_rhs1 (stmt));
          expr->ops.single.rhs = gimple_assign_rhs1 (stmt);
          break;
        case GIMPLE_UNARY_RHS:
          expr->kind = EXPR_UNARY;
          expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
          if (CONVERT_EXPR_CODE_P (subcode))
            subcode = NOP_EXPR;
          expr->ops.unary.op = subcode;
          expr->ops.unary.opnd = gimple_assign_rhs1 (stmt);
          break;
        case GIMPLE_BINARY_RHS:
          expr->kind = EXPR_BINARY;
          expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
          expr->ops.binary.op = subcode;
          expr->ops.binary.opnd0 = gimple_assign_rhs1 (stmt);
          expr->ops.binary.opnd1 = gimple_assign_rhs2 (stmt);
          break;
        case GIMPLE_TERNARY_RHS:
          expr->kind = EXPR_TERNARY;
          expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
          expr->ops.ternary.op = subcode;
          expr->ops.ternary.opnd0 = gimple_assign_rhs1 (stmt);
          expr->ops.ternary.opnd1 = gimple_assign_rhs2 (stmt);
          expr->ops.ternary.opnd2 = gimple_assign_rhs3 (stmt);
          break;
        default:
          gcc_unreachable ();
        }
    }
  else if (code == GIMPLE_COND)
    {
      expr->type = boolean_type_node;
      expr->kind = EXPR_BINARY;
      expr->ops.binary.op = gimple_cond_code (stmt);
      expr->ops.binary.opnd0 = gimple_cond_lhs (stmt);
      expr->ops.binary.opnd1 = gimple_cond_rhs (stmt);
    }
  else if (gcall *call_stmt = dyn_cast <gcall *> (stmt))
    {
      size_t nargs = gimple_call_num_args (call_stmt);
      size_t i;

      gcc_assert (gimple_call_lhs (call_stmt));

      expr->type = TREE_TYPE (gimple_call_lhs (call_stmt));
      expr->kind = EXPR_CALL;
      expr->ops.call.fn_from = call_stmt;

      if (gimple_call_flags (call_stmt) & (ECF_CONST | ECF_PURE))
        expr->ops.call.pure = true;
      else
        expr->ops.call.pure = false;

      expr->ops.call.nargs = nargs;
      expr->ops.call.args = XCNEWVEC (tree, nargs);
      for (i = 0; i < nargs; i++)
        expr->ops.call.args[i] = gimple_call_arg (call_stmt, i);
    }
  else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
    {
      expr->type = TREE_TYPE (gimple_switch_index (swtch_stmt));
      expr->kind = EXPR_SINGLE;
      expr->ops.single.rhs = gimple_switch_index (swtch_stmt);
    }
  else if (code == GIMPLE_GOTO)
    {
      expr->type = TREE_TYPE (gimple_goto_dest (stmt));
      expr->kind = EXPR_SINGLE;
      expr->ops.single.rhs = gimple_goto_dest (stmt);
    }
  else if (code == GIMPLE_PHI)
    {
      size_t nargs = gimple_phi_num_args (stmt);
      size_t i;

      expr->type = TREE_TYPE (gimple_phi_result (stmt));
      expr->kind = EXPR_PHI;
      expr->ops.phi.nargs = nargs;
      expr->ops.phi.args = XCNEWVEC (tree, nargs);

      for (i = 0; i < nargs; i++)
        expr->ops.phi.args[i] = gimple_phi_arg_def (stmt, i);
    }
  else
    gcc_unreachable ();

  element->lhs = lhs;
  element->vop = gimple_vuse (stmt);
  element->hash = avail_expr_hash (element);
  element->stamp = element;
}
/* Given a conditional expression COND as a tree, initialize
   a hashable_expr expression EXPR.  The conditional must be a
   comparison or logical negation.  A constant or a variable is
   not permitted.  */

static void
initialize_expr_from_cond (tree cond, struct hashable_expr *expr)
{
  expr->type = boolean_type_node;

  if (COMPARISON_CLASS_P (cond))
    {
      expr->kind = EXPR_BINARY;
      expr->ops.binary.op = TREE_CODE (cond);
      expr->ops.binary.opnd0 = TREE_OPERAND (cond, 0);
      expr->ops.binary.opnd1 = TREE_OPERAND (cond, 1);
    }
  else if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
    {
      expr->kind = EXPR_UNARY;
      expr->ops.unary.op = TRUTH_NOT_EXPR;
      expr->ops.unary.opnd = TREE_OPERAND (cond, 0);
    }
  else
    gcc_unreachable ();
}
/* Given a hashable_expr expression EXPR and an LHS,
   initialize the hash table element pointed to by ELEMENT.  */

static void
initialize_hash_element_from_expr (struct hashable_expr *expr,
                                   tree lhs,
                                   struct expr_hash_elt *element)
{
  element->expr = *expr;
  element->lhs = lhs;
  element->vop = NULL_TREE;
  element->hash = avail_expr_hash (element);
  element->stamp = element;
}
/* Compare two hashable_expr structures for equivalence.  They are
   considered equivalent when the expressions they denote must
   necessarily be equal.  The logic is intended to follow that of
   operand_equal_p in fold-const.c.  */

static bool
hashable_expr_equal_p (const struct hashable_expr *expr0,
                       const struct hashable_expr *expr1)
{
  tree type0 = expr0->type;
  tree type1 = expr1->type;

  /* If either type is NULL, there is nothing to check.  */
  if ((type0 == NULL_TREE) ^ (type1 == NULL_TREE))
    return false;

  /* If the two types don't have the same signedness, precision, and mode,
     then we can't consider them equal.  */
  if (type0 != type1
      && (TREE_CODE (type0) == ERROR_MARK
          || TREE_CODE (type1) == ERROR_MARK
          || TYPE_UNSIGNED (type0) != TYPE_UNSIGNED (type1)
          || TYPE_PRECISION (type0) != TYPE_PRECISION (type1)
          || TYPE_MODE (type0) != TYPE_MODE (type1)))
    return false;

  if (expr0->kind != expr1->kind)
    return false;

  switch (expr0->kind)
    {
    case EXPR_SINGLE:
      return operand_equal_p (expr0->ops.single.rhs,
                              expr1->ops.single.rhs, 0);

    case EXPR_UNARY:
      if (expr0->ops.unary.op != expr1->ops.unary.op)
        return false;

      if ((CONVERT_EXPR_CODE_P (expr0->ops.unary.op)
           || expr0->ops.unary.op == NON_LVALUE_EXPR)
          && TYPE_UNSIGNED (expr0->type) != TYPE_UNSIGNED (expr1->type))
        return false;

      return operand_equal_p (expr0->ops.unary.opnd,
                              expr1->ops.unary.opnd, 0);

    case EXPR_BINARY:
      if (expr0->ops.binary.op != expr1->ops.binary.op)
        return false;

      if (operand_equal_p (expr0->ops.binary.opnd0,
                           expr1->ops.binary.opnd0, 0)
          && operand_equal_p (expr0->ops.binary.opnd1,
                              expr1->ops.binary.opnd1, 0))
        return true;

      /* For commutative ops, allow the other order.  */
      return (commutative_tree_code (expr0->ops.binary.op)
              && operand_equal_p (expr0->ops.binary.opnd0,
                                  expr1->ops.binary.opnd1, 0)
              && operand_equal_p (expr0->ops.binary.opnd1,
                                  expr1->ops.binary.opnd0, 0));

    case EXPR_TERNARY:
      if (expr0->ops.ternary.op != expr1->ops.ternary.op
          || !operand_equal_p (expr0->ops.ternary.opnd2,
                               expr1->ops.ternary.opnd2, 0))
        return false;

      if (operand_equal_p (expr0->ops.ternary.opnd0,
                           expr1->ops.ternary.opnd0, 0)
          && operand_equal_p (expr0->ops.ternary.opnd1,
                              expr1->ops.ternary.opnd1, 0))
        return true;

      /* For commutative ops, allow the other order.  */
      return (commutative_ternary_tree_code (expr0->ops.ternary.op)
              && operand_equal_p (expr0->ops.ternary.opnd0,
                                  expr1->ops.ternary.opnd1, 0)
              && operand_equal_p (expr0->ops.ternary.opnd1,
                                  expr1->ops.ternary.opnd0, 0));

    case EXPR_CALL:
      {
        size_t i;

        /* If the calls are to different functions, then they
           clearly cannot be equal.  */
        if (!gimple_call_same_target_p (expr0->ops.call.fn_from,
                                        expr1->ops.call.fn_from))
          return false;

        if (! expr0->ops.call.pure)
          return false;

        if (expr0->ops.call.nargs != expr1->ops.call.nargs)
          return false;

        for (i = 0; i < expr0->ops.call.nargs; i++)
          if (! operand_equal_p (expr0->ops.call.args[i],
                                 expr1->ops.call.args[i], 0))
            return false;

        if (stmt_could_throw_p (expr0->ops.call.fn_from))
          {
            int lp0 = lookup_stmt_eh_lp (expr0->ops.call.fn_from);
            int lp1 = lookup_stmt_eh_lp (expr1->ops.call.fn_from);
            if ((lp0 > 0 || lp1 > 0) && lp0 != lp1)
              return false;
          }

        return true;
      }

    case EXPR_PHI:
      {
        size_t i;

        if (expr0->ops.phi.nargs != expr1->ops.phi.nargs)
          return false;

        for (i = 0; i < expr0->ops.phi.nargs; i++)
          if (! operand_equal_p (expr0->ops.phi.args[i],
                                 expr1->ops.phi.args[i], 0))
            return false;

        return true;
      }

    default:
      gcc_unreachable ();
    }
}
/* Generate a hash value for a pair of expressions.  This can be used
   iteratively by passing a previous result in HSTATE.

   The same hash value is always returned for a given pair of expressions,
   regardless of the order in which they are presented.  This is useful in
   hashing the operands of commutative functions.  */

namespace inchash
{

static void
add_expr_commutative (const_tree t1, const_tree t2, hash &hstate)
{
  hash one, two;

  inchash::add_expr (t1, one);
  inchash::add_expr (t2, two);
  hstate.add_commutative (one, two);
}
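
/* As a result (illustrative), hashing the operand pair (a_1, b_2) and
   the pair (b_2, a_1) mixes the same value into HSTATE, which is what
   allows "a_1 + b_2" and "b_2 + a_1" to land in the same hash bucket.  */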
/* Compute a hash value for a hashable_expr value EXPR and a
   previously accumulated hash value VAL.  If two hashable_expr
   values compare equal with hashable_expr_equal_p, they must
   hash to the same value, given an identical value of VAL.
   The logic is intended to follow inchash::add_expr in tree.c.  */

static void
add_hashable_expr (const struct hashable_expr *expr, hash &hstate)
{
  switch (expr->kind)
    {
    case EXPR_SINGLE:
      inchash::add_expr (expr->ops.single.rhs, hstate);
      break;

    case EXPR_UNARY:
      hstate.add_object (expr->ops.unary.op);

      /* Make sure to include signedness in the hash computation.
         Don't hash the type, that can lead to having nodes which
         compare equal according to operand_equal_p, but which
         have different hash codes.  */
      if (CONVERT_EXPR_CODE_P (expr->ops.unary.op)
          || expr->ops.unary.op == NON_LVALUE_EXPR)
        hstate.add_int (TYPE_UNSIGNED (expr->type));

      inchash::add_expr (expr->ops.unary.opnd, hstate);
      break;

    case EXPR_BINARY:
      hstate.add_object (expr->ops.binary.op);
      if (commutative_tree_code (expr->ops.binary.op))
        inchash::add_expr_commutative (expr->ops.binary.opnd0,
                                       expr->ops.binary.opnd1, hstate);
      else
        {
          inchash::add_expr (expr->ops.binary.opnd0, hstate);
          inchash::add_expr (expr->ops.binary.opnd1, hstate);
        }
      break;

    case EXPR_TERNARY:
      hstate.add_object (expr->ops.ternary.op);
      if (commutative_ternary_tree_code (expr->ops.ternary.op))
        inchash::add_expr_commutative (expr->ops.ternary.opnd0,
                                       expr->ops.ternary.opnd1, hstate);
      else
        {
          inchash::add_expr (expr->ops.ternary.opnd0, hstate);
          inchash::add_expr (expr->ops.ternary.opnd1, hstate);
        }
      inchash::add_expr (expr->ops.ternary.opnd2, hstate);
      break;

    case EXPR_CALL:
      {
        size_t i;
        enum tree_code code = CALL_EXPR;
        gcall *fn_from;

        hstate.add_object (code);
        fn_from = expr->ops.call.fn_from;
        if (gimple_call_internal_p (fn_from))
          hstate.merge_hash ((hashval_t) gimple_call_internal_fn (fn_from));
        else
          inchash::add_expr (gimple_call_fn (fn_from), hstate);
        for (i = 0; i < expr->ops.call.nargs; i++)
          inchash::add_expr (expr->ops.call.args[i], hstate);
      }
      break;

    case EXPR_PHI:
      {
        size_t i;

        for (i = 0; i < expr->ops.phi.nargs; i++)
          inchash::add_expr (expr->ops.phi.args[i], hstate);
      }
      break;

    default:
      gcc_unreachable ();
    }
}

}
/* Print a diagnostic dump of an expression hash table entry.  */

static void
print_expr_hash_elt (FILE * stream, const struct expr_hash_elt *element)
{
  fprintf (stream, "STMT ");

  if (element->lhs)
    {
      print_generic_expr (stream, element->lhs, 0);
      fprintf (stream, " = ");
    }

  switch (element->expr.kind)
    {
    case EXPR_SINGLE:
      print_generic_expr (stream, element->expr.ops.single.rhs, 0);
      break;

    case EXPR_UNARY:
      fprintf (stream, "%s ", get_tree_code_name (element->expr.ops.unary.op));
      print_generic_expr (stream, element->expr.ops.unary.opnd, 0);
      break;

    case EXPR_BINARY:
      print_generic_expr (stream, element->expr.ops.binary.opnd0, 0);
      fprintf (stream, " %s ", get_tree_code_name (element->expr.ops.binary.op));
      print_generic_expr (stream, element->expr.ops.binary.opnd1, 0);
      break;

    case EXPR_TERNARY:
      fprintf (stream, " %s <", get_tree_code_name (element->expr.ops.ternary.op));
      print_generic_expr (stream, element->expr.ops.ternary.opnd0, 0);
      fputs (", ", stream);
      print_generic_expr (stream, element->expr.ops.ternary.opnd1, 0);
      fputs (", ", stream);
      print_generic_expr (stream, element->expr.ops.ternary.opnd2, 0);
      fputs (">", stream);
      break;

    case EXPR_CALL:
      {
        size_t i;
        size_t nargs = element->expr.ops.call.nargs;
        gcall *fn_from;

        fn_from = element->expr.ops.call.fn_from;
        if (gimple_call_internal_p (fn_from))
          fputs (internal_fn_name (gimple_call_internal_fn (fn_from)),
                 stream);
        else
          print_generic_expr (stream, gimple_call_fn (fn_from), 0);
        fprintf (stream, " (");
        for (i = 0; i < nargs; i++)
          {
            print_generic_expr (stream, element->expr.ops.call.args[i], 0);
            if (i + 1 < nargs)
              fprintf (stream, ", ");
          }
        fprintf (stream, ")");
      }
      break;

    case EXPR_PHI:
      {
        size_t i;
        size_t nargs = element->expr.ops.phi.nargs;

        fprintf (stream, "PHI <");
        for (i = 0; i < nargs; i++)
          {
            print_generic_expr (stream, element->expr.ops.phi.args[i], 0);
            if (i + 1 < nargs)
              fprintf (stream, ", ");
          }
        fprintf (stream, ">");
      }
      break;
    }

  if (element->vop)
    {
      fprintf (stream, " with ");
      print_generic_expr (stream, element->vop, 0);
    }

  fprintf (stream, "\n");
}
/* Delete variable sized pieces of the expr_hash_elt ELEMENT.  */

static void
free_expr_hash_elt_contents (struct expr_hash_elt *element)
{
  if (element->expr.kind == EXPR_CALL)
    free (element->expr.ops.call.args);
  else if (element->expr.kind == EXPR_PHI)
    free (element->expr.ops.phi.args);
}
/* Delete an expr_hash_elt and reclaim its storage.  */

static void
free_expr_hash_elt (void *elt)
{
  struct expr_hash_elt *element = ((struct expr_hash_elt *)elt);
  free_expr_hash_elt_contents (element);
  free (element);
}
/* Allocate an EDGE_INFO for edge E and attach it to E.
   Return the new EDGE_INFO structure.  */

static struct edge_info *
allocate_edge_info (edge e)
{
  struct edge_info *edge_info;

  edge_info = XCNEW (struct edge_info);

  e->aux = edge_info;
  return edge_info;
}
/* Free all EDGE_INFO structures associated with edges in the CFG.
   If a particular edge can be threaded, copy the redirection
   target from the EDGE_INFO structure into the edge's AUX field
   as required by code to update the CFG and SSA graph for
   jump threading.  */

static void
free_all_edge_infos (void)
{
  basic_block bb;
  edge_iterator ei;
  edge e;

  FOR_EACH_BB_FN (bb, cfun)
    {
      FOR_EACH_EDGE (e, ei, bb->preds)
        {
          struct edge_info *edge_info = (struct edge_info *) e->aux;

          if (edge_info)
            {
              edge_info->cond_equivalences.release ();
              free (edge_info);
              e->aux = NULL;
            }
        }
    }
}
class dom_opt_dom_walker : public dom_walker
{
public:
  dom_opt_dom_walker (cdi_direction direction)
    : dom_walker (direction), m_dummy_cond (NULL) {}

  virtual void before_dom_children (basic_block);
  virtual void after_dom_children (basic_block);

private:
  void thread_across_edge (edge);

  gcond *m_dummy_cond;
};
/* Jump threading, redundancy elimination and const/copy propagation.

   This pass may expose new symbols that need to be renamed into SSA.  For
   every new symbol exposed, its corresponding bit will be set in
   VARS_TO_RENAME.  */

namespace {

const pass_data pass_data_dominator =
{
  GIMPLE_PASS, /* type */
  "dom", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_SSA_DOMINATOR_OPTS, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
};
class pass_dominator : public gimple_opt_pass
{
public:
  pass_dominator (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_dominator, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_dominator (m_ctxt); }
  virtual bool gate (function *) { return flag_tree_dom != 0; }
  virtual unsigned int execute (function *);

}; // class pass_dominator
unsigned int
pass_dominator::execute (function *fun)
{
  memset (&opt_stats, 0, sizeof (opt_stats));

  /* Create our hash tables.  */
  avail_exprs = new hash_table<expr_elt_hasher> (1024);
  avail_exprs_stack.create (20);
  const_and_copies_stack.create (20);
  need_eh_cleanup = BITMAP_ALLOC (NULL);

  calculate_dominance_info (CDI_DOMINATORS);
  cfg_altered = false;

  /* We need to know loop structures in order to avoid destroying them
     in jump threading.  Note that we still can e.g. thread through loop
     headers to an exit edge, or through loop header to the loop body, assuming
     that we update the loop info.

     TODO: We don't need to set LOOPS_HAVE_PREHEADERS generally, but due
     to several overly conservative bail-outs in jump threading, case
     gcc.dg/tree-ssa/pr21417.c can't be threaded if loop preheader is
     missing.  We should improve jump threading in the future; then
     LOOPS_HAVE_PREHEADERS won't be needed here.  */
  loop_optimizer_init (LOOPS_HAVE_PREHEADERS | LOOPS_HAVE_SIMPLE_LATCHES);

  /* Initialize the value-handle array.  */
  threadedge_initialize_values ();

  /* We need accurate information regarding back edges in the CFG
     for jump threading; this may include back edges that are not part of
     a single loop.  */
  mark_dfs_back_edges ();

  /* Recursively walk the dominator tree optimizing statements.  */
  dom_opt_dom_walker (CDI_DOMINATORS).walk (fun->cfg->x_entry_block_ptr);

  {
    gimple_stmt_iterator gsi;
    basic_block bb;
    FOR_EACH_BB_FN (bb, fun)
      {
        for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
          update_stmt_if_modified (gsi_stmt (gsi));
      }
  }

  /* If we exposed any new variables, go ahead and put them into
     SSA form now, before we handle jump threading.  This simplifies
     interactions between rewriting of _DECL nodes into SSA form
     and rewriting SSA_NAME nodes into SSA form after block
     duplication and CFG manipulation.  */
  update_ssa (TODO_update_ssa);

  free_all_edge_infos ();

  /* Thread jumps, creating duplicate blocks as needed.  */
  cfg_altered |= thread_through_all_blocks (first_pass_instance);

  if (cfg_altered)
    free_dominance_info (CDI_DOMINATORS);

  /* Removal of statements may make some EH edges dead.  Purge
     such edges from the CFG as needed.  */
  if (!bitmap_empty_p (need_eh_cleanup))
    {
      unsigned i;
      bitmap_iterator bi;

      /* Jump threading may have created forwarder blocks from blocks
         needing EH cleanup; the new successor of these blocks, which
         has inherited from the original block, needs the cleanup.
         Don't clear bits in the bitmap, as that can break the bitmap
         iterator.  */
      EXECUTE_IF_SET_IN_BITMAP (need_eh_cleanup, 0, i, bi)
        {
          basic_block bb = BASIC_BLOCK_FOR_FN (fun, i);
          if (bb == NULL)
            continue;
          while (single_succ_p (bb)
                 && (single_succ_edge (bb)->flags & EDGE_EH) == 0)
            bb = single_succ (bb);
          if (bb == EXIT_BLOCK_PTR_FOR_FN (fun))
            continue;
          if ((unsigned) bb->index != i)
            bitmap_set_bit (need_eh_cleanup, bb->index);
        }

      gimple_purge_all_dead_eh_edges (need_eh_cleanup);
      bitmap_clear (need_eh_cleanup);
    }

  statistics_counter_event (fun, "Redundant expressions eliminated",
                            opt_stats.num_re);
  statistics_counter_event (fun, "Constants propagated",
                            opt_stats.num_const_prop);
  statistics_counter_event (fun, "Copies propagated",
                            opt_stats.num_copy_prop);

  /* Debugging dumps.  */
  if (dump_file && (dump_flags & TDF_STATS))
    dump_dominator_optimization_stats (dump_file);

  loop_optimizer_finalize ();

  /* Delete our main hashtable.  */
  delete avail_exprs;
  avail_exprs = NULL;

  /* Free asserted bitmaps and stacks.  */
  BITMAP_FREE (need_eh_cleanup);

  avail_exprs_stack.release ();
  const_and_copies_stack.release ();

  /* Free the value-handle array.  */
  threadedge_finalize_values ();

  return 0;
}
} // anon namespace

gimple_opt_pass *
make_pass_dominator (gcc::context *ctxt)
{
  return new pass_dominator (ctxt);
}
/* Given a conditional statement CONDSTMT, convert the
   condition to a canonical form.  */

static void
canonicalize_comparison (gcond *condstmt)
{
  tree op0;
  tree op1;
  enum tree_code code;

  gcc_assert (gimple_code (condstmt) == GIMPLE_COND);

  op0 = gimple_cond_lhs (condstmt);
  op1 = gimple_cond_rhs (condstmt);

  code = gimple_cond_code (condstmt);

  /* If it would be profitable to swap the operands, then do so to
     canonicalize the statement, enabling better optimization.

     By placing canonicalization of such expressions here we
     transparently keep statements in canonical form, even
     when the statement is modified.  */
  if (tree_swap_operands_p (op0, op1, false))
    {
      /* For relationals we need to swap the operands
         and change the code.  */
      if (code == LT_EXPR
          || code == GT_EXPR
          || code == LE_EXPR
          || code == GE_EXPR)
        {
          code = swap_tree_comparison (code);

          gimple_cond_set_code (condstmt, code);
          gimple_cond_set_lhs (condstmt, op1);
          gimple_cond_set_rhs (condstmt, op0);

          update_stmt (condstmt);
        }
    }
}
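
/* For example (illustrative): "if (5 > a_1)" becomes "if (a_1 < 5)",
   so both spellings of the test map to one canonical form in the
   expression hash table.  */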
/* Remove the expressions made available in the current block from
   AVAIL_EXPRS, unwinding the stack until we hit this block's marker.  */

static void
remove_local_expressions_from_table (void)
{
  /* Remove all the expressions made available in this block.  */
  while (avail_exprs_stack.length () > 0)
    {
      std::pair<expr_hash_elt_t, expr_hash_elt_t> victim
        = avail_exprs_stack.pop ();
      expr_hash_elt **slot;

      if (victim.first == NULL)
        break;

      /* This must precede the actual removal from the hash table,
         as ELEMENT and the table entry may share a call argument
         vector which will be freed during removal.  */
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "<<<< ");
          print_expr_hash_elt (dump_file, victim.first);
        }

      slot = avail_exprs->find_slot (victim.first, NO_INSERT);
      gcc_assert (slot && *slot == victim.first);
      if (victim.second != NULL)
        {
          free_expr_hash_elt (*slot);
          *slot = victim.second;
        }
      else
        avail_exprs->clear_slot (slot);
    }
}
/* Use the source/dest pairs in CONST_AND_COPIES_STACK to restore
   CONST_AND_COPIES to its original state, stopping when we hit a
   NULL marker.  */

static void
restore_vars_to_original_value (void)
{
  while (const_and_copies_stack.length () > 0)
    {
      tree prev_value, dest;

      dest = const_and_copies_stack.pop ();

      if (dest == NULL)
        break;

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "<<<< COPY ");
          print_generic_expr (dump_file, dest, 0);
          fprintf (dump_file, " = ");
          print_generic_expr (dump_file, SSA_NAME_VALUE (dest), 0);
          fprintf (dump_file, "\n");
        }

      prev_value = const_and_copies_stack.pop ();
      set_ssa_name_value (dest, prev_value);
    }
}
/* A trivial wrapper so that we can present the generic jump
   threading code with a simple API for simplifying statements.  */
static tree
simplify_stmt_for_jump_threading (gimple stmt,
                                  gimple within_stmt ATTRIBUTE_UNUSED)
{
  return lookup_avail_expr (stmt, false);
}
/* Record into the equivalence tables any equivalences implied by
   traversing edge E (which are cached in E->aux).

   Callers are responsible for managing the unwinding markers.  */
static void
record_temporary_equivalences (edge e)
{
  int i;
  struct edge_info *edge_info = (struct edge_info *) e->aux;

  /* If we have info associated with this edge, record it into
     our equivalence tables.  */
  if (edge_info)
    {
      cond_equivalence *eq;
      tree lhs = edge_info->lhs;
      tree rhs = edge_info->rhs;

      /* If we have a simple NAME = VALUE equivalence, record it.  */
      if (lhs && TREE_CODE (lhs) == SSA_NAME)
        record_const_or_copy (lhs, rhs);

      /* If we have 0 = COND or 1 = COND equivalences, record them
         into our expression hash tables.  */
      for (i = 0; edge_info->cond_equivalences.iterate (i, &eq); ++i)
        record_cond (eq);
    }
}
/* Wrapper for common code to attempt to thread an edge.  For example,
   it handles lazily building the dummy condition and the bookkeeping
   when jump threading is successful.  */
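
/* Illustrative example: if the edge being traversed comes from the true
   arm of "if (a_1 > 0)" and leads into a block that begins with the very
   same test, the equivalences recorded here let the threading code
   resolve the second test statically, so control can be threaded past
   that block.  */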
void
dom_opt_dom_walker::thread_across_edge (edge e)
{
  if (! m_dummy_cond)
    m_dummy_cond =
      gimple_build_cond (NE_EXPR,
                         integer_zero_node, integer_zero_node,
                         NULL, NULL);

  /* Push a marker on both stacks so we can unwind the tables back to their
     current state.  */
  avail_exprs_stack.safe_push
    (std::pair<expr_hash_elt_t, expr_hash_elt_t> (NULL, NULL));
  const_and_copies_stack.safe_push (NULL_TREE);

  /* Traversing E may result in equivalences we can utilize.  */
  record_temporary_equivalences (e);

  /* With all the edge equivalences in the tables, go ahead and attempt
     to thread through E->dest.  */
  ::thread_across_edge (m_dummy_cond, e, false,
                        &const_and_copies_stack,
                        simplify_stmt_for_jump_threading);

  /* And restore the various tables to their state before
     we threaded this edge.

     XXX The code in tree-ssa-threadedge.c will restore the state of
     the const_and_copies table.  We just have to restore the expression
     table.  */
  remove_local_expressions_from_table ();
}
/* PHI nodes can create equivalences too.

   Ignoring any alternatives which are the same as the result, if
   all the alternatives are equal, then the PHI node creates an
   equivalence.  */
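
/* For instance (illustrative): in "x_3 = PHI <y_2, y_2, x_3>" every
   alternative other than the result itself is y_2, so x_3 can be
   treated as equivalent to y_2.  */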
static void
record_equivalences_from_phis (basic_block bb)
{
  gphi_iterator gsi;

  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gphi *phi = gsi.phi ();

      tree lhs = gimple_phi_result (phi);
      tree rhs = NULL;
      size_t i;

      for (i = 0; i < gimple_phi_num_args (phi); i++)
        {
          tree t = gimple_phi_arg_def (phi, i);

          /* Ignore alternatives which are the same as our LHS.  Since
             LHS is a PHI_RESULT, it is known to be a SSA_NAME, so we
             can simply compare pointers.  */
          if (lhs == t)
            continue;

          /* If we have not processed an alternative yet, then set
             RHS to this alternative.  */
          if (rhs == NULL)
            rhs = t;
          /* If we have processed an alternative (stored in RHS), then
             see if it is equal to this one.  If it isn't, then stop
             the search.  */
          else if (! operand_equal_for_phi_arg_p (rhs, t))
            break;
        }

      /* If we had no interesting alternatives, then all the RHS alternatives
         must have been the same as LHS.  */
      if (!rhs)
        rhs = lhs;

      /* If we managed to iterate through each PHI alternative without
         breaking out of the loop, then we have a PHI which may create
         a useful equivalence.  We do not need to record unwind data for
         this, since this is a true assignment and not an equivalence
         inferred from a comparison.  All uses of this ssa name are dominated
         by this assignment, so unwinding just costs time and space.  */
      if (i == gimple_phi_num_args (phi)
          && may_propagate_copy (lhs, rhs))
        set_ssa_name_value (lhs, rhs);
    }
}
/* Ignoring loop backedges, if BB has precisely one incoming edge then
   return that edge.  Otherwise return NULL.  */
static edge
single_incoming_edge_ignoring_loop_edges (basic_block bb)
{
  edge retval = NULL;
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      /* A loop back edge can be identified by the destination of
         the edge dominating the source of the edge.  */
      if (dominated_by_p (CDI_DOMINATORS, e->src, e->dest))
        continue;

      /* If we have already seen a non-loop edge, then we must have
         multiple incoming non-loop edges and thus we return NULL.  */
      if (retval)
        return NULL;

      /* This is the first non-loop incoming edge we have found.  Record
         it.  */
      retval = e;
    }

  return retval;
}
/* Record any equivalences created by the incoming edge to BB.  If BB
   has more than one incoming edge, then no equivalence is created.  */

static void
record_equivalences_from_incoming_edge (basic_block bb)
{
  edge e;
  basic_block parent;
  struct edge_info *edge_info;

  /* If our parent block ended with a control statement, then we may be
     able to record some equivalences based on which outgoing edge from
     the parent was followed.  */
  parent = get_immediate_dominator (CDI_DOMINATORS, bb);

  e = single_incoming_edge_ignoring_loop_edges (bb);

  /* If we had a single incoming edge from our parent block, then enter
     any data associated with the edge into our tables.  */
  if (e && e->src == parent)
    {
      unsigned int i;

      edge_info = (struct edge_info *) e->aux;

      if (edge_info)
        {
          tree lhs = edge_info->lhs;
          tree rhs = edge_info->rhs;
          cond_equivalence *eq;

          if (lhs)
            record_equality (lhs, rhs);

          /* If LHS is an SSA_NAME and RHS is a constant integer and LHS was
             set via a widening type conversion, then we may be able to record
             additional equivalences.  */
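          /* Illustrative example: given "x_1 = (int) c_2" where c_2 is a
             char, an edge asserting x_1 == 10 also implies c_2 == 10,
             because 10 fits in the narrower type.  */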
          if (lhs
              && TREE_CODE (lhs) == SSA_NAME
              && is_gimple_constant (rhs)
              && TREE_CODE (rhs) == INTEGER_CST)
            {
              gimple defstmt = SSA_NAME_DEF_STMT (lhs);

              if (defstmt
                  && is_gimple_assign (defstmt)
                  && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (defstmt)))
                {
                  tree old_rhs = gimple_assign_rhs1 (defstmt);

                  /* If the conversion widens the original value and
                     the constant is in the range of the type of OLD_RHS,
                     then convert the constant and record the equivalence.

                     Note that int_fits_type_p does not check the precision
                     if the upper and lower bounds are OK.  */
                  if (INTEGRAL_TYPE_P (TREE_TYPE (old_rhs))
                      && (TYPE_PRECISION (TREE_TYPE (lhs))
                          > TYPE_PRECISION (TREE_TYPE (old_rhs)))
                      && int_fits_type_p (rhs, TREE_TYPE (old_rhs)))
                    {
                      tree newval = fold_convert (TREE_TYPE (old_rhs), rhs);
                      record_equality (old_rhs, newval);
                    }
                }
            }

          for (i = 0; edge_info->cond_equivalences.iterate (i, &eq); ++i)
            record_cond (eq);
        }
    }
}
/* Dump SSA statistics on FILE.  */

void
dump_dominator_optimization_stats (FILE *file)
{
  fprintf (file, "Total number of statements:                   %6ld\n\n",
           opt_stats.num_stmts);
  fprintf (file, "Exprs considered for dominator optimizations: %6ld\n",
           opt_stats.num_exprs_considered);

  fprintf (file, "\nHash table statistics:\n");

  fprintf (file, "    avail_exprs: ");
  htab_statistics (file, *avail_exprs);
}
/* Dump SSA statistics on stderr.  */

DEBUG_FUNCTION void
debug_dominator_optimization_stats (void)
{
  dump_dominator_optimization_stats (stderr);
}
/* Dump statistics for the hash table HTAB.  */

static void
htab_statistics (FILE *file, const hash_table<expr_elt_hasher> &htab)
{
  fprintf (file, "size %ld, %ld elements, %f collision/search ratio\n",
           (long) htab.size (),
           (long) htab.elements (),
           htab.collisions ());
}
/* Enter condition equivalence into the expression hash table.
   This indicates that a conditional expression has a known
   boolean value.  */

static void
record_cond (cond_equivalence *p)
{
  struct expr_hash_elt *element = XCNEW (struct expr_hash_elt);
  expr_hash_elt **slot;

  initialize_hash_element_from_expr (&p->cond, p->value, element);

  slot = avail_exprs->find_slot_with_hash (element, element->hash, INSERT);
  if (*slot == NULL)
    {
      *slot = element;

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "1>>> ");
          print_expr_hash_elt (dump_file, element);
        }

      avail_exprs_stack.safe_push
        (std::pair<expr_hash_elt_t, expr_hash_elt_t> (element, NULL));
    }
  else
    free_expr_hash_elt (element);
}
/* Build a cond_equivalence record indicating that the comparison
   CODE holds between operands OP0 and OP1 and push it to *P.  */

static void
build_and_record_new_cond (enum tree_code code,
                           tree op0, tree op1,
                           vec<cond_equivalence> *p)
{
  cond_equivalence c;
  struct hashable_expr *cond = &c.cond;

  gcc_assert (TREE_CODE_CLASS (code) == tcc_comparison);

  cond->type = boolean_type_node;
  cond->kind = EXPR_BINARY;
  cond->ops.binary.op = code;
  cond->ops.binary.opnd0 = op0;
  cond->ops.binary.opnd1 = op1;

  c.value = boolean_true_node;
  p->safe_push (c);
}
/* Record that COND is true and INVERTED is false into the edge information
   structure.  Also record that any conditions dominated by COND are true
   as well.

   For example, if a < b is true, then a <= b must also be true.  */

static void
record_conditions (struct edge_info *edge_info, tree cond, tree inverted)
{
  tree op0, op1;
  cond_equivalence c;

  if (!COMPARISON_CLASS_P (cond))
    return;

  op0 = TREE_OPERAND (cond, 0);
  op1 = TREE_OPERAND (cond, 1);

  switch (TREE_CODE (cond))
    {
    case LT_EXPR:
    case GT_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
        {
          build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
          build_and_record_new_cond (LTGT_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
        }

      build_and_record_new_cond ((TREE_CODE (cond) == LT_EXPR
                                  ? LE_EXPR : GE_EXPR),
                                 op0, op1, &edge_info->cond_equivalences);
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case GE_EXPR:
    case LE_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
        {
          build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
        }
      break;

    case EQ_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
        {
          build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
        }
      build_and_record_new_cond (LE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (GE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case UNORDERED_EXPR:
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNLE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNEQ_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNLT_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGT_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case UNLT_EXPR:
    case UNGT_EXPR:
      build_and_record_new_cond ((TREE_CODE (cond) == UNLT_EXPR
                                  ? UNLE_EXPR : UNGE_EXPR),
                                 op0, op1, &edge_info->cond_equivalences);
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case UNEQ_EXPR:
      build_and_record_new_cond (UNLE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case LTGT_EXPR:
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    default:
      break;
    }

  /* Now record the original true and false conditions themselves.  */
  initialize_expr_from_cond (cond, &c.cond);
  c.value = boolean_true_node;
  edge_info->cond_equivalences.safe_push (c);

  /* It is possible for INVERTED to be the negation of a comparison,
     and not a valid RHS or GIMPLE_COND condition.  This happens because
     invert_truthvalue may return such an expression when asked to invert
     a floating-point comparison.  These comparisons are not assumed to
     obey the trichotomy law.  */
  initialize_expr_from_cond (inverted, &c.cond);
  c.value = boolean_false_node;
  edge_info->cond_equivalences.safe_push (c);
}
/* A helper function for record_const_or_copy and record_equality.
   Do the work of recording the value and undo info.  */

static void
record_const_or_copy_1 (tree x, tree y, tree prev_x)
{
  set_ssa_name_value (x, y);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "0>>> COPY ");
      print_generic_expr (dump_file, x, 0);
      fprintf (dump_file, " = ");
      print_generic_expr (dump_file, y, 0);
      fprintf (dump_file, "\n");
    }

  const_and_copies_stack.reserve (2);
  const_and_copies_stack.quick_push (prev_x);
  const_and_copies_stack.quick_push (x);
}
/* Record that X is equal to Y in const_and_copies.  Record undo
   information in the block-local vector.  */

static void
record_const_or_copy (tree x, tree y)
{
  tree prev_x = SSA_NAME_VALUE (x);

  gcc_assert (TREE_CODE (x) == SSA_NAME);

  if (TREE_CODE (y) == SSA_NAME)
    {
      tree tmp = SSA_NAME_VALUE (y);
      if (tmp)
        y = tmp;
    }

  record_const_or_copy_1 (x, y, prev_x);
}
/* Return the loop depth of the basic block of the defining statement of X.
   This number should not be treated as absolutely correct because the loop
   information may not be completely up-to-date when dom runs.  However, it
   will be relatively correct, and as more passes are taught to keep loop info
   up to date, the result will become more and more accurate.  */

static int
loop_depth_of_name (tree x)
{
  gimple defstmt;
  basic_block defbb;

  /* If it's not an SSA_NAME, we have no clue where the definition is.  */
  if (TREE_CODE (x) != SSA_NAME)
    return 0;

  /* Otherwise return the loop depth of the defining statement's bb.
     Note that there may not actually be a bb for this statement, if the
     ssa_name is live on entry.  */
  defstmt = SSA_NAME_DEF_STMT (x);
  defbb = gimple_bb (defstmt);
  if (!defbb)
    return 0;

  return bb_loop_depth (defbb);
}
/* Similarly, but assume that X and Y are the two operands of an EQ_EXPR.
   This constrains the cases in which we may treat this as assignment.  */

static void
record_equality (tree x, tree y)
{
  tree prev_x = NULL, prev_y = NULL;

  if (TREE_CODE (x) == SSA_NAME)
    prev_x = SSA_NAME_VALUE (x);
  if (TREE_CODE (y) == SSA_NAME)
    prev_y = SSA_NAME_VALUE (y);

  /* If one of the previous values is invariant, or invariant in more loops
     (by depth), then use that.
     Otherwise it doesn't matter which value we choose, just so
     long as we canonicalize on one value.  */
  if (is_gimple_min_invariant (y))
    ;
  else if (is_gimple_min_invariant (x)
           /* ???  When threading over backedges the following is important
              for correctness.  See PR61757.  */
           || (loop_depth_of_name (x) <= loop_depth_of_name (y)))
    prev_x = x, x = y, y = prev_x, prev_x = prev_y;
  else if (prev_x && is_gimple_min_invariant (prev_x))
    x = y, y = prev_x, prev_x = prev_y;
  else if (prev_y)
    y = prev_y;

  /* After the swapping, we must have one SSA_NAME.  */
  if (TREE_CODE (x) != SSA_NAME)
    return;

  /* For IEEE, -0.0 == 0.0, so we don't necessarily know the sign of a
     variable compared against zero.  If we're honoring signed zeros,
     then we cannot record this value unless we know that the value is
     nonzero.  */
  if (HONOR_SIGNED_ZEROS (x)
      && (TREE_CODE (y) != REAL_CST
          || REAL_VALUES_EQUAL (dconst0, TREE_REAL_CST (y))))
    return;

  record_const_or_copy_1 (x, y, prev_x);
}
/* Returns true when STMT is a simple iv increment.  It detects the
   following situation:

   i_1 = phi (..., i_2)
   i_2 = i_1 +/- ...  */

bool
simple_iv_increment_p (gimple stmt)
{
  enum tree_code code;
  tree lhs, preinc;
  gimple phi;
  size_t i;

  if (gimple_code (stmt) != GIMPLE_ASSIGN)
    return false;

  lhs = gimple_assign_lhs (stmt);
  if (TREE_CODE (lhs) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (code != PLUS_EXPR
      && code != MINUS_EXPR
      && code != POINTER_PLUS_EXPR)
    return false;

  preinc = gimple_assign_rhs1 (stmt);
  if (TREE_CODE (preinc) != SSA_NAME)
    return false;

  phi = SSA_NAME_DEF_STMT (preinc);
  if (gimple_code (phi) != GIMPLE_PHI)
    return false;

  for (i = 0; i < gimple_phi_num_args (phi); i++)
    if (gimple_phi_arg_def (phi, i) == lhs)
      return true;

  return false;
}
/* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
   known value for that SSA_NAME (or NULL if no value is known).

   Propagate values from CONST_AND_COPIES into the PHI nodes of the
   successors of BB.  */
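
/* For example (illustrative): if the edge from BB carries x_1 == 7 and
   a successor block has "y_2 = PHI <x_1 (BB), ...>", the x_1 argument
   is replaced with 7, even when the successor is not dominated by BB.  */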
static void
cprop_into_successor_phis (basic_block bb)
{
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      int indx;
      gphi_iterator gsi;

      /* If this is an abnormal edge, then we do not want to copy propagate
         into the PHI alternative associated with this edge.  */
      if (e->flags & EDGE_ABNORMAL)
        continue;

      gsi = gsi_start_phis (e->dest);
      if (gsi_end_p (gsi))
        continue;

      /* We may have an equivalence associated with this edge.  While
         we cannot propagate equivalences into non-dominated blocks, we
         can propagate them into PHIs in non-dominated blocks.  */

      /* Push the unwind marker so we can reset the const and copies
         table back to its original state after processing this edge.  */
      const_and_copies_stack.safe_push (NULL_TREE);

      /* Extract and record any simple NAME = VALUE equivalences.

         Don't bother with [01] = COND equivalences, they're not useful
         here.  */
      struct edge_info *edge_info = (struct edge_info *) e->aux;
      if (edge_info)
        {
          tree lhs = edge_info->lhs;
          tree rhs = edge_info->rhs;

          if (lhs && TREE_CODE (lhs) == SSA_NAME)
            record_const_or_copy (lhs, rhs);
        }

      indx = e->dest_idx;
      for ( ; !gsi_end_p (gsi); gsi_next (&gsi))
        {
          tree new_val;
          use_operand_p orig_p;
          tree orig_val;
          gphi *phi = gsi.phi ();

          /* The alternative may be associated with a constant, so verify
             it is an SSA_NAME before doing anything with it.  */
          orig_p = gimple_phi_arg_imm_use_ptr (phi, indx);
          orig_val = get_use_from_ptr (orig_p);
          if (TREE_CODE (orig_val) != SSA_NAME)
            continue;

          /* If we have *ORIG_P in our constant/copy table, then replace
             ORIG_P with its value in our constant/copy table.  */
          new_val = SSA_NAME_VALUE (orig_val);
          if (new_val
              && new_val != orig_val
              && (TREE_CODE (new_val) == SSA_NAME
                  || is_gimple_min_invariant (new_val))
              && may_propagate_copy (orig_val, new_val))
            propagate_value (orig_p, new_val);
        }

      restore_vars_to_original_value ();
    }
}
/* We have finished optimizing BB, record any information implied by
   taking a specific outgoing edge from BB.  */
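
/* As an illustration: after "switch (i_1)", the edge to a block reached
   only by "case 5:" records the equivalence i_1 == 5; after
   "if (a_2 < b_3)", the true edge records a_2 < b_3 (and the conditions
   it implies) as true.  */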
static void
record_edge_info (basic_block bb)
{
  gimple_stmt_iterator gsi = gsi_last_bb (bb);
  struct edge_info *edge_info;

  if (! gsi_end_p (gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      location_t loc = gimple_location (stmt);

      if (gimple_code (stmt) == GIMPLE_SWITCH)
        {
          gswitch *switch_stmt = as_a <gswitch *> (stmt);
          tree index = gimple_switch_index (switch_stmt);

          if (TREE_CODE (index) == SSA_NAME)
            {
              int i;
              int n_labels = gimple_switch_num_labels (switch_stmt);
              tree *info = XCNEWVEC (tree, last_basic_block_for_fn (cfun));
              edge e;
              edge_iterator ei;

              for (i = 0; i < n_labels; i++)
                {
                  tree label = gimple_switch_label (switch_stmt, i);
                  basic_block target_bb = label_to_block (CASE_LABEL (label));
                  if (CASE_HIGH (label)
                      || !CASE_LOW (label)
                      || info[target_bb->index])
                    info[target_bb->index] = error_mark_node;
                  else
                    info[target_bb->index] = label;
                }

              FOR_EACH_EDGE (e, ei, bb->succs)
                {
                  basic_block target_bb = e->dest;
                  tree label = info[target_bb->index];

                  if (label != NULL && label != error_mark_node)
                    {
                      tree x = fold_convert_loc (loc, TREE_TYPE (index),
                                                 CASE_LOW (label));
                      edge_info = allocate_edge_info (e);
                      edge_info->lhs = index;
                      edge_info->rhs = x;
                    }
                }
              free (info);
            }
        }

      /* A COND_EXPR may create equivalences too.  */
      if (gimple_code (stmt) == GIMPLE_COND)
        {
          edge true_edge;
          edge false_edge;

          tree op0 = gimple_cond_lhs (stmt);
          tree op1 = gimple_cond_rhs (stmt);
          enum tree_code code = gimple_cond_code (stmt);

          extract_true_false_edges_from_block (bb, &true_edge, &false_edge);

          /* Special case comparing booleans against a constant as we
             know the value of OP0 on both arms of the branch.  i.e., we
             can record an equivalence for OP0 rather than COND.  */
          if ((code == EQ_EXPR || code == NE_EXPR)
              && TREE_CODE (op0) == SSA_NAME
              && TREE_CODE (TREE_TYPE (op0)) == BOOLEAN_TYPE
              && is_gimple_min_invariant (op1))
            {
              if (code == EQ_EXPR)
                {
                  edge_info = allocate_edge_info (true_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_false_node
                                    : boolean_true_node);

                  edge_info = allocate_edge_info (false_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_true_node
                                    : boolean_false_node);
                }
              else
                {
                  edge_info = allocate_edge_info (true_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_true_node
                                    : boolean_false_node);

                  edge_info = allocate_edge_info (false_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_false_node
                                    : boolean_true_node);
                }
            }
          else if (is_gimple_min_invariant (op0)
                   && (TREE_CODE (op1) == SSA_NAME
                       || is_gimple_min_invariant (op1)))
            {
              tree cond = build2 (code, boolean_type_node, op0, op1);
              tree inverted = invert_truthvalue_loc (loc, cond);
              bool can_infer_simple_equiv
                = !(HONOR_SIGNED_ZEROS (op0)
                    && real_zerop (op0));
              struct edge_info *edge_info;

              edge_info = allocate_edge_info (true_edge);
              record_conditions (edge_info, cond, inverted);

              if (can_infer_simple_equiv && code == EQ_EXPR)
                {
                  edge_info->lhs = op1;
                  edge_info->rhs = op0;
                }

              edge_info = allocate_edge_info (false_edge);
              record_conditions (edge_info, inverted, cond);

              if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
                {
                  edge_info->lhs = op1;
                  edge_info->rhs = op0;
                }
            }
          else if (TREE_CODE (op0) == SSA_NAME
                   && (TREE_CODE (op1) == SSA_NAME
                       || is_gimple_min_invariant (op1)))
            {
              tree cond = build2 (code, boolean_type_node, op0, op1);
              tree inverted = invert_truthvalue_loc (loc, cond);
              bool can_infer_simple_equiv
                = !(HONOR_SIGNED_ZEROS (op1)
                    && (TREE_CODE (op1) == SSA_NAME || real_zerop (op1)));
              struct edge_info *edge_info;

              edge_info = allocate_edge_info (true_edge);
              record_conditions (edge_info, cond, inverted);

              if (can_infer_simple_equiv && code == EQ_EXPR)
                {
                  edge_info->lhs = op0;
                  edge_info->rhs = op1;
                }

              edge_info = allocate_edge_info (false_edge);
              record_conditions (edge_info, inverted, cond);

              if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
                {
                  edge_info->lhs = op0;
                  edge_info->rhs = op1;
                }
            }
        }

      /* ???  TRUTH_NOT_EXPR can create an equivalence too.  */
    }
}
1961 void
1962 dom_opt_dom_walker::before_dom_children (basic_block bb)
1964 gimple_stmt_iterator gsi;
1966 if (dump_file && (dump_flags & TDF_DETAILS))
1967 fprintf (dump_file, "\n\nOptimizing block #%d\n\n", bb->index);
1969 /* Push a marker on the stacks of local information so that we know how
1970 far to unwind when we finalize this block. */
1971 avail_exprs_stack.safe_push
1972 (std::pair<expr_hash_elt_t, expr_hash_elt_t> (NULL, NULL));
1973 const_and_copies_stack.safe_push (NULL_TREE);
1975 record_equivalences_from_incoming_edge (bb);
1977 /* PHI nodes can create equivalences too. */
1978 record_equivalences_from_phis (bb);
1980 /* Create equivalences from redundant PHIs. PHIs are only truly
1981 redundant when they exist in the same block, so push another
1982 marker and unwind right afterwards. */
1983 avail_exprs_stack.safe_push
1984 (std::pair<expr_hash_elt_t, expr_hash_elt_t> (NULL, NULL));
1985 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1986 eliminate_redundant_computations (&gsi);
1987 remove_local_expressions_from_table ();
1989 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1990 optimize_stmt (bb, gsi);
1992 /* Now prepare to process dominated blocks. */
1993 record_edge_info (bb);
1994 cprop_into_successor_phis (bb);
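/* The markers pushed above are what make the dominator walk scoped:
   everything recorded while visiting BB and its dominated children is
   popped again when the walk leaves BB.  A minimal sketch of the idiom
   with a plain std::vector, assuming keys are never empty so an empty
   key can serve as the marker (illustrative types, not the GCC ones):

     #include <string>
     #include <utility>
     #include <vector>

     typedef std::pair<std::string, std::string> entry;

     struct scoped_table
     {
       std::vector<entry> stack;

       // An entry with an empty key is the block marker.
       void push_marker () { stack.push_back (entry ()); }

       void record (const std::string &k, const std::string &v)
       { stack.push_back (entry (k, v)); }

       // Undo every equivalence recorded since the last marker.
       void pop_to_marker ()
       {
         while (!stack.empty () && !stack.back ().first.empty ())
           stack.pop_back ();
         if (!stack.empty ())
           stack.pop_back ();    // remove the marker itself
       }
     };
*/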
1997 /* We have finished processing the dominator children of BB; perform
1998 any finalization actions in preparation for leaving this node in
1999 the dominator tree. */
2001 void
2002 dom_opt_dom_walker::after_dom_children (basic_block bb)
2004 gimple last;
2006 /* If we have an outgoing edge to a block with multiple incoming and
2007 outgoing edges, then we may be able to thread the edge, i.e., we
2008 may be able to statically determine which of the outgoing edges
2009 will be traversed when the incoming edge from BB is traversed. */
2010 if (single_succ_p (bb)
2011 && (single_succ_edge (bb)->flags & EDGE_ABNORMAL) == 0
2012 && potentially_threadable_block (single_succ (bb)))
2014 thread_across_edge (single_succ_edge (bb));
2016 else if ((last = last_stmt (bb))
2017 && gimple_code (last) == GIMPLE_COND
2018 && EDGE_COUNT (bb->succs) == 2
2019 && (EDGE_SUCC (bb, 0)->flags & EDGE_ABNORMAL) == 0
2020 && (EDGE_SUCC (bb, 1)->flags & EDGE_ABNORMAL) == 0)
2022 edge true_edge, false_edge;
2024 extract_true_false_edges_from_block (bb, &true_edge, &false_edge);
2026 /* Only try to thread the edge if it reaches a target block with
2027 more than one predecessor and more than one successor. */
2028 if (potentially_threadable_block (true_edge->dest))
2029 thread_across_edge (true_edge);
2031 /* Similarly for the ELSE arm. */
2032 if (potentially_threadable_block (false_edge->dest))
2033 thread_across_edge (false_edge);
2037 /* These remove expressions local to BB from the tables. */
2038 remove_local_expressions_from_table ();
2039 restore_vars_to_original_value ();
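/* A small example of the threading opportunity tested for above:

     bb2: if (x_1 > 10) goto bb3; else goto bb4;
     ...
     bb5: if (x_1 > 10) goto bb6; else goto bb7;

   On the path through bb3 the recorded equivalences prove the second
   condition true, so the edge into bb5 can be threaded directly to
   bb6.  A sketch of the final decision, assuming a table of known
   predicate values (illustrative, not the GCC interface):

     #include <map>
     #include <string>

     // Return the statically known successor of a block ending in
     // "if (PRED)", or -1 if the predicate value is unknown.
     static int
     taken_successor (const std::map<std::string, bool> &known,
                      const std::string &pred,
                      int true_succ, int false_succ)
     {
       std::map<std::string, bool>::const_iterator it = known.find (pred);
       if (it == known.end ())
         return -1;
       return it->second ? true_succ : false_succ;
     }
*/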
2042 /* Search for redundant computations in STMT. If any are found, then
2043 replace them with the variable holding the result of the computation.
2045 If safe, record this expression into the available expression hash
2046 table. */
2048 static void
2049 eliminate_redundant_computations (gimple_stmt_iterator* gsi)
2051 tree expr_type;
2052 tree cached_lhs;
2053 tree def;
2054 bool insert = true;
2055 bool assigns_var_p = false;
2057 gimple stmt = gsi_stmt (*gsi);
2059 if (gimple_code (stmt) == GIMPLE_PHI)
2060 def = gimple_phi_result (stmt);
2061 else
2062 def = gimple_get_lhs (stmt);
2064 /* Certain expressions on the RHS can be optimized away, but cannot
2065 themselves be entered into the hash tables. */
2066 if (! def
2067 || TREE_CODE (def) != SSA_NAME
2068 || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (def)
2069 || gimple_vdef (stmt)
2070 /* Do not record equivalences for increments of ivs. This would create
2071 overlapping live ranges for a very questionable gain. */
2072 || simple_iv_increment_p (stmt))
2073 insert = false;
2075 /* Check if the expression has been computed before. */
2076 cached_lhs = lookup_avail_expr (stmt, insert);
2078 opt_stats.num_exprs_considered++;
2080 /* Get the type of the expression we are trying to optimize. */
2081 if (is_gimple_assign (stmt))
2083 expr_type = TREE_TYPE (gimple_assign_lhs (stmt));
2084 assigns_var_p = true;
2086 else if (gimple_code (stmt) == GIMPLE_COND)
2087 expr_type = boolean_type_node;
2088 else if (is_gimple_call (stmt))
2090 gcc_assert (gimple_call_lhs (stmt));
2091 expr_type = TREE_TYPE (gimple_call_lhs (stmt));
2092 assigns_var_p = true;
2094 else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
2095 expr_type = TREE_TYPE (gimple_switch_index (swtch_stmt));
2096 else if (gimple_code (stmt) == GIMPLE_PHI)
2097 /* We can't propagate into a phi, so the logic below doesn't apply.
2098 Instead record an equivalence between the cached LHS and the
2099 PHI result of this statement, provided they are in the same block.
2100 This should be sufficient to kill the redundant phi. */
2102 if (def && cached_lhs)
2103 record_const_or_copy (def, cached_lhs);
2104 return;
2106 else
2107 gcc_unreachable ();
2109 if (!cached_lhs)
2110 return;
2112 /* It is safe to ignore types here since we have already done
2113 type checking in the hashing and equality routines. In fact
2114 type checking here merely gets in the way of constant
2115 propagation. Also, make sure that it is safe to propagate
2116 CACHED_LHS into the expression in STMT. */
2117 if ((TREE_CODE (cached_lhs) != SSA_NAME
2118 && (assigns_var_p
2119 || useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs))))
2120 || may_propagate_copy_into_stmt (stmt, cached_lhs))
2122 gcc_checking_assert (TREE_CODE (cached_lhs) == SSA_NAME
2123 || is_gimple_min_invariant (cached_lhs));
2125 if (dump_file && (dump_flags & TDF_DETAILS))
2127 fprintf (dump_file, " Replaced redundant expr '");
2128 print_gimple_expr (dump_file, stmt, 0, dump_flags);
2129 fprintf (dump_file, "' with '");
2130 print_generic_expr (dump_file, cached_lhs, dump_flags);
2131 fprintf (dump_file, "'\n");
2134 opt_stats.num_re++;
2136 if (assigns_var_p
2137 && !useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs)))
2138 cached_lhs = fold_convert (expr_type, cached_lhs);
2140 propagate_tree_value_into_stmt (gsi, cached_lhs);
2142 /* Since it is always necessary to mark the result as modified,
2143 perhaps we should move this into propagate_tree_value_into_stmt
2144 itself. */
2145 gimple_set_modified (gsi_stmt (*gsi), true);
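/* Concretely, for

     a_2 = b_1 + c_3;
     ...
     d_4 = b_1 + c_3;    // same operands, dominated by the first

   the second lookup hits and d_4 is rewritten as a copy of a_2.  A
   sketch of the lookup-or-insert discipline with std::map standing in
   for the hash table (illustrative names):

     #include <map>
     #include <string>

     // KEY is a canonical rendering of the RHS; the value is the SSA
     // name holding its result.  Return the cached LHS, or insert and
     // return the empty string.
     static std::string
     lookup_or_insert (std::map<std::string, std::string> &table,
                       const std::string &rhs, const std::string &lhs)
     {
       std::map<std::string, std::string>::iterator it = table.find (rhs);
       if (it != table.end ())
         return it->second;
       table[rhs] = lhs;
       return std::string ();
     }
*/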
2149 /* STMT, a GIMPLE_ASSIGN, may create certain equivalences, in either
2150 the available expressions table or the const_and_copies table.
2151 Detect and record those equivalences. */
2152 /* We handle only very simple copy equivalences here. The heavy
2153 lifting is done by eliminate_redundant_computations. */
2155 static void
2156 record_equivalences_from_stmt (gimple stmt, int may_optimize_p)
2158 tree lhs;
2159 enum tree_code lhs_code;
2161 gcc_assert (is_gimple_assign (stmt));
2163 lhs = gimple_assign_lhs (stmt);
2164 lhs_code = TREE_CODE (lhs);
2166 if (lhs_code == SSA_NAME
2167 && gimple_assign_single_p (stmt))
2169 tree rhs = gimple_assign_rhs1 (stmt);
2171 /* If the RHS of the assignment is a constant or another variable that
2172 may be propagated, register it in the CONST_AND_COPIES table. We
2173 do not need to record unwind data for this, since this is a true
2174 assignment and not an equivalence inferred from a comparison. All
2175 uses of this ssa name are dominated by this assignment, so unwinding
2176 just costs time and space. */
2177 if (may_optimize_p
2178 && (TREE_CODE (rhs) == SSA_NAME
2179 || is_gimple_min_invariant (rhs)))
2181 if (dump_file && (dump_flags & TDF_DETAILS))
2183 fprintf (dump_file, "==== ASGN ");
2184 print_generic_expr (dump_file, lhs, 0);
2185 fprintf (dump_file, " = ");
2186 print_generic_expr (dump_file, rhs, 0);
2187 fprintf (dump_file, "\n");
2190 set_ssa_name_value (lhs, rhs);
2194 /* Make sure we can propagate &x + CST. */
2195 if (lhs_code == SSA_NAME
2196 && gimple_assign_rhs_code (stmt) == POINTER_PLUS_EXPR
2197 && TREE_CODE (gimple_assign_rhs1 (stmt)) == ADDR_EXPR
2198 && TREE_CODE (gimple_assign_rhs2 (stmt)) == INTEGER_CST)
2200 tree op0 = gimple_assign_rhs1 (stmt);
2201 tree op1 = gimple_assign_rhs2 (stmt);
2202 tree new_rhs
2203 = build_fold_addr_expr (fold_build2 (MEM_REF,
2204 TREE_TYPE (TREE_TYPE (op0)),
2205 unshare_expr (op0),
2206 fold_convert (ptr_type_node,
2207 op1)));
2208 if (dump_file && (dump_flags & TDF_DETAILS))
2210 fprintf (dump_file, "==== ASGN ");
2211 print_generic_expr (dump_file, lhs, 0);
2212 fprintf (dump_file, " = ");
2213 print_generic_expr (dump_file, new_rhs, 0);
2214 fprintf (dump_file, "\n");
2217 set_ssa_name_value (lhs, new_rhs);
2220 /* A memory store, even an aliased store, creates a useful
2221 equivalence. By exchanging the LHS and RHS, creating suitable
2222 vops and recording the result in the available expression table,
2223 we may be able to expose more redundant loads. */
2224 if (!gimple_has_volatile_ops (stmt)
2225 && gimple_references_memory_p (stmt)
2226 && gimple_assign_single_p (stmt)
2227 && (TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME
2228 || is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
2229 && !is_gimple_reg (lhs))
2231 tree rhs = gimple_assign_rhs1 (stmt);
2232 gassign *new_stmt;
2234 /* Build a new statement with the RHS and LHS exchanged. */
2235 if (TREE_CODE (rhs) == SSA_NAME)
2237 /* NOTE tuples. The call to gimple_build_assign below replaced
2238 a call to build_gimple_modify_stmt, which did not set the
2239 SSA_NAME_DEF_STMT on the LHS of the assignment. Doing so
2240 may cause an SSA validation failure, as the LHS may be a
2241 default-initialized name and should have no definition. I'm
2242 a bit dubious of this, as the artificial statement that we
2243 generate here may in fact be ill-formed, but it is simply
2244 used as an internal device in this pass, and never becomes
2245 part of the CFG. */
2246 gimple defstmt = SSA_NAME_DEF_STMT (rhs);
2247 new_stmt = gimple_build_assign (rhs, lhs);
2248 SSA_NAME_DEF_STMT (rhs) = defstmt;
2250 else
2251 new_stmt = gimple_build_assign (rhs, lhs);
2253 gimple_set_vuse (new_stmt, gimple_vdef (stmt));
2255 /* Finally enter the statement into the available expression
2256 table. */
2257 lookup_avail_expr (new_stmt, true);
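/* The effect of the exchanged statement is easiest to see on the IL:

     *p_2 = x_1;         // store; "x_1 = *p_2" enters the table
     y_3 = *p_2;         // load; the lookup hits and yields x_1

   so the later load becomes a copy from x_1.  A sketch of recording
   the reversed pair, with std::map standing in for the hash table
   (illustrative):

     #include <map>
     #include <string>

     // Index by the memory reference so a later load of MEM_LHS finds
     // RHS as its value.
     static void
     record_store (std::map<std::string, std::string> &table,
                   const std::string &mem_lhs, const std::string &rhs)
     {
       table[mem_lhs] = rhs;
     }
*/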
2261 /* Replace *OP_P in STMT with any known equivalent value for *OP_P from
2262 CONST_AND_COPIES. */
2264 static void
2265 cprop_operand (gimple stmt, use_operand_p op_p)
2267 tree val;
2268 tree op = USE_FROM_PTR (op_p);
2270 /* If the operand has a known constant value or it is known to be a
2271 copy of some other variable, use the value or copy stored in
2272 CONST_AND_COPIES. */
2273 val = SSA_NAME_VALUE (op);
2274 if (val && val != op)
2276 /* Do not replace hard register operands in asm statements. */
2277 if (gimple_code (stmt) == GIMPLE_ASM
2278 && !may_propagate_copy_into_asm (op))
2279 return;
2281 /* Certain operands are not allowed to be copy propagated due
2282 to their interaction with exception handling and some GCC
2283 extensions. */
2284 if (!may_propagate_copy (op, val))
2285 return;
2287 /* Do not propagate copies into simple IV increment statements.
2288 See PR23821 for how this can disturb IV analysis. */
2289 if (TREE_CODE (val) != INTEGER_CST
2290 && simple_iv_increment_p (stmt))
2291 return;
2293 /* Dump details. */
2294 if (dump_file && (dump_flags & TDF_DETAILS))
2296 fprintf (dump_file, " Replaced '");
2297 print_generic_expr (dump_file, op, dump_flags);
2298 fprintf (dump_file, "' with %s '",
2299 (TREE_CODE (val) != SSA_NAME ? "constant" : "variable"));
2300 print_generic_expr (dump_file, val, dump_flags);
2301 fprintf (dump_file, "'\n");
2304 if (TREE_CODE (val) != SSA_NAME)
2305 opt_stats.num_const_prop++;
2306 else
2307 opt_stats.num_copy_prop++;
2309 propagate_value (op_p, val);
2311 /* And note that we modified this statement. This is now
2312 safe, even if we changed virtual operands since we will
2313 rescan the statement and rewrite its operands again. */
2314 gimple_set_modified (stmt, true);
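/* In effect each use operand is looked up in a name -> value map and
   substituted when a better value is known.  A standalone sketch of
   that substitution step (illustrative; the real legality checks on
   asm statements, abnormal names and IV increments are above):

     #include <map>
     #include <string>

     // Replace OP by its recorded value, if any.  Return true when a
     // substitution was made so the caller can mark the statement
     // modified.
     static bool
     cprop_one_operand (const std::map<std::string, std::string> &values,
                        std::string &op)
     {
       std::map<std::string, std::string>::const_iterator it
         = values.find (op);
       if (it == values.end () || it->second == op)
         return false;
       op = it->second;
       return true;
     }
*/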
2318 /* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
2319 known value for that SSA_NAME (or NULL if no value is known).
2321 Propagate values from CONST_AND_COPIES into the uses, vuses and
2322 vdef_ops of STMT. */
2324 static void
2325 cprop_into_stmt (gimple stmt)
2327 use_operand_p op_p;
2328 ssa_op_iter iter;
2330 FOR_EACH_SSA_USE_OPERAND (op_p, stmt, iter, SSA_OP_USE)
2331 cprop_operand (stmt, op_p);
2334 /* Optimize the statement pointed to by iterator SI.
2336 We try to perform some simplistic global redundancy elimination and
2337 constant propagation:
2339 1- To detect global redundancy, we keep track of expressions that have
2340 been computed in this block and its dominators. If we find that the
2341 same expression is computed more than once, we eliminate repeated
2342 computations by using the target of the first one.
2344 2- Constant values and copy assignments. This is used to do very
2345 simplistic constant and copy propagation. When a constant or copy
2346 assignment is found, we map the value on the RHS of the assignment to
2347 the variable in the LHS in the CONST_AND_COPIES table. */
2349 static void
2350 optimize_stmt (basic_block bb, gimple_stmt_iterator si)
2352 gimple stmt, old_stmt;
2353 bool may_optimize_p;
2354 bool modified_p = false;
2356 old_stmt = stmt = gsi_stmt (si);
2358 if (dump_file && (dump_flags & TDF_DETAILS))
2360 fprintf (dump_file, "Optimizing statement ");
2361 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2364 if (gimple_code (stmt) == GIMPLE_COND)
2365 canonicalize_comparison (as_a <gcond *> (stmt));
2367 update_stmt_if_modified (stmt);
2368 opt_stats.num_stmts++;
2370 /* Const/copy propagate into USES, VUSES and the RHS of VDEFs. */
2371 cprop_into_stmt (stmt);
2373 /* If the statement has been modified with constant replacements,
2374 fold its RHS before checking for redundant computations. */
2375 if (gimple_modified_p (stmt))
2377 tree rhs = NULL;
2379 /* Try to fold the statement making sure that STMT is kept
2380 up to date. */
2381 if (fold_stmt (&si))
2383 stmt = gsi_stmt (si);
2384 gimple_set_modified (stmt, true);
2386 if (dump_file && (dump_flags & TDF_DETAILS))
2388 fprintf (dump_file, " Folded to: ");
2389 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2393 /* We only need to consider cases that can yield a gimple operand. */
2394 if (gimple_assign_single_p (stmt))
2395 rhs = gimple_assign_rhs1 (stmt);
2396 else if (gimple_code (stmt) == GIMPLE_GOTO)
2397 rhs = gimple_goto_dest (stmt);
2398 else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
2399 /* This should never be an ADDR_EXPR. */
2400 rhs = gimple_switch_index (swtch_stmt);
2402 if (rhs && TREE_CODE (rhs) == ADDR_EXPR)
2403 recompute_tree_invariant_for_addr_expr (rhs);
2405 /* Indicate that maybe_clean_or_replace_eh_stmt needs to be called,
2406 even if fold_stmt updated the stmt already and thus cleared
2407 gimple_modified_p flag on it. */
2408 modified_p = true;
2411 /* Check for redundant computations. Do this optimization only
2412 for side-effect-free assignments, calls with an LHS, conditionals and switches. */
2413 may_optimize_p = (!gimple_has_side_effects (stmt)
2414 && (is_gimple_assign (stmt)
2415 || (is_gimple_call (stmt)
2416 && gimple_call_lhs (stmt) != NULL_TREE)
2417 || gimple_code (stmt) == GIMPLE_COND
2418 || gimple_code (stmt) == GIMPLE_SWITCH));
2420 if (may_optimize_p)
2422 if (gimple_code (stmt) == GIMPLE_CALL)
2424 /* Resolve __builtin_constant_p. If it hasn't been
2425 folded to integer_one_node by now, it's fairly
2426 certain that the value simply isn't constant. */
2427 tree callee = gimple_call_fndecl (stmt);
2428 if (callee
2429 && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL
2430 && DECL_FUNCTION_CODE (callee) == BUILT_IN_CONSTANT_P)
2432 propagate_tree_value_into_stmt (&si, integer_zero_node);
2433 stmt = gsi_stmt (si);
2437 update_stmt_if_modified (stmt);
2438 eliminate_redundant_computations (&si);
2439 stmt = gsi_stmt (si);
2441 /* Perform simple redundant store elimination. */
2442 if (gimple_assign_single_p (stmt)
2443 && TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2445 tree lhs = gimple_assign_lhs (stmt);
2446 tree rhs = gimple_assign_rhs1 (stmt);
2447 tree cached_lhs;
2448 gassign *new_stmt;
2449 if (TREE_CODE (rhs) == SSA_NAME)
2451 tree tem = SSA_NAME_VALUE (rhs);
2452 if (tem)
2453 rhs = tem;
2455 /* Build a new statement with the RHS and LHS exchanged. */
2456 if (TREE_CODE (rhs) == SSA_NAME)
2458 gimple defstmt = SSA_NAME_DEF_STMT (rhs);
2459 new_stmt = gimple_build_assign (rhs, lhs);
2460 SSA_NAME_DEF_STMT (rhs) = defstmt;
2462 else
2463 new_stmt = gimple_build_assign (rhs, lhs);
2464 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
2465 cached_lhs = lookup_avail_expr (new_stmt, false);
2466 if (cached_lhs
2467 && rhs == cached_lhs)
2469 basic_block bb = gimple_bb (stmt);
2470 unlink_stmt_vdef (stmt);
2471 if (gsi_remove (&si, true))
2473 bitmap_set_bit (need_eh_cleanup, bb->index);
2474 if (dump_file && (dump_flags & TDF_DETAILS))
2475 fprintf (dump_file, " Flagged to clear EH edges.\n");
2477 release_defs (stmt);
2478 return;
2483 /* Record any additional equivalences created by this statement. */
2484 if (is_gimple_assign (stmt))
2485 record_equivalences_from_stmt (stmt, may_optimize_p);
2487 /* If STMT is a COND_EXPR and it was modified, then we may know
2488 where it goes. If that is the case, then mark the CFG as altered.
2490 This will cause us to later call remove_unreachable_blocks and
2491 cleanup_tree_cfg when it is safe to do so. It is not safe to
2492 clean things up here since removal of edges and such can trigger
2493 the removal of PHI nodes, which in turn can release SSA_NAMEs to
2494 the manager.
2496 That's all fine and good, except that once SSA_NAMEs are released
2497 to the manager, we must not call create_ssa_name until all references
2498 to released SSA_NAMEs have been eliminated.
2500 All references to the deleted SSA_NAMEs cannot be eliminated until
2501 we remove unreachable blocks.
2503 We cannot remove unreachable blocks until after we have completed
2504 any queued jump threading.
2506 We cannot complete any queued jump threads until we have taken
2507 appropriate variables out of SSA form. Taking variables out of
2508 SSA form can call create_ssa_name and thus we lose.
2510 Ultimately I suspect we're going to need to change the interface
2511 into the SSA_NAME manager. */
2512 if (gimple_modified_p (stmt) || modified_p)
2514 tree val = NULL;
2516 update_stmt_if_modified (stmt);
2518 if (gimple_code (stmt) == GIMPLE_COND)
2519 val = fold_binary_loc (gimple_location (stmt),
2520 gimple_cond_code (stmt), boolean_type_node,
2521 gimple_cond_lhs (stmt), gimple_cond_rhs (stmt));
2522 else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
2523 val = gimple_switch_index (swtch_stmt);
2525 if (val && TREE_CODE (val) == INTEGER_CST && find_taken_edge (bb, val))
2526 cfg_altered = true;
2528 /* If we simplified a statement in such a way as to be shown that it
2529 cannot trap, update the eh information and the cfg to match. */
2530 if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt))
2532 bitmap_set_bit (need_eh_cleanup, bb->index);
2533 if (dump_file && (dump_flags & TDF_DETAILS))
2534 fprintf (dump_file, " Flagged to clear EH edges.\n");
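/* The per-statement work above happens in a fixed order: propagate
   known constants and copies into the operands, fold, look for a
   redundant computation, then record new equivalences.  A toy,
   self-contained sketch of the first two steps on an addition
   (illustrative names; the real statements are GIMPLE tuples):

     #include <map>
     #include <string>

     struct toy_stmt { std::string lhs, op0, op1; };  // lhs = op0 + op1

     // Propagate known constants into the operands, then fold when
     // both operands are constant, mirroring cprop_into_stmt followed
     // by fold_stmt.  Return true and set FOLDED on success.
     static bool
     cprop_and_fold (const std::map<std::string, long> &consts,
                     const toy_stmt &s, long &folded)
     {
       std::map<std::string, long>::const_iterator i0 = consts.find (s.op0);
       std::map<std::string, long>::const_iterator i1 = consts.find (s.op1);
       if (i0 == consts.end () || i1 == consts.end ())
         return false;
       folded = i0->second + i1->second;
       return true;
     }
*/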
2539 /* Helper for walk_non_aliased_vuses. Determine if we arrived at
2540 the desired memory state. */
2542 static void *
2543 vuse_eq (ao_ref *, tree vuse1, unsigned int cnt, void *data)
2545 tree vuse2 = (tree) data;
2546 if (vuse1 == vuse2)
2547 return data;
2549 /* This bounds the stmt walks we perform on reference lookups
2550 to O(1) instead of O(N) where N is the number of dominating
2551 stores leading to a candidate. We re-use the SCCVN param
2552 for this as it is basically the same complexity. */
2553 if (cnt > (unsigned) PARAM_VALUE (PARAM_SCCVN_MAX_ALIAS_QUERIES_PER_ACCESS))
2554 return (void *)-1;
2556 return NULL;
2559 /* Search for an existing instance of STMT in the AVAIL_EXPRS table.
2560 If found, return its LHS. Otherwise insert STMT in the table and
2561 return NULL_TREE.
2563 Also, when an expression is first inserted in the table, it is
2564 also added to AVAIL_EXPRS_STACK, so that it can be removed when
2565 we finish processing this block and its children. */
2567 static tree
2568 lookup_avail_expr (gimple stmt, bool insert)
2570 expr_hash_elt **slot;
2571 tree lhs;
2572 tree temp;
2573 struct expr_hash_elt element;
2575 /* Get LHS of phi, assignment, or call; else NULL_TREE. */
2576 if (gimple_code (stmt) == GIMPLE_PHI)
2577 lhs = gimple_phi_result (stmt);
2578 else
2579 lhs = gimple_get_lhs (stmt);
2581 initialize_hash_element (stmt, lhs, &element);
2583 if (dump_file && (dump_flags & TDF_DETAILS))
2585 fprintf (dump_file, "LKUP ");
2586 print_expr_hash_elt (dump_file, &element);
2589 /* Don't bother remembering constant assignments and copy operations.
2590 Constants and copy operations are handled by the constant/copy propagator
2591 in optimize_stmt. */
2592 if (element.expr.kind == EXPR_SINGLE
2593 && (TREE_CODE (element.expr.ops.single.rhs) == SSA_NAME
2594 || is_gimple_min_invariant (element.expr.ops.single.rhs)))
2595 return NULL_TREE;
2597 /* Finally try to find the expression in the main expression hash table. */
2598 slot = avail_exprs->find_slot (&element, (insert ? INSERT : NO_INSERT));
2599 if (slot == NULL)
2601 free_expr_hash_elt_contents (&element);
2602 return NULL_TREE;
2604 else if (*slot == NULL)
2606 struct expr_hash_elt *element2 = XNEW (struct expr_hash_elt);
2607 *element2 = element;
2608 element2->stamp = element2;
2609 *slot = element2;
2611 if (dump_file && (dump_flags & TDF_DETAILS))
2613 fprintf (dump_file, "2>>> ");
2614 print_expr_hash_elt (dump_file, element2);
2617 avail_exprs_stack.safe_push
2618 (std::pair<expr_hash_elt_t, expr_hash_elt_t> (element2, NULL));
2619 return NULL_TREE;
2622 /* If we found a redundant memory operation, do an alias walk to
2623 check if we can re-use it. */
2624 if (gimple_vuse (stmt) != (*slot)->vop)
2626 tree vuse1 = (*slot)->vop;
2627 tree vuse2 = gimple_vuse (stmt);
2628 /* If we have a load of a register and a candidate in the
2629 hash with vuse1 then try to reach its stmt by walking
2630 up the virtual use-def chain using walk_non_aliased_vuses.
2631 But don't do this when removing expressions from the hash. */
2632 ao_ref ref;
2633 if (!(vuse1 && vuse2
2634 && gimple_assign_single_p (stmt)
2635 && TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME
2636 && (ao_ref_init (&ref, gimple_assign_rhs1 (stmt)), true)
2637 && walk_non_aliased_vuses (&ref, vuse2,
2638 vuse_eq, NULL, NULL, vuse1) != NULL))
2640 struct expr_hash_elt *element2 = XNEW (struct expr_hash_elt);
2641 *element2 = element;
2642 element2->stamp = element2;
2644 /* Insert the expr into the hash table by replacing the current
2645 entry and recording the value to restore in the
2646 avail_exprs_stack. */
2647 avail_exprs_stack.safe_push (std::make_pair (element2, *slot));
2648 *slot = element2;
2649 if (dump_file && (dump_flags & TDF_DETAILS))
2651 fprintf (dump_file, "2>>> ");
2652 print_expr_hash_elt (dump_file, *slot);
2654 return NULL_TREE;
2658 free_expr_hash_elt_contents (&element);
2660 /* Extract the LHS of the assignment so that it can be used as the current
2661 definition of another variable. */
2662 lhs = (*slot)->lhs;
2664 /* See if the LHS appears in the CONST_AND_COPIES table. If it does, then
2665 use the value from the const_and_copies table. */
2666 if (TREE_CODE (lhs) == SSA_NAME)
2668 temp = SSA_NAME_VALUE (lhs);
2669 if (temp)
2670 lhs = temp;
2673 if (dump_file && (dump_flags & TDF_DETAILS))
2675 fprintf (dump_file, "FIND: ");
2676 print_generic_expr (dump_file, lhs, 0);
2677 fprintf (dump_file, "\n");
2680 return lhs;
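/* Note the unwind protocol used above: a plain insertion pushes the
   pair (element, NULL) so unwinding simply deletes the entry, while
   the replacement case pushes (element, old) so unwinding can put the
   previous entry back.  A sketch of the restore step, with std::map
   standing in for the hash table (illustrative):

     #include <map>
     #include <string>

     // KEY was inserted; OLD_LHS is the displaced value, or "" if the
     // slot was previously empty.
     static void
     unwind_one (std::map<std::string, std::string> &table,
                 const std::string &key, const std::string &old_lhs)
     {
       if (old_lhs.empty ())
         table.erase (key);          // plain insertion: just remove it
       else
         table[key] = old_lhs;       // replacement: restore the old entry
     }
*/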
2683 /* Hashing and equality functions for AVAIL_EXPRS. We compute a value number
2684 for expressions using the code of the expression and the SSA numbers of
2685 its operands. */
2687 static hashval_t
2688 avail_expr_hash (const void *p)
2690 const struct hashable_expr *expr = &((const struct expr_hash_elt *)p)->expr;
2691 inchash::hash hstate;
2693 inchash::add_hashable_expr (expr, hstate);
2695 return hstate.end ();
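/* The hash deliberately looks only at the expression code and operands,
   never at the defining statement, so syntactically equal right-hand
   sides collide and can be found by lookup_avail_expr.  A sketch of
   such a hash for a binary expression (the mixing constant is
   illustrative, not the inchash scheme):

     #include <cstddef>

     static std::size_t
     hash_binary (int code, unsigned ver0, unsigned ver1)
     {
       std::size_t h = static_cast<std::size_t> (code);
       h = h * 0x9e3779b9u + ver0;
       h = h * 0x9e3779b9u + ver1;
       return h;
     }

   For commutative codes the operands would first be put into a
   canonical order, so b_1 + c_3 and c_3 + b_1 hash identically.
*/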
2698 /* PHI-ONLY copy and constant propagation. This pass is meant to clean
2699 up degenerate PHIs created or exposed by jump threading. */
2701 /* Given a statement STMT, which is either a PHI node or an assignment,
2702 remove it from the IL. */
2704 static void
2705 remove_stmt_or_phi (gimple stmt)
2707 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
2709 if (gimple_code (stmt) == GIMPLE_PHI)
2710 remove_phi_node (&gsi, true);
2711 else
2713 gsi_remove (&gsi, true);
2714 release_defs (stmt);
2718 /* Given a statement STMT, which is either a PHI node or an assignment,
2719 return the "rhs" of the node. In the case of a non-degenerate
2720 PHI, NULL is returned. */
2722 static tree
2723 get_rhs_or_phi_arg (gimple stmt)
2725 if (gimple_code (stmt) == GIMPLE_PHI)
2726 return degenerate_phi_result (as_a <gphi *> (stmt));
2727 else if (gimple_assign_single_p (stmt))
2728 return gimple_assign_rhs1 (stmt);
2729 else
2730 gcc_unreachable ();
2734 /* Given a statement STMT, which is either a PHI node or an assignment,
2735 return the "lhs" of the node. */
2737 static tree
2738 get_lhs_or_phi_result (gimple stmt)
2740 if (gimple_code (stmt) == GIMPLE_PHI)
2741 return gimple_phi_result (stmt);
2742 else if (is_gimple_assign (stmt))
2743 return gimple_assign_lhs (stmt);
2744 else
2745 gcc_unreachable ();
2748 /* Propagate RHS into all uses of LHS (when possible).
2750 RHS and LHS are derived from STMT, which is passed in solely so
2751 that we can remove it if propagation is successful.
2753 When propagating into a PHI node or into a statement which turns
2754 into a trivial copy or constant initialization, set the
2755 appropriate bit in INTERESTING_NAMES so that we will visit those
2756 nodes as well in an effort to pick up secondary optimization
2757 opportunities. */
2759 static void
2760 propagate_rhs_into_lhs (gimple stmt, tree lhs, tree rhs, bitmap interesting_names)
2762 /* First verify that propagation is valid. */
2763 if (may_propagate_copy (lhs, rhs))
2765 use_operand_p use_p;
2766 imm_use_iterator iter;
2767 gimple use_stmt;
2768 bool all = true;
2770 /* Dump details. */
2771 if (dump_file && (dump_flags & TDF_DETAILS))
2773 fprintf (dump_file, " Replacing '");
2774 print_generic_expr (dump_file, lhs, dump_flags);
2775 fprintf (dump_file, "' with %s '",
2776 (TREE_CODE (rhs) != SSA_NAME ? "constant" : "variable"));
2777 print_generic_expr (dump_file, rhs, dump_flags);
2778 fprintf (dump_file, "'\n");
2781 /* Walk over every use of LHS and try to replace the use with RHS.
2782 At this point the only reason why such a propagation would not
2783 be successful would be if the use occurs in an ASM_EXPR. */
2784 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
2786 /* Leave debug stmts alone. If we succeed in propagating
2787 all non-debug uses, we'll drop the DEF, and propagation
2788 into debug stmts will occur then. */
2789 if (gimple_debug_bind_p (use_stmt))
2790 continue;
2792 /* It's not always safe to propagate into an ASM_EXPR. */
2793 if (gimple_code (use_stmt) == GIMPLE_ASM
2794 && ! may_propagate_copy_into_asm (lhs))
2796 all = false;
2797 continue;
2800 /* It's not ok to propagate into the definition stmt of RHS.
2801 <bb 9>:
2802 # prephitmp.12_36 = PHI <g_67.1_6(9)>
2803 g_67.1_6 = prephitmp.12_36;
2804 goto <bb 9>;
2805 While this is strictly all dead code we do not want to
2806 deal with this here. */
2807 if (TREE_CODE (rhs) == SSA_NAME
2808 && SSA_NAME_DEF_STMT (rhs) == use_stmt)
2810 all = false;
2811 continue;
2814 /* Dump details. */
2815 if (dump_file && (dump_flags & TDF_DETAILS))
2817 fprintf (dump_file, " Original statement:");
2818 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2821 /* Propagate the RHS into this use of the LHS. */
2822 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
2823 propagate_value (use_p, rhs);
2825 /* Special cases to avoid useless calls into the folding
2826 routines, operand scanning, etc.
2828 Propagation into a PHI may cause the PHI to become
2829 a degenerate, so mark the PHI as interesting. No other
2830 actions are necessary. */
2831 if (gimple_code (use_stmt) == GIMPLE_PHI)
2833 tree result;
2835 /* Dump details. */
2836 if (dump_file && (dump_flags & TDF_DETAILS))
2838 fprintf (dump_file, " Updated statement:");
2839 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2842 result = get_lhs_or_phi_result (use_stmt);
2843 bitmap_set_bit (interesting_names, SSA_NAME_VERSION (result));
2844 continue;
2847 /* From this point onward we are propagating into a
2848 real statement. Folding may (or may not) be possible,
2849 we may expose new operands, expose dead EH edges,
2850 etc. */
2851 /* NOTE tuples. In the tuples world, fold_stmt_inplace
2852 cannot fold a call that simplifies to a constant,
2853 because the GIMPLE_CALL must be replaced by a
2854 GIMPLE_ASSIGN, and there is no way to effect such a
2855 transformation in-place. We might want to consider
2856 using the more general fold_stmt here. */
2858 gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
2859 fold_stmt_inplace (&gsi);
2862 /* Sometimes propagation can expose new operands to the
2863 renamer. */
2864 update_stmt (use_stmt);
2866 /* Dump details. */
2867 if (dump_file && (dump_flags & TDF_DETAILS))
2869 fprintf (dump_file, " Updated statement:");
2870 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2873 /* If we replaced a variable index with a constant, then
2874 we would need to update the invariant flag for ADDR_EXPRs. */
2875 if (gimple_assign_single_p (use_stmt)
2876 && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ADDR_EXPR)
2877 recompute_tree_invariant_for_addr_expr
2878 (gimple_assign_rhs1 (use_stmt));
2880 /* If we cleaned up EH information from the statement,
2881 mark its containing block as needing EH cleanups. */
2882 if (maybe_clean_or_replace_eh_stmt (use_stmt, use_stmt))
2884 bitmap_set_bit (need_eh_cleanup, gimple_bb (use_stmt)->index);
2885 if (dump_file && (dump_flags & TDF_DETAILS))
2886 fprintf (dump_file, " Flagged to clear EH edges.\n");
2889 /* Propagation may expose new trivial copy/constant propagation
2890 opportunities. */
2891 if (gimple_assign_single_p (use_stmt)
2892 && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
2893 && (TREE_CODE (gimple_assign_rhs1 (use_stmt)) == SSA_NAME
2894 || is_gimple_min_invariant (gimple_assign_rhs1 (use_stmt))))
2896 tree result = get_lhs_or_phi_result (use_stmt);
2897 bitmap_set_bit (interesting_names, SSA_NAME_VERSION (result));
2900 /* Propagation into these nodes may make certain edges in
2901 the CFG unexecutable. We want to identify them, as PHI nodes
2902 at the destination of those unexecutable edges may become
2903 degenerate. */
2904 else if (gimple_code (use_stmt) == GIMPLE_COND
2905 || gimple_code (use_stmt) == GIMPLE_SWITCH
2906 || gimple_code (use_stmt) == GIMPLE_GOTO)
2908 tree val;
2910 if (gimple_code (use_stmt) == GIMPLE_COND)
2911 val = fold_binary_loc (gimple_location (use_stmt),
2912 gimple_cond_code (use_stmt),
2913 boolean_type_node,
2914 gimple_cond_lhs (use_stmt),
2915 gimple_cond_rhs (use_stmt));
2916 else if (gimple_code (use_stmt) == GIMPLE_SWITCH)
2917 val = gimple_switch_index (as_a <gswitch *> (use_stmt));
2918 else
2919 val = gimple_goto_dest (use_stmt);
2921 if (val && is_gimple_min_invariant (val))
2923 basic_block bb = gimple_bb (use_stmt);
2924 edge te = find_taken_edge (bb, val);
2925 edge_iterator ei;
2926 edge e;
2927 gimple_stmt_iterator gsi;
2928 gphi_iterator psi;
2930 /* Remove all outgoing edges except TE. */
2931 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei));)
2933 if (e != te)
2935 /* Mark all the PHI nodes at the destination of
2936 the unexecutable edge as interesting. */
2937 for (psi = gsi_start_phis (e->dest);
2938 !gsi_end_p (psi);
2939 gsi_next (&psi))
2941 gphi *phi = psi.phi ();
2943 tree result = gimple_phi_result (phi);
2944 int version = SSA_NAME_VERSION (result);
2946 bitmap_set_bit (interesting_names, version);
2949 te->probability += e->probability;
2951 te->count += e->count;
2952 remove_edge (e);
2953 cfg_altered = true;
2955 else
2956 ei_next (&ei);
2959 gsi = gsi_last_bb (gimple_bb (use_stmt));
2960 gsi_remove (&gsi, true);
2962 /* And fixup the flags on the single remaining edge. */
2963 te->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
2964 te->flags &= ~EDGE_ABNORMAL;
2965 te->flags |= EDGE_FALLTHRU;
2966 if (te->probability > REG_BR_PROB_BASE)
2967 te->probability = REG_BR_PROB_BASE;
2972 /* Ensure there is nothing else to do. */
2973 gcc_assert (!all || has_zero_uses (lhs));
2975 /* If we were able to propagate away all uses of LHS, then
2976 we can remove STMT. */
2977 if (all)
2978 remove_stmt_or_phi (stmt);
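/* The shape of the walk above, stripped to its essentials: visit every
   use of LHS, substitute RHS where legal, and delete the defining
   statement only when no use resisted.  A standalone sketch
   (illustrative; the real legality checks on asm uses and the RHS
   definition statement are above):

     #include <cstddef>
     #include <string>
     #include <vector>

     static bool
     propagate_all_uses (std::vector<std::string> &uses,
                         const std::vector<bool> &blocked,
                         const std::string &lhs, const std::string &rhs)
     {
       bool all = true;
       for (std::size_t i = 0; i < uses.size (); ++i)
         {
           if (uses[i] != lhs)
             continue;
           if (blocked[i])          // e.g. an asm use; cf. above
             {
               all = false;
               continue;
             }
           uses[i] = rhs;
         }
       return all;   // the caller removes the def only when ALL holds
     }
*/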
2982 /* STMT is either a PHI node (potentially a degenerate PHI node) or
2983 a statement that is a trivial copy or constant initialization.
2985 Attempt to eliminate STMT by propagating its RHS into all uses of
2986 its LHS. This may in turn set new bits in INTERESTING_NAMES
2987 for nodes we want to revisit later.
2989 All exit paths should clear INTERESTING_NAMES for the result
2990 of STMT. */
2992 static void
2993 eliminate_const_or_copy (gimple stmt, bitmap interesting_names)
2995 tree lhs = get_lhs_or_phi_result (stmt);
2996 tree rhs;
2997 int version = SSA_NAME_VERSION (lhs);
2999 /* If the LHS of this statement or PHI has no uses, then we can
3000 just eliminate it. This can occur if, for example, the PHI
3001 was created by block duplication due to threading and its only
3002 use was in the conditional at the end of the block which was
3003 deleted. */
3004 if (has_zero_uses (lhs))
3006 bitmap_clear_bit (interesting_names, version);
3007 remove_stmt_or_phi (stmt);
3008 return;
3011 /* Get the RHS of the assignment or PHI node if the PHI is a
3012 degenerate. */
3013 rhs = get_rhs_or_phi_arg (stmt);
3014 if (!rhs)
3016 bitmap_clear_bit (interesting_names, version);
3017 return;
3020 if (!virtual_operand_p (lhs))
3021 propagate_rhs_into_lhs (stmt, lhs, rhs, interesting_names);
3022 else
3024 gimple use_stmt;
3025 imm_use_iterator iter;
3026 use_operand_p use_p;
3027 /* For virtual operands we have to propagate into all uses as
3028 otherwise we will create overlapping live ranges. */
3029 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
3030 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
3031 SET_USE (use_p, rhs);
3032 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
3033 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs) = 1;
3034 remove_stmt_or_phi (stmt);
3037 /* Note that STMT may well have been deleted by now, so do
3038 not access it; instead use the saved version number to clear
3039 STMT's entry in the worklist. */
3040 bitmap_clear_bit (interesting_names, version);
3043 /* The first phase in degenerate PHI elimination.
3045 Eliminate the degenerate PHIs in BB, then recurse on the
3046 dominator children of BB. */
3048 static void
3049 eliminate_degenerate_phis_1 (basic_block bb, bitmap interesting_names)
3051 gphi_iterator gsi;
3052 basic_block son;
3054 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3056 gphi *phi = gsi.phi ();
3058 eliminate_const_or_copy (phi, interesting_names);
3061 /* Recurse into the dominator children of BB. */
3062 for (son = first_dom_son (CDI_DOMINATORS, bb);
3063 son;
3064 son = next_dom_son (CDI_DOMINATORS, son))
3065 eliminate_degenerate_phis_1 (son, interesting_names);
3069 /* A very simple pass to eliminate degenerate PHI nodes from the
3070 IL. This is meant to be fast enough to be able to be run several
3071 times in the optimization pipeline.
3073 Certain optimizations, particularly those which duplicate blocks
3074 or remove edges from the CFG, can create or expose PHIs which are
3075 trivial copies or constant initializations.
3077 While we could pick up these optimizations in DOM or with the
3078 combination of copy-prop and CCP, those solutions are far too
3079 heavy-weight for our needs.
3081 This implementation has two phases so that we can efficiently
3082 eliminate the first order degenerate PHIs and second order
3083 degenerate PHIs.
3085 The first phase performs a dominator walk to identify and eliminate
3086 the vast majority of the degenerate PHIs. When a degenerate PHI
3087 is identified and eliminated any affected statements or PHIs
3088 are put on a worklist.
3090 The second phase eliminates degenerate PHIs and trivial copies
3091 or constant initializations using the worklist. This is how we
3092 pick up the secondary optimization opportunities with minimal
3093 cost. */
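/* The two phases in miniature: a first sweep seeds a worklist, which
   is then drained to fixpoint, re-adding anything that a removal makes
   newly trivial.  A sketch with std::set standing in for the version
   bitmap (illustrative, not the pass's actual driver):

     #include <set>

     // PROCESS handles one SSA version and returns the versions it
     // made interesting in turn.
     template <typename F>
     static void
     drain_worklist (std::set<unsigned> &work, F process)
     {
       while (!work.empty ())
         {
           unsigned v = *work.begin ();
           work.erase (work.begin ());
           std::set<unsigned> more = process (v);
           work.insert (more.begin (), more.end ());
         }
     }
*/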
3095 namespace {
3097 const pass_data pass_data_phi_only_cprop =
3099 GIMPLE_PASS, /* type */
3100 "phicprop", /* name */
3101 OPTGROUP_NONE, /* optinfo_flags */
3102 TV_TREE_PHI_CPROP, /* tv_id */
3103 ( PROP_cfg | PROP_ssa ), /* properties_required */
3104 0, /* properties_provided */
3105 0, /* properties_destroyed */
3106 0, /* todo_flags_start */
3107 ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
3110 class pass_phi_only_cprop : public gimple_opt_pass
3112 public:
3113 pass_phi_only_cprop (gcc::context *ctxt)
3114 : gimple_opt_pass (pass_data_phi_only_cprop, ctxt)
3117 /* opt_pass methods: */
3118 opt_pass * clone () { return new pass_phi_only_cprop (m_ctxt); }
3119 virtual bool gate (function *) { return flag_tree_dom != 0; }
3120 virtual unsigned int execute (function *);
3122 }; // class pass_phi_only_cprop
3124 unsigned int
3125 pass_phi_only_cprop::execute (function *fun)
3127 bitmap interesting_names;
3128 bitmap interesting_names1;
3130 /* Bitmap of blocks which need EH information updated. We cannot
3131 update it on-the-fly as doing so invalidates the dominator tree. */
3132 need_eh_cleanup = BITMAP_ALLOC (NULL);
3134 /* INTERESTING_NAMES is effectively our worklist, indexed by
3135 SSA_NAME_VERSION.
3137 A set bit indicates that the statement or PHI node which
3138 defines the SSA_NAME should be (re)examined to determine if
3139 it has become a degenerate PHI or trivial const/copy propagation
3140 opportunity.
3142 Experiments have shown we generally get better compilation
3143 time behavior with bitmaps rather than sbitmaps. */
3144 interesting_names = BITMAP_ALLOC (NULL);
3145 interesting_names1 = BITMAP_ALLOC (NULL);
3147 calculate_dominance_info (CDI_DOMINATORS);
3148 cfg_altered = false;
3150 /* First phase. Eliminate degenerate PHIs via a dominator
3151 walk of the CFG.
3153 Experiments have indicated that we generally get better
3154 compile-time behavior by visiting blocks in the first
3155 phase in dominator order. Presumably this is because walking
3156 in dominator order leaves fewer PHIs for later examination
3157 by the worklist phase. */
3158 eliminate_degenerate_phis_1 (ENTRY_BLOCK_PTR_FOR_FN (fun),
3159 interesting_names);
3161 /* Second phase. Eliminate second order degenerate PHIs as well
3162 as trivial copies or constant initializations identified by
3163 the first phase or this phase. Basically we keep iterating
3164 until our set of INTERESTING_NAMEs is empty. */
3165 while (!bitmap_empty_p (interesting_names))
3167 unsigned int i;
3168 bitmap_iterator bi;
3170 /* EXECUTE_IF_SET_IN_BITMAP does not like its bitmap
3171 changed during the loop. Copy it to another bitmap and
3172 use that. */
3173 bitmap_copy (interesting_names1, interesting_names);
3175 EXECUTE_IF_SET_IN_BITMAP (interesting_names1, 0, i, bi)
3177 tree name = ssa_name (i);
3179 /* Ignore SSA_NAMEs that have been released because
3180 their defining statement was deleted (unreachable). */
3181 if (name)
3182 eliminate_const_or_copy (SSA_NAME_DEF_STMT (ssa_name (i)),
3183 interesting_names);
3187 if (cfg_altered)
3189 free_dominance_info (CDI_DOMINATORS);
3190 /* If we changed the CFG schedule loops for fixup by cfgcleanup. */
3191 loops_state_set (LOOPS_NEED_FIXUP);
3194 /* Propagation of const and copies may make some EH edges dead. Purge
3195 such edges from the CFG as needed. */
3196 if (!bitmap_empty_p (need_eh_cleanup))
3198 gimple_purge_all_dead_eh_edges (need_eh_cleanup);
3199 BITMAP_FREE (need_eh_cleanup);
3202 BITMAP_FREE (interesting_names);
3203 BITMAP_FREE (interesting_names1);
3204 return 0;
3207 } // anon namespace
3209 gimple_opt_pass *
3210 make_pass_phi_only_cprop (gcc::context *ctxt)
3212 return new pass_phi_only_cprop (ctxt);