/* SSA Dominator optimizations for trees
   Copyright (C) 2001-2015 Free Software Foundation, Inc.
   Contributed by Diego Novillo <dnovillo@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "hash-table.h"
#include "tm.h"
#include "hash-set.h"
#include "machmode.h"
#include "vec.h"
#include "double-int.h"
#include "input.h"
#include "alias.h"
#include "symtab.h"
#include "wide-int.h"
#include "inchash.h"
#include "real.h"
#include "tree.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "flags.h"
#include "tm_p.h"
#include "predict.h"
#include "hard-reg-set.h"
#include "input.h"
#include "function.h"
#include "dominance.h"
#include "cfg.h"
#include "cfganal.h"
#include "basic-block.h"
#include "cfgloop.h"
#include "inchash.h"
#include "gimple-pretty-print.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-fold.h"
#include "tree-eh.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-into-ssa.h"
#include "domwalk.h"
#include "tree-pass.h"
#include "tree-ssa-propagate.h"
#include "tree-ssa-threadupdate.h"
#include "langhooks.h"
#include "params.h"
#include "tree-ssa-threadedge.h"
#include "tree-ssa-dom.h"
#include "inchash.h"
#include "gimplify.h"
#include "tree-cfgcleanup.h"
/* This file implements optimizations on the dominator tree.  */

/* Representation of a "naked" right-hand-side expression, to be used
   in recording available expressions in the expression hash table.  */

enum expr_kind
{
  EXPR_SINGLE,
  EXPR_UNARY,
  EXPR_BINARY,
  EXPR_TERNARY,
  EXPR_CALL,
  EXPR_PHI
};

struct hashable_expr
{
  tree type;
  enum expr_kind kind;
  union {
    struct { tree rhs; } single;
    struct { enum tree_code op;  tree opnd; } unary;
    struct { enum tree_code op;  tree opnd0, opnd1; } binary;
    struct { enum tree_code op;  tree opnd0, opnd1, opnd2; } ternary;
    struct { gcall *fn_from; bool pure; size_t nargs; tree *args; } call;
    struct { size_t nargs; tree *args; } phi;
  } ops;
};
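/* As an illustrative sketch (a_1, b_2, c_3 are hypothetical SSA names),
   a GIMPLE assignment

     a_1 = b_2 + c_3;

   would be described by a hashable_expr with KIND == EXPR_BINARY,
   TYPE == TREE_TYPE (a_1), ops.binary.op == PLUS_EXPR and
   ops.binary.opnd0/opnd1 pointing at b_2 and c_3.  */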
/* Structure for recording known values of a conditional expression
   at the exits from its block.  */

typedef struct cond_equivalence_s
{
  struct hashable_expr cond;
  tree value;
} cond_equivalence;
/* Structure for recording edge equivalences as well as any pending
   edge redirections during the dominator optimizer.

   Computing and storing the edge equivalences instead of creating
   them on-demand can save significant amounts of time, particularly
   for pathological cases involving switch statements.

   These structures live for a single iteration of the dominator
   optimizer in the edge's AUX field.  At the end of an iteration we
   free each of these structures and update the AUX field to point
   to any requested redirection target (the code for updating the
   CFG and SSA graph for edge redirection expects redirection edge
   targets to be in the AUX field for each edge).  */

struct edge_info
{
  /* If this edge creates a simple equivalence, the LHS and RHS of
     the equivalence will be stored here.  */
  tree lhs;
  tree rhs;

  /* Traversing an edge may also indicate one or more particular conditions
     are true or false.  */
  vec<cond_equivalence> cond_equivalences;
};
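/* As a sketch (x_1 and the constant are hypothetical): for

     if (x_1 == 42)

   the true edge would carry the simple equivalence LHS == x_1,
   RHS == 42, and both edges would carry cond_equivalences recording
   "x_1 == 42" as true on one arm and false on the other.  */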
/* Stack of available expressions in AVAIL_EXPRs.  Each block pushes any
   expressions it enters into the hash table along with a marker entry
   (null).  When we finish processing the block, we pop off entries and
   remove the expressions from the global hash table until we hit the
   marker.  */
typedef struct expr_hash_elt * expr_hash_elt_t;

static vec<std::pair<expr_hash_elt_t, expr_hash_elt_t> > avail_exprs_stack;
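/* The intended discipline, roughly: push a (NULL, NULL) marker pair on
   entry to a block, push one pair per expression made available in the
   block, and on exit pop and undo entries until the marker reappears.
   A non-NULL second element of a pair records a previous table entry
   to restore into the freed slot.  */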
/* Structure for entries in the expression hash table.  */

struct expr_hash_elt
{
  /* The value (lhs) of this expression.  */
  tree lhs;

  /* The expression (rhs) we want to record.  */
  struct hashable_expr expr;

  /* The virtual operand associated with the nearest dominating stmt
     loading from or storing to expr.  */
  tree vop;

  /* The hash value for RHS.  */
  hashval_t hash;

  /* A unique stamp, typically the address of the hash
     element itself, used in removing entries from the table.  */
  struct expr_hash_elt *stamp;
};
/* Hashtable helpers.  */

static bool hashable_expr_equal_p (const struct hashable_expr *,
                                   const struct hashable_expr *);
static void free_expr_hash_elt (void *);

struct expr_elt_hasher
{
  typedef expr_hash_elt *value_type;
  typedef expr_hash_elt *compare_type;
  typedef int store_values_directly;
  static inline hashval_t hash (const value_type &);
  static inline bool equal (const value_type &, const compare_type &);
  static inline void remove (value_type &);
};
inline hashval_t
expr_elt_hasher::hash (const value_type &p)
{
  return p->hash;
}

inline bool
expr_elt_hasher::equal (const value_type &p1, const compare_type &p2)
{
  const struct hashable_expr *expr1 = &p1->expr;
  const struct expr_hash_elt *stamp1 = p1->stamp;
  const struct hashable_expr *expr2 = &p2->expr;
  const struct expr_hash_elt *stamp2 = p2->stamp;

  /* This case should apply only when removing entries from the table.  */
  if (stamp1 == stamp2)
    return true;

  if (p1->hash != p2->hash)
    return false;

  /* In case of a collision, both RHS have to be identical and have the
     same VUSE operands.  */
  if (hashable_expr_equal_p (expr1, expr2)
      && types_compatible_p (expr1->type, expr2->type))
    return true;

  return false;
}

/* Delete an expr_hash_elt and reclaim its storage.  */

inline void
expr_elt_hasher::remove (value_type &element)
{
  free_expr_hash_elt (element);
}
/* Hash table with expressions made available during the renaming process.
   When an assignment of the form X_i = EXPR is found, the statement is
   stored in this table.  If the same expression EXPR is later found on the
   RHS of another statement, it is replaced with X_i (thus performing
   global redundancy elimination).  Similarly, as we pass through conditionals
   we record the conditional itself as having either a true or false value
   in this table.  */
static hash_table<expr_elt_hasher> *avail_exprs;
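/* For example (hypothetical SSA names): once

     a_1 = b_2 + c_3;

   has been entered into this table, a later statement

     d_4 = b_2 + c_3;

   dominated by it can be rewritten as d_4 = a_1.  */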
/* Stack of dest,src pairs that need to be restored during finalization.

   A NULL entry is used to mark the end of pairs which need to be
   restored during finalization of this block.  */
static vec<tree> const_and_copies_stack;

/* Track whether or not we have changed the control flow graph.  */
static bool cfg_altered;

/* Bitmap of blocks that have had EH statements cleaned.  We should
   remove their dead edges eventually.  */
static bitmap need_eh_cleanup;
static vec<gimple> need_noreturn_fixup;
/* Statistics for dominator optimizations.  */
struct opt_stats_d
{
  long num_stmts;
  long num_exprs_considered;
  long num_re;
  long num_const_prop;
  long num_copy_prop;
};

static struct opt_stats_d opt_stats;
/* Local functions.  */
static void optimize_stmt (basic_block, gimple_stmt_iterator);
static tree lookup_avail_expr (gimple, bool);
static hashval_t avail_expr_hash (const void *);
static void htab_statistics (FILE *,
                             const hash_table<expr_elt_hasher> &);
static void record_cond (cond_equivalence *);
static void record_const_or_copy (tree, tree);
static void record_equality (tree, tree);
static void record_equivalences_from_phis (basic_block);
static void record_equivalences_from_incoming_edge (basic_block);
static void eliminate_redundant_computations (gimple_stmt_iterator *);
static void record_equivalences_from_stmt (gimple, int);
static void remove_local_expressions_from_table (void);
static void restore_vars_to_original_value (void);
static edge single_incoming_edge_ignoring_loop_edges (basic_block);
/* Given a statement STMT, initialize the hash table element pointed to
   by ELEMENT.  */

static void
initialize_hash_element (gimple stmt, tree lhs,
                         struct expr_hash_elt *element)
{
  enum gimple_code code = gimple_code (stmt);
  struct hashable_expr *expr = &element->expr;

  if (code == GIMPLE_ASSIGN)
    {
      enum tree_code subcode = gimple_assign_rhs_code (stmt);

      switch (get_gimple_rhs_class (subcode))
        {
        case GIMPLE_SINGLE_RHS:
          expr->kind = EXPR_SINGLE;
          expr->type = TREE_TYPE (gimple_assign_rhs1 (stmt));
          expr->ops.single.rhs = gimple_assign_rhs1 (stmt);
          break;
        case GIMPLE_UNARY_RHS:
          expr->kind = EXPR_UNARY;
          expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
          if (CONVERT_EXPR_CODE_P (subcode))
            subcode = NOP_EXPR;
          expr->ops.unary.op = subcode;
          expr->ops.unary.opnd = gimple_assign_rhs1 (stmt);
          break;
        case GIMPLE_BINARY_RHS:
          expr->kind = EXPR_BINARY;
          expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
          expr->ops.binary.op = subcode;
          expr->ops.binary.opnd0 = gimple_assign_rhs1 (stmt);
          expr->ops.binary.opnd1 = gimple_assign_rhs2 (stmt);
          break;
        case GIMPLE_TERNARY_RHS:
          expr->kind = EXPR_TERNARY;
          expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
          expr->ops.ternary.op = subcode;
          expr->ops.ternary.opnd0 = gimple_assign_rhs1 (stmt);
          expr->ops.ternary.opnd1 = gimple_assign_rhs2 (stmt);
          expr->ops.ternary.opnd2 = gimple_assign_rhs3 (stmt);
          break;
        default:
          gcc_unreachable ();
        }
    }
  else if (code == GIMPLE_COND)
    {
      expr->type = boolean_type_node;
      expr->kind = EXPR_BINARY;
      expr->ops.binary.op = gimple_cond_code (stmt);
      expr->ops.binary.opnd0 = gimple_cond_lhs (stmt);
      expr->ops.binary.opnd1 = gimple_cond_rhs (stmt);
    }
  else if (gcall *call_stmt = dyn_cast <gcall *> (stmt))
    {
      size_t nargs = gimple_call_num_args (call_stmt);
      size_t i;

      gcc_assert (gimple_call_lhs (call_stmt));

      expr->type = TREE_TYPE (gimple_call_lhs (call_stmt));
      expr->kind = EXPR_CALL;
      expr->ops.call.fn_from = call_stmt;

      if (gimple_call_flags (call_stmt) & (ECF_CONST | ECF_PURE))
        expr->ops.call.pure = true;
      else
        expr->ops.call.pure = false;

      expr->ops.call.nargs = nargs;
      expr->ops.call.args = XCNEWVEC (tree, nargs);
      for (i = 0; i < nargs; i++)
        expr->ops.call.args[i] = gimple_call_arg (call_stmt, i);
    }
  else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
    {
      expr->type = TREE_TYPE (gimple_switch_index (swtch_stmt));
      expr->kind = EXPR_SINGLE;
      expr->ops.single.rhs = gimple_switch_index (swtch_stmt);
    }
  else if (code == GIMPLE_GOTO)
    {
      expr->type = TREE_TYPE (gimple_goto_dest (stmt));
      expr->kind = EXPR_SINGLE;
      expr->ops.single.rhs = gimple_goto_dest (stmt);
    }
  else if (code == GIMPLE_PHI)
    {
      size_t nargs = gimple_phi_num_args (stmt);
      size_t i;

      expr->type = TREE_TYPE (gimple_phi_result (stmt));
      expr->kind = EXPR_PHI;
      expr->ops.phi.nargs = nargs;
      expr->ops.phi.args = XCNEWVEC (tree, nargs);

      for (i = 0; i < nargs; i++)
        expr->ops.phi.args[i] = gimple_phi_arg_def (stmt, i);
    }
  else
    gcc_unreachable ();

  element->lhs = lhs;
  element->vop = gimple_vuse (stmt);
  element->hash = avail_expr_hash (element);
  element->stamp = element;
}
/* Given a conditional expression COND as a tree, initialize
   a hashable_expr expression EXPR.  The conditional must be a
   comparison or logical negation.  A constant or a variable is
   not permitted.  */

static void
initialize_expr_from_cond (tree cond, struct hashable_expr *expr)
{
  expr->type = boolean_type_node;

  if (COMPARISON_CLASS_P (cond))
    {
      expr->kind = EXPR_BINARY;
      expr->ops.binary.op = TREE_CODE (cond);
      expr->ops.binary.opnd0 = TREE_OPERAND (cond, 0);
      expr->ops.binary.opnd1 = TREE_OPERAND (cond, 1);
    }
  else if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
    {
      expr->kind = EXPR_UNARY;
      expr->ops.unary.op = TRUTH_NOT_EXPR;
      expr->ops.unary.opnd = TREE_OPERAND (cond, 0);
    }
  else
    gcc_unreachable ();
}
/* Given a hashable_expr expression EXPR and an LHS,
   initialize the hash table element pointed to by ELEMENT.  */

static void
initialize_hash_element_from_expr (struct hashable_expr *expr,
                                   tree lhs,
                                   struct expr_hash_elt *element)
{
  element->expr = *expr;
  element->lhs = lhs;
  element->vop = NULL_TREE;
  element->hash = avail_expr_hash (element);
  element->stamp = element;
}
/* Compare two hashable_expr structures for equivalence.  They are
   considered equivalent when the expressions they denote must
   necessarily be equal.  The logic is intended to follow that of
   operand_equal_p in fold-const.c  */

static bool
hashable_expr_equal_p (const struct hashable_expr *expr0,
                       const struct hashable_expr *expr1)
{
  tree type0 = expr0->type;
  tree type1 = expr1->type;

  /* If either type is NULL, there is nothing to check.  */
  if ((type0 == NULL_TREE) ^ (type1 == NULL_TREE))
    return false;

  /* If both types don't have the same signedness, precision, and mode,
     then we can't consider them equal.  */
  if (type0 != type1
      && (TREE_CODE (type0) == ERROR_MARK
          || TREE_CODE (type1) == ERROR_MARK
          || TYPE_UNSIGNED (type0) != TYPE_UNSIGNED (type1)
          || TYPE_PRECISION (type0) != TYPE_PRECISION (type1)
          || TYPE_MODE (type0) != TYPE_MODE (type1)))
    return false;

  if (expr0->kind != expr1->kind)
    return false;

  switch (expr0->kind)
    {
    case EXPR_SINGLE:
      return operand_equal_p (expr0->ops.single.rhs,
                              expr1->ops.single.rhs, 0);

    case EXPR_UNARY:
      if (expr0->ops.unary.op != expr1->ops.unary.op)
        return false;

      if ((CONVERT_EXPR_CODE_P (expr0->ops.unary.op)
           || expr0->ops.unary.op == NON_LVALUE_EXPR)
          && TYPE_UNSIGNED (expr0->type) != TYPE_UNSIGNED (expr1->type))
        return false;

      return operand_equal_p (expr0->ops.unary.opnd,
                              expr1->ops.unary.opnd, 0);

    case EXPR_BINARY:
      if (expr0->ops.binary.op != expr1->ops.binary.op)
        return false;

      if (operand_equal_p (expr0->ops.binary.opnd0,
                           expr1->ops.binary.opnd0, 0)
          && operand_equal_p (expr0->ops.binary.opnd1,
                              expr1->ops.binary.opnd1, 0))
        return true;

      /* For commutative ops, allow the other order.  */
      return (commutative_tree_code (expr0->ops.binary.op)
              && operand_equal_p (expr0->ops.binary.opnd0,
                                  expr1->ops.binary.opnd1, 0)
              && operand_equal_p (expr0->ops.binary.opnd1,
                                  expr1->ops.binary.opnd0, 0));

    case EXPR_TERNARY:
      if (expr0->ops.ternary.op != expr1->ops.ternary.op
          || !operand_equal_p (expr0->ops.ternary.opnd2,
                               expr1->ops.ternary.opnd2, 0))
        return false;

      if (operand_equal_p (expr0->ops.ternary.opnd0,
                           expr1->ops.ternary.opnd0, 0)
          && operand_equal_p (expr0->ops.ternary.opnd1,
                              expr1->ops.ternary.opnd1, 0))
        return true;

      /* For commutative ops, allow the other order.  */
      return (commutative_ternary_tree_code (expr0->ops.ternary.op)
              && operand_equal_p (expr0->ops.ternary.opnd0,
                                  expr1->ops.ternary.opnd1, 0)
              && operand_equal_p (expr0->ops.ternary.opnd1,
                                  expr1->ops.ternary.opnd0, 0));

    case EXPR_CALL:
      {
        size_t i;

        /* If the calls are to different functions, then they
           clearly cannot be equal.  */
        if (!gimple_call_same_target_p (expr0->ops.call.fn_from,
                                        expr1->ops.call.fn_from))
          return false;

        if (! expr0->ops.call.pure)
          return false;

        if (expr0->ops.call.nargs != expr1->ops.call.nargs)
          return false;

        for (i = 0; i < expr0->ops.call.nargs; i++)
          if (! operand_equal_p (expr0->ops.call.args[i],
                                 expr1->ops.call.args[i], 0))
            return false;

        if (stmt_could_throw_p (expr0->ops.call.fn_from))
          {
            int lp0 = lookup_stmt_eh_lp (expr0->ops.call.fn_from);
            int lp1 = lookup_stmt_eh_lp (expr1->ops.call.fn_from);
            if ((lp0 > 0 || lp1 > 0) && lp0 != lp1)
              return false;
          }

        return true;
      }

    case EXPR_PHI:
      {
        size_t i;

        if (expr0->ops.phi.nargs != expr1->ops.phi.nargs)
          return false;

        for (i = 0; i < expr0->ops.phi.nargs; i++)
          if (! operand_equal_p (expr0->ops.phi.args[i],
                                 expr1->ops.phi.args[i], 0))
            return false;

        return true;
      }

    default:
      gcc_unreachable ();
    }
}
/* Generate a hash value for a pair of expressions.  This can be used
   iteratively by passing a previous result in HSTATE.

   The same hash value is always returned for a given pair of expressions,
   regardless of the order in which they are presented.  This is useful in
   hashing the operands of commutative functions.  */

namespace inchash
{

static void
add_expr_commutative (const_tree t1, const_tree t2, hash &hstate)
{
  hash one, two;

  inchash::add_expr (t1, one);
  inchash::add_expr (t2, two);
  hstate.add_commutative (one, two);
}
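/* E.g. hashing the (hypothetical) operand pair (b_2, c_3) and the pair
   (c_3, b_2) produces the same value, so commutative expressions such
   as b_2 + c_3 and c_3 + b_2 fall into the same hash bucket.  */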
/* Compute a hash value for a hashable_expr value EXPR and a
   previously accumulated hash value VAL.  If two hashable_expr
   values compare equal with hashable_expr_equal_p, they must
   hash to the same value, given an identical value of VAL.
   The logic is intended to follow inchash::add_expr in tree.c.  */

static void
add_hashable_expr (const struct hashable_expr *expr, hash &hstate)
{
  switch (expr->kind)
    {
    case EXPR_SINGLE:
      inchash::add_expr (expr->ops.single.rhs, hstate);
      break;

    case EXPR_UNARY:
      hstate.add_object (expr->ops.unary.op);

      /* Make sure to include signedness in the hash computation.
         Don't hash the type, that can lead to having nodes which
         compare equal according to operand_equal_p, but which
         have different hash codes.  */
      if (CONVERT_EXPR_CODE_P (expr->ops.unary.op)
          || expr->ops.unary.op == NON_LVALUE_EXPR)
        hstate.add_int (TYPE_UNSIGNED (expr->type));

      inchash::add_expr (expr->ops.unary.opnd, hstate);
      break;

    case EXPR_BINARY:
      hstate.add_object (expr->ops.binary.op);
      if (commutative_tree_code (expr->ops.binary.op))
        inchash::add_expr_commutative (expr->ops.binary.opnd0,
                                       expr->ops.binary.opnd1, hstate);
      else
        {
          inchash::add_expr (expr->ops.binary.opnd0, hstate);
          inchash::add_expr (expr->ops.binary.opnd1, hstate);
        }
      break;

    case EXPR_TERNARY:
      hstate.add_object (expr->ops.ternary.op);
      if (commutative_ternary_tree_code (expr->ops.ternary.op))
        inchash::add_expr_commutative (expr->ops.ternary.opnd0,
                                       expr->ops.ternary.opnd1, hstate);
      else
        {
          inchash::add_expr (expr->ops.ternary.opnd0, hstate);
          inchash::add_expr (expr->ops.ternary.opnd1, hstate);
        }
      inchash::add_expr (expr->ops.ternary.opnd2, hstate);
      break;

    case EXPR_CALL:
      {
        size_t i;
        enum tree_code code = CALL_EXPR;
        gcall *fn_from;

        hstate.add_object (code);
        fn_from = expr->ops.call.fn_from;
        if (gimple_call_internal_p (fn_from))
          hstate.merge_hash ((hashval_t) gimple_call_internal_fn (fn_from));
        else
          inchash::add_expr (gimple_call_fn (fn_from), hstate);
        for (i = 0; i < expr->ops.call.nargs; i++)
          inchash::add_expr (expr->ops.call.args[i], hstate);
      }
      break;

    case EXPR_PHI:
      {
        size_t i;

        for (i = 0; i < expr->ops.phi.nargs; i++)
          inchash::add_expr (expr->ops.phi.args[i], hstate);
      }
      break;

    default:
      gcc_unreachable ();
    }
}

}
/* Print a diagnostic dump of an expression hash table entry.  */

static void
print_expr_hash_elt (FILE * stream, const struct expr_hash_elt *element)
{
  fprintf (stream, "STMT ");

  if (element->lhs)
    {
      print_generic_expr (stream, element->lhs, 0);
      fprintf (stream, " = ");
    }

  switch (element->expr.kind)
    {
    case EXPR_SINGLE:
      print_generic_expr (stream, element->expr.ops.single.rhs, 0);
      break;

    case EXPR_UNARY:
      fprintf (stream, "%s ", get_tree_code_name (element->expr.ops.unary.op));
      print_generic_expr (stream, element->expr.ops.unary.opnd, 0);
      break;

    case EXPR_BINARY:
      print_generic_expr (stream, element->expr.ops.binary.opnd0, 0);
      fprintf (stream, " %s ", get_tree_code_name (element->expr.ops.binary.op));
      print_generic_expr (stream, element->expr.ops.binary.opnd1, 0);
      break;

    case EXPR_TERNARY:
      fprintf (stream, " %s <", get_tree_code_name (element->expr.ops.ternary.op));
      print_generic_expr (stream, element->expr.ops.ternary.opnd0, 0);
      fputs (", ", stream);
      print_generic_expr (stream, element->expr.ops.ternary.opnd1, 0);
      fputs (", ", stream);
      print_generic_expr (stream, element->expr.ops.ternary.opnd2, 0);
      fputs (">", stream);
      break;

    case EXPR_CALL:
      {
        size_t i;
        size_t nargs = element->expr.ops.call.nargs;
        gcall *fn_from;

        fn_from = element->expr.ops.call.fn_from;
        if (gimple_call_internal_p (fn_from))
          fputs (internal_fn_name (gimple_call_internal_fn (fn_from)),
                 stream);
        else
          print_generic_expr (stream, gimple_call_fn (fn_from), 0);
        fprintf (stream, " (");
        for (i = 0; i < nargs; i++)
          {
            print_generic_expr (stream, element->expr.ops.call.args[i], 0);
            if (i + 1 < nargs)
              fprintf (stream, ", ");
          }
        fprintf (stream, ")");
      }
      break;

    case EXPR_PHI:
      {
        size_t i;
        size_t nargs = element->expr.ops.phi.nargs;

        fprintf (stream, "PHI <");
        for (i = 0; i < nargs; i++)
          {
            print_generic_expr (stream, element->expr.ops.phi.args[i], 0);
            if (i + 1 < nargs)
              fprintf (stream, ", ");
          }
        fprintf (stream, ">");
      }
      break;
    }

  if (element->vop)
    {
      fprintf (stream, " with ");
      print_generic_expr (stream, element->vop, 0);
    }

  fprintf (stream, "\n");
}
/* Delete variable sized pieces of the expr_hash_elt ELEMENT.  */

static void
free_expr_hash_elt_contents (struct expr_hash_elt *element)
{
  if (element->expr.kind == EXPR_CALL)
    free (element->expr.ops.call.args);
  else if (element->expr.kind == EXPR_PHI)
    free (element->expr.ops.phi.args);
}

/* Delete an expr_hash_elt and reclaim its storage.  */

static void
free_expr_hash_elt (void *elt)
{
  struct expr_hash_elt *element = ((struct expr_hash_elt *)elt);
  free_expr_hash_elt_contents (element);
  free (element);
}
/* Allocate an EDGE_INFO for edge E and attach it to E.
   Return the new EDGE_INFO structure.  */

static struct edge_info *
allocate_edge_info (edge e)
{
  struct edge_info *edge_info;

  edge_info = XCNEW (struct edge_info);

  e->aux = edge_info;
  return edge_info;
}

/* Free all EDGE_INFO structures associated with edges in the CFG.
   If a particular edge can be threaded, copy the redirection
   target from the EDGE_INFO structure into the edge's AUX field
   as required by code to update the CFG and SSA graph for
   jump threading.  */

static void
free_all_edge_infos (void)
{
  basic_block bb;
  edge_iterator ei;
  edge e;

  FOR_EACH_BB_FN (bb, cfun)
    {
      FOR_EACH_EDGE (e, ei, bb->preds)
        {
          struct edge_info *edge_info = (struct edge_info *) e->aux;

          if (edge_info)
            {
              edge_info->cond_equivalences.release ();
              free (edge_info);
              e->aux = NULL;
            }
        }
    }
}
class dom_opt_dom_walker : public dom_walker
{
public:
  dom_opt_dom_walker (cdi_direction direction)
    : dom_walker (direction), m_dummy_cond (NULL) {}

  virtual void before_dom_children (basic_block);
  virtual void after_dom_children (basic_block);

private:
  void thread_across_edge (edge);

  gcond *m_dummy_cond;
};

/* Jump threading, redundancy elimination and const/copy propagation.

   This pass may expose new symbols that need to be renamed into SSA.  For
   every new symbol exposed, its corresponding bit will be set in
   VARS_TO_RENAME.  */
namespace {

const pass_data pass_data_dominator =
{
  GIMPLE_PASS, /* type */
  "dom", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_SSA_DOMINATOR_OPTS, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
};

class pass_dominator : public gimple_opt_pass
{
public:
  pass_dominator (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_dominator, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_dominator (m_ctxt); }
  virtual bool gate (function *) { return flag_tree_dom != 0; }
  virtual unsigned int execute (function *);

}; // class pass_dominator
unsigned int
pass_dominator::execute (function *fun)
{
  memset (&opt_stats, 0, sizeof (opt_stats));

  /* Create our hash tables.  */
  avail_exprs = new hash_table<expr_elt_hasher> (1024);
  avail_exprs_stack.create (20);
  const_and_copies_stack.create (20);
  need_eh_cleanup = BITMAP_ALLOC (NULL);
  need_noreturn_fixup.create (0);

  calculate_dominance_info (CDI_DOMINATORS);
  cfg_altered = false;

  /* We need to know loop structures in order to avoid destroying them
     in jump threading.  Note that we still can e.g. thread through loop
     headers to an exit edge, or through loop header to the loop body, assuming
     that we update the loop info.

     TODO: We don't need to set LOOPS_HAVE_PREHEADERS generally, but due
     to several overly conservative bail-outs in jump threading, case
     gcc.dg/tree-ssa/pr21417.c can't be threaded if loop preheader is
     missing.  We should improve jump threading in future then
     LOOPS_HAVE_PREHEADERS won't be needed here.  */
  loop_optimizer_init (LOOPS_HAVE_PREHEADERS | LOOPS_HAVE_SIMPLE_LATCHES);

  /* Initialize the value-handle array.  */
  threadedge_initialize_values ();

  /* We need accurate information regarding back edges in the CFG
     for jump threading; this may include back edges that are not part of
     a single loop.  */
  mark_dfs_back_edges ();

  /* Recursively walk the dominator tree optimizing statements.  */
  dom_opt_dom_walker (CDI_DOMINATORS).walk (fun->cfg->x_entry_block_ptr);

  {
    gimple_stmt_iterator gsi;
    basic_block bb;
    FOR_EACH_BB_FN (bb, fun)
      {
        for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
          update_stmt_if_modified (gsi_stmt (gsi));
      }
  }

  /* If we exposed any new variables, go ahead and put them into
     SSA form now, before we handle jump threading.  This simplifies
     interactions between rewriting of _DECL nodes into SSA form
     and rewriting SSA_NAME nodes into SSA form after block
     duplication and CFG manipulation.  */
  update_ssa (TODO_update_ssa);

  free_all_edge_infos ();

  /* Thread jumps, creating duplicate blocks as needed.  */
  cfg_altered |= thread_through_all_blocks (first_pass_instance);

  if (cfg_altered)
    free_dominance_info (CDI_DOMINATORS);

  /* Removal of statements may make some EH edges dead.  Purge
     such edges from the CFG as needed.  */
  if (!bitmap_empty_p (need_eh_cleanup))
    {
      unsigned i;
      bitmap_iterator bi;

      /* Jump threading may have created forwarder blocks from blocks
         needing EH cleanup; the new successor of these blocks, which
         has inherited from the original block, needs the cleanup.
         Don't clear bits in the bitmap, as that can break the bitmap
         iterator.  */
      EXECUTE_IF_SET_IN_BITMAP (need_eh_cleanup, 0, i, bi)
        {
          basic_block bb = BASIC_BLOCK_FOR_FN (fun, i);
          if (bb == NULL)
            continue;
          while (single_succ_p (bb)
                 && (single_succ_edge (bb)->flags & EDGE_EH) == 0)
            bb = single_succ (bb);
          if (bb == EXIT_BLOCK_PTR_FOR_FN (fun))
            continue;
          if ((unsigned) bb->index != i)
            bitmap_set_bit (need_eh_cleanup, bb->index);
        }

      gimple_purge_all_dead_eh_edges (need_eh_cleanup);
      bitmap_clear (need_eh_cleanup);
    }

  /* Fixup stmts that became noreturn calls.  This may require splitting
     blocks and thus isn't possible during the dominator walk or before
     jump threading finished.  Do this in reverse order so we don't
     inadvertently remove a stmt we want to fixup by visiting a dominating
     now noreturn call first.  */
  while (!need_noreturn_fixup.is_empty ())
    {
      gimple stmt = need_noreturn_fixup.pop ();
      if (dump_file && dump_flags & TDF_DETAILS)
        {
          fprintf (dump_file, "Fixing up noreturn call ");
          print_gimple_stmt (dump_file, stmt, 0, 0);
          fprintf (dump_file, "\n");
        }
      fixup_noreturn_call (stmt);
    }

  statistics_counter_event (fun, "Redundant expressions eliminated",
                            opt_stats.num_re);
  statistics_counter_event (fun, "Constants propagated",
                            opt_stats.num_const_prop);
  statistics_counter_event (fun, "Copies propagated",
                            opt_stats.num_copy_prop);

  /* Debugging dumps.  */
  if (dump_file && (dump_flags & TDF_STATS))
    dump_dominator_optimization_stats (dump_file);

  loop_optimizer_finalize ();

  /* Delete our main hashtable.  */
  delete avail_exprs;
  avail_exprs = NULL;

  /* Free asserted bitmaps and stacks.  */
  BITMAP_FREE (need_eh_cleanup);
  need_noreturn_fixup.release ();
  avail_exprs_stack.release ();
  const_and_copies_stack.release ();

  /* Free the value-handle array.  */
  threadedge_finalize_values ();

  return 0;
}

} // anon namespace

gimple_opt_pass *
make_pass_dominator (gcc::context *ctxt)
{
  return new pass_dominator (ctxt);
}
/* Given a conditional statement CONDSTMT, convert the
   condition to a canonical form.  */

static void
canonicalize_comparison (gcond *condstmt)
{
  tree op0;
  tree op1;
  enum tree_code code;

  gcc_assert (gimple_code (condstmt) == GIMPLE_COND);

  op0 = gimple_cond_lhs (condstmt);
  op1 = gimple_cond_rhs (condstmt);

  code = gimple_cond_code (condstmt);

  /* If it would be profitable to swap the operands, then do so to
     canonicalize the statement, enabling better optimization.

     By placing canonicalization of such expressions here we
     transparently keep statements in canonical form, even
     when the statement is modified.  */
  if (tree_swap_operands_p (op0, op1, false))
    {
      /* For relationals we need to swap the operands
         and change the code.  */
      if (code == LT_EXPR
          || code == GT_EXPR
          || code == LE_EXPR
          || code == GE_EXPR)
        {
          code = swap_tree_comparison (code);

          gimple_cond_set_code (condstmt, code);
          gimple_cond_set_lhs (condstmt, op1);
          gimple_cond_set_rhs (condstmt, op0);

          update_stmt (condstmt);
        }
    }
}
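/* E.g. a (hypothetical) condition "if (5 > x_1)" would be rewritten
   as "if (x_1 < 5)", placing the constant operand last.  */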
/* Initialize local stacks for this optimizer and record equivalences
   upon entry to BB.  Equivalences can come from the edge traversed to
   reach BB or they may come from PHI nodes at the start of BB.  */

/* Pop entries off AVAIL_EXPRS_STACK and remove the corresponding
   expressions from the global hash table, stopping when we hit the
   marker (NULL) for the current block.  */

static void
remove_local_expressions_from_table (void)
{
  /* Remove all the expressions made available in this block.  */
  while (avail_exprs_stack.length () > 0)
    {
      std::pair<expr_hash_elt_t, expr_hash_elt_t> victim
        = avail_exprs_stack.pop ();
      expr_hash_elt **slot;

      if (victim.first == NULL)
        break;

      /* This must precede the actual removal from the hash table,
         as ELEMENT and the table entry may share a call argument
         vector which will be freed during removal.  */
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "<<<< ");
          print_expr_hash_elt (dump_file, victim.first);
        }

      slot = avail_exprs->find_slot (victim.first, NO_INSERT);
      gcc_assert (slot && *slot == victim.first);
      if (victim.second != NULL)
        {
          free_expr_hash_elt (*slot);
          *slot = victim.second;
        }
      else
        avail_exprs->clear_slot (slot);
    }
}
/* Use the source/dest pairs in CONST_AND_COPIES_STACK to restore
   CONST_AND_COPIES to its original state, stopping when we hit a
   NULL marker.  */

static void
restore_vars_to_original_value (void)
{
  while (const_and_copies_stack.length () > 0)
    {
      tree prev_value, dest;

      dest = const_and_copies_stack.pop ();

      if (dest == NULL)
        break;

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "<<<< COPY ");
          print_generic_expr (dump_file, dest, 0);
          fprintf (dump_file, " = ");
          print_generic_expr (dump_file, SSA_NAME_VALUE (dest), 0);
          fprintf (dump_file, "\n");
        }

      prev_value = const_and_copies_stack.pop ();
      set_ssa_name_value (dest, prev_value);
    }
}
/* A trivial wrapper so that we can present the generic jump
   threading code with a simple API for simplifying statements.  */
static tree
simplify_stmt_for_jump_threading (gimple stmt,
                                  gimple within_stmt ATTRIBUTE_UNUSED)
{
  return lookup_avail_expr (stmt, false);
}
/* Record into the equivalence tables any equivalences implied by
   traversing edge E (which are cached in E->aux).

   Callers are responsible for managing the unwinding markers.  */
static void
record_temporary_equivalences (edge e)
{
  int i;
  struct edge_info *edge_info = (struct edge_info *) e->aux;

  /* If we have info associated with this edge, record it into
     our equivalence tables.  */
  if (edge_info)
    {
      cond_equivalence *eq;
      tree lhs = edge_info->lhs;
      tree rhs = edge_info->rhs;

      /* If we have a simple NAME = VALUE equivalence, record it.  */
      if (lhs && TREE_CODE (lhs) == SSA_NAME)
        record_const_or_copy (lhs, rhs);

      /* If we have 0 = COND or 1 = COND equivalences, record them
         into our expression hash tables.  */
      for (i = 0; edge_info->cond_equivalences.iterate (i, &eq); ++i)
        record_cond (eq);
    }
}
/* Wrapper for common code to attempt to thread an edge.  For example,
   it handles lazily building the dummy condition and the bookkeeping
   when jump threading is successful.  */

void
dom_opt_dom_walker::thread_across_edge (edge e)
{
  if (! m_dummy_cond)
    m_dummy_cond =
        gimple_build_cond (NE_EXPR,
                           integer_zero_node, integer_zero_node,
                           NULL, NULL);

  /* Push a marker on both stacks so we can unwind the tables back to their
     current state.  */
  avail_exprs_stack.safe_push
    (std::pair<expr_hash_elt_t, expr_hash_elt_t> (NULL, NULL));
  const_and_copies_stack.safe_push (NULL_TREE);

  /* Traversing E may result in equivalences we can utilize.  */
  record_temporary_equivalences (e);

  /* With all the edge equivalences in the tables, go ahead and attempt
     to thread through E->dest.  */
  ::thread_across_edge (m_dummy_cond, e, false,
                        &const_and_copies_stack,
                        simplify_stmt_for_jump_threading);

  /* And restore the various tables to their state before
     we threaded this edge.

     XXX The code in tree-ssa-threadedge.c will restore the state of
     the const_and_copies table.  We just have to restore the expression
     table.  */
  remove_local_expressions_from_table ();
}
/* PHI nodes can create equivalences too.

   Ignoring any alternatives which are the same as the result, if
   all the alternatives are equal, then the PHI node creates an
   equivalence.  */
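/* For example, a (hypothetical) PHI

     x_3 = PHI <y_2 (bb1), y_2 (bb2), x_3 (bb3)>

   lets us record x_3 == y_2: the self-referencing argument is ignored
   and every remaining argument is the same.  */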
static void
record_equivalences_from_phis (basic_block bb)
{
  gphi_iterator gsi;

  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gphi *phi = gsi.phi ();

      tree lhs = gimple_phi_result (phi);
      tree rhs = NULL;
      size_t i;

      for (i = 0; i < gimple_phi_num_args (phi); i++)
        {
          tree t = gimple_phi_arg_def (phi, i);

          /* Ignore alternatives which are the same as our LHS.  Since
             LHS is a PHI_RESULT, it is known to be an SSA_NAME, so we
             can simply compare pointers.  */
          if (lhs == t)
            continue;

          /* If we have not processed an alternative yet, then set
             RHS to this alternative.  */
          if (rhs == NULL)
            rhs = t;
          /* If we have processed an alternative (stored in RHS), then
             see if it is equal to this one.  If it isn't, then stop
             the search.  */
          else if (! operand_equal_for_phi_arg_p (rhs, t))
            break;
        }

      /* If we had no interesting alternatives, then all the RHS alternatives
         must have been the same as LHS.  */
      if (!rhs)
        rhs = lhs;

      /* If we managed to iterate through each PHI alternative without
         breaking out of the loop, then we have a PHI which may create
         a useful equivalence.  We do not need to record unwind data for
         this, since this is a true assignment and not an equivalence
         inferred from a comparison.  All uses of this ssa name are dominated
         by this assignment, so unwinding just costs time and space.  */
      if (i == gimple_phi_num_args (phi)
          && may_propagate_copy (lhs, rhs))
        set_ssa_name_value (lhs, rhs);
    }
}
/* Ignoring loop backedges, if BB has precisely one incoming edge then
   return that edge.  Otherwise return NULL.  */
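/* E.g. for a loop header whose predecessors are the preheader and the
   latch, the latch edge (its source being dominated by the header) is
   skipped and the preheader edge is returned.  */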
static edge
single_incoming_edge_ignoring_loop_edges (basic_block bb)
{
  edge retval = NULL;
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      /* A loop back edge can be identified by the destination of
         the edge dominating the source of the edge.  */
      if (dominated_by_p (CDI_DOMINATORS, e->src, e->dest))
        continue;

      /* If we have already seen a non-loop edge, then we must have
         multiple incoming non-loop edges and thus we return NULL.  */
      if (retval)
        return NULL;

      /* This is the first non-loop incoming edge we have found.  Record
         it.  */
      retval = e;
    }

  return retval;
}
/* Record any equivalences created by the incoming edge to BB.  If BB
   has more than one incoming edge, then no equivalence is created.  */

static void
record_equivalences_from_incoming_edge (basic_block bb)
{
  edge e;
  basic_block parent;
  struct edge_info *edge_info;

  /* If our parent block ended with a control statement, then we may be
     able to record some equivalences based on which outgoing edge from
     the parent was followed.  */
  parent = get_immediate_dominator (CDI_DOMINATORS, bb);

  e = single_incoming_edge_ignoring_loop_edges (bb);

  /* If we had a single incoming edge from our parent block, then enter
     any data associated with the edge into our tables.  */
  if (e && e->src == parent)
    {
      unsigned int i;

      edge_info = (struct edge_info *) e->aux;

      if (edge_info)
        {
          tree lhs = edge_info->lhs;
          tree rhs = edge_info->rhs;
          cond_equivalence *eq;

          if (lhs)
            record_equality (lhs, rhs);

          /* If LHS is an SSA_NAME and RHS is a constant integer and LHS was
             set via a widening type conversion, then we may be able to record
             additional equivalences.  */
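          /* For instance (hypothetical names): if s_1 was defined by
             s_1 = (int) c_2 for a narrower char c_2, and this edge tells
             us s_1 == 5, then c_2 == 5 can be recorded as well, since 5
             fits in the narrower type.  */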
          if (lhs
              && TREE_CODE (lhs) == SSA_NAME
              && is_gimple_constant (rhs)
              && TREE_CODE (rhs) == INTEGER_CST)
            {
              gimple defstmt = SSA_NAME_DEF_STMT (lhs);

              if (defstmt
                  && is_gimple_assign (defstmt)
                  && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (defstmt)))
                {
                  tree old_rhs = gimple_assign_rhs1 (defstmt);

                  /* If the conversion widens the original value and
                     the constant is in the range of the type of OLD_RHS,
                     then convert the constant and record the equivalence.

                     Note that int_fits_type_p does not check the precision
                     if the upper and lower bounds are OK.  */
                  if (INTEGRAL_TYPE_P (TREE_TYPE (old_rhs))
                      && (TYPE_PRECISION (TREE_TYPE (lhs))
                          > TYPE_PRECISION (TREE_TYPE (old_rhs)))
                      && int_fits_type_p (rhs, TREE_TYPE (old_rhs)))
                    {
                      tree newval = fold_convert (TREE_TYPE (old_rhs), rhs);
                      record_equality (old_rhs, newval);
                    }
                }
            }

          for (i = 0; edge_info->cond_equivalences.iterate (i, &eq); ++i)
            record_cond (eq);
        }
    }
}
/* Dump SSA statistics on FILE.  */

void
dump_dominator_optimization_stats (FILE *file)
{
  fprintf (file, "Total number of statements:                   %6ld\n\n",
           opt_stats.num_stmts);
  fprintf (file, "Exprs considered for dominator optimizations: %6ld\n",
           opt_stats.num_exprs_considered);

  fprintf (file, "\nHash table statistics:\n");

  fprintf (file, "    avail_exprs: ");
  htab_statistics (file, *avail_exprs);
}

/* Dump SSA statistics on stderr.  */

DEBUG_FUNCTION void
debug_dominator_optimization_stats (void)
{
  dump_dominator_optimization_stats (stderr);
}

/* Dump statistics for the hash table HTAB.  */

static void
htab_statistics (FILE *file, const hash_table<expr_elt_hasher> &htab)
{
  fprintf (file, "size %ld, %ld elements, %f collision/search ratio\n",
           (long) htab.size (),
           (long) htab.elements (),
           htab.collisions ());
}
/* Enter condition equivalence into the expression hash table.
   This indicates that a conditional expression has a known
   boolean value.  */

static void
record_cond (cond_equivalence *p)
{
  struct expr_hash_elt *element = XCNEW (struct expr_hash_elt);
  expr_hash_elt **slot;

  initialize_hash_element_from_expr (&p->cond, p->value, element);

  slot = avail_exprs->find_slot_with_hash (element, element->hash, INSERT);
  if (*slot == NULL)
    {
      *slot = element;

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "1>>> ");
          print_expr_hash_elt (dump_file, element);
        }

      avail_exprs_stack.safe_push
        (std::pair<expr_hash_elt_t, expr_hash_elt_t> (element, NULL));
    }
  else
    free_expr_hash_elt (element);
}
/* Build a cond_equivalence record indicating that the comparison
   CODE holds between operands OP0 and OP1 and push it to **P.  */

static void
build_and_record_new_cond (enum tree_code code,
                           tree op0, tree op1,
                           vec<cond_equivalence> *p)
{
  cond_equivalence c;
  struct hashable_expr *cond = &c.cond;

  gcc_assert (TREE_CODE_CLASS (code) == tcc_comparison);

  cond->type = boolean_type_node;
  cond->kind = EXPR_BINARY;
  cond->ops.binary.op = code;
  cond->ops.binary.opnd0 = op0;
  cond->ops.binary.opnd1 = op1;

  c.value = boolean_true_node;
  p->safe_push (c);
}
/* Record that COND is true and INVERTED is false into the edge information
   structure.  Also record that any conditions dominated by COND are true
   as well.

   For example, if a < b is true, then a <= b must also be true.  */
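/* E.g. on the true edge of a (hypothetical) "if (a_1 < b_2)" we also
   record that a_1 <= b_2 and a_1 != b_2 hold (plus ORDERED and LTGT
   for floating point operands), so later tests of those weaker
   conditions can be resolved.  */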
static void
record_conditions (struct edge_info *edge_info, tree cond, tree inverted)
{
  tree op0, op1;
  cond_equivalence c;

  if (!COMPARISON_CLASS_P (cond))
    return;

  op0 = TREE_OPERAND (cond, 0);
  op1 = TREE_OPERAND (cond, 1);

  switch (TREE_CODE (cond))
    {
    case LT_EXPR:
    case GT_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
        {
          build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
          build_and_record_new_cond (LTGT_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
        }

      build_and_record_new_cond ((TREE_CODE (cond) == LT_EXPR
                                  ? LE_EXPR : GE_EXPR),
                                 op0, op1, &edge_info->cond_equivalences);
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case GE_EXPR:
    case LE_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
        {
          build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
        }
      break;

    case EQ_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
        {
          build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
        }
      build_and_record_new_cond (LE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (GE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case UNORDERED_EXPR:
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNLE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNEQ_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNLT_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGT_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case UNLT_EXPR:
    case UNGT_EXPR:
      build_and_record_new_cond ((TREE_CODE (cond) == UNLT_EXPR
                                  ? UNLE_EXPR : UNGE_EXPR),
                                 op0, op1, &edge_info->cond_equivalences);
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case UNEQ_EXPR:
      build_and_record_new_cond (UNLE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case LTGT_EXPR:
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    default:
      break;
    }

  /* Now record the original true and false conditions themselves.  */
  initialize_expr_from_cond (cond, &c.cond);
  c.value = boolean_true_node;
  edge_info->cond_equivalences.safe_push (c);

  /* It is possible for INVERTED to be the negation of a comparison,
     and not a valid RHS or GIMPLE_COND condition.  This happens because
     invert_truthvalue may return such an expression when asked to invert
     a floating-point comparison.  These comparisons are not assumed to
     obey the trichotomy law.  */
  initialize_expr_from_cond (inverted, &c.cond);
  c.value = boolean_false_node;
  edge_info->cond_equivalences.safe_push (c);
}
/* A helper function for record_const_or_copy and record_equality.
   Do the work of recording the value and undo info.  */

static void
record_const_or_copy_1 (tree x, tree y, tree prev_x)
{
  set_ssa_name_value (x, y);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "0>>> COPY ");
      print_generic_expr (dump_file, x, 0);
      fprintf (dump_file, " = ");
      print_generic_expr (dump_file, y, 0);
      fprintf (dump_file, "\n");
    }

  const_and_copies_stack.reserve (2);
  const_and_copies_stack.quick_push (prev_x);
  const_and_copies_stack.quick_push (x);
}

/* Record that X is equal to Y in const_and_copies.  Record undo
   information in the block-local vector.  */

static void
record_const_or_copy (tree x, tree y)
{
  tree prev_x = SSA_NAME_VALUE (x);

  gcc_assert (TREE_CODE (x) == SSA_NAME);

  if (TREE_CODE (y) == SSA_NAME)
    {
      tree tmp = SSA_NAME_VALUE (y);
      if (tmp)
        y = tmp;
    }

  record_const_or_copy_1 (x, y, prev_x);
}
/* Return the loop depth of the basic block of the defining statement of X.
   This number should not be treated as absolutely correct because the loop
   information may not be completely up-to-date when dom runs.  However, it
   will be relatively correct, and as more passes are taught to keep loop info
   up to date, the result will become more and more accurate.  */

static int
loop_depth_of_name (tree x)
{
  gimple defstmt;
  basic_block defbb;

  /* If it's not an SSA_NAME, we have no clue where the definition is.  */
  if (TREE_CODE (x) != SSA_NAME)
    return 0;

  /* Otherwise return the loop depth of the defining statement's bb.
     Note that there may not actually be a bb for this statement, if the
     ssa_name is live on entry.  */
  defstmt = SSA_NAME_DEF_STMT (x);
  defbb = gimple_bb (defstmt);
  if (!defbb)
    return 0;

  return bb_loop_depth (defbb);
}
/* Similarly, but assume that X and Y are the two operands of an EQ_EXPR.
   This constrains the cases in which we may treat this as assignment.  */

static void
record_equality (tree x, tree y)
{
  tree prev_x = NULL, prev_y = NULL;

  if (TREE_CODE (x) == SSA_NAME)
    prev_x = SSA_NAME_VALUE (x);
  if (TREE_CODE (y) == SSA_NAME)
    prev_y = SSA_NAME_VALUE (y);

  /* If one of the previous values is invariant, or invariant in more loops
     (by depth), then use that.
     Otherwise it doesn't matter which value we choose, just so
     long as we canonicalize on one value.  */
  if (is_gimple_min_invariant (y))
    ;
  else if (is_gimple_min_invariant (x)
           /* ???  When threading over backedges the following is important
              for correctness.  See PR61757.  */
           || (loop_depth_of_name (x) <= loop_depth_of_name (y)))
    prev_x = x, x = y, y = prev_x, prev_x = prev_y;
  else if (prev_x && is_gimple_min_invariant (prev_x))
    x = y, y = prev_x, prev_x = prev_y;
  else if (prev_y)
    y = prev_y;

  /* After the swapping, we must have one SSA_NAME.  */
  if (TREE_CODE (x) != SSA_NAME)
    return;

  /* For IEEE, -0.0 == 0.0, so we don't necessarily know the sign of a
     variable compared against zero.  If we're honoring signed zeros,
     then we cannot record this value unless we know that the value is
     nonzero.  */
  if (HONOR_SIGNED_ZEROS (x)
      && (TREE_CODE (y) != REAL_CST
          || REAL_VALUES_EQUAL (dconst0, TREE_REAL_CST (y))))
    return;

  record_const_or_copy_1 (x, y, prev_x);
}
/* Returns true when STMT is a simple iv increment.  It detects the
   following situation:

   i_1 = phi (..., i_2)
   i_2 = i_1 +/- ...  */

bool
simple_iv_increment_p (gimple stmt)
{
  enum tree_code code;
  tree lhs, preinc;
  gimple phi;
  size_t i;

  if (gimple_code (stmt) != GIMPLE_ASSIGN)
    return false;

  lhs = gimple_assign_lhs (stmt);
  if (TREE_CODE (lhs) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (code != PLUS_EXPR
      && code != MINUS_EXPR
      && code != POINTER_PLUS_EXPR)
    return false;

  preinc = gimple_assign_rhs1 (stmt);
  if (TREE_CODE (preinc) != SSA_NAME)
    return false;

  phi = SSA_NAME_DEF_STMT (preinc);
  if (gimple_code (phi) != GIMPLE_PHI)
    return false;

  for (i = 0; i < gimple_phi_num_args (phi); i++)
    if (gimple_phi_arg_def (phi, i) == lhs)
      return true;

  return false;
}
/* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
   known value for that SSA_NAME (or NULL if no value is known).

   Propagate values from CONST_AND_COPIES into the PHI nodes of the
   successors of BB.  */

static void
cprop_into_successor_phis (basic_block bb)
{
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      int indx;
      gphi_iterator gsi;

      /* If this is an abnormal edge, then we do not want to copy propagate
         into the PHI alternative associated with this edge.  */
      if (e->flags & EDGE_ABNORMAL)
        continue;

      gsi = gsi_start_phis (e->dest);
      if (gsi_end_p (gsi))
        continue;

      /* We may have an equivalence associated with this edge.  While
         we cannot propagate it into non-dominated blocks, we can
         propagate it into PHIs in non-dominated blocks.  */

      /* Push the unwind marker so we can reset the const and copies
         table back to its original state after processing this edge.  */
      const_and_copies_stack.safe_push (NULL_TREE);

      /* Extract and record any simple NAME = VALUE equivalences.

         Don't bother with [01] = COND equivalences, they're not useful
         here.  */
      struct edge_info *edge_info = (struct edge_info *) e->aux;
      if (edge_info)
        {
          tree lhs = edge_info->lhs;
          tree rhs = edge_info->rhs;

          if (lhs && TREE_CODE (lhs) == SSA_NAME)
            record_const_or_copy (lhs, rhs);
        }

      indx = e->dest_idx;
      for ( ; !gsi_end_p (gsi); gsi_next (&gsi))
        {
          tree new_val;
          use_operand_p orig_p;
          tree orig_val;
          gphi *phi = gsi.phi ();

          /* The alternative may be associated with a constant, so verify
             it is an SSA_NAME before doing anything with it.  */
          orig_p = gimple_phi_arg_imm_use_ptr (phi, indx);
          orig_val = get_use_from_ptr (orig_p);
          if (TREE_CODE (orig_val) != SSA_NAME)
            continue;

          /* If we have *ORIG_P in our constant/copy table, then replace
             ORIG_P with its value in our constant/copy table.  */
          new_val = SSA_NAME_VALUE (orig_val);
          if (new_val
              && new_val != orig_val
              && (TREE_CODE (new_val) == SSA_NAME
                  || is_gimple_min_invariant (new_val))
              && may_propagate_copy (orig_val, new_val))
            propagate_value (orig_p, new_val);
        }

      restore_vars_to_original_value ();
    }
}
/* We have finished optimizing BB, record any information implied by
   taking a specific outgoing edge from BB.  */

static void
record_edge_info (basic_block bb)
{
  gimple_stmt_iterator gsi = gsi_last_bb (bb);
  struct edge_info *edge_info;

  if (! gsi_end_p (gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      location_t loc = gimple_location (stmt);

      if (gimple_code (stmt) == GIMPLE_SWITCH)
        {
          gswitch *switch_stmt = as_a <gswitch *> (stmt);
          tree index = gimple_switch_index (switch_stmt);

          if (TREE_CODE (index) == SSA_NAME)
            {
              int i;
              int n_labels = gimple_switch_num_labels (switch_stmt);
              tree *info = XCNEWVEC (tree, last_basic_block_for_fn (cfun));
              edge e;
              edge_iterator ei;

              for (i = 0; i < n_labels; i++)
                {
                  tree label = gimple_switch_label (switch_stmt, i);
                  basic_block target_bb = label_to_block (CASE_LABEL (label));
                  if (CASE_HIGH (label)
                      || !CASE_LOW (label)
                      || info[target_bb->index])
                    info[target_bb->index] = error_mark_node;
                  else
                    info[target_bb->index] = label;
                }

              FOR_EACH_EDGE (e, ei, bb->succs)
                {
                  basic_block target_bb = e->dest;
                  tree label = info[target_bb->index];

                  if (label != NULL && label != error_mark_node)
                    {
                      tree x = fold_convert_loc (loc, TREE_TYPE (index),
                                                 CASE_LOW (label));
                      edge_info = allocate_edge_info (e);
                      edge_info->lhs = index;
                      edge_info->rhs = x;
                    }
                }
              free (info);
            }
        }

      /* A COND_EXPR may create equivalences too.  */
      if (gimple_code (stmt) == GIMPLE_COND)
        {
          edge true_edge;
          edge false_edge;

          tree op0 = gimple_cond_lhs (stmt);
          tree op1 = gimple_cond_rhs (stmt);
          enum tree_code code = gimple_cond_code (stmt);

          extract_true_false_edges_from_block (bb, &true_edge, &false_edge);

          /* Special case comparing booleans against a constant as we
             know the value of OP0 on both arms of the branch.  I.e., we
             can record an equivalence for OP0 rather than COND.  */
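          /* E.g. for a (hypothetical) "if (flag_1 == 0)" we record
             flag_1 == false on the true edge and flag_1 == true on the
             false edge, instead of an equivalence for the condition
             itself.  */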
          if ((code == EQ_EXPR || code == NE_EXPR)
              && TREE_CODE (op0) == SSA_NAME
              && TREE_CODE (TREE_TYPE (op0)) == BOOLEAN_TYPE
              && is_gimple_min_invariant (op1))
            {
              if (code == EQ_EXPR)
                {
                  edge_info = allocate_edge_info (true_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_false_node
                                    : boolean_true_node);

                  edge_info = allocate_edge_info (false_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_true_node
                                    : boolean_false_node);
                }
              else
                {
                  edge_info = allocate_edge_info (true_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_true_node
                                    : boolean_false_node);

                  edge_info = allocate_edge_info (false_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_false_node
                                    : boolean_true_node);
                }
            }
          else if (is_gimple_min_invariant (op0)
                   && (TREE_CODE (op1) == SSA_NAME
                       || is_gimple_min_invariant (op1)))
            {
              tree cond = build2 (code, boolean_type_node, op0, op1);
              tree inverted = invert_truthvalue_loc (loc, cond);
              bool can_infer_simple_equiv
                = !(HONOR_SIGNED_ZEROS (op0)
                    && real_zerop (op0));
              struct edge_info *edge_info;

              edge_info = allocate_edge_info (true_edge);
              record_conditions (edge_info, cond, inverted);

              if (can_infer_simple_equiv && code == EQ_EXPR)
                {
                  edge_info->lhs = op1;
                  edge_info->rhs = op0;
                }

              edge_info = allocate_edge_info (false_edge);
              record_conditions (edge_info, inverted, cond);

              if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
                {
                  edge_info->lhs = op1;
                  edge_info->rhs = op0;
                }
            }
          else if (TREE_CODE (op0) == SSA_NAME
                   && (TREE_CODE (op1) == SSA_NAME
                       || is_gimple_min_invariant (op1)))
            {
              tree cond = build2 (code, boolean_type_node, op0, op1);
              tree inverted = invert_truthvalue_loc (loc, cond);
              bool can_infer_simple_equiv
                = !(HONOR_SIGNED_ZEROS (op1)
                    && (TREE_CODE (op1) == SSA_NAME || real_zerop (op1)));
              struct edge_info *edge_info;

              edge_info = allocate_edge_info (true_edge);
              record_conditions (edge_info, cond, inverted);

              if (can_infer_simple_equiv && code == EQ_EXPR)
                {
                  edge_info->lhs = op0;
                  edge_info->rhs = op1;
                }

              edge_info = allocate_edge_info (false_edge);
              record_conditions (edge_info, inverted, cond);

              if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
                {
                  edge_info->lhs = op0;
                  edge_info->rhs = op1;
                }
            }
        }

      /* ??? TRUTH_NOT_EXPR can create an equivalence too.  */
    }
}
1988 void
1989 dom_opt_dom_walker::before_dom_children (basic_block bb)
1991 gimple_stmt_iterator gsi;
1993 if (dump_file && (dump_flags & TDF_DETAILS))
1994 fprintf (dump_file, "\n\nOptimizing block #%d\n\n", bb->index);
1996 /* Push a marker on the stacks of local information so that we know how
1997 far to unwind when we finalize this block. */
1998 avail_exprs_stack.safe_push
1999 (std::pair<expr_hash_elt_t, expr_hash_elt_t> (NULL, NULL));
2000 const_and_copies_stack.safe_push (NULL_TREE);
2002 record_equivalences_from_incoming_edge (bb);
2004 /* PHI nodes can create equivalences too. */
2005 record_equivalences_from_phis (bb);
2007 /* Create equivalences from redundant PHIs. PHIs are only truly
2008 redundant when they exist in the same block, so push another
2009 marker and unwind right afterwards. */
2010 avail_exprs_stack.safe_push
2011 (std::pair<expr_hash_elt_t, expr_hash_elt_t> (NULL, NULL));
2012 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2013 eliminate_redundant_computations (&gsi);
2014 remove_local_expressions_from_table ();
2016 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2017 optimize_stmt (bb, gsi);
2019 /* Now prepare to process dominated blocks. */
2020 record_edge_info (bb);
2021 cprop_into_successor_phis (bb);
2024 /* We have finished processing the dominator children of BB; perform
2025 any finalization actions in preparation for leaving this node in
2026 the dominator tree. */
2028 void
2029 dom_opt_dom_walker::after_dom_children (basic_block bb)
2031 gimple last;
2033 /* If we have an outgoing edge to a block with multiple incoming and
2034 outgoing edges, then we may be able to thread the edge, i.e., we
2035 may be able to statically determine which of the outgoing edges
2036 will be traversed when the incoming edge from BB is traversed. */
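/* A hypothetical example: if BB established the equivalence x_1 = 0
   and its lone successor tests "if (x_1 == 0)", the outcome of that
   test is known when entering from BB, so the jump can be threaded
   directly to the true arm.  */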
2037 if (single_succ_p (bb)
2038 && (single_succ_edge (bb)->flags & EDGE_ABNORMAL) == 0
2039 && potentially_threadable_block (single_succ (bb)))
2041 thread_across_edge (single_succ_edge (bb));
2043 else if ((last = last_stmt (bb))
2044 && gimple_code (last) == GIMPLE_COND
2045 && EDGE_COUNT (bb->succs) == 2
2046 && (EDGE_SUCC (bb, 0)->flags & EDGE_ABNORMAL) == 0
2047 && (EDGE_SUCC (bb, 1)->flags & EDGE_ABNORMAL) == 0)
2049 edge true_edge, false_edge;
2051 extract_true_false_edges_from_block (bb, &true_edge, &false_edge);
2053 /* Only try to thread the edge if it reaches a target block with
2054 more than one predecessor and more than one successor. */
2055 if (potentially_threadable_block (true_edge->dest))
2056 thread_across_edge (true_edge);
2058 /* Similarly for the ELSE arm. */
2059 if (potentially_threadable_block (false_edge->dest))
2060 thread_across_edge (false_edge);
2064 /* These remove expressions local to BB from the tables. */
2065 remove_local_expressions_from_table ();
2066 restore_vars_to_original_value ();
2069 /* Search for redundant computations in STMT. If any are found, then
2070 replace them with the variable holding the result of the computation.
2072 If safe, record this expression into the available expression hash
2073 table. */
2075 static void
2076 eliminate_redundant_computations (gimple_stmt_iterator* gsi)
2078 tree expr_type;
2079 tree cached_lhs;
2080 tree def;
2081 bool insert = true;
2082 bool assigns_var_p = false;
2084 gimple stmt = gsi_stmt (*gsi);
2086 if (gimple_code (stmt) == GIMPLE_PHI)
2087 def = gimple_phi_result (stmt);
2088 else
2089 def = gimple_get_lhs (stmt);
2091 /* Certain expressions on the RHS can be optimized away, but cannot
2092 themselves be entered into the hash tables. */
2093 if (! def
2094 || TREE_CODE (def) != SSA_NAME
2095 || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (def)
2096 || gimple_vdef (stmt)
2097 /* Do not record equivalences for increments of ivs. This would create
2098 overlapping live ranges for a very questionable gain. */
2099 || simple_iv_increment_p (stmt))
2100 insert = false;
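/* A hypothetical example of the IV case: for "i_3 = i_1 + 1" where
   i_1 is a loop-header PHI result, entering i_3 into the table could
   replace later recomputations with i_3, keeping both i_1 and i_3
   live around the loop for a very questionable gain.  */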
2102 /* Check if the expression has been computed before. */
2103 cached_lhs = lookup_avail_expr (stmt, insert);
2105 opt_stats.num_exprs_considered++;
2107 /* Get the type of the expression we are trying to optimize. */
2108 if (is_gimple_assign (stmt))
2110 expr_type = TREE_TYPE (gimple_assign_lhs (stmt));
2111 assigns_var_p = true;
2113 else if (gimple_code (stmt) == GIMPLE_COND)
2114 expr_type = boolean_type_node;
2115 else if (is_gimple_call (stmt))
2117 gcc_assert (gimple_call_lhs (stmt));
2118 expr_type = TREE_TYPE (gimple_call_lhs (stmt));
2119 assigns_var_p = true;
2121 else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
2122 expr_type = TREE_TYPE (gimple_switch_index (swtch_stmt));
2123 else if (gimple_code (stmt) == GIMPLE_PHI)
2124 /* We can't propagate into a phi, so the logic below doesn't apply.
2125 Instead record an equivalence between the cached LHS and the
2126 PHI result of this statement, provided they are in the same block.
2127 This should be sufficient to kill the redundant phi. */
2129 if (def && cached_lhs)
2130 record_const_or_copy (def, cached_lhs);
2131 return;
2133 else
2134 gcc_unreachable ();
2136 if (!cached_lhs)
2137 return;
2139 /* It is safe to ignore types here since we have already done
2140 type checking in the hashing and equality routines. In fact
2141 type checking here merely gets in the way of constant
2142 propagation. Also, make sure that it is safe to propagate
2143 CACHED_LHS into the expression in STMT. */
2144 if ((TREE_CODE (cached_lhs) != SSA_NAME
2145 && (assigns_var_p
2146 || useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs))))
2147 || may_propagate_copy_into_stmt (stmt, cached_lhs))
2149 gcc_checking_assert (TREE_CODE (cached_lhs) == SSA_NAME
2150 || is_gimple_min_invariant (cached_lhs));
2152 if (dump_file && (dump_flags & TDF_DETAILS))
2154 fprintf (dump_file, " Replaced redundant expr '");
2155 print_gimple_expr (dump_file, stmt, 0, dump_flags);
2156 fprintf (dump_file, "' with '");
2157 print_generic_expr (dump_file, cached_lhs, dump_flags);
2158 fprintf (dump_file, "'\n");
2161 opt_stats.num_re++;
2163 if (assigns_var_p
2164 && !useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs)))
2165 cached_lhs = fold_convert (expr_type, cached_lhs);
2167 propagate_tree_value_into_stmt (gsi, cached_lhs);
2169 /* Since it is always necessary to mark the result as modified,
2170 perhaps we should move this into propagate_tree_value_into_stmt
2171 itself. */
2172 gimple_set_modified (gsi_stmt (*gsi), true);
2176 /* STMT, a GIMPLE_ASSIGN, may create certain equivalences, in either
2177 the available expressions table or the const_and_copies table.
2178 Detect and record those equivalences. */
2179 /* We handle only very simple copy equivalences here. The heavy
2180 lifting is done by eliminate_redundant_computations. */
2182 static void
2183 record_equivalences_from_stmt (gimple stmt, int may_optimize_p)
2185 tree lhs;
2186 enum tree_code lhs_code;
2188 gcc_assert (is_gimple_assign (stmt));
2190 lhs = gimple_assign_lhs (stmt);
2191 lhs_code = TREE_CODE (lhs);
2193 if (lhs_code == SSA_NAME
2194 && gimple_assign_single_p (stmt))
2196 tree rhs = gimple_assign_rhs1 (stmt);
2198 /* If the RHS of the assignment is a constant or another variable that
2199 may be propagated, register it in the CONST_AND_COPIES table. We
2200 do not need to record unwind data for this, since this is a true
2201 assignment and not an equivalence inferred from a comparison. All
2202 uses of this ssa name are dominated by this assignment, so unwinding
2203 just costs time and space. */
2204 if (may_optimize_p
2205 && (TREE_CODE (rhs) == SSA_NAME
2206 || is_gimple_min_invariant (rhs)))
2208 if (dump_file && (dump_flags & TDF_DETAILS))
2210 fprintf (dump_file, "==== ASGN ");
2211 print_generic_expr (dump_file, lhs, 0);
2212 fprintf (dump_file, " = ");
2213 print_generic_expr (dump_file, rhs, 0);
2214 fprintf (dump_file, "\n");
2217 set_ssa_name_value (lhs, rhs);
2221 /* Make sure we can propagate &x + CST. */
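/* A hypothetical example: for "p_1 = &a + 4" we record a value of the
   form &MEM[&a + 4] for p_1 (the exact dump syntax may differ), which
   the copy propagator can substitute into later uses of p_1.  */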
2222 if (lhs_code == SSA_NAME
2223 && gimple_assign_rhs_code (stmt) == POINTER_PLUS_EXPR
2224 && TREE_CODE (gimple_assign_rhs1 (stmt)) == ADDR_EXPR
2225 && TREE_CODE (gimple_assign_rhs2 (stmt)) == INTEGER_CST)
2227 tree op0 = gimple_assign_rhs1 (stmt);
2228 tree op1 = gimple_assign_rhs2 (stmt);
2229 tree new_rhs
2230 = build_fold_addr_expr (fold_build2 (MEM_REF,
2231 TREE_TYPE (TREE_TYPE (op0)),
2232 unshare_expr (op0),
2233 fold_convert (ptr_type_node,
2234 op1)));
2235 if (dump_file && (dump_flags & TDF_DETAILS))
2237 fprintf (dump_file, "==== ASGN ");
2238 print_generic_expr (dump_file, lhs, 0);
2239 fprintf (dump_file, " = ");
2240 print_generic_expr (dump_file, new_rhs, 0);
2241 fprintf (dump_file, "\n");
2244 set_ssa_name_value (lhs, new_rhs);
2247 /* A memory store, even an aliased store, creates a useful
2248 equivalence. By exchanging the LHS and RHS, creating suitable
2249 vops and recording the result in the available expression table,
2250 we may be able to expose more redundant loads. */
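/* A hypothetical example: after the store "*p_1 = x_2;" we enter the
   artificial statement "x_2 = *p_1;" into the table, so that a later
   load "y_3 = *p_1;" seeing the same memory state can be replaced by
   a copy from x_2.  */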
2251 if (!gimple_has_volatile_ops (stmt)
2252 && gimple_references_memory_p (stmt)
2253 && gimple_assign_single_p (stmt)
2254 && (TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME
2255 || is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
2256 && !is_gimple_reg (lhs))
2258 tree rhs = gimple_assign_rhs1 (stmt);
2259 gassign *new_stmt;
2261 /* Build a new statement with the RHS and LHS exchanged. */
2262 if (TREE_CODE (rhs) == SSA_NAME)
2264 /* NOTE tuples. The call to gimple_build_assign below replaced
2265 a call to build_gimple_modify_stmt, which did not set the
2266 SSA_NAME_DEF_STMT on the LHS of the assignment. Doing so
2267 may cause an SSA validation failure, as the LHS may be a
2268 default-initialized name and should have no definition. I'm
2269 a bit dubious of this, as the artificial statement that we
2270 generate here may in fact be ill-formed, but it is simply
2271 used as an internal device in this pass, and never becomes
2272 part of the CFG. */
2273 gimple defstmt = SSA_NAME_DEF_STMT (rhs);
2274 new_stmt = gimple_build_assign (rhs, lhs);
2275 SSA_NAME_DEF_STMT (rhs) = defstmt;
2277 else
2278 new_stmt = gimple_build_assign (rhs, lhs);
2280 gimple_set_vuse (new_stmt, gimple_vdef (stmt));
2282 /* Finally enter the statement into the available expression
2283 table. */
2284 lookup_avail_expr (new_stmt, true);
2288 /* Replace *OP_P in STMT with any known equivalent value for *OP_P from
2289 CONST_AND_COPIES. */
2291 static void
2292 cprop_operand (gimple stmt, use_operand_p op_p)
2294 tree val;
2295 tree op = USE_FROM_PTR (op_p);
2297 /* If the operand has a known constant value or it is known to be a
2298 copy of some other variable, use the value or copy stored in
2299 CONST_AND_COPIES. */
2300 val = SSA_NAME_VALUE (op);
2301 if (val && val != op)
2303 /* Do not replace hard register operands in asm statements. */
2304 if (gimple_code (stmt) == GIMPLE_ASM
2305 && !may_propagate_copy_into_asm (op))
2306 return;
2308 /* Certain operands are not allowed to be copy propagated due
2309 to their interaction with exception handling and some GCC
2310 extensions. */
2311 if (!may_propagate_copy (op, val))
2312 return;
2314 /* Do not propagate copies into BIVs.
2315 See PR23821 and PR62217 for how this can disturb IV and
2316 number of iteration analysis. */
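/* A hypothetical example: if OP is defined by the loop-header PHI
   "i_1 = PHI <0(preheader), i_7(latch)>" and VAL is an equivalent
   SSA name, substituting VAL for uses of i_1 would hide the basic
   induction variable from the IV analyzers.  */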
2317 if (TREE_CODE (val) != INTEGER_CST)
2319 gimple def = SSA_NAME_DEF_STMT (op);
2320 if (gimple_code (def) == GIMPLE_PHI
2321 && gimple_bb (def)->loop_father->header == gimple_bb (def))
2322 return;
2325 /* Dump details. */
2326 if (dump_file && (dump_flags & TDF_DETAILS))
2328 fprintf (dump_file, " Replaced '");
2329 print_generic_expr (dump_file, op, dump_flags);
2330 fprintf (dump_file, "' with %s '",
2331 (TREE_CODE (val) != SSA_NAME ? "constant" : "variable"));
2332 print_generic_expr (dump_file, val, dump_flags);
2333 fprintf (dump_file, "'\n");
2336 if (TREE_CODE (val) != SSA_NAME)
2337 opt_stats.num_const_prop++;
2338 else
2339 opt_stats.num_copy_prop++;
2341 propagate_value (op_p, val);
2343 /* And note that we modified this statement. This is now
2344 safe, even if we changed virtual operands since we will
2345 rescan the statement and rewrite its operands again. */
2346 gimple_set_modified (stmt, true);
2350 /* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
2351 known value for that SSA_NAME (or NULL if no value is known).
2353 Propagate values from CONST_AND_COPIES into the uses, vuses and
2354 vdef_ops of STMT. */
2356 static void
2357 cprop_into_stmt (gimple stmt)
2359 use_operand_p op_p;
2360 ssa_op_iter iter;
2362 FOR_EACH_SSA_USE_OPERAND (op_p, stmt, iter, SSA_OP_USE)
2363 cprop_operand (stmt, op_p);
2366 /* Optimize the statement pointed to by iterator SI.
2368 We try to perform some simplistic global redundancy elimination and
2369 constant propagation:
2371 1- To detect global redundancy, we keep track of expressions that have
2372 been computed in this block and its dominators. If we find that the
2373 same expression is computed more than once, we eliminate repeated
2374 computations by using the target of the first one.
2376 2- Constant values and copy assignments. This is used to do very
2377 simplistic constant and copy propagation. When a constant or copy
2378 assignment is found, we map the value on the RHS of the assignment to
2379 the variable in the LHS in the CONST_AND_COPIES table. */
2381 static void
2382 optimize_stmt (basic_block bb, gimple_stmt_iterator si)
2384 gimple stmt, old_stmt;
2385 bool may_optimize_p;
2386 bool modified_p = false;
2387 bool was_noreturn;
2389 old_stmt = stmt = gsi_stmt (si);
2390 was_noreturn = is_gimple_call (stmt) && gimple_call_noreturn_p (stmt);
2392 if (dump_file && (dump_flags & TDF_DETAILS))
2394 fprintf (dump_file, "Optimizing statement ");
2395 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2398 if (gimple_code (stmt) == GIMPLE_COND)
2399 canonicalize_comparison (as_a <gcond *> (stmt));
2401 update_stmt_if_modified (stmt);
2402 opt_stats.num_stmts++;
2404 /* Const/copy propagate into USES, VUSES and the RHS of VDEFs. */
2405 cprop_into_stmt (stmt);
2407 /* If the statement has been modified with constant replacements,
2408 fold its RHS before checking for redundant computations. */
2409 if (gimple_modified_p (stmt))
2411 tree rhs = NULL;
2413 /* Try to fold the statement making sure that STMT is kept
2414 up to date. */
2415 if (fold_stmt (&si))
2417 stmt = gsi_stmt (si);
2418 gimple_set_modified (stmt, true);
2420 if (dump_file && (dump_flags & TDF_DETAILS))
2422 fprintf (dump_file, " Folded to: ");
2423 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2427 /* We only need to consider cases that can yield a gimple operand. */
2428 if (gimple_assign_single_p (stmt))
2429 rhs = gimple_assign_rhs1 (stmt);
2430 else if (gimple_code (stmt) == GIMPLE_GOTO)
2431 rhs = gimple_goto_dest (stmt);
2432 else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
2433 /* This should never be an ADDR_EXPR. */
2434 rhs = gimple_switch_index (swtch_stmt);
2436 if (rhs && TREE_CODE (rhs) == ADDR_EXPR)
2437 recompute_tree_invariant_for_addr_expr (rhs);
2439 /* Indicate that maybe_clean_or_replace_eh_stmt needs to be called,
2440 even if fold_stmt updated the stmt already and thus cleared
2441 gimple_modified_p flag on it. */
2442 modified_p = true;
2445 /* Check for redundant computations. Do this only for side-effect-free
2446 assignments, calls with an LHS, conditionals, and switches. */
2447 may_optimize_p = (!gimple_has_side_effects (stmt)
2448 && (is_gimple_assign (stmt)
2449 || (is_gimple_call (stmt)
2450 && gimple_call_lhs (stmt) != NULL_TREE)
2451 || gimple_code (stmt) == GIMPLE_COND
2452 || gimple_code (stmt) == GIMPLE_SWITCH));
2454 if (may_optimize_p)
2456 if (gimple_code (stmt) == GIMPLE_CALL)
2458 /* Resolve __builtin_constant_p. If it hasn't been
2459 folded to integer_one_node by now, it's fairly
2460 certain that the value simply isn't constant. */
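/* A hypothetical example: a surviving call such as
   "t_5 = __builtin_constant_p (n_2);" is resolved to t_5 = 0 here,
   allowing any code guarded by it to be removed later.  */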
2461 tree callee = gimple_call_fndecl (stmt);
2462 if (callee
2463 && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL
2464 && DECL_FUNCTION_CODE (callee) == BUILT_IN_CONSTANT_P)
2466 propagate_tree_value_into_stmt (&si, integer_zero_node);
2467 stmt = gsi_stmt (si);
2471 update_stmt_if_modified (stmt);
2472 eliminate_redundant_computations (&si);
2473 stmt = gsi_stmt (si);
2475 /* Perform simple redundant store elimination. */
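/* A hypothetical example: a store "a = x_1;" dominated by the load
   "x_1 = a;" (with no intervening clobber of a) stores the value
   already in memory and can simply be deleted.  */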
2476 if (gimple_assign_single_p (stmt)
2477 && TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2479 tree lhs = gimple_assign_lhs (stmt);
2480 tree rhs = gimple_assign_rhs1 (stmt);
2481 tree cached_lhs;
2482 gassign *new_stmt;
2483 if (TREE_CODE (rhs) == SSA_NAME)
2485 tree tem = SSA_NAME_VALUE (rhs);
2486 if (tem)
2487 rhs = tem;
2489 /* Build a new statement with the RHS and LHS exchanged. */
2490 if (TREE_CODE (rhs) == SSA_NAME)
2492 gimple defstmt = SSA_NAME_DEF_STMT (rhs);
2493 new_stmt = gimple_build_assign (rhs, lhs);
2494 SSA_NAME_DEF_STMT (rhs) = defstmt;
2496 else
2497 new_stmt = gimple_build_assign (rhs, lhs);
2498 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
2499 cached_lhs = lookup_avail_expr (new_stmt, false);
2500 if (cached_lhs
2501 && rhs == cached_lhs)
2503 basic_block bb = gimple_bb (stmt);
2504 unlink_stmt_vdef (stmt);
2505 if (gsi_remove (&si, true))
2507 bitmap_set_bit (need_eh_cleanup, bb->index);
2508 if (dump_file && (dump_flags & TDF_DETAILS))
2509 fprintf (dump_file, " Flagged to clear EH edges.\n");
2511 release_defs (stmt);
2512 return;
2517 /* Record any additional equivalences created by this statement. */
2518 if (is_gimple_assign (stmt))
2519 record_equivalences_from_stmt (stmt, may_optimize_p);
2521 /* If STMT is a COND_EXPR and it was modified, then we may know
2522 where it goes. If that is the case, then mark the CFG as altered.
2524 This will cause us to later call remove_unreachable_blocks and
2525 cleanup_tree_cfg when it is safe to do so. It is not safe to
2526 clean things up here since removal of edges and such can trigger
2527 the removal of PHI nodes, which in turn can release SSA_NAMEs to
2528 the manager.
2530 That's all fine and good, except that once SSA_NAMEs are released
2531 to the manager, we must not call create_ssa_name until all references
2532 to released SSA_NAMEs have been eliminated.
2534 All references to the deleted SSA_NAMEs cannot be eliminated until
2535 we remove unreachable blocks.
2537 We cannot remove unreachable blocks until after we have completed
2538 any queued jump threading.
2540 We cannot complete any queued jump threads until we have taken
2541 appropriate variables out of SSA form. Taking variables out of
2542 SSA form can call create_ssa_name and thus we lose.
2544 Ultimately I suspect we're going to need to change the interface
2545 into the SSA_NAME manager. */
2546 if (gimple_modified_p (stmt) || modified_p)
2548 tree val = NULL;
2550 update_stmt_if_modified (stmt);
2552 if (gimple_code (stmt) == GIMPLE_COND)
2553 val = fold_binary_loc (gimple_location (stmt),
2554 gimple_cond_code (stmt), boolean_type_node,
2555 gimple_cond_lhs (stmt), gimple_cond_rhs (stmt));
2556 else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
2557 val = gimple_switch_index (swtch_stmt);
2559 if (val && TREE_CODE (val) == INTEGER_CST && find_taken_edge (bb, val))
2560 cfg_altered = true;
2562 /* If we simplified a statement in such a way that it can be shown
2563 not to trap, update the EH information and the CFG to match. */
2564 if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt))
2566 bitmap_set_bit (need_eh_cleanup, bb->index);
2567 if (dump_file && (dump_flags & TDF_DETAILS))
2568 fprintf (dump_file, " Flagged to clear EH edges.\n");
2571 if (!was_noreturn
2572 && is_gimple_call (stmt) && gimple_call_noreturn_p (stmt))
2573 need_noreturn_fixup.safe_push (stmt);
2577 /* Helper for walk_non_aliased_vuses. Determine if we arrived at
2578 the desired memory state. */
2580 static void *
2581 vuse_eq (ao_ref *, tree vuse1, unsigned int cnt, void *data)
2583 tree vuse2 = (tree) data;
2584 if (vuse1 == vuse2)
2585 return data;
2587 /* This bounds the stmt walks we perform on reference lookups
2588 to O(1) instead of O(N) where N is the number of dominating
2589 stores leading to a candidate. We re-use the SCCVN param
2590 for this as it is basically the same complexity. */
2591 if (cnt > (unsigned) PARAM_VALUE (PARAM_SCCVN_MAX_ALIAS_QUERIES_PER_ACCESS))
2592 return (void *)-1;
2594 return NULL;
2597 /* Search for an existing instance of STMT in the AVAIL_EXPRS table.
2598 If found, return its LHS. Otherwise insert STMT in the table and
2599 return NULL_TREE.
2601 Also, when an expression is first inserted in the table, it is
2602 also added to AVAIL_EXPRS_STACK, so that it can be removed when
2603 we finish processing this block and its children. */
2605 static tree
2606 lookup_avail_expr (gimple stmt, bool insert)
2608 expr_hash_elt **slot;
2609 tree lhs;
2610 tree temp;
2611 struct expr_hash_elt element;
2613 /* Get LHS of phi, assignment, or call; else NULL_TREE. */
2614 if (gimple_code (stmt) == GIMPLE_PHI)
2615 lhs = gimple_phi_result (stmt);
2616 else
2617 lhs = gimple_get_lhs (stmt);
2619 initialize_hash_element (stmt, lhs, &element);
2621 if (dump_file && (dump_flags & TDF_DETAILS))
2623 fprintf (dump_file, "LKUP ");
2624 print_expr_hash_elt (dump_file, &element);
2627 /* Don't bother remembering constant assignments and copy operations.
2628 Constants and copy operations are handled by the constant/copy propagator
2629 in optimize_stmt. */
2630 if (element.expr.kind == EXPR_SINGLE
2631 && (TREE_CODE (element.expr.ops.single.rhs) == SSA_NAME
2632 || is_gimple_min_invariant (element.expr.ops.single.rhs)))
2633 return NULL_TREE;
2635 /* Finally try to find the expression in the main expression hash table. */
2636 slot = avail_exprs->find_slot (&element, (insert ? INSERT : NO_INSERT));
2637 if (slot == NULL)
2639 free_expr_hash_elt_contents (&element);
2640 return NULL_TREE;
2642 else if (*slot == NULL)
2644 struct expr_hash_elt *element2 = XNEW (struct expr_hash_elt);
2645 *element2 = element;
2646 element2->stamp = element2;
2647 *slot = element2;
2649 if (dump_file && (dump_flags & TDF_DETAILS))
2651 fprintf (dump_file, "2>>> ");
2652 print_expr_hash_elt (dump_file, element2);
2655 avail_exprs_stack.safe_push
2656 (std::pair<expr_hash_elt_t, expr_hash_elt_t> (element2, NULL));
2657 return NULL_TREE;
2660 /* If we found a redundant memory operation, do an alias walk to
2661 check if we can re-use it. */
2662 if (gimple_vuse (stmt) != (*slot)->vop)
2664 tree vuse1 = (*slot)->vop;
2665 tree vuse2 = gimple_vuse (stmt);
2666 /* If we have a load of a register and a candidate in the
2667 hash with vuse1, then try to reach its stmt by walking
2668 up the virtual use-def chain using walk_non_aliased_vuses.
2669 But don't do this when removing expressions from the hash. */
2670 ao_ref ref;
2671 if (!(vuse1 && vuse2
2672 && gimple_assign_single_p (stmt)
2673 && TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME
2674 && (ao_ref_init (&ref, gimple_assign_rhs1 (stmt)), true)
2675 && walk_non_aliased_vuses (&ref, vuse2,
2676 vuse_eq, NULL, NULL, vuse1) != NULL))
2678 if (insert)
2680 struct expr_hash_elt *element2 = XNEW (struct expr_hash_elt);
2681 *element2 = element;
2682 element2->stamp = element2;
2684 /* Insert the expr into the hash by replacing the current
2685 entry and recording the value to restore in the
2686 avail_exprs_stack. */
2687 avail_exprs_stack.safe_push (std::make_pair (element2, *slot));
2688 *slot = element2;
2689 if (dump_file && (dump_flags & TDF_DETAILS))
2691 fprintf (dump_file, "2>>> ");
2692 print_expr_hash_elt (dump_file, *slot);
2695 return NULL_TREE;
2699 free_expr_hash_elt_contents (&element);
2701 /* Extract the LHS of the assignment so that it can be used as the current
2702 definition of another variable. */
2703 lhs = (*slot)->lhs;
2705 /* See if the LHS appears in the CONST_AND_COPIES table. If it does, then
2706 use the value from the const_and_copies table. */
2707 if (TREE_CODE (lhs) == SSA_NAME)
2709 temp = SSA_NAME_VALUE (lhs);
2710 if (temp)
2711 lhs = temp;
2714 if (dump_file && (dump_flags & TDF_DETAILS))
2716 fprintf (dump_file, "FIND: ");
2717 print_generic_expr (dump_file, lhs, 0);
2718 fprintf (dump_file, "\n");
2721 return lhs;
2724 /* Hashing and equality functions for AVAIL_EXPRS. We compute a value number
2725 for expressions using the code of the expression and the SSA numbers of
2726 its operands. */
2728 static hashval_t
2729 avail_expr_hash (const void *p)
2731 const struct hashable_expr *expr = &((const struct expr_hash_elt *)p)->expr;
2732 inchash::hash hstate;
2734 inchash::add_hashable_expr (expr, hstate);
2736 return hstate.end ();
2739 /* PHI-ONLY copy and constant propagation. This pass is meant to clean
2740 up degenerate PHIs created by or exposed by jump threading. */
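/* A hypothetical example of a degenerate PHI:

     x_4 = PHI <x_2(3), x_2(4)>

   Every argument is x_2, so x_4 is just a copy of x_2; all uses of
   x_4 can be replaced by x_2 and the PHI deleted.  */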
2742 /* Given a statement STMT, which is either a PHI node or an assignment,
2743 remove it from the IL. */
2745 static void
2746 remove_stmt_or_phi (gimple stmt)
2748 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
2750 if (gimple_code (stmt) == GIMPLE_PHI)
2751 remove_phi_node (&gsi, true);
2752 else
2754 gsi_remove (&gsi, true);
2755 release_defs (stmt);
2759 /* Given a statement STMT, which is either a PHI node or an assignment,
2760 return the "rhs" of the node; in the case of a non-degenerate
2761 phi, NULL is returned. */
2763 static tree
2764 get_rhs_or_phi_arg (gimple stmt)
2766 if (gimple_code (stmt) == GIMPLE_PHI)
2767 return degenerate_phi_result (as_a <gphi *> (stmt));
2768 else if (gimple_assign_single_p (stmt))
2769 return gimple_assign_rhs1 (stmt);
2770 else
2771 gcc_unreachable ();
2775 /* Given a statement STMT, which is either a PHI node or an assignment,
2776 return the "lhs" of the node. */
2778 static tree
2779 get_lhs_or_phi_result (gimple stmt)
2781 if (gimple_code (stmt) == GIMPLE_PHI)
2782 return gimple_phi_result (stmt);
2783 else if (is_gimple_assign (stmt))
2784 return gimple_assign_lhs (stmt);
2785 else
2786 gcc_unreachable ();
2789 /* Propagate RHS into all uses of LHS (when possible).
2791 RHS and LHS are derived from STMT, which is passed in solely so
2792 that we can remove it if propagation is successful.
2794 When propagating into a PHI node or into a statement which turns
2795 into a trivial copy or constant initialization, set the
2796 appropriate bit in INTERESTING_NAMEs so that we will visit those
2797 nodes as well in an effort to pick up secondary optimization
2798 opportunities. */
2800 static void
2801 propagate_rhs_into_lhs (gimple stmt, tree lhs, tree rhs, bitmap interesting_names)
2803 /* First verify that propagation is valid. */
2804 if (may_propagate_copy (lhs, rhs))
2806 use_operand_p use_p;
2807 imm_use_iterator iter;
2808 gimple use_stmt;
2809 bool all = true;
2811 /* Dump details. */
2812 if (dump_file && (dump_flags & TDF_DETAILS))
2814 fprintf (dump_file, " Replacing '");
2815 print_generic_expr (dump_file, lhs, dump_flags);
2816 fprintf (dump_file, "' with %s '",
2817 (TREE_CODE (rhs) != SSA_NAME ? "constant" : "variable"));
2818 print_generic_expr (dump_file, rhs, dump_flags);
2819 fprintf (dump_file, "'\n");
2822 /* Walk over every use of LHS and try to replace the use with RHS.
2823 At this point the only reason why such a propagation would not
2824 be successful would be if the use occurs in an ASM_EXPR. */
2825 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
2827 /* Leave debug stmts alone. If we succeed in propagating
2828 all non-debug uses, we'll drop the DEF, and propagation
2829 into debug stmts will occur then. */
2830 if (gimple_debug_bind_p (use_stmt))
2831 continue;
2833 /* It's not always safe to propagate into an ASM_EXPR. */
2834 if (gimple_code (use_stmt) == GIMPLE_ASM
2835 && ! may_propagate_copy_into_asm (lhs))
2837 all = false;
2838 continue;
2841 /* It's not ok to propagate into the definition stmt of RHS.
2842 <bb 9>:
2843 # prephitmp.12_36 = PHI <g_67.1_6(9)>
2844 g_67.1_6 = prephitmp.12_36;
2845 goto <bb 9>;
2846 While this is strictly all dead code, we do not want to
2847 deal with it here. */
2848 if (TREE_CODE (rhs) == SSA_NAME
2849 && SSA_NAME_DEF_STMT (rhs) == use_stmt)
2851 all = false;
2852 continue;
2855 /* Dump details. */
2856 if (dump_file && (dump_flags & TDF_DETAILS))
2858 fprintf (dump_file, " Original statement:");
2859 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2862 /* Propagate the RHS into this use of the LHS. */
2863 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
2864 propagate_value (use_p, rhs);
2866 /* Special cases to avoid useless calls into the folding
2867 routines, operand scanning, etc.
2869 Propagation into a PHI may cause the PHI to become
2870 a degenerate, so mark the PHI as interesting. No other
2871 actions are necessary. */
2872 if (gimple_code (use_stmt) == GIMPLE_PHI)
2874 tree result;
2876 /* Dump details. */
2877 if (dump_file && (dump_flags & TDF_DETAILS))
2879 fprintf (dump_file, " Updated statement:");
2880 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2883 result = get_lhs_or_phi_result (use_stmt);
2884 bitmap_set_bit (interesting_names, SSA_NAME_VERSION (result));
2885 continue;
2888 /* From this point onward we are propagating into a
2889 real statement. Folding may (or may not) be possible,
2890 we may expose new operands, expose dead EH edges,
2891 etc. */
2892 /* NOTE tuples. In the tuples world, fold_stmt_inplace
2893 cannot fold a call that simplifies to a constant,
2894 because the GIMPLE_CALL must be replaced by a
2895 GIMPLE_ASSIGN, and there is no way to effect such a
2896 transformation in-place. We might want to consider
2897 using the more general fold_stmt here. */
2899 gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
2900 fold_stmt_inplace (&gsi);
2903 /* Sometimes propagation can expose new operands to the
2904 renamer. */
2905 update_stmt (use_stmt);
2907 /* Dump details. */
2908 if (dump_file && (dump_flags & TDF_DETAILS))
2910 fprintf (dump_file, " Updated statement:");
2911 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2914 /* If we replaced a variable index with a constant, then
2915 we would need to update the invariant flag for ADDR_EXPRs. */
2916 if (gimple_assign_single_p (use_stmt)
2917 && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ADDR_EXPR)
2918 recompute_tree_invariant_for_addr_expr
2919 (gimple_assign_rhs1 (use_stmt));
2921 /* If we cleaned up EH information from the statement,
2922 mark its containing block as needing EH cleanups. */
2923 if (maybe_clean_or_replace_eh_stmt (use_stmt, use_stmt))
2925 bitmap_set_bit (need_eh_cleanup, gimple_bb (use_stmt)->index);
2926 if (dump_file && (dump_flags & TDF_DETAILS))
2927 fprintf (dump_file, " Flagged to clear EH edges.\n");
2930 /* Propagation may expose new trivial copy/constant propagation
2931 opportunities. */
2932 if (gimple_assign_single_p (use_stmt)
2933 && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
2934 && (TREE_CODE (gimple_assign_rhs1 (use_stmt)) == SSA_NAME
2935 || is_gimple_min_invariant (gimple_assign_rhs1 (use_stmt))))
2937 tree result = get_lhs_or_phi_result (use_stmt);
2938 bitmap_set_bit (interesting_names, SSA_NAME_VERSION (result));
2941 /* Propagation into these nodes may make certain edges in
2942 the CFG unexecutable. We want to identify them, as PHI nodes
2943 at the destination of those unexecutable edges may become
2944 degenerate. */
2945 else if (gimple_code (use_stmt) == GIMPLE_COND
2946 || gimple_code (use_stmt) == GIMPLE_SWITCH
2947 || gimple_code (use_stmt) == GIMPLE_GOTO)
2949 tree val;
2951 if (gimple_code (use_stmt) == GIMPLE_COND)
2952 val = fold_binary_loc (gimple_location (use_stmt),
2953 gimple_cond_code (use_stmt),
2954 boolean_type_node,
2955 gimple_cond_lhs (use_stmt),
2956 gimple_cond_rhs (use_stmt));
2957 else if (gimple_code (use_stmt) == GIMPLE_SWITCH)
2958 val = gimple_switch_index (as_a <gswitch *> (use_stmt));
2959 else
2960 val = gimple_goto_dest (use_stmt);
2962 if (val && is_gimple_min_invariant (val))
2964 basic_block bb = gimple_bb (use_stmt);
2965 edge te = find_taken_edge (bb, val);
2966 edge_iterator ei;
2967 edge e;
2968 gimple_stmt_iterator gsi;
2969 gphi_iterator psi;
2971 /* Remove all outgoing edges except TE. */
2972 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei));)
2974 if (e != te)
2976 /* Mark all the PHI nodes at the destination of
2977 the unexecutable edge as interesting. */
2978 for (psi = gsi_start_phis (e->dest);
2979 !gsi_end_p (psi);
2980 gsi_next (&psi))
2982 gphi *phi = psi.phi ();
2984 tree result = gimple_phi_result (phi);
2985 int version = SSA_NAME_VERSION (result);
2987 bitmap_set_bit (interesting_names, version);
2990 te->probability += e->probability;
2992 te->count += e->count;
2993 remove_edge (e);
2994 cfg_altered = true;
2996 else
2997 ei_next (&ei);
3000 gsi = gsi_last_bb (gimple_bb (use_stmt));
3001 gsi_remove (&gsi, true);
3003 /* And fixup the flags on the single remaining edge. */
3004 te->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
3005 te->flags &= ~EDGE_ABNORMAL;
3006 te->flags |= EDGE_FALLTHRU;
3007 if (te->probability > REG_BR_PROB_BASE)
3008 te->probability = REG_BR_PROB_BASE;
3013 /* Ensure there is nothing else to do. */
3014 gcc_assert (!all || has_zero_uses (lhs));
3016 /* If we were able to propagate away all uses of LHS, then
3017 we can remove STMT. */
3018 if (all)
3019 remove_stmt_or_phi (stmt);
3023 /* STMT is either a PHI node (potentially a degenerate PHI node) or
3024 a statement that is a trivial copy or constant initialization.
3026 Attempt to eliminate STMT by propagating its RHS into all uses of
3027 its LHS. This may in turn set new bits in INTERESTING_NAMES
3028 for nodes we want to revisit later.
3030 All exit paths should clear INTERESTING_NAMES for the result
3031 of STMT. */
3033 static void
3034 eliminate_const_or_copy (gimple stmt, bitmap interesting_names)
3036 tree lhs = get_lhs_or_phi_result (stmt);
3037 tree rhs;
3038 int version = SSA_NAME_VERSION (lhs);
3040 /* If the LHS of this statement or PHI has no uses, then we can
3041 just eliminate it. This can occur if, for example, the PHI
3042 was created by block duplication due to threading and its only
3043 use was in the conditional at the end of the block which was
3044 deleted. */
3045 if (has_zero_uses (lhs))
3047 bitmap_clear_bit (interesting_names, version);
3048 remove_stmt_or_phi (stmt);
3049 return;
3052 /* Get the RHS of the assignment or PHI node if the PHI is a
3053 degenerate. */
3054 rhs = get_rhs_or_phi_arg (stmt);
3055 if (!rhs)
3057 bitmap_clear_bit (interesting_names, version);
3058 return;
3061 if (!virtual_operand_p (lhs))
3062 propagate_rhs_into_lhs (stmt, lhs, rhs, interesting_names);
3063 else
3065 gimple use_stmt;
3066 imm_use_iterator iter;
3067 use_operand_p use_p;
3068 /* For virtual operands we have to propagate into all uses, as
3069 otherwise we will create overlapping live ranges. */
3070 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
3071 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
3072 SET_USE (use_p, rhs);
3073 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
3074 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs) = 1;
3075 remove_stmt_or_phi (stmt);
3078 /* Note that STMT may well have been deleted by now, so do
3079 not access it; instead use the saved version number to clear
3080 STMT's entry in the worklist. */
3081 bitmap_clear_bit (interesting_names, version);
3084 /* The first phase in degenerate PHI elimination.
3086 Eliminate the degenerate PHIs in BB, then recurse on the
3087 dominator children of BB. */
3089 static void
3090 eliminate_degenerate_phis_1 (basic_block bb, bitmap interesting_names)
3092 gphi_iterator gsi;
3093 basic_block son;
3095 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3097 gphi *phi = gsi.phi ();
3099 eliminate_const_or_copy (phi, interesting_names);
3102 /* Recurse into the dominator children of BB. */
3103 for (son = first_dom_son (CDI_DOMINATORS, bb);
3104 son;
3105 son = next_dom_son (CDI_DOMINATORS, son))
3106 eliminate_degenerate_phis_1 (son, interesting_names);
3110 /* A very simple pass to eliminate degenerate PHI nodes from the
3111 IL. This is meant to be fast enough to be run several
3112 times in the optimization pipeline.
3114 Certain optimizations, particularly those which duplicate blocks
3115 or remove edges from the CFG can create or expose PHIs which are
3116 trivial copies or constant initializations.
3118 While we could pick up these optimizations in DOM or with the
3119 combination of copy-prop and CCP, those solutions are far too
3120 heavy-weight for our needs.
3122 This implementation has two phases so that we can efficiently
3123 eliminate the first order degenerate PHIs and second order
3124 degenerate PHIs.
3126 The first phase performs a dominator walk to identify and eliminate
3127 the vast majority of the degenerate PHIs. When a degenerate PHI
3128 is identified and eliminated any affected statements or PHIs
3129 are put on a worklist.
3131 The second phase eliminates degenerate PHIs and trivial copies
3132 or constant initializations using the worklist. This is how we
3133 pick up the secondary optimization opportunities with minimal
3134 cost. */
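/* A hypothetical second-order example: deleting
   "x_4 = PHI <x_2(3), x_2(4)>" rewrites a use in
   "y_6 = PHI <x_4(5), x_2(6)>" to "y_6 = PHI <x_2(5), x_2(6)>", which
   only then becomes degenerate; the worklist (second) phase picks it
   up.  */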
3136 namespace {
3138 const pass_data pass_data_phi_only_cprop =
3140 GIMPLE_PASS, /* type */
3141 "phicprop", /* name */
3142 OPTGROUP_NONE, /* optinfo_flags */
3143 TV_TREE_PHI_CPROP, /* tv_id */
3144 ( PROP_cfg | PROP_ssa ), /* properties_required */
3145 0, /* properties_provided */
3146 0, /* properties_destroyed */
3147 0, /* todo_flags_start */
3148 ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
3151 class pass_phi_only_cprop : public gimple_opt_pass
3153 public:
3154 pass_phi_only_cprop (gcc::context *ctxt)
3155 : gimple_opt_pass (pass_data_phi_only_cprop, ctxt)
3158 /* opt_pass methods: */
3159 opt_pass * clone () { return new pass_phi_only_cprop (m_ctxt); }
3160 virtual bool gate (function *) { return flag_tree_dom != 0; }
3161 virtual unsigned int execute (function *);
3163 }; // class pass_phi_only_cprop
3165 unsigned int
3166 pass_phi_only_cprop::execute (function *fun)
3168 bitmap interesting_names;
3169 bitmap interesting_names1;
3171 /* Bitmap of blocks which need EH information updated. We cannot
3172 update it on-the-fly, as doing so invalidates the dominator tree. */
3173 need_eh_cleanup = BITMAP_ALLOC (NULL);
3175 /* INTERESTING_NAMES is effectively our worklist, indexed by
3176 SSA_NAME_VERSION.
3178 A set bit indicates that the statement or PHI node which
3179 defines the SSA_NAME should be (re)examined to determine if
3180 it has become a degenerate PHI or trivial const/copy propagation
3181 opportunity.
3183 Experiments have shown we generally get better compilation
3184 time behavior with bitmaps rather than sbitmaps. */
3185 interesting_names = BITMAP_ALLOC (NULL);
3186 interesting_names1 = BITMAP_ALLOC (NULL);
3188 calculate_dominance_info (CDI_DOMINATORS);
3189 cfg_altered = false;
3191 /* First phase. Eliminate degenerate PHIs via a dominator
3192 walk of the CFG.
3194 Experiments have indicated that we generally get better
3195 compile-time behavior by visiting blocks in the first
3196 phase in dominator order. Presumably this is because walking
3197 in dominator order leaves fewer PHIs for later examination
3198 by the worklist phase. */
3199 eliminate_degenerate_phis_1 (ENTRY_BLOCK_PTR_FOR_FN (fun),
3200 interesting_names);
3202 /* Second phase. Eliminate second order degenerate PHIs as well
3203 as trivial copies or constant initializations identified by
3204 the first phase or this phase. Basically we keep iterating
3205 until our set of INTERESTING_NAMEs is empty. */
3206 while (!bitmap_empty_p (interesting_names))
3208 unsigned int i;
3209 bitmap_iterator bi;
3211 /* EXECUTE_IF_SET_IN_BITMAP does not like its bitmap
3212 changed during the loop. Copy it to another bitmap and
3213 use that. */
3214 bitmap_copy (interesting_names1, interesting_names);
3216 EXECUTE_IF_SET_IN_BITMAP (interesting_names1, 0, i, bi)
3218 tree name = ssa_name (i);
3220 /* Ignore SSA_NAMEs that have been released because
3221 their defining statement was deleted (unreachable). */
3222 if (name)
3223 eliminate_const_or_copy (SSA_NAME_DEF_STMT (ssa_name (i)),
3224 interesting_names);
3228 if (cfg_altered)
3230 free_dominance_info (CDI_DOMINATORS);
3231 /* If we changed the CFG schedule loops for fixup by cfgcleanup. */
3232 loops_state_set (LOOPS_NEED_FIXUP);
3235 /* Propagation of const and copies may make some EH edges dead. Purge
3236 such edges from the CFG as needed. */
3237 if (!bitmap_empty_p (need_eh_cleanup))
3239 gimple_purge_all_dead_eh_edges (need_eh_cleanup);
3240 BITMAP_FREE (need_eh_cleanup);
3243 BITMAP_FREE (interesting_names);
3244 BITMAP_FREE (interesting_names1);
3245 return 0;
3248 } // anon namespace
3250 gimple_opt_pass *
3251 make_pass_phi_only_cprop (gcc::context *ctxt)
3253 return new pass_phi_only_cprop (ctxt);