/* SSA Dominator optimizations for trees
   Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.
   Contributed by Diego Novillo <dnovillo@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "flags.h"
#include "tm_p.h"
#include "basic-block.h"
#include "cfgloop.h"
#include "output.h"
#include "function.h"
#include "tree-pretty-print.h"
#include "gimple-pretty-print.h"
#include "timevar.h"
#include "tree-dump.h"
#include "tree-flow.h"
#include "domwalk.h"
#include "tree-pass.h"
#include "tree-ssa-propagate.h"
#include "langhooks.h"
#include "params.h"
/* This file implements optimizations on the dominator tree.  */

/* Representation of a "naked" right-hand-side expression, to be used
   in recording available expressions in the expression hash table.  */

enum expr_kind
{
  EXPR_SINGLE,
  EXPR_UNARY,
  EXPR_BINARY,
  EXPR_TERNARY,
  EXPR_CALL
};
struct hashable_expr
{
  tree type;
  enum expr_kind kind;
  union {
    struct { tree rhs; } single;
    struct { enum tree_code op;  tree opnd; } unary;
    struct { enum tree_code op;  tree opnd0, opnd1; } binary;
    struct { enum tree_code op;  tree opnd0, opnd1, opnd2; } ternary;
    struct { tree fn;  bool pure;  size_t nargs;  tree *args; } call;
  } ops;
};
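
/* For illustration (a sketch with hypothetical SSA names): an assignment
   "a_1 = b_2 + c_3" is recorded with KIND == EXPR_BINARY,
   ops.binary.op == PLUS_EXPR, opnd0 == b_2 and opnd1 == c_3, with TYPE
   taken from the type of the assignment's LHS.  */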
/* Structure for recording known values of a conditional expression
   at the exits from its block.  */

struct cond_equivalence
{
  struct hashable_expr cond;
  tree value;
};

/* Structure for recording edge equivalences as well as any pending
   edge redirections during the dominator optimizer.

   Computing and storing the edge equivalences instead of creating
   them on-demand can save significant amounts of time, particularly
   for pathological cases involving switch statements.

   These structures live for a single iteration of the dominator
   optimizer in the edge's AUX field.  At the end of an iteration we
   free each of these structures and update the AUX field to point
   to any requested redirection target (the code for updating the
   CFG and SSA graph for edge redirection expects redirection edge
   targets to be in the AUX field for each edge).  */

struct edge_info
{
  /* If this edge creates a simple equivalence, the LHS and RHS of
     the equivalence will be stored here.  */
  tree lhs;
  tree rhs;

  /* Traversing an edge may also indicate one or more particular conditions
     are true or false.  The number of recorded conditions can vary, but
     can be determined by the condition's code.  So we have an array
     and its maximum index rather than use a varray.  */
  struct cond_equivalence *cond_equivalences;
  unsigned int max_cond_equivalences;
};
/* Hash table with expressions made available during the renaming process.
   When an assignment of the form X_i = EXPR is found, the statement is
   stored in this table.  If the same expression EXPR is later found on the
   RHS of another statement, it is replaced with X_i (thus performing
   global redundancy elimination).  Similarly as we pass through conditionals
   we record the conditional itself as having either a true or false value
   in this table.  */
static htab_t avail_exprs;
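
/* For illustration (hypothetical SSA names): given a dominating statement
   "x_1 = a_2 + b_3" and a later statement "y_4 = a_2 + b_3", the lookup
   for the second statement finds the first in this table, and the second
   statement is rewritten as "y_4 = x_1".  */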
/* Stack of available expressions in AVAIL_EXPRs.  Each block pushes any
   expressions it enters into the hash table along with a marker entry
   (null).  When we finish processing the block, we pop off entries and
   remove the expressions from the global hash table until we hit the
   marker.  */
typedef struct expr_hash_elt * expr_hash_elt_t;
DEF_VEC_P(expr_hash_elt_t);
DEF_VEC_ALLOC_P(expr_hash_elt_t,heap);

static VEC(expr_hash_elt_t,heap) *avail_exprs_stack;
/* Structure for entries in the expression hash table.  */

struct expr_hash_elt
{
  /* The value (lhs) of this expression.  */
  tree lhs;

  /* The expression (rhs) we want to record.  */
  struct hashable_expr expr;

  /* The stmt pointer if this element corresponds to a statement.  */
  gimple stmt;

  /* The hash value for RHS.  */
  hashval_t hash;

  /* A unique stamp, typically the address of the hash
     element itself, used in removing entries from the table.  */
  struct expr_hash_elt *stamp;
};
/* Stack of dest,src pairs that need to be restored during finalization.

   A NULL entry is used to mark the end of pairs which need to be
   restored during finalization of this block.  */
static VEC(tree,heap) *const_and_copies_stack;

/* Track whether or not we have changed the control flow graph.  */
static bool cfg_altered;

/* Bitmap of blocks that have had EH statements cleaned.  We should
   remove their dead edges eventually.  */
static bitmap need_eh_cleanup;

/* Statistics for dominator optimizations.  */
struct opt_stats_d
{
  long num_stmts;
  long num_exprs_considered;
  long num_re;
  long num_const_prop;
  long num_copy_prop;
};

static struct opt_stats_d opt_stats;
/* Local functions.  */
static void optimize_stmt (basic_block, gimple_stmt_iterator);
static tree lookup_avail_expr (gimple, bool);
static hashval_t avail_expr_hash (const void *);
static hashval_t real_avail_expr_hash (const void *);
static int avail_expr_eq (const void *, const void *);
static void htab_statistics (FILE *, htab_t);
static void record_cond (struct cond_equivalence *);
static void record_const_or_copy (tree, tree);
static void record_equality (tree, tree);
static void record_equivalences_from_phis (basic_block);
static void record_equivalences_from_incoming_edge (basic_block);
static void eliminate_redundant_computations (gimple_stmt_iterator *);
static void record_equivalences_from_stmt (gimple, int);
static void dom_thread_across_edge (struct dom_walk_data *, edge);
static void dom_opt_leave_block (struct dom_walk_data *, basic_block);
static void dom_opt_enter_block (struct dom_walk_data *, basic_block);
static void remove_local_expressions_from_table (void);
static void restore_vars_to_original_value (void);
static edge single_incoming_edge_ignoring_loop_edges (basic_block);
/* Given a statement STMT, initialize the hash table element pointed to
   by ELEMENT.  */

static void
initialize_hash_element (gimple stmt, tree lhs,
                         struct expr_hash_elt *element)
{
  enum gimple_code code = gimple_code (stmt);
  struct hashable_expr *expr = &element->expr;

  if (code == GIMPLE_ASSIGN)
    {
      enum tree_code subcode = gimple_assign_rhs_code (stmt);

      expr->type = NULL_TREE;

      switch (get_gimple_rhs_class (subcode))
        {
        case GIMPLE_SINGLE_RHS:
          expr->kind = EXPR_SINGLE;
          expr->ops.single.rhs = gimple_assign_rhs1 (stmt);
          break;
        case GIMPLE_UNARY_RHS:
          expr->kind = EXPR_UNARY;
          expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
          expr->ops.unary.op = subcode;
          expr->ops.unary.opnd = gimple_assign_rhs1 (stmt);
          break;
        case GIMPLE_BINARY_RHS:
          expr->kind = EXPR_BINARY;
          expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
          expr->ops.binary.op = subcode;
          expr->ops.binary.opnd0 = gimple_assign_rhs1 (stmt);
          expr->ops.binary.opnd1 = gimple_assign_rhs2 (stmt);
          break;
        case GIMPLE_TERNARY_RHS:
          expr->kind = EXPR_TERNARY;
          expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
          expr->ops.ternary.op = subcode;
          expr->ops.ternary.opnd0 = gimple_assign_rhs1 (stmt);
          expr->ops.ternary.opnd1 = gimple_assign_rhs2 (stmt);
          expr->ops.ternary.opnd2 = gimple_assign_rhs3 (stmt);
          break;
        default:
          gcc_unreachable ();
        }
    }
  else if (code == GIMPLE_COND)
    {
      expr->type = boolean_type_node;
      expr->kind = EXPR_BINARY;
      expr->ops.binary.op = gimple_cond_code (stmt);
      expr->ops.binary.opnd0 = gimple_cond_lhs (stmt);
      expr->ops.binary.opnd1 = gimple_cond_rhs (stmt);
    }
  else if (code == GIMPLE_CALL)
    {
      size_t nargs = gimple_call_num_args (stmt);
      size_t i;

      gcc_assert (gimple_call_lhs (stmt));

      expr->type = TREE_TYPE (gimple_call_lhs (stmt));
      expr->kind = EXPR_CALL;
      expr->ops.call.fn = gimple_call_fn (stmt);

      if (gimple_call_flags (stmt) & (ECF_CONST | ECF_PURE))
        expr->ops.call.pure = true;
      else
        expr->ops.call.pure = false;

      expr->ops.call.nargs = nargs;
      expr->ops.call.args = (tree *) xcalloc (nargs, sizeof (tree));
      for (i = 0; i < nargs; i++)
        expr->ops.call.args[i] = gimple_call_arg (stmt, i);
    }
  else if (code == GIMPLE_SWITCH)
    {
      expr->type = TREE_TYPE (gimple_switch_index (stmt));
      expr->kind = EXPR_SINGLE;
      expr->ops.single.rhs = gimple_switch_index (stmt);
    }
  else if (code == GIMPLE_GOTO)
    {
      expr->type = TREE_TYPE (gimple_goto_dest (stmt));
      expr->kind = EXPR_SINGLE;
      expr->ops.single.rhs = gimple_goto_dest (stmt);
    }
  else
    gcc_unreachable ();

  element->lhs = lhs;
  element->stmt = stmt;
  element->hash = avail_expr_hash (element);
  element->stamp = element;
}
/* Given a conditional expression COND as a tree, initialize
   a hashable_expr expression EXPR.  The conditional must be a
   comparison or logical negation.  A constant or a variable is
   not permitted.  */

static void
initialize_expr_from_cond (tree cond, struct hashable_expr *expr)
{
  expr->type = boolean_type_node;

  if (COMPARISON_CLASS_P (cond))
    {
      expr->kind = EXPR_BINARY;
      expr->ops.binary.op = TREE_CODE (cond);
      expr->ops.binary.opnd0 = TREE_OPERAND (cond, 0);
      expr->ops.binary.opnd1 = TREE_OPERAND (cond, 1);
    }
  else if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
    {
      expr->kind = EXPR_UNARY;
      expr->ops.unary.op = TRUTH_NOT_EXPR;
      expr->ops.unary.opnd = TREE_OPERAND (cond, 0);
    }
  else
    gcc_unreachable ();
}
/* Given a hashable_expr expression EXPR and an LHS,
   initialize the hash table element pointed to by ELEMENT.  */

static void
initialize_hash_element_from_expr (struct hashable_expr *expr,
                                   tree lhs,
                                   struct expr_hash_elt *element)
{
  element->expr = *expr;
  element->lhs = lhs;
  element->stmt = NULL;
  element->hash = avail_expr_hash (element);
  element->stamp = element;
}
/* Compare two hashable_expr structures for equivalence.
   They are considered equivalent when the expressions
   they denote must necessarily be equal.  The logic is intended
   to follow that of operand_equal_p in fold-const.c.  */

static bool
hashable_expr_equal_p (const struct hashable_expr *expr0,
                       const struct hashable_expr *expr1)
{
  tree type0 = expr0->type;
  tree type1 = expr1->type;

  /* If exactly one of the types is NULL, the expressions cannot be
     equal; if both are NULL, there is nothing to check.  */
  if ((type0 == NULL_TREE) ^ (type1 == NULL_TREE))
    return false;

  /* If both types don't have the same signedness, precision, and mode,
     then we can't consider them equal.  */
  if (type0 != type1
      && (TREE_CODE (type0) == ERROR_MARK
          || TREE_CODE (type1) == ERROR_MARK
          || TYPE_UNSIGNED (type0) != TYPE_UNSIGNED (type1)
          || TYPE_PRECISION (type0) != TYPE_PRECISION (type1)
          || TYPE_MODE (type0) != TYPE_MODE (type1)))
    return false;

  if (expr0->kind != expr1->kind)
    return false;

  switch (expr0->kind)
    {
    case EXPR_SINGLE:
      return operand_equal_p (expr0->ops.single.rhs,
                              expr1->ops.single.rhs, 0);

    case EXPR_UNARY:
      if (expr0->ops.unary.op != expr1->ops.unary.op)
        return false;

      if ((CONVERT_EXPR_CODE_P (expr0->ops.unary.op)
           || expr0->ops.unary.op == NON_LVALUE_EXPR)
          && TYPE_UNSIGNED (expr0->type) != TYPE_UNSIGNED (expr1->type))
        return false;

      return operand_equal_p (expr0->ops.unary.opnd,
                              expr1->ops.unary.opnd, 0);

    case EXPR_BINARY:
      if (expr0->ops.binary.op != expr1->ops.binary.op)
        return false;

      if (operand_equal_p (expr0->ops.binary.opnd0,
                           expr1->ops.binary.opnd0, 0)
          && operand_equal_p (expr0->ops.binary.opnd1,
                              expr1->ops.binary.opnd1, 0))
        return true;

      /* For commutative ops, allow the other order.  */
      return (commutative_tree_code (expr0->ops.binary.op)
              && operand_equal_p (expr0->ops.binary.opnd0,
                                  expr1->ops.binary.opnd1, 0)
              && operand_equal_p (expr0->ops.binary.opnd1,
                                  expr1->ops.binary.opnd0, 0));

    case EXPR_TERNARY:
      if (expr0->ops.ternary.op != expr1->ops.ternary.op
          || !operand_equal_p (expr0->ops.ternary.opnd2,
                               expr1->ops.ternary.opnd2, 0))
        return false;

      if (operand_equal_p (expr0->ops.ternary.opnd0,
                           expr1->ops.ternary.opnd0, 0)
          && operand_equal_p (expr0->ops.ternary.opnd1,
                              expr1->ops.ternary.opnd1, 0))
        return true;

      /* For commutative ops, allow the other order.  */
      return (commutative_ternary_tree_code (expr0->ops.ternary.op)
              && operand_equal_p (expr0->ops.ternary.opnd0,
                                  expr1->ops.ternary.opnd1, 0)
              && operand_equal_p (expr0->ops.ternary.opnd1,
                                  expr1->ops.ternary.opnd0, 0));

    case EXPR_CALL:
      {
        size_t i;

        /* If the calls are to different functions, then they
           clearly cannot be equal.  */
        if (! operand_equal_p (expr0->ops.call.fn,
                               expr1->ops.call.fn, 0))
          return false;

        if (! expr0->ops.call.pure)
          return false;

        if (expr0->ops.call.nargs != expr1->ops.call.nargs)
          return false;

        for (i = 0; i < expr0->ops.call.nargs; i++)
          if (! operand_equal_p (expr0->ops.call.args[i],
                                 expr1->ops.call.args[i], 0))
            return false;

        return true;
      }

    default:
      gcc_unreachable ();
    }
}
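
/* Note that the commutative cases above make "a_1 + b_2" and "b_2 + a_1"
   (hypothetical names) compare equal; iterative_hash_hashable_expr below
   must therefore hash commutative operands order-insensitively, since
   expressions that compare equal are required to hash equal.  */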
/* Compute a hash value for a hashable_expr value EXPR and a
   previously accumulated hash value VAL.  If two hashable_expr
   values compare equal with hashable_expr_equal_p, they must
   hash to the same value, given an identical value of VAL.
   The logic is intended to follow iterative_hash_expr in tree.c.  */

static hashval_t
iterative_hash_hashable_expr (const struct hashable_expr *expr, hashval_t val)
{
  switch (expr->kind)
    {
    case EXPR_SINGLE:
      val = iterative_hash_expr (expr->ops.single.rhs, val);
      break;

    case EXPR_UNARY:
      val = iterative_hash_object (expr->ops.unary.op, val);

      /* Make sure to include signedness in the hash computation.
         Don't hash the type, that can lead to having nodes which
         compare equal according to operand_equal_p, but which
         have different hash codes.  */
      if (CONVERT_EXPR_CODE_P (expr->ops.unary.op)
          || expr->ops.unary.op == NON_LVALUE_EXPR)
        val += TYPE_UNSIGNED (expr->type);

      val = iterative_hash_expr (expr->ops.unary.opnd, val);
      break;

    case EXPR_BINARY:
      val = iterative_hash_object (expr->ops.binary.op, val);
      if (commutative_tree_code (expr->ops.binary.op))
        val = iterative_hash_exprs_commutative (expr->ops.binary.opnd0,
                                                expr->ops.binary.opnd1, val);
      else
        {
          val = iterative_hash_expr (expr->ops.binary.opnd0, val);
          val = iterative_hash_expr (expr->ops.binary.opnd1, val);
        }
      break;

    case EXPR_TERNARY:
      val = iterative_hash_object (expr->ops.ternary.op, val);
      if (commutative_ternary_tree_code (expr->ops.ternary.op))
        val = iterative_hash_exprs_commutative (expr->ops.ternary.opnd0,
                                                expr->ops.ternary.opnd1, val);
      else
        {
          val = iterative_hash_expr (expr->ops.ternary.opnd0, val);
          val = iterative_hash_expr (expr->ops.ternary.opnd1, val);
        }
      val = iterative_hash_expr (expr->ops.ternary.opnd2, val);
      break;

    case EXPR_CALL:
      {
        size_t i;
        enum tree_code code = CALL_EXPR;

        val = iterative_hash_object (code, val);
        val = iterative_hash_expr (expr->ops.call.fn, val);
        for (i = 0; i < expr->ops.call.nargs; i++)
          val = iterative_hash_expr (expr->ops.call.args[i], val);
      }
      break;

    default:
      gcc_unreachable ();
    }

  return val;
}
/* Print a diagnostic dump of an expression hash table entry.  */

static void
print_expr_hash_elt (FILE * stream, const struct expr_hash_elt *element)
{
  if (element->stmt)
    fprintf (stream, "STMT ");
  else
    fprintf (stream, "COND ");

  if (element->lhs)
    {
      print_generic_expr (stream, element->lhs, 0);
      fprintf (stream, " = ");
    }

  switch (element->expr.kind)
    {
    case EXPR_SINGLE:
      print_generic_expr (stream, element->expr.ops.single.rhs, 0);
      break;

    case EXPR_UNARY:
      fprintf (stream, "%s ", tree_code_name[element->expr.ops.unary.op]);
      print_generic_expr (stream, element->expr.ops.unary.opnd, 0);
      break;

    case EXPR_BINARY:
      print_generic_expr (stream, element->expr.ops.binary.opnd0, 0);
      fprintf (stream, " %s ", tree_code_name[element->expr.ops.binary.op]);
      print_generic_expr (stream, element->expr.ops.binary.opnd1, 0);
      break;

    case EXPR_TERNARY:
      fprintf (stream, " %s <", tree_code_name[element->expr.ops.ternary.op]);
      print_generic_expr (stream, element->expr.ops.ternary.opnd0, 0);
      fputs (", ", stream);
      print_generic_expr (stream, element->expr.ops.ternary.opnd1, 0);
      fputs (", ", stream);
      print_generic_expr (stream, element->expr.ops.ternary.opnd2, 0);
      fputs (">", stream);
      break;

    case EXPR_CALL:
      {
        size_t i;
        size_t nargs = element->expr.ops.call.nargs;

        print_generic_expr (stream, element->expr.ops.call.fn, 0);
        fprintf (stream, " (");
        for (i = 0; i < nargs; i++)
          {
            print_generic_expr (stream, element->expr.ops.call.args[i], 0);
            if (i + 1 < nargs)
              fprintf (stream, ", ");
          }
        fprintf (stream, ")");
      }
      break;
    }
  fprintf (stream, "\n");

  if (element->stmt)
    {
      fprintf (stream, "          ");
      print_gimple_stmt (stream, element->stmt, 0, 0);
    }
}
/* Delete an expr_hash_elt and reclaim its storage.  */

static void
free_expr_hash_elt (void *elt)
{
  struct expr_hash_elt *element = ((struct expr_hash_elt *)elt);

  if (element->expr.kind == EXPR_CALL)
    free (element->expr.ops.call.args);

  free (element);
}
/* Allocate an EDGE_INFO for edge E and attach it to E.
   Return the new EDGE_INFO structure.  */

static struct edge_info *
allocate_edge_info (edge e)
{
  struct edge_info *edge_info;

  edge_info = XCNEW (struct edge_info);

  e->aux = edge_info;
  return edge_info;
}
/* Free all EDGE_INFO structures associated with edges in the CFG.
   If a particular edge can be threaded, copy the redirection
   target from the EDGE_INFO structure into the edge's AUX field
   as required by code to update the CFG and SSA graph for
   jump threading.  */

static void
free_all_edge_infos (void)
{
  basic_block bb;
  edge_iterator ei;
  edge e;

  FOR_EACH_BB (bb)
    {
      FOR_EACH_EDGE (e, ei, bb->preds)
        {
          struct edge_info *edge_info = (struct edge_info *) e->aux;

          if (edge_info)
            {
              if (edge_info->cond_equivalences)
                free (edge_info->cond_equivalences);
              free (edge_info);
              e->aux = NULL;
            }
        }
    }
}
/* Jump threading, redundancy elimination and const/copy propagation.

   This pass may expose new symbols that need to be renamed into SSA.  For
   every new symbol exposed, its corresponding bit will be set in
   VARS_TO_RENAME.  */

static unsigned int
tree_ssa_dominator_optimize (void)
{
  struct dom_walk_data walk_data;

  memset (&opt_stats, 0, sizeof (opt_stats));

  /* Create our hash tables.  */
  avail_exprs = htab_create (1024, real_avail_expr_hash, avail_expr_eq,
                             free_expr_hash_elt);
  avail_exprs_stack = VEC_alloc (expr_hash_elt_t, heap, 20);
  const_and_copies_stack = VEC_alloc (tree, heap, 20);
  need_eh_cleanup = BITMAP_ALLOC (NULL);

  /* Setup callbacks for the generic dominator tree walker.  */
  walk_data.dom_direction = CDI_DOMINATORS;
  walk_data.initialize_block_local_data = NULL;
  walk_data.before_dom_children = dom_opt_enter_block;
  walk_data.after_dom_children = dom_opt_leave_block;
  /* Right now we only attach a dummy COND_EXPR to the global data pointer.
     When we attach more stuff we'll need to fill this out with a real
     structure.  */
  walk_data.global_data = NULL;
  walk_data.block_local_data_size = 0;

  /* Now initialize the dominator walker.  */
  init_walk_dominator_tree (&walk_data);

  calculate_dominance_info (CDI_DOMINATORS);
  cfg_altered = false;

  /* We need to know loop structures in order to avoid destroying them
     in jump threading.  Note that we still can e.g. thread through loop
     headers to an exit edge, or through loop header to the loop body,
     assuming that we update the loop info.  */
  loop_optimizer_init (LOOPS_HAVE_SIMPLE_LATCHES);

  /* Initialize the value-handle array.  */
  threadedge_initialize_values ();

  /* We need accurate information regarding back edges in the CFG
     for jump threading; this may include back edges that are not part of
     a single loop.  */
  mark_dfs_back_edges ();

  /* Recursively walk the dominator tree optimizing statements.  */
  walk_dominator_tree (&walk_data, ENTRY_BLOCK_PTR);

  {
    gimple_stmt_iterator gsi;
    basic_block bb;
    FOR_EACH_BB (bb)
      {
        for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
          update_stmt_if_modified (gsi_stmt (gsi));
      }
  }

  /* If we exposed any new variables, go ahead and put them into
     SSA form now, before we handle jump threading.  This simplifies
     interactions between rewriting of _DECL nodes into SSA form
     and rewriting SSA_NAME nodes into SSA form after block
     duplication and CFG manipulation.  */
  update_ssa (TODO_update_ssa);

  free_all_edge_infos ();

  /* Thread jumps, creating duplicate blocks as needed.  */
  cfg_altered |= thread_through_all_blocks (first_pass_instance);

  if (cfg_altered)
    free_dominance_info (CDI_DOMINATORS);

  /* Removal of statements may make some EH edges dead.  Purge
     such edges from the CFG as needed.  */
  if (!bitmap_empty_p (need_eh_cleanup))
    {
      unsigned i;
      bitmap_iterator bi;

      /* Jump threading may have created forwarder blocks from blocks
         needing EH cleanup; the new successor of these blocks, which
         has inherited from the original block, needs the cleanup.  */
      EXECUTE_IF_SET_IN_BITMAP (need_eh_cleanup, 0, i, bi)
        {
          basic_block bb = BASIC_BLOCK (i);
          if (single_succ_p (bb)
              && (single_succ_edge (bb)->flags & EDGE_EH) == 0)
            {
              bitmap_clear_bit (need_eh_cleanup, i);
              bitmap_set_bit (need_eh_cleanup, single_succ (bb)->index);
            }
        }

      gimple_purge_all_dead_eh_edges (need_eh_cleanup);
      bitmap_zero (need_eh_cleanup);
    }

  statistics_counter_event (cfun, "Redundant expressions eliminated",
                            opt_stats.num_re);
  statistics_counter_event (cfun, "Constants propagated",
                            opt_stats.num_const_prop);
  statistics_counter_event (cfun, "Copies propagated",
                            opt_stats.num_copy_prop);

  /* Debugging dumps.  */
  if (dump_file && (dump_flags & TDF_STATS))
    dump_dominator_optimization_stats (dump_file);

  loop_optimizer_finalize ();

  /* Delete our main hashtable.  */
  htab_delete (avail_exprs);

  /* And finalize the dominator walker.  */
  fini_walk_dominator_tree (&walk_data);

  /* Free asserted bitmaps and stacks.  */
  BITMAP_FREE (need_eh_cleanup);

  VEC_free (expr_hash_elt_t, heap, avail_exprs_stack);
  VEC_free (tree, heap, const_and_copies_stack);

  /* Free the value-handle array.  */
  threadedge_finalize_values ();
  ssa_name_values = NULL;

  return 0;
}
static bool
gate_dominator (void)
{
  return flag_tree_dom != 0;
}

struct gimple_opt_pass pass_dominator =
{
 {
  GIMPLE_PASS,
  "dom",				/* name */
  gate_dominator,			/* gate */
  tree_ssa_dominator_optimize,		/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_TREE_SSA_DOMINATOR_OPTS,		/* tv_id */
  PROP_cfg | PROP_ssa,			/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  TODO_dump_func
    | TODO_update_ssa
    | TODO_cleanup_cfg
    | TODO_verify_ssa			/* todo_flags_finish */
 }
};
/* Given a conditional statement CONDSTMT, convert the
   condition to a canonical form.  */

static void
canonicalize_comparison (gimple condstmt)
{
  tree op0;
  tree op1;
  enum tree_code code;

  gcc_assert (gimple_code (condstmt) == GIMPLE_COND);

  op0 = gimple_cond_lhs (condstmt);
  op1 = gimple_cond_rhs (condstmt);

  code = gimple_cond_code (condstmt);

  /* If it would be profitable to swap the operands, then do so to
     canonicalize the statement, enabling better optimization.

     By placing canonicalization of such expressions here we
     transparently keep statements in canonical form, even
     when the statement is modified.  */
  if (tree_swap_operands_p (op0, op1, false))
    {
      /* For relationals we need to swap the operands
         and change the code.  */
      if (code == LT_EXPR
          || code == GT_EXPR
          || code == LE_EXPR
          || code == GE_EXPR)
        {
          code = swap_tree_comparison (code);

          gimple_cond_set_code (condstmt, code);
          gimple_cond_set_lhs (condstmt, op1);
          gimple_cond_set_rhs (condstmt, op0);

          update_stmt (condstmt);
        }
    }
}
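
/* For example (an illustrative sketch): "if (10 < x_2)" is rewritten as
   "if (x_2 > 10)", so that later hash table lookups see such comparisons
   in one canonical form.  */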
/* Remove expressions made available in the current block from the
   global hash table: pop entries off AVAIL_EXPRS_STACK and remove them
   from AVAIL_EXPRS until we hit the NULL marker that was pushed when
   the block was entered.  */

static void
remove_local_expressions_from_table (void)
{
  /* Remove all the expressions made available in this block.  */
  while (VEC_length (expr_hash_elt_t, avail_exprs_stack) > 0)
    {
      expr_hash_elt_t victim = VEC_pop (expr_hash_elt_t, avail_exprs_stack);
      void **slot;

      if (victim == NULL)
        break;

      /* This must precede the actual removal from the hash table,
         as ELEMENT and the table entry may share a call argument
         vector which will be freed during removal.  */
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "<<<< ");
          print_expr_hash_elt (dump_file, victim);
        }

      slot = htab_find_slot_with_hash (avail_exprs,
                                       victim, victim->hash, NO_INSERT);
      gcc_assert (slot && *slot == (void *) victim);
      htab_clear_slot (avail_exprs, slot);
    }
}
/* Use the source/dest pairs in CONST_AND_COPIES_STACK to restore
   CONST_AND_COPIES to its original state, stopping when we hit a
   NULL marker.  */

static void
restore_vars_to_original_value (void)
{
  while (VEC_length (tree, const_and_copies_stack) > 0)
    {
      tree prev_value, dest;

      dest = VEC_pop (tree, const_and_copies_stack);

      if (dest == NULL)
        break;

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "<<<< COPY ");
          print_generic_expr (dump_file, dest, 0);
          fprintf (dump_file, " = ");
          print_generic_expr (dump_file, SSA_NAME_VALUE (dest), 0);
          fprintf (dump_file, "\n");
        }

      prev_value = VEC_pop (tree, const_and_copies_stack);
      set_ssa_name_value (dest, prev_value);
    }
}
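
/* The stack discipline here mirrors record_const_or_copy_1 below: entries
   are pushed as (previous value, name) pairs with a NULL marker per block.
   Illustrative layout after recording a value for x_1 (hypothetical name):

     ... | NULL | <prev value of x_1> | x_1 | <- top of stack

   Popping back to the NULL marker restores x_1 to its previous value.  */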
/* A trivial wrapper so that we can present the generic jump
   threading code with a simple API for simplifying statements.  */
static tree
simplify_stmt_for_jump_threading (gimple stmt,
                                  gimple within_stmt ATTRIBUTE_UNUSED)
{
  return lookup_avail_expr (stmt, false);
}

/* Wrapper for common code to attempt to thread an edge.  For example,
   it handles lazily building the dummy condition and the bookkeeping
   when jump threading is successful.  */

static void
dom_thread_across_edge (struct dom_walk_data *walk_data, edge e)
{
  if (! walk_data->global_data)
    {
      gimple dummy_cond =
        gimple_build_cond (NE_EXPR,
                           integer_zero_node, integer_zero_node,
                           NULL, NULL);
      walk_data->global_data = dummy_cond;
    }

  thread_across_edge ((gimple) walk_data->global_data, e, false,
                      &const_and_copies_stack,
                      simplify_stmt_for_jump_threading);
}
/* PHI nodes can create equivalences too.

   Ignoring any alternatives which are the same as the result, if
   all the alternatives are equal, then the PHI node creates an
   equivalence.  */
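
/* For instance (hypothetical SSA names):

     x_5 = PHI <y_3 (bb2), y_3 (bb3)>

   makes x_5 equivalent to y_3, so dominated uses of x_5 can later be
   replaced with y_3 by const/copy propagation.  */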
static void
record_equivalences_from_phis (basic_block bb)
{
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);

      tree lhs = gimple_phi_result (phi);
      tree rhs = NULL;
      size_t i;

      for (i = 0; i < gimple_phi_num_args (phi); i++)
        {
          tree t = gimple_phi_arg_def (phi, i);

          /* Ignore alternatives which are the same as our LHS.  Since
             LHS is a PHI_RESULT, it is known to be a SSA_NAME, so we
             can simply compare pointers.  */
          if (lhs == t)
            continue;

          /* If we have not processed an alternative yet, then set
             RHS to this alternative.  */
          if (rhs == NULL)
            rhs = t;
          /* If we have processed an alternative (stored in RHS), then
             see if it is equal to this one.  If it isn't, then stop
             the search.  */
          else if (! operand_equal_for_phi_arg_p (rhs, t))
            break;
        }

      /* If we had no interesting alternatives, then all the RHS alternatives
         must have been the same as LHS.  */
      if (!rhs)
        rhs = lhs;

      /* If we managed to iterate through each PHI alternative without
         breaking out of the loop, then we have a PHI which may create
         a useful equivalence.  We do not need to record unwind data for
         this, since this is a true assignment and not an equivalence
         inferred from a comparison.  All uses of this ssa name are dominated
         by this assignment, so unwinding just costs time and space.  */
      if (i == gimple_phi_num_args (phi) && may_propagate_copy (lhs, rhs))
        set_ssa_name_value (lhs, rhs);
    }
}
/* Ignoring loop backedges, if BB has precisely one incoming edge then
   return that edge.  Otherwise return NULL.  */
static edge
single_incoming_edge_ignoring_loop_edges (basic_block bb)
{
  edge retval = NULL;
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      /* A loop back edge can be identified by the destination of
         the edge dominating the source of the edge.  */
      if (dominated_by_p (CDI_DOMINATORS, e->src, e->dest))
        continue;

      /* If we have already seen a non-loop edge, then we must have
         multiple incoming non-loop edges and thus we return NULL.  */
      if (retval)
        return NULL;

      /* This is the first non-loop incoming edge we have found.  Record
         it.  */
      retval = e;
    }

  return retval;
}
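
/* For a typical loop header (sketch): the edge from the preheader is
   returned and the latch edge is skipped, since the header dominates
   the latch.  Equivalences can therefore flow from the preheader into
   the header even though the header has two predecessors.  */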
/* Record any equivalences created by the incoming edge to BB.  If BB
   has more than one incoming edge, then no equivalence is created.  */

static void
record_equivalences_from_incoming_edge (basic_block bb)
{
  edge e;
  basic_block parent;
  struct edge_info *edge_info;

  /* If our parent block ended with a control statement, then we may be
     able to record some equivalences based on which outgoing edge from
     the parent was followed.  */
  parent = get_immediate_dominator (CDI_DOMINATORS, bb);

  e = single_incoming_edge_ignoring_loop_edges (bb);

  /* If we had a single incoming edge from our parent block, then enter
     any data associated with the edge into our tables.  */
  if (e && e->src == parent)
    {
      unsigned int i;

      edge_info = (struct edge_info *) e->aux;

      if (edge_info)
        {
          tree lhs = edge_info->lhs;
          tree rhs = edge_info->rhs;
          struct cond_equivalence *cond_equivalences
            = edge_info->cond_equivalences;

          if (lhs)
            record_equality (lhs, rhs);

          if (cond_equivalences)
            for (i = 0; i < edge_info->max_cond_equivalences; i++)
              record_cond (&cond_equivalences[i]);
        }
    }
}
/* Dump SSA statistics on FILE.  */

void
dump_dominator_optimization_stats (FILE *file)
{
  fprintf (file, "Total number of statements:                   %6ld\n\n",
           opt_stats.num_stmts);
  fprintf (file, "Exprs considered for dominator optimizations: %6ld\n",
           opt_stats.num_exprs_considered);

  fprintf (file, "\nHash table statistics:\n");

  fprintf (file, "    avail_exprs: ");
  htab_statistics (file, avail_exprs);
}


/* Dump SSA statistics on stderr.  */

DEBUG_FUNCTION void
debug_dominator_optimization_stats (void)
{
  dump_dominator_optimization_stats (stderr);
}


/* Dump statistics for the hash table HTAB.  */

static void
htab_statistics (FILE *file, htab_t htab)
{
  fprintf (file, "size %ld, %ld elements, %f collision/search ratio\n",
           (long) htab_size (htab),
           (long) htab_elements (htab),
           htab_collisions (htab));
}
/* Enter condition equivalence into the expression hash table.
   This indicates that a conditional expression has a known
   boolean value.  */

static void
record_cond (struct cond_equivalence *p)
{
  struct expr_hash_elt *element = XCNEW (struct expr_hash_elt);
  void **slot;

  initialize_hash_element_from_expr (&p->cond, p->value, element);

  slot = htab_find_slot_with_hash (avail_exprs, (void *)element,
                                   element->hash, INSERT);
  if (*slot == NULL)
    {
      *slot = (void *) element;

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "1>>> ");
          print_expr_hash_elt (dump_file, element);
        }

      VEC_safe_push (expr_hash_elt_t, heap, avail_exprs_stack, element);
    }
  else
    free (element);
}
/* Build a cond_equivalence record indicating that the comparison
   CODE holds between operands OP0 and OP1.  */

static void
build_and_record_new_cond (enum tree_code code,
                           tree op0, tree op1,
                           struct cond_equivalence *p)
{
  struct hashable_expr *cond = &p->cond;

  gcc_assert (TREE_CODE_CLASS (code) == tcc_comparison);

  cond->type = boolean_type_node;
  cond->kind = EXPR_BINARY;
  cond->ops.binary.op = code;
  cond->ops.binary.opnd0 = op0;
  cond->ops.binary.opnd1 = op1;

  p->value = boolean_true_node;
}
/* Record that COND is true and INVERTED is false into the edge information
   structure.  Also record that any conditions dominated by COND are true
   as well.

   For example, if a < b is true, then a <= b must also be true.  */

static void
record_conditions (struct edge_info *edge_info, tree cond, tree inverted)
{
  tree op0, op1;

  if (!COMPARISON_CLASS_P (cond))
    return;

  op0 = TREE_OPERAND (cond, 0);
  op1 = TREE_OPERAND (cond, 1);

  switch (TREE_CODE (cond))
    {
    case LT_EXPR:
    case GT_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
        {
          edge_info->max_cond_equivalences = 6;
          edge_info->cond_equivalences = XNEWVEC (struct cond_equivalence, 6);
          build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                     &edge_info->cond_equivalences[4]);
          build_and_record_new_cond (LTGT_EXPR, op0, op1,
                                     &edge_info->cond_equivalences[5]);
        }
      else
        {
          edge_info->max_cond_equivalences = 4;
          edge_info->cond_equivalences = XNEWVEC (struct cond_equivalence, 4);
        }

      build_and_record_new_cond ((TREE_CODE (cond) == LT_EXPR
                                  ? LE_EXPR : GE_EXPR),
                                 op0, op1, &edge_info->cond_equivalences[2]);
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences[3]);
      break;

    case GE_EXPR:
    case LE_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
        {
          edge_info->max_cond_equivalences = 3;
          edge_info->cond_equivalences = XNEWVEC (struct cond_equivalence, 3);
          build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                     &edge_info->cond_equivalences[2]);
        }
      else
        {
          edge_info->max_cond_equivalences = 2;
          edge_info->cond_equivalences = XNEWVEC (struct cond_equivalence, 2);
        }
      break;

    case EQ_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
        {
          edge_info->max_cond_equivalences = 5;
          edge_info->cond_equivalences = XNEWVEC (struct cond_equivalence, 5);
          build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                     &edge_info->cond_equivalences[4]);
        }
      else
        {
          edge_info->max_cond_equivalences = 4;
          edge_info->cond_equivalences = XNEWVEC (struct cond_equivalence, 4);
        }
      build_and_record_new_cond (LE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences[2]);
      build_and_record_new_cond (GE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences[3]);
      break;

    case UNORDERED_EXPR:
      edge_info->max_cond_equivalences = 8;
      edge_info->cond_equivalences = XNEWVEC (struct cond_equivalence, 8);
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences[2]);
      build_and_record_new_cond (UNLE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences[3]);
      build_and_record_new_cond (UNGE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences[4]);
      build_and_record_new_cond (UNEQ_EXPR, op0, op1,
                                 &edge_info->cond_equivalences[5]);
      build_and_record_new_cond (UNLT_EXPR, op0, op1,
                                 &edge_info->cond_equivalences[6]);
      build_and_record_new_cond (UNGT_EXPR, op0, op1,
                                 &edge_info->cond_equivalences[7]);
      break;

    case UNLT_EXPR:
    case UNGT_EXPR:
      edge_info->max_cond_equivalences = 4;
      edge_info->cond_equivalences = XNEWVEC (struct cond_equivalence, 4);
      build_and_record_new_cond ((TREE_CODE (cond) == UNLT_EXPR
                                  ? UNLE_EXPR : UNGE_EXPR),
                                 op0, op1, &edge_info->cond_equivalences[2]);
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences[3]);
      break;

    case UNEQ_EXPR:
      edge_info->max_cond_equivalences = 4;
      edge_info->cond_equivalences = XNEWVEC (struct cond_equivalence, 4);
      build_and_record_new_cond (UNLE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences[2]);
      build_and_record_new_cond (UNGE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences[3]);
      break;

    case LTGT_EXPR:
      edge_info->max_cond_equivalences = 4;
      edge_info->cond_equivalences = XNEWVEC (struct cond_equivalence, 4);
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences[2]);
      build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                 &edge_info->cond_equivalences[3]);
      break;

    default:
      edge_info->max_cond_equivalences = 2;
      edge_info->cond_equivalences = XNEWVEC (struct cond_equivalence, 2);
      break;
    }

  /* Now store the original true and false conditions into the first
     two slots.  */
  initialize_expr_from_cond (cond, &edge_info->cond_equivalences[0].cond);
  edge_info->cond_equivalences[0].value = boolean_true_node;

  /* It is possible for INVERTED to be the negation of a comparison,
     and not a valid RHS or GIMPLE_COND condition.  This happens because
     invert_truthvalue may return such an expression when asked to invert
     a floating-point comparison.  These comparisons are not assumed to
     obey the trichotomy law.  */
  initialize_expr_from_cond (inverted, &edge_info->cond_equivalences[1].cond);
  edge_info->cond_equivalences[1].value = boolean_false_node;
}
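
/* Concretely (a sketch for integral operands): on the true edge of
   "if (a_1 < b_2)" this records four equivalences: a_1 < b_2 is true,
   the inverted condition a_1 >= b_2 is false, and the dominated
   conditions a_1 <= b_2 and a_1 != b_2 are both true.  */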
/* A helper function for record_const_or_copy and record_equality.
   Do the work of recording the value and undo info.  */

static void
record_const_or_copy_1 (tree x, tree y, tree prev_x)
{
  set_ssa_name_value (x, y);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "0>>> COPY ");
      print_generic_expr (dump_file, x, 0);
      fprintf (dump_file, " = ");
      print_generic_expr (dump_file, y, 0);
      fprintf (dump_file, "\n");
    }

  VEC_reserve (tree, heap, const_and_copies_stack, 2);
  VEC_quick_push (tree, const_and_copies_stack, prev_x);
  VEC_quick_push (tree, const_and_copies_stack, x);
}
/* Return the loop depth of the basic block of the defining statement of X.
   This number should not be treated as absolutely correct because the loop
   information may not be completely up-to-date when dom runs.  However, it
   will be relatively correct, and as more passes are taught to keep loop info
   up to date, the result will become more and more accurate.  */

static int
loop_depth_of_name (tree x)
{
  gimple defstmt;
  basic_block defbb;

  /* If it's not an SSA_NAME, we have no clue where the definition is.  */
  if (TREE_CODE (x) != SSA_NAME)
    return 0;

  /* Otherwise return the loop depth of the defining statement's bb.
     Note that there may not actually be a bb for this statement, if the
     ssa_name is live on entry.  */
  defstmt = SSA_NAME_DEF_STMT (x);
  defbb = gimple_bb (defstmt);
  if (!defbb)
    return 0;

  return defbb->loop_depth;
}
/* Record that X is equal to Y in const_and_copies.  Record undo
   information in the block-local vector.  */

static void
record_const_or_copy (tree x, tree y)
{
  tree prev_x = SSA_NAME_VALUE (x);

  gcc_assert (TREE_CODE (x) == SSA_NAME);

  if (TREE_CODE (y) == SSA_NAME)
    {
      tree tmp = SSA_NAME_VALUE (y);
      if (tmp)
        y = tmp;
    }

  record_const_or_copy_1 (x, y, prev_x);
}
/* Similarly, but assume that X and Y are the two operands of an EQ_EXPR.
   This constrains the cases in which we may treat this as assignment.  */

static void
record_equality (tree x, tree y)
{
  tree prev_x = NULL, prev_y = NULL;

  if (TREE_CODE (x) == SSA_NAME)
    prev_x = SSA_NAME_VALUE (x);
  if (TREE_CODE (y) == SSA_NAME)
    prev_y = SSA_NAME_VALUE (y);

  /* If one of the previous values is invariant, or invariant in more loops
     (by depth), then use that.
     Otherwise it doesn't matter which value we choose, just so
     long as we canonicalize on one value.  */
  if (is_gimple_min_invariant (y))
    ;
  else if (is_gimple_min_invariant (x)
           || (loop_depth_of_name (x) <= loop_depth_of_name (y)))
    prev_x = x, x = y, y = prev_x, prev_x = prev_y;
  else if (prev_x && is_gimple_min_invariant (prev_x))
    x = y, y = prev_x, prev_x = prev_y;
  else if (prev_y)
    y = prev_y;

  /* After the swapping, we must have one SSA_NAME.  */
  if (TREE_CODE (x) != SSA_NAME)
    return;

  /* For IEEE, -0.0 == 0.0, so we don't necessarily know the sign of a
     variable compared against zero.  If we're honoring signed zeros,
     then we cannot record this value unless we know that the value is
     nonzero.  */
  if (HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (x)))
      && (TREE_CODE (y) != REAL_CST
          || REAL_VALUES_EQUAL (dconst0, TREE_REAL_CST (y))))
    return;

  record_const_or_copy_1 (x, y, prev_x);
}
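
/* Example of the signed-zero restriction above (illustrative): on the
   true edge of "if (x_1 == 0.0)" we may not record x_1 = 0.0 when signed
   zeros are honored, because x_1 might be -0.0 and replacing it with a
   literal 0.0 would lose the sign.  */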
/* Returns true when STMT is a simple iv increment.  It detects the
   following situation:

   i_1 = phi (..., i_2)
   i_2 = i_1 +/- ...  */

static bool
simple_iv_increment_p (gimple stmt)
{
  tree lhs, preinc;
  gimple phi;
  size_t i;

  if (gimple_code (stmt) != GIMPLE_ASSIGN)
    return false;

  lhs = gimple_assign_lhs (stmt);
  if (TREE_CODE (lhs) != SSA_NAME)
    return false;

  if (gimple_assign_rhs_code (stmt) != PLUS_EXPR
      && gimple_assign_rhs_code (stmt) != MINUS_EXPR)
    return false;

  preinc = gimple_assign_rhs1 (stmt);

  if (TREE_CODE (preinc) != SSA_NAME)
    return false;

  phi = SSA_NAME_DEF_STMT (preinc);
  if (gimple_code (phi) != GIMPLE_PHI)
    return false;

  for (i = 0; i < gimple_phi_num_args (phi); i++)
    if (gimple_phi_arg_def (phi, i) == lhs)
      return true;

  return false;
}
/* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
   known value for that SSA_NAME (or NULL if no value is known).

   Propagate values from CONST_AND_COPIES into the PHI nodes of the
   successors of BB.  */

static void
cprop_into_successor_phis (basic_block bb)
{
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      int indx;
      gimple_stmt_iterator gsi;

      /* If this is an abnormal edge, then we do not want to copy propagate
         into the PHI alternative associated with this edge.  */
      if (e->flags & EDGE_ABNORMAL)
        continue;

      gsi = gsi_start_phis (e->dest);
      if (gsi_end_p (gsi))
        continue;

      indx = e->dest_idx;
      for ( ; !gsi_end_p (gsi); gsi_next (&gsi))
        {
          tree new_val;
          use_operand_p orig_p;
          tree orig_val;
          gimple phi = gsi_stmt (gsi);

          /* The alternative may be associated with a constant, so verify
             it is an SSA_NAME before doing anything with it.  */
          orig_p = gimple_phi_arg_imm_use_ptr (phi, indx);
          orig_val = get_use_from_ptr (orig_p);
          if (TREE_CODE (orig_val) != SSA_NAME)
            continue;

          /* If we have *ORIG_P in our constant/copy table, then replace
             ORIG_P with its value in our constant/copy table.  */
          new_val = SSA_NAME_VALUE (orig_val);
          if (new_val
              && new_val != orig_val
              && (TREE_CODE (new_val) == SSA_NAME
                  || is_gimple_min_invariant (new_val))
              && may_propagate_copy (orig_val, new_val))
            propagate_value (orig_p, new_val);
        }
    }
}
/* We have finished optimizing BB, record any information implied by
   taking a specific outgoing edge from BB.  */

static void
record_edge_info (basic_block bb)
{
  gimple_stmt_iterator gsi = gsi_last_bb (bb);
  struct edge_info *edge_info;

  if (! gsi_end_p (gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      location_t loc = gimple_location (stmt);

      if (gimple_code (stmt) == GIMPLE_SWITCH)
        {
          tree index = gimple_switch_index (stmt);

          if (TREE_CODE (index) == SSA_NAME)
            {
              int i;
              int n_labels = gimple_switch_num_labels (stmt);
              tree *info = XCNEWVEC (tree, last_basic_block);
              edge e;
              edge_iterator ei;

              for (i = 0; i < n_labels; i++)
                {
                  tree label = gimple_switch_label (stmt, i);
                  basic_block target_bb = label_to_block (CASE_LABEL (label));
                  if (CASE_HIGH (label)
                      || !CASE_LOW (label)
                      || info[target_bb->index])
                    info[target_bb->index] = error_mark_node;
                  else
                    info[target_bb->index] = label;
                }

              FOR_EACH_EDGE (e, ei, bb->succs)
                {
                  basic_block target_bb = e->dest;
                  tree label = info[target_bb->index];

                  if (label != NULL && label != error_mark_node)
                    {
                      tree x = fold_convert_loc (loc, TREE_TYPE (index),
                                                 CASE_LOW (label));
                      edge_info = allocate_edge_info (e);
                      edge_info->lhs = index;
                      edge_info->rhs = x;
                    }
                }
              free (info);
            }
        }

      /* A COND_EXPR may create equivalences too.  */
      if (gimple_code (stmt) == GIMPLE_COND)
        {
          edge true_edge;
          edge false_edge;

          tree op0 = gimple_cond_lhs (stmt);
          tree op1 = gimple_cond_rhs (stmt);
          enum tree_code code = gimple_cond_code (stmt);

          extract_true_false_edges_from_block (bb, &true_edge, &false_edge);

          /* Special case comparing booleans against a constant as we
             know the value of OP0 on both arms of the branch.  i.e., we
             can record an equivalence for OP0 rather than COND.  */
          if ((code == EQ_EXPR || code == NE_EXPR)
              && TREE_CODE (op0) == SSA_NAME
              && TREE_CODE (TREE_TYPE (op0)) == BOOLEAN_TYPE
              && is_gimple_min_invariant (op1))
            {
              if (code == EQ_EXPR)
                {
                  edge_info = allocate_edge_info (true_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_false_node
                                    : boolean_true_node);

                  edge_info = allocate_edge_info (false_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_true_node
                                    : boolean_false_node);
                }
              else
                {
                  edge_info = allocate_edge_info (true_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_true_node
                                    : boolean_false_node);

                  edge_info = allocate_edge_info (false_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_false_node
                                    : boolean_true_node);
                }
            }
          else if (is_gimple_min_invariant (op0)
                   && (TREE_CODE (op1) == SSA_NAME
                       || is_gimple_min_invariant (op1)))
            {
              tree cond = build2 (code, boolean_type_node, op0, op1);
              tree inverted = invert_truthvalue_loc (loc, cond);
              struct edge_info *edge_info;

              edge_info = allocate_edge_info (true_edge);
              record_conditions (edge_info, cond, inverted);

              if (code == EQ_EXPR)
                {
                  edge_info->lhs = op1;
                  edge_info->rhs = op0;
                }

              edge_info = allocate_edge_info (false_edge);
              record_conditions (edge_info, inverted, cond);

              if (TREE_CODE (inverted) == EQ_EXPR)
                {
                  edge_info->lhs = op1;
                  edge_info->rhs = op0;
                }
            }
          else if (TREE_CODE (op0) == SSA_NAME
                   && (is_gimple_min_invariant (op1)
                       || TREE_CODE (op1) == SSA_NAME))
            {
              tree cond = build2 (code, boolean_type_node, op0, op1);
              tree inverted = invert_truthvalue_loc (loc, cond);
              struct edge_info *edge_info;

              edge_info = allocate_edge_info (true_edge);
              record_conditions (edge_info, cond, inverted);

              if (code == EQ_EXPR)
                {
                  edge_info->lhs = op0;
                  edge_info->rhs = op1;
                }

              edge_info = allocate_edge_info (false_edge);
              record_conditions (edge_info, inverted, cond);

              if (TREE_CODE (inverted) == EQ_EXPR)
                {
                  edge_info->lhs = op0;
                  edge_info->rhs = op1;
                }
            }

          /* ??? TRUTH_NOT_EXPR can create an equivalence too.  */
        }
    }
}
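
/* For instance (sketch): for "if (x_2 == 7)" the true edge gets the
   simple equivalence x_2 = 7, and record_conditions additionally marks
   the condition true (and its inversion false) on the respective edges
   in the expression hash table.  */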
/* Initialize local stacks for this optimizer and record equivalences
   upon entry to BB.  Equivalences can come from the edge traversed to
   reach BB or they may come from PHI nodes at the start of BB.  */

static void
dom_opt_enter_block (struct dom_walk_data *walk_data ATTRIBUTE_UNUSED,
                     basic_block bb)
{
  gimple_stmt_iterator gsi;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "\n\nOptimizing block #%d\n\n", bb->index);

  /* Push a marker on the stacks of local information so that we know how
     far to unwind when we finalize this block.  */
  VEC_safe_push (expr_hash_elt_t, heap, avail_exprs_stack, NULL);
  VEC_safe_push (tree, heap, const_and_copies_stack, NULL_TREE);

  record_equivalences_from_incoming_edge (bb);

  /* PHI nodes can create equivalences too.  */
  record_equivalences_from_phis (bb);

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    optimize_stmt (bb, gsi);

  /* Now prepare to process dominated blocks.  */
  record_edge_info (bb);
  cprop_into_successor_phis (bb);
}
/* We have finished processing the dominator children of BB, perform
   any finalization actions in preparation for leaving this node in
   the dominator tree.  */

static void
dom_opt_leave_block (struct dom_walk_data *walk_data, basic_block bb)
{
  gimple last;

  /* If we have an outgoing edge to a block with multiple incoming and
     outgoing edges, then we may be able to thread the edge, i.e., we
     may be able to statically determine which of the outgoing edges
     will be traversed when the incoming edge from BB is traversed.  */
  if (single_succ_p (bb)
      && (single_succ_edge (bb)->flags & EDGE_ABNORMAL) == 0
      && potentially_threadable_block (single_succ (bb)))
    {
      dom_thread_across_edge (walk_data, single_succ_edge (bb));
    }
  else if ((last = last_stmt (bb))
           && gimple_code (last) == GIMPLE_COND
           && EDGE_COUNT (bb->succs) == 2
           && (EDGE_SUCC (bb, 0)->flags & EDGE_ABNORMAL) == 0
           && (EDGE_SUCC (bb, 1)->flags & EDGE_ABNORMAL) == 0)
    {
      edge true_edge, false_edge;

      extract_true_false_edges_from_block (bb, &true_edge, &false_edge);

      /* Only try to thread the edge if it reaches a target block with
         more than one predecessor and more than one successor.  */
      if (potentially_threadable_block (true_edge->dest))
        {
          struct edge_info *edge_info;
          unsigned int i;

          /* Push a marker onto the available expression stack so that we
             unwind any expressions related to the TRUE arm before processing
             the false arm below.  */
          VEC_safe_push (expr_hash_elt_t, heap, avail_exprs_stack, NULL);
          VEC_safe_push (tree, heap, const_and_copies_stack, NULL_TREE);

          edge_info = (struct edge_info *) true_edge->aux;

          /* If we have info associated with this edge, record it into
             our equivalence tables.  */
          if (edge_info)
            {
              struct cond_equivalence *cond_equivalences
                = edge_info->cond_equivalences;
              tree lhs = edge_info->lhs;
              tree rhs = edge_info->rhs;

              /* If we have a simple NAME = VALUE equivalence, record it.  */
              if (lhs && TREE_CODE (lhs) == SSA_NAME)
                record_const_or_copy (lhs, rhs);

              /* If we have 0 = COND or 1 = COND equivalences, record them
                 into our expression hash tables.  */
              if (cond_equivalences)
                for (i = 0; i < edge_info->max_cond_equivalences; i++)
                  record_cond (&cond_equivalences[i]);
            }

          dom_thread_across_edge (walk_data, true_edge);

          /* And restore the various tables to their state before
             we threaded this edge.  */
          remove_local_expressions_from_table ();
          restore_vars_to_original_value ();
        }

      /* Similarly for the ELSE arm.  */
      if (potentially_threadable_block (false_edge->dest))
        {
          struct edge_info *edge_info;
          unsigned int i;

          VEC_safe_push (tree, heap, const_and_copies_stack, NULL_TREE);
          edge_info = (struct edge_info *) false_edge->aux;

          /* If we have info associated with this edge, record it into
             our equivalence tables.  */
          if (edge_info)
            {
              struct cond_equivalence *cond_equivalences
                = edge_info->cond_equivalences;
              tree lhs = edge_info->lhs;
              tree rhs = edge_info->rhs;

              /* If we have a simple NAME = VALUE equivalence, record it.  */
              if (lhs && TREE_CODE (lhs) == SSA_NAME)
                record_const_or_copy (lhs, rhs);

              /* If we have 0 = COND or 1 = COND equivalences, record them
                 into our expression hash tables.  */
              if (cond_equivalences)
                for (i = 0; i < edge_info->max_cond_equivalences; i++)
                  record_cond (&cond_equivalences[i]);
            }

          /* Now thread the edge.  */
          dom_thread_across_edge (walk_data, false_edge);

          /* No need to remove local expressions from our tables
             or restore vars to their original value as that will
             be done immediately below.  */
        }
    }

  remove_local_expressions_from_table ();
  restore_vars_to_original_value ();
}
/* Search for redundant computations in STMT.  If any are found, then
   replace them with the variable holding the result of the computation.

   If safe, record this expression into the available expression hash
   table.  */

static void
eliminate_redundant_computations (gimple_stmt_iterator* gsi)
{
  tree expr_type;
  tree cached_lhs;
  bool insert = true;
  bool assigns_var_p = false;

  gimple stmt = gsi_stmt (*gsi);

  tree def = gimple_get_lhs (stmt);

  /* Certain expressions on the RHS can be optimized away, but cannot
     themselves be entered into the hash tables.  */
  if (! def
      || TREE_CODE (def) != SSA_NAME
      || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (def)
      || gimple_vdef (stmt)
      /* Do not record equivalences for increments of ivs.  This would create
         overlapping live ranges for a very questionable gain.  */
      || simple_iv_increment_p (stmt))
    insert = false;

  /* Check if the expression has been computed before.  */
  cached_lhs = lookup_avail_expr (stmt, insert);

  opt_stats.num_exprs_considered++;

  /* Get the type of the expression we are trying to optimize.  */
  if (is_gimple_assign (stmt))
    {
      expr_type = TREE_TYPE (gimple_assign_lhs (stmt));
      assigns_var_p = true;
    }
  else if (gimple_code (stmt) == GIMPLE_COND)
    expr_type = boolean_type_node;
  else if (is_gimple_call (stmt))
    {
      gcc_assert (gimple_call_lhs (stmt));
      expr_type = TREE_TYPE (gimple_call_lhs (stmt));
      assigns_var_p = true;
    }
  else if (gimple_code (stmt) == GIMPLE_SWITCH)
    expr_type = TREE_TYPE (gimple_switch_index (stmt));
  else
    gcc_unreachable ();

  if (!cached_lhs)
    return;

  /* It is safe to ignore types here since we have already done
     type checking in the hashing and equality routines.  In fact
     type checking here merely gets in the way of constant
     propagation.  Also, make sure that it is safe to propagate
     CACHED_LHS into the expression in STMT.  */
  if ((TREE_CODE (cached_lhs) != SSA_NAME
       && (assigns_var_p
           || useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs))))
      || may_propagate_copy_into_stmt (stmt, cached_lhs))
    {
#if defined ENABLE_CHECKING
      gcc_assert (TREE_CODE (cached_lhs) == SSA_NAME
                  || is_gimple_min_invariant (cached_lhs));
#endif

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "  Replaced redundant expr '");
          print_gimple_expr (dump_file, stmt, 0, dump_flags);
          fprintf (dump_file, "' with '");
          print_generic_expr (dump_file, cached_lhs, dump_flags);
          fprintf (dump_file, "'\n");
        }

      opt_stats.num_re++;

      if (assigns_var_p
          && !useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs)))
        cached_lhs = fold_convert (expr_type, cached_lhs);

      propagate_tree_value_into_stmt (gsi, cached_lhs);

      /* Since it is always necessary to mark the result as modified,
         perhaps we should move this into propagate_tree_value_into_stmt
         itself.  */
      gimple_set_modified (gsi_stmt (*gsi), true);
    }
}
1910 /* STMT, a GIMPLE_ASSIGN, may create certain equivalences, in either
1911 the available expressions table or the const_and_copies table.
1912 Detect and record those equivalences. */
1913 /* We handle only very simple copy equivalences here. The heavy
1914 lifting is done by eliminate_redundant_computations. */
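/* For example (hypothetical GIMPLE): after
     x_1 = y_2;
   we record x_1 -> y_2 in CONST_AND_COPIES via set_ssa_name_value,
   so later uses of x_1 can be rewritten to use y_2.  */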
1916 static void
1917 record_equivalences_from_stmt (gimple stmt, int may_optimize_p)
1919 tree lhs;
1920 enum tree_code lhs_code;
1922 gcc_assert (is_gimple_assign (stmt));
1924 lhs = gimple_assign_lhs (stmt);
1925 lhs_code = TREE_CODE (lhs);
1927 if (lhs_code == SSA_NAME
1928 && gimple_assign_single_p (stmt))
1930 tree rhs = gimple_assign_rhs1 (stmt);
1932 /* If the RHS of the assignment is a constant or another variable that
1933 may be propagated, register it in the CONST_AND_COPIES table. We
1934 do not need to record unwind data for this, since this is a true
1935 assignment and not an equivalence inferred from a comparison. All
1936 uses of this ssa name are dominated by this assignment, so unwinding
1937 just costs time and space. */
1938 if (may_optimize_p
1939 && (TREE_CODE (rhs) == SSA_NAME
1940 || is_gimple_min_invariant (rhs)))
1942 if (dump_file && (dump_flags & TDF_DETAILS))
1944 fprintf (dump_file, "==== ASGN ");
1945 print_generic_expr (dump_file, lhs, 0);
1946 fprintf (dump_file, " = ");
1947 print_generic_expr (dump_file, rhs, 0);
1948 fprintf (dump_file, "\n");
1951 set_ssa_name_value (lhs, rhs);
1955 /* A memory store, even an aliased store, creates a useful
1956 equivalence. By exchanging the LHS and RHS, creating suitable
1957 vops and recording the result in the available expression table,
1958 we may be able to expose more redundant loads. */
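/* A sketch of the idea (hypothetical fragment): after the store
     *p_5 = x_2;
   we enter the artificial statement
     x_2 = *p_5;
   into the available expression table, so a dominated load from
   *p_5 with the same VUSE can be replaced by x_2.  */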
1959 if (!gimple_has_volatile_ops (stmt)
1960 && gimple_references_memory_p (stmt)
1961 && gimple_assign_single_p (stmt)
1962 && (TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME
1963 || is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
1964 && !is_gimple_reg (lhs))
1966 tree rhs = gimple_assign_rhs1 (stmt);
1967 gimple new_stmt;
1969 /* Build a new statement with the RHS and LHS exchanged. */
1970 if (TREE_CODE (rhs) == SSA_NAME)
1972 /* NOTE tuples. The call to gimple_build_assign below replaced
1973 a call to build_gimple_modify_stmt, which did not set the
1974 SSA_NAME_DEF_STMT on the LHS of the assignment. Doing so
1975 may cause an SSA validation failure, as the LHS may be a
1976 default-initialized name and should have no definition. I'm
1977 a bit dubious of this, as the artificial statement that we
1978 generate here may in fact be ill-formed, but it is simply
1979 used as an internal device in this pass, and never becomes
1980 part of the CFG. */
1981 gimple defstmt = SSA_NAME_DEF_STMT (rhs);
1982 new_stmt = gimple_build_assign (rhs, lhs);
1983 SSA_NAME_DEF_STMT (rhs) = defstmt;
1985 else
1986 new_stmt = gimple_build_assign (rhs, lhs);
1988 gimple_set_vuse (new_stmt, gimple_vdef (stmt));
1990 /* Finally enter the statement into the available expression
1991 table. */
1992 lookup_avail_expr (new_stmt, true);
1996 /* Replace *OP_P in STMT with any known equivalent value for *OP_P from
1997 CONST_AND_COPIES. */
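/* For example (hypothetical): if CONST_AND_COPIES records that x_1
   has the value 5, a use of x_1 in
     y_2 = x_1 + 1;
   is rewritten to
     y_2 = 5 + 1;
   which later folding can reduce to y_2 = 6.  */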
1999 static void
2000 cprop_operand (gimple stmt, use_operand_p op_p)
2002 tree val;
2003 tree op = USE_FROM_PTR (op_p);
2005 /* If the operand has a known constant value or it is known to be a
2006 copy of some other variable, use the value or copy stored in
2007 CONST_AND_COPIES. */
2008 val = SSA_NAME_VALUE (op);
2009 if (val && val != op)
2011 /* Do not change the base variable in the virtual operand
2012 tables. That would make it impossible to reconstruct
2013 the renamed virtual operand if we later modify this
2014 statement. Also only allow the new value to be an SSA_NAME
2015 for propagation into virtual operands. */
2016 if (!is_gimple_reg (op)
2017 && (TREE_CODE (val) != SSA_NAME
2018 || is_gimple_reg (val)
2019 || get_virtual_var (val) != get_virtual_var (op)))
2020 return;
2022 /* Do not replace hard register operands in asm statements. */
2023 if (gimple_code (stmt) == GIMPLE_ASM
2024 && !may_propagate_copy_into_asm (op))
2025 return;
2027 /* Certain operands are not allowed to be copy propagated due
2028 to their interaction with exception handling and some GCC
2029 extensions. */
2030 if (!may_propagate_copy (op, val))
2031 return;
2033 /* Do not propagate addresses that point to volatiles into memory
2034 stmts without volatile operands. */
2035 if (POINTER_TYPE_P (TREE_TYPE (val))
2036 && TYPE_VOLATILE (TREE_TYPE (TREE_TYPE (val)))
2037 && gimple_has_mem_ops (stmt)
2038 && !gimple_has_volatile_ops (stmt))
2039 return;
2041 /* Do not propagate copies if the propagated value is at a deeper loop
2042 depth than the propagatee. Otherwise, this may move loop variant
2043 variables outside of their loops and prevent coalescing
2044 opportunities. If the value was loop invariant, it will be hoisted
2045 by LICM and exposed for copy propagation. */
2046 if (loop_depth_of_name (val) > loop_depth_of_name (op))
2047 return;
2049 /* Do not propagate copies into simple IV increment statements.
2050 See PR23821 for how this can disturb IV analysis. */
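/* E.g. (hypothetical), given the increment
     i_7 = i_6 + 1;
   we avoid rewriting i_6 to some other name known to be equal to it,
   since that would obscure the induction variable from later
   analysis.  */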
2051 if (TREE_CODE (val) != INTEGER_CST
2052 && simple_iv_increment_p (stmt))
2053 return;
2055 /* Dump details. */
2056 if (dump_file && (dump_flags & TDF_DETAILS))
2058 fprintf (dump_file, " Replaced '");
2059 print_generic_expr (dump_file, op, dump_flags);
2060 fprintf (dump_file, "' with %s '",
2061 (TREE_CODE (val) != SSA_NAME ? "constant" : "variable"));
2062 print_generic_expr (dump_file, val, dump_flags);
2063 fprintf (dump_file, "'\n");
2066 if (TREE_CODE (val) != SSA_NAME)
2067 opt_stats.num_const_prop++;
2068 else
2069 opt_stats.num_copy_prop++;
2071 propagate_value (op_p, val);
2073 /* And note that we modified this statement. This is now
2074 safe, even if we changed virtual operands since we will
2075 rescan the statement and rewrite its operands again. */
2076 gimple_set_modified (stmt, true);
2080 /* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
2081 known value for that SSA_NAME (or NULL if no value is known).
2083 Propagate values from CONST_AND_COPIES into the uses, vuses and
2084 vdef_ops of STMT. */
2086 static void
2087 cprop_into_stmt (gimple stmt)
2089 use_operand_p op_p;
2090 ssa_op_iter iter;
2092 FOR_EACH_SSA_USE_OPERAND (op_p, stmt, iter, SSA_OP_ALL_USES)
2094 if (TREE_CODE (USE_FROM_PTR (op_p)) == SSA_NAME)
2095 cprop_operand (stmt, op_p);
2099 /* Optimize the statement pointed to by iterator SI.
2101 We try to perform some simplistic global redundancy elimination and
2102 constant propagation:
2104 1- To detect global redundancy, we keep track of expressions that have
2105 been computed in this block and its dominators. If we find that the
2106 same expression is computed more than once, we eliminate repeated
2107 computations by using the target of the first one.
2109 2- Constant values and copy assignments. This is used to do very
2110 simplistic constant and copy propagation. When a constant or copy
2111 assignment is found, we map the value on the RHS of the assignment to
2112 the variable in the LHS in the CONST_AND_COPIES table. */
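/* A combined example (hypothetical GIMPLE):
     a_1 = 5;          records a_1 -> 5 in CONST_AND_COPIES
     b_2 = a_1 + x_3;  cprop gives b_2 = 5 + x_3
     c_4 = a_1 + x_3;  cprop gives 5 + x_3, which is then found in
                       the expression table and replaced by c_4 = b_2  */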
2114 static void
2115 optimize_stmt (basic_block bb, gimple_stmt_iterator si)
2117 gimple stmt, old_stmt;
2118 bool may_optimize_p;
2119 bool modified_p = false;
2121 old_stmt = stmt = gsi_stmt (si);
2123 if (gimple_code (stmt) == GIMPLE_COND)
2124 canonicalize_comparison (stmt);
2126 update_stmt_if_modified (stmt);
2127 opt_stats.num_stmts++;
2129 if (dump_file && (dump_flags & TDF_DETAILS))
2131 fprintf (dump_file, "Optimizing statement ");
2132 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2135 /* Const/copy propagate into USES, VUSES and the RHS of VDEFs. */
2136 cprop_into_stmt (stmt);
2138 /* If the statement has been modified with constant replacements,
2139 fold its RHS before checking for redundant computations. */
2140 if (gimple_modified_p (stmt))
2142 tree rhs = NULL;
2144 /* Try to fold the statement making sure that STMT is kept
2145 up to date. */
2146 if (fold_stmt (&si))
2148 stmt = gsi_stmt (si);
2149 gimple_set_modified (stmt, true);
2151 if (dump_file && (dump_flags & TDF_DETAILS))
2153 fprintf (dump_file, " Folded to: ");
2154 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2158 /* We only need to consider cases that can yield a gimple operand. */
2159 if (gimple_assign_single_p (stmt))
2160 rhs = gimple_assign_rhs1 (stmt);
2161 else if (gimple_code (stmt) == GIMPLE_GOTO)
2162 rhs = gimple_goto_dest (stmt);
2163 else if (gimple_code (stmt) == GIMPLE_SWITCH)
2164 /* This should never be an ADDR_EXPR. */
2165 rhs = gimple_switch_index (stmt);
2167 if (rhs && TREE_CODE (rhs) == ADDR_EXPR)
2168 recompute_tree_invariant_for_addr_expr (rhs);
2170 /* Indicate that maybe_clean_or_replace_eh_stmt needs to be called,
2171 even if fold_stmt updated the stmt already and thus cleared
2172 the gimple_modified_p flag on it. */
2173 modified_p = true;
2176 /* Check for redundant computations. Do this only for assignments and
2177 calls free of volatile ops and side effects, and for conditionals and switches. */
2178 may_optimize_p = (!gimple_has_volatile_ops (stmt)
2179 && ((is_gimple_assign (stmt)
2180 && !gimple_rhs_has_side_effects (stmt))
2181 || (is_gimple_call (stmt)
2182 && gimple_call_lhs (stmt) != NULL_TREE
2183 && !gimple_rhs_has_side_effects (stmt))
2184 || gimple_code (stmt) == GIMPLE_COND
2185 || gimple_code (stmt) == GIMPLE_SWITCH));
2187 if (may_optimize_p)
2189 if (gimple_code (stmt) == GIMPLE_CALL)
2191 /* Resolve __builtin_constant_p. If it hasn't been
2192 folded to integer_one_node by now, it's fairly
2193 certain that the value simply isn't constant. */
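/* E.g. for source like
     if (__builtin_constant_p (x)) ...
   when the argument is still not provably constant at this point,
   the call is replaced by 0 below, allowing the guarded code to be
   removed as dead.  */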
2194 tree callee = gimple_call_fndecl (stmt);
2195 if (callee
2196 && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL
2197 && DECL_FUNCTION_CODE (callee) == BUILT_IN_CONSTANT_P)
2199 propagate_tree_value_into_stmt (&si, integer_zero_node);
2200 stmt = gsi_stmt (si);
2204 update_stmt_if_modified (stmt);
2205 eliminate_redundant_computations (&si);
2206 stmt = gsi_stmt (si);
2209 /* Record any additional equivalences created by this statement. */
2210 if (is_gimple_assign (stmt))
2211 record_equivalences_from_stmt (stmt, may_optimize_p);
2213 /* If STMT is a COND_EXPR and it was modified, then we may know
2214 where it goes. If that is the case, then mark the CFG as altered.
2216 This will cause us to later call remove_unreachable_blocks and
2217 cleanup_tree_cfg when it is safe to do so. It is not safe to
2218 clean things up here since removal of edges and such can trigger
2219 the removal of PHI nodes, which in turn can release SSA_NAMEs to
2220 the manager.
2222 That's all fine and good, except that once SSA_NAMEs are released
2223 to the manager, we must not call create_ssa_name until all references
2224 to released SSA_NAMEs have been eliminated.
2226 All references to the deleted SSA_NAMEs cannot be eliminated until
2227 we remove unreachable blocks.
2229 We cannot remove unreachable blocks until after we have completed
2230 any queued jump threading.
2232 We cannot complete any queued jump threads until we have taken
2233 appropriate variables out of SSA form. Taking variables out of
2234 SSA form can call create_ssa_name and thus we lose.
2236 Ultimately I suspect we're going to need to change the interface
2237 into the SSA_NAME manager. */
2238 if (gimple_modified_p (stmt) || modified_p)
2240 tree val = NULL;
2242 update_stmt_if_modified (stmt);
2244 if (gimple_code (stmt) == GIMPLE_COND)
2245 val = fold_binary_loc (gimple_location (stmt),
2246 gimple_cond_code (stmt), boolean_type_node,
2247 gimple_cond_lhs (stmt), gimple_cond_rhs (stmt));
2248 else if (gimple_code (stmt) == GIMPLE_SWITCH)
2249 val = gimple_switch_index (stmt);
2251 if (val && TREE_CODE (val) == INTEGER_CST && find_taken_edge (bb, val))
2252 cfg_altered = true;
2254 /* If we simplified a statement in such a way that it can be shown
2255 not to trap, update the EH information and the CFG to match. */
2256 if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt))
2258 bitmap_set_bit (need_eh_cleanup, bb->index);
2259 if (dump_file && (dump_flags & TDF_DETAILS))
2260 fprintf (dump_file, " Flagged to clear EH edges.\n");
2265 /* Search for an existing instance of STMT in the AVAIL_EXPRS table.
2266 If found, return its LHS. Otherwise insert STMT in the table and
2267 return NULL_TREE.
2269 Also, when an expression is first inserted in the table, it is
2270 also added to AVAIL_EXPRS_STACK, so that it can be removed when
2271 we finish processing this block and its children. */
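/* Typical behavior (a sketch): the first lookup of b_2 + c_3 with
   INSERT true returns NULL_TREE and records the expression; a later
   lookup of the same expression returns the recorded LHS, e.g. a_1.  */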
2273 static tree
2274 lookup_avail_expr (gimple stmt, bool insert)
2276 void **slot;
2277 tree lhs;
2278 tree temp;
2279 struct expr_hash_elt element;
2281 /* Get LHS of assignment or call, else NULL_TREE. */
2282 lhs = gimple_get_lhs (stmt);
2284 initialize_hash_element (stmt, lhs, &element);
2286 if (dump_file && (dump_flags & TDF_DETAILS))
2288 fprintf (dump_file, "LKUP ");
2289 print_expr_hash_elt (dump_file, &element);
2292 /* Don't bother remembering constant assignments and copy operations.
2293 Constants and copy operations are handled by the constant/copy propagator
2294 in optimize_stmt. */
2295 if (element.expr.kind == EXPR_SINGLE
2296 && (TREE_CODE (element.expr.ops.single.rhs) == SSA_NAME
2297 || is_gimple_min_invariant (element.expr.ops.single.rhs)))
2298 return NULL_TREE;
2300 /* Finally try to find the expression in the main expression hash table. */
2301 slot = htab_find_slot_with_hash (avail_exprs, &element, element.hash,
2302 (insert ? INSERT : NO_INSERT));
2303 if (slot == NULL)
2304 return NULL_TREE;
2306 if (*slot == NULL)
2308 struct expr_hash_elt *element2 = XNEW (struct expr_hash_elt);
2309 *element2 = element;
2310 element2->stamp = element2;
2311 *slot = (void *) element2;
2313 if (dump_file && (dump_flags & TDF_DETAILS))
2315 fprintf (dump_file, "2>>> ");
2316 print_expr_hash_elt (dump_file, element2);
2319 VEC_safe_push (expr_hash_elt_t, heap, avail_exprs_stack, element2);
2320 return NULL_TREE;
2323 /* Extract the LHS of the assignment so that it can be used as the current
2324 definition of another variable. */
2325 lhs = ((struct expr_hash_elt *)*slot)->lhs;
2327 /* See if the LHS appears in the CONST_AND_COPIES table. If it does, then
2328 use the value from the const_and_copies table. */
2329 if (TREE_CODE (lhs) == SSA_NAME)
2331 temp = SSA_NAME_VALUE (lhs);
2332 if (temp)
2333 lhs = temp;
2336 if (dump_file && (dump_flags & TDF_DETAILS))
2338 fprintf (dump_file, "FIND: ");
2339 print_generic_expr (dump_file, lhs, 0);
2340 fprintf (dump_file, "\n");
2343 return lhs;
2346 /* Hashing and equality functions for AVAIL_EXPRS. We compute a value number
2347 for expressions using the code of the expression and the SSA numbers of
2348 its operands. */
2350 static hashval_t
2351 avail_expr_hash (const void *p)
2353 gimple stmt = ((const struct expr_hash_elt *)p)->stmt;
2354 const struct hashable_expr *expr = &((const struct expr_hash_elt *)p)->expr;
2355 tree vuse;
2356 hashval_t val = 0;
2358 val = iterative_hash_hashable_expr (expr, val);
2360 /* If the hash table entry is not associated with a statement, then we
2361 can just hash the expression and not worry about virtual operands
2362 and such. */
2363 if (!stmt)
2364 return val;
2366 /* Add the SSA version number of the vuse operand. This is important
2367 because compound variables like arrays are not renamed in the
2368 operands. Rather, the rename is done on the virtual variable
2369 representing all the elements of the array. */
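/* E.g. (hypothetical), two loads of a[i_1] separated by a store into
   the array carry different VUSEs, hash differently, and so are not
   treated as the same available expression.  */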
2370 if ((vuse = gimple_vuse (stmt)))
2371 val = iterative_hash_expr (vuse, val);
2373 return val;
2376 static hashval_t
2377 real_avail_expr_hash (const void *p)
2379 return ((const struct expr_hash_elt *)p)->hash;
2382 static int
2383 avail_expr_eq (const void *p1, const void *p2)
2385 gimple stmt1 = ((const struct expr_hash_elt *)p1)->stmt;
2386 const struct hashable_expr *expr1 = &((const struct expr_hash_elt *)p1)->expr;
2387 const struct expr_hash_elt *stamp1 = ((const struct expr_hash_elt *)p1)->stamp;
2388 gimple stmt2 = ((const struct expr_hash_elt *)p2)->stmt;
2389 const struct hashable_expr *expr2 = &((const struct expr_hash_elt *)p2)->expr;
2390 const struct expr_hash_elt *stamp2 = ((const struct expr_hash_elt *)p2)->stamp;
2392 /* This case should apply only when removing entries from the table. */
2393 if (stamp1 == stamp2)
2394 return true;
2396 /* FIXME tuples:
2397 We add stmts to a hash table and then modify them. To detect the case
2398 that we modify a stmt and then search for it, we assume that the hash
2399 is always modified by that change.
2400 We have to fully check why this doesn't happen on trunk or rewrite
2401 this in a more reliable (and easier to understand) way. */
2402 if (((const struct expr_hash_elt *)p1)->hash
2403 != ((const struct expr_hash_elt *)p2)->hash)
2404 return false;
2406 /* In case of a collision, both RHSes have to be identical and have the
2407 same VUSE operands. */
2408 if (hashable_expr_equal_p (expr1, expr2)
2409 && types_compatible_p (expr1->type, expr2->type))
2411 /* Note that STMT1 and/or STMT2 may be NULL. */
2412 return ((stmt1 ? gimple_vuse (stmt1) : NULL_TREE)
2413 == (stmt2 ? gimple_vuse (stmt2) : NULL_TREE));
2416 return false;
2419 /* PHI-ONLY copy and constant propagation. This pass is meant to clean
2420 up degenerate PHIs created by or exposed by jump threading. */
2422 /* Given PHI, return its RHS if the PHI is a degenerate, otherwise return
2423 NULL. */
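/* For example (hypothetical GIMPLE), the PHI
     x_2 = PHI <y_1(3), x_2(4), y_1(5)>
   is a degenerate: ignoring the self-referencing argument, every
   argument is y_1, so the result is y_1.  */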
2425 tree
2426 degenerate_phi_result (gimple phi)
2428 tree lhs = gimple_phi_result (phi);
2429 tree val = NULL;
2430 size_t i;
2432 /* Ignoring arguments which are the same as LHS, if all the remaining
2433 arguments are the same, then the PHI is a degenerate and has the
2434 value of that common argument. */
2435 for (i = 0; i < gimple_phi_num_args (phi); i++)
2437 tree arg = gimple_phi_arg_def (phi, i);
2439 if (arg == lhs)
2440 continue;
2441 else if (!arg)
2442 break;
2443 else if (!val)
2444 val = arg;
2445 else if (arg == val)
2446 continue;
2447 /* We perform some of operand_equal_p's checks ourselves, not only
2448 to speed things up, but also to avoid crashing when dereferencing
2449 the type of a released SSA name. */
2450 else if (TREE_CODE (val) != TREE_CODE (arg)
2451 || TREE_CODE (val) == SSA_NAME
2452 || !operand_equal_p (arg, val, 0))
2453 break;
2455 return (i == gimple_phi_num_args (phi) ? val : NULL);
2458 /* Given a statement STMT, which is either a PHI node or an assignment,
2459 remove it from the IL. */
2461 static void
2462 remove_stmt_or_phi (gimple stmt)
2464 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
2466 if (gimple_code (stmt) == GIMPLE_PHI)
2467 remove_phi_node (&gsi, true);
2468 else
2470 gsi_remove (&gsi, true);
2471 release_defs (stmt);
2475 /* Given a statement STMT, which is either a PHI node or an assignment,
2476 return the "rhs" of the node; in the case of a non-degenerate
2477 PHI, NULL is returned. */
2479 static tree
2480 get_rhs_or_phi_arg (gimple stmt)
2482 if (gimple_code (stmt) == GIMPLE_PHI)
2483 return degenerate_phi_result (stmt);
2484 else if (gimple_assign_single_p (stmt))
2485 return gimple_assign_rhs1 (stmt);
2486 else
2487 gcc_unreachable ();
2491 /* Given a statement STMT, which is either a PHI node or an assignment,
2492 return the "lhs" of the node. */
2494 static tree
2495 get_lhs_or_phi_result (gimple stmt)
2497 if (gimple_code (stmt) == GIMPLE_PHI)
2498 return gimple_phi_result (stmt);
2499 else if (is_gimple_assign (stmt))
2500 return gimple_assign_lhs (stmt);
2501 else
2502 gcc_unreachable ();
2505 /* Propagate RHS into all uses of LHS (when possible).
2507 RHS and LHS are derived from STMT, which is passed in solely so
2508 that we can remove it if propagation is successful.
2510 When propagating into a PHI node or into a statement which turns
2511 into a trivial copy or constant initialization, set the
2512 appropriate bit in INTERESTING_NAMES so that we will visit those
2513 nodes as well in an effort to pick up secondary optimization
2514 opportunities. */
2516 static void
2517 propagate_rhs_into_lhs (gimple stmt, tree lhs, tree rhs, bitmap interesting_names)
2519 /* First verify that propagation is valid and isn't going to move a
2520 loop variant variable outside its loop. */
2521 if (! SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs)
2522 && (TREE_CODE (rhs) != SSA_NAME
2523 || ! SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs))
2524 && may_propagate_copy (lhs, rhs)
2525 && loop_depth_of_name (lhs) >= loop_depth_of_name (rhs))
2527 use_operand_p use_p;
2528 imm_use_iterator iter;
2529 gimple use_stmt;
2530 bool all = true;
2532 /* Dump details. */
2533 if (dump_file && (dump_flags & TDF_DETAILS))
2535 fprintf (dump_file, " Replacing '");
2536 print_generic_expr (dump_file, lhs, dump_flags);
2537 fprintf (dump_file, "' with %s '",
2538 (TREE_CODE (rhs) != SSA_NAME ? "constant" : "variable"));
2539 print_generic_expr (dump_file, rhs, dump_flags);
2540 fprintf (dump_file, "'\n");
2543 /* Walk over every use of LHS and try to replace the use with RHS.
2544 At this point the only reason why such a propagation would not
2545 be successful would be if the use occurs in an ASM_EXPR. */
2546 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
2548 /* Leave debug stmts alone. If we succeed in propagating
2549 all non-debug uses, we'll drop the DEF, and propagation
2550 into debug stmts will occur then. */
2551 if (gimple_debug_bind_p (use_stmt))
2552 continue;
2554 /* It's not always safe to propagate into an ASM_EXPR. */
2555 if (gimple_code (use_stmt) == GIMPLE_ASM
2556 && ! may_propagate_copy_into_asm (lhs))
2558 all = false;
2559 continue;
2562 /* It's not ok to propagate into the definition stmt of RHS.
2563 <bb 9>:
2564 # prephitmp.12_36 = PHI <g_67.1_6(9)>
2565 g_67.1_6 = prephitmp.12_36;
2566 goto <bb 9>;
2567 While this is strictly all dead code, we do not want to
2568 deal with it here. */
2569 if (TREE_CODE (rhs) == SSA_NAME
2570 && SSA_NAME_DEF_STMT (rhs) == use_stmt)
2572 all = false;
2573 continue;
2576 /* Dump details. */
2577 if (dump_file && (dump_flags & TDF_DETAILS))
2579 fprintf (dump_file, " Original statement:");
2580 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2583 /* Propagate the RHS into this use of the LHS. */
2584 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
2585 propagate_value (use_p, rhs);
2587 /* Special cases to avoid useless calls into the folding
2588 routines, operand scanning, etc.
2590 First, propagation into a PHI may cause the PHI to become
2591 a degenerate, so mark the PHI as interesting. No other
2592 actions are necessary.
2594 Second, if we're propagating a virtual operand and the
2595 propagation does not change the underlying _DECL node for
2596 the virtual operand, then no further actions are necessary. */
2597 if (gimple_code (use_stmt) == GIMPLE_PHI
2598 || (! is_gimple_reg (lhs)
2599 && TREE_CODE (rhs) == SSA_NAME
2600 && SSA_NAME_VAR (lhs) == SSA_NAME_VAR (rhs)))
2602 /* Dump details. */
2603 if (dump_file && (dump_flags & TDF_DETAILS))
2605 fprintf (dump_file, " Updated statement:");
2606 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2609 /* Propagation into a PHI may expose new degenerate PHIs,
2610 so mark the result of the PHI as interesting. */
2611 if (gimple_code (use_stmt) == GIMPLE_PHI)
2613 tree result = get_lhs_or_phi_result (use_stmt);
2614 bitmap_set_bit (interesting_names, SSA_NAME_VERSION (result));
2617 continue;
2620 /* From this point onward we are propagating into a
2621 real statement. Folding may (or may not) be possible,
2622 we may expose new operands, expose dead EH edges,
2623 etc. */
2624 /* NOTE tuples. In the tuples world, fold_stmt_inplace
2625 cannot fold a call that simplifies to a constant,
2626 because the GIMPLE_CALL must be replaced by a
2627 GIMPLE_ASSIGN, and there is no way to effect such a
2628 transformation in-place. We might want to consider
2629 using the more general fold_stmt here. */
2630 fold_stmt_inplace (use_stmt);
2632 /* Sometimes propagation can expose new operands to the
2633 renamer. */
2634 update_stmt (use_stmt);
2636 /* Dump details. */
2637 if (dump_file && (dump_flags & TDF_DETAILS))
2639 fprintf (dump_file, " Updated statement:");
2640 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2643 /* If we replaced a variable index with a constant, then
2644 we would need to update the invariant flag for ADDR_EXPRs. */
2645 if (gimple_assign_single_p (use_stmt)
2646 && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ADDR_EXPR)
2647 recompute_tree_invariant_for_addr_expr
2648 (gimple_assign_rhs1 (use_stmt));
2650 /* If we cleaned up EH information from the statement,
2651 mark its containing block as needing EH cleanups. */
2652 if (maybe_clean_or_replace_eh_stmt (use_stmt, use_stmt))
2654 bitmap_set_bit (need_eh_cleanup, gimple_bb (use_stmt)->index);
2655 if (dump_file && (dump_flags & TDF_DETAILS))
2656 fprintf (dump_file, " Flagged to clear EH edges.\n");
2659 /* Propagation may expose new trivial copy/constant propagation
2660 opportunities. */
2661 if (gimple_assign_single_p (use_stmt)
2662 && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
2663 && (TREE_CODE (gimple_assign_rhs1 (use_stmt)) == SSA_NAME
2664 || is_gimple_min_invariant (gimple_assign_rhs1 (use_stmt))))
2666 tree result = get_lhs_or_phi_result (use_stmt);
2667 bitmap_set_bit (interesting_names, SSA_NAME_VERSION (result));
2670 /* Propagation into these nodes may make certain edges in
2671 the CFG unexecutable. We want to identify them, since PHI nodes
2672 at the destination of those unexecutable edges may become
2673 degenerates. */
2674 else if (gimple_code (use_stmt) == GIMPLE_COND
2675 || gimple_code (use_stmt) == GIMPLE_SWITCH
2676 || gimple_code (use_stmt) == GIMPLE_GOTO)
2678 tree val;
2680 if (gimple_code (use_stmt) == GIMPLE_COND)
2681 val = fold_binary_loc (gimple_location (use_stmt),
2682 gimple_cond_code (use_stmt),
2683 boolean_type_node,
2684 gimple_cond_lhs (use_stmt),
2685 gimple_cond_rhs (use_stmt));
2686 else if (gimple_code (use_stmt) == GIMPLE_SWITCH)
2687 val = gimple_switch_index (use_stmt);
2688 else
2689 val = gimple_goto_dest (use_stmt);
2691 if (val && is_gimple_min_invariant (val))
2693 basic_block bb = gimple_bb (use_stmt);
2694 edge te = find_taken_edge (bb, val);
2695 edge_iterator ei;
2696 edge e;
2697 gimple_stmt_iterator gsi, psi;
2699 /* Remove all outgoing edges except TE. */
2700 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei));)
2702 if (e != te)
2704 /* Mark all the PHI nodes at the destination of
2705 the unexecutable edge as interesting. */
2706 for (psi = gsi_start_phis (e->dest);
2707 !gsi_end_p (psi);
2708 gsi_next (&psi))
2710 gimple phi = gsi_stmt (psi);
2712 tree result = gimple_phi_result (phi);
2713 int version = SSA_NAME_VERSION (result);
2715 bitmap_set_bit (interesting_names, version);
2718 te->probability += e->probability;
2720 te->count += e->count;
2721 remove_edge (e);
2722 cfg_altered = true;
2724 else
2725 ei_next (&ei);
2728 gsi = gsi_last_bb (gimple_bb (use_stmt));
2729 gsi_remove (&gsi, true);
2731 /* And fixup the flags on the single remaining edge. */
2732 te->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
2733 te->flags &= ~EDGE_ABNORMAL;
2734 te->flags |= EDGE_FALLTHRU;
2735 if (te->probability > REG_BR_PROB_BASE)
2736 te->probability = REG_BR_PROB_BASE;
2741 /* Ensure there is nothing else to do. */
2742 gcc_assert (!all || has_zero_uses (lhs));
2744 /* If we were able to propagate away all uses of LHS, then
2745 we can remove STMT. */
2746 if (all)
2747 remove_stmt_or_phi (stmt);
2751 /* STMT is either a PHI node (potentially a degenerate PHI node) or
2752 a statement that is a trivial copy or constant initialization.
2754 Attempt to eliminate STMT by propagating its RHS into all uses of
2755 its LHS. This may in turn set new bits in INTERESTING_NAMES
2756 for nodes we want to revisit later.
2758 All exit paths should clear INTERESTING_NAMES for the result
2759 of STMT. */
2761 static void
2762 eliminate_const_or_copy (gimple stmt, bitmap interesting_names)
2764 tree lhs = get_lhs_or_phi_result (stmt);
2765 tree rhs;
2766 int version = SSA_NAME_VERSION (lhs);
2768 /* If the LHS of this statement or PHI has no uses, then we can
2769 just eliminate it. This can occur if, for example, the PHI
2770 was created by block duplication due to threading and its only
2771 use was in the conditional at the end of the block which was
2772 deleted. */
2773 if (has_zero_uses (lhs))
2775 bitmap_clear_bit (interesting_names, version);
2776 remove_stmt_or_phi (stmt);
2777 return;
2780 /* Get the RHS of the assignment or PHI node if the PHI is a
2781 degenerate. */
2782 rhs = get_rhs_or_phi_arg (stmt);
2783 if (!rhs)
2785 bitmap_clear_bit (interesting_names, version);
2786 return;
2789 propagate_rhs_into_lhs (stmt, lhs, rhs, interesting_names);
2791 /* Note that STMT may well have been deleted by now, so do
2792 not access it; instead use the saved version number to clear
2793 STMT's entry in the worklist. */
2794 bitmap_clear_bit (interesting_names, version);
2797 /* The first phase in degenerate PHI elimination.
2799 Eliminate the degenerate PHIs in BB, then recurse on the
2800 dominator children of BB. */
2802 static void
2803 eliminate_degenerate_phis_1 (basic_block bb, bitmap interesting_names)
2805 gimple_stmt_iterator gsi;
2806 basic_block son;
2808 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2810 gimple phi = gsi_stmt (gsi);
2812 eliminate_const_or_copy (phi, interesting_names);
2815 /* Recurse into the dominator children of BB. */
2816 for (son = first_dom_son (CDI_DOMINATORS, bb);
2817 son;
2818 son = next_dom_son (CDI_DOMINATORS, son))
2819 eliminate_degenerate_phis_1 (son, interesting_names);
2823 /* A very simple pass to eliminate degenerate PHI nodes from the
2824 IL. This is meant to be fast enough to be able to be run several
2825 times in the optimization pipeline.
2827 Certain optimizations, particularly those which duplicate blocks
2828 or remove edges from the CFG can create or expose PHIs which are
2829 trivial copies or constant initializations.
2831 While we could pick up these optimizations in DOM or with the
2832 combination of copy-prop and CCP, those solutions are far too
2833 heavy-weight for our needs.
2835 This implementation has two phases so that we can efficiently
2836 eliminate the first order degenerate PHIs and second order
2837 degenerate PHIs.
2839 The first phase performs a dominator walk to identify and eliminate
2840 the vast majority of the degenerate PHIs. When a degenerate PHI
2841 is identified and eliminated any affected statements or PHIs
2842 are put on a worklist.
2844 The second phase eliminates degenerate PHIs and trivial copies
2845 or constant initializations using the worklist. This is how we
2846 pick up the secondary optimization opportunities with minimal
2847 cost. */
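/* A second order example (hypothetical): eliminating
     x_2 = PHI <y_1(2), y_1(3)>
   replaces x_2 with y_1 everywhere, which may turn
     z_4 = PHI <x_2(5), y_1(6)>
   into the degenerate z_4 = PHI <y_1(5), y_1(6)>, picked up by the
   worklist phase.  */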
2849 static unsigned int
2850 eliminate_degenerate_phis (void)
2852 bitmap interesting_names;
2853 bitmap interesting_names1;
2855 /* Bitmap of blocks which need EH information updated. We cannot
2856 update it on-the-fly as doing so invalidates the dominator tree. */
2857 need_eh_cleanup = BITMAP_ALLOC (NULL);
2859 /* INTERESTING_NAMES is effectively our worklist, indexed by
2860 SSA_NAME_VERSION.
2862 A set bit indicates that the statement or PHI node which
2863 defines the SSA_NAME should be (re)examined to determine if
2864 it has become a degenerate PHI or trivial const/copy propagation
2865 opportunity.
2867 Experiments have shown we generally get better compilation
2868 time behavior with bitmaps rather than sbitmaps. */
2869 interesting_names = BITMAP_ALLOC (NULL);
2870 interesting_names1 = BITMAP_ALLOC (NULL);
2872 calculate_dominance_info (CDI_DOMINATORS);
2873 cfg_altered = false;
2875 /* First phase. Eliminate degenerate PHIs via a dominator
2876 walk of the CFG.
2878 Experiments have indicated that we generally get better
2879 compile-time behavior by visiting blocks in the first
2880 phase in dominator order. Presumably this is because walking
2881 in dominator order leaves fewer PHIs for later examination
2882 by the worklist phase. */
2883 eliminate_degenerate_phis_1 (ENTRY_BLOCK_PTR, interesting_names);
2885 /* Second phase. Eliminate second order degenerate PHIs as well
2886 as trivial copies or constant initializations identified by
2887 the first phase or this phase. Basically we keep iterating
2888 until our set of INTERESTING_NAMEs is empty. */
2889 while (!bitmap_empty_p (interesting_names))
2891 unsigned int i;
2892 bitmap_iterator bi;
2894 /* EXECUTE_IF_SET_IN_BITMAP does not like its bitmap
2895 changed during the loop. Copy it to another bitmap and
2896 use that. */
2897 bitmap_copy (interesting_names1, interesting_names);
2899 EXECUTE_IF_SET_IN_BITMAP (interesting_names1, 0, i, bi)
2901 tree name = ssa_name (i);
2903 /* Ignore SSA_NAMEs that have been released because
2904 their defining statement was deleted (unreachable). */
2905 if (name)
2906 eliminate_const_or_copy (SSA_NAME_DEF_STMT (ssa_name (i)),
2907 interesting_names);
2911 if (cfg_altered)
2912 free_dominance_info (CDI_DOMINATORS);
2914 /* Propagation of const and copies may make some EH edges dead. Purge
2915 such edges from the CFG as needed. */
2916 if (!bitmap_empty_p (need_eh_cleanup))
2918 gimple_purge_all_dead_eh_edges (need_eh_cleanup);
2919 BITMAP_FREE (need_eh_cleanup);
2922 BITMAP_FREE (interesting_names);
2923 BITMAP_FREE (interesting_names1);
2924 return 0;
2927 struct gimple_opt_pass pass_phi_only_cprop =
2930 GIMPLE_PASS,
2931 "phicprop", /* name */
2932 gate_dominator, /* gate */
2933 eliminate_degenerate_phis, /* execute */
2934 NULL, /* sub */
2935 NULL, /* next */
2936 0, /* static_pass_number */
2937 TV_TREE_PHI_CPROP, /* tv_id */
2938 PROP_cfg | PROP_ssa, /* properties_required */
2939 0, /* properties_provided */
2940 0, /* properties_destroyed */
2941 0, /* todo_flags_start */
2942 TODO_cleanup_cfg
2943 | TODO_dump_func
2944 | TODO_ggc_collect
2945 | TODO_verify_ssa
2946 | TODO_verify_stmts
2947 | TODO_update_ssa /* todo_flags_finish */