1 /* SSA Dominator optimizations for trees
2 Copyright (C) 2001-2017 Free Software Foundation, Inc.
3 Contributed by Diego Novillo <dnovillo@redhat.com>
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "tree.h"
26 #include "gimple.h"
27 #include "tree-pass.h"
28 #include "ssa.h"
29 #include "gimple-pretty-print.h"
30 #include "fold-const.h"
31 #include "cfganal.h"
32 #include "cfgloop.h"
33 #include "gimple-fold.h"
34 #include "tree-eh.h"
35 #include "tree-inline.h"
36 #include "gimple-iterator.h"
37 #include "tree-cfg.h"
38 #include "tree-into-ssa.h"
39 #include "domwalk.h"
40 #include "tree-ssa-propagate.h"
41 #include "tree-ssa-threadupdate.h"
42 #include "params.h"
43 #include "tree-ssa-scopedtables.h"
44 #include "tree-ssa-threadedge.h"
45 #include "tree-ssa-dom.h"
46 #include "gimplify.h"
47 #include "tree-cfgcleanup.h"
48 #include "dbgcnt.h"
50 /* This file implements optimizations on the dominator tree. */
52 /* Structure for recording edge equivalences.
54 Computing and storing the edge equivalences instead of creating
55 them on-demand can save significant amounts of time, particularly
56 for pathological cases involving switch statements.
58 These structures live for a single iteration of the dominator
59 optimizer in the edge's AUX field. At the end of an iteration we
60 free each of these structures. */
61 class edge_info
63 public:
64 typedef std::pair <tree, tree> equiv_pair;
65 edge_info (edge);
66 ~edge_info ();
68 /* Record a simple LHS = RHS equivalence. This may trigger
69 calls to derive_equivalences. */
70 void record_simple_equiv (tree, tree);
72 /* If traversing this edge creates simple equivalences, we store
73 them as LHS/RHS pairs within this vector. */
74 vec<equiv_pair> simple_equivalences;
76 /* Traversing an edge may also indicate one or more particular conditions
77 are true or false. */
78 vec<cond_equivalence> cond_equivalences;
80 private:
81 /* Derive equivalences by walking the use-def chains. */
82 void derive_equivalences (tree, tree, int);
85 /* Track whether or not we have changed the control flow graph. */
86 static bool cfg_altered;
88 /* Bitmap of blocks that have had EH statements cleaned. We should
89 remove their dead edges eventually. */
90 static bitmap need_eh_cleanup;
91 static vec<gimple *> need_noreturn_fixup;
93 /* Statistics for dominator optimizations. */
94 struct opt_stats_d
96 long num_stmts;
97 long num_exprs_considered;
98 long num_re;
99 long num_const_prop;
100 long num_copy_prop;
103 static struct opt_stats_d opt_stats;
105 /* Local functions. */
106 static void record_equality (tree, tree, class const_and_copies *);
107 static void record_equivalences_from_phis (basic_block);
108 static void record_equivalences_from_incoming_edge (basic_block,
109 class const_and_copies *,
110 class avail_exprs_stack *);
111 static void eliminate_redundant_computations (gimple_stmt_iterator *,
112 class const_and_copies *,
113 class avail_exprs_stack *);
114 static void record_equivalences_from_stmt (gimple *, int,
115 class avail_exprs_stack *);
116 static void dump_dominator_optimization_stats (FILE *file,
117 hash_table<expr_elt_hasher> *);
119 /* Constructor for EDGE_INFO. An EDGE_INFO instance is always
120 associated with an edge E. */
122 edge_info::edge_info (edge e)
124 /* Free the old one associated with E, if it exists, and
125 associate our new object with E. */
126 free_dom_edge_info (e);
127 e->aux = this;
129 /* And initialize the embedded vectors. */
130 simple_equivalences = vNULL;
131 cond_equivalences = vNULL;
134 /* Destructor just needs to release the vectors. */
136 edge_info::~edge_info (void)
138 this->cond_equivalences.release ();
139 this->simple_equivalences.release ();
142 /* NAME is known to have the value VALUE, which must be a constant.
144 Walk through its use-def chain to see if there are other equivalences
145 we might be able to derive.
147 RECURSION_LIMIT controls how far back we recurse through the use-def
148 chains. */
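/* For example, if NAME was set by x_1 = a_2 | b_3 and VALUE is zero,
   then a_2 and b_3 must both be zero as well, so those equivalences
   can be recorded too.  */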
150 void
151 edge_info::derive_equivalences (tree name, tree value, int recursion_limit)
153 if (TREE_CODE (name) != SSA_NAME || TREE_CODE (value) != INTEGER_CST)
154 return;
156 /* This records the equivalence for the toplevel object. Do
157 this before checking the recursion limit. */
158 simple_equivalences.safe_push (equiv_pair (name, value));
160 /* Limit how far up the use-def chains we are willing to walk. */
161 if (recursion_limit == 0)
162 return;
164 /* We can walk up the use-def chains to potentially find more
165 equivalences. */
166 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
167 if (is_gimple_assign (def_stmt))
169 /* We know the result of DEF_STMT was zero. See if that allows
170 us to deduce anything about the SSA_NAMEs used on the RHS. */
171 enum tree_code code = gimple_assign_rhs_code (def_stmt);
172 switch (code)
174 case BIT_IOR_EXPR:
175 if (integer_zerop (value))
177 tree rhs1 = gimple_assign_rhs1 (def_stmt);
178 tree rhs2 = gimple_assign_rhs2 (def_stmt);
180 value = build_zero_cst (TREE_TYPE (rhs1));
181 derive_equivalences (rhs1, value, recursion_limit - 1);
182 value = build_zero_cst (TREE_TYPE (rhs2));
183 derive_equivalences (rhs2, value, recursion_limit - 1);
185 break;
187 /* We know the result of DEF_STMT was one. See if that allows
188 us to deduce anything about the SSA_NAMEs used on the RHS. */
189 case BIT_AND_EXPR:
190 if (!integer_zerop (value))
192 tree rhs1 = gimple_assign_rhs1 (def_stmt);
193 tree rhs2 = gimple_assign_rhs2 (def_stmt);
195 /* If either operand has a boolean range, then we
196 know its value must be one; otherwise we just know it
197 is nonzero. The former is clearly useful; we have not
198 yet seen cases where the latter is helpful. */
199 if (TREE_CODE (rhs1) == SSA_NAME)
201 if (ssa_name_has_boolean_range (rhs1))
203 value = build_one_cst (TREE_TYPE (rhs1));
204 derive_equivalences (rhs1, value, recursion_limit - 1);
207 if (TREE_CODE (rhs2) == SSA_NAME)
209 if (ssa_name_has_boolean_range (rhs2))
211 value = build_one_cst (TREE_TYPE (rhs2));
212 derive_equivalences (rhs2, value, recursion_limit - 1);
216 break;
218 /* If LHS is an SSA_NAME and RHS is a constant integer and LHS was
219 set via a widening type conversion, then we may be able to record
220 additional equivalences. */
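/* For example, if x_1 = (int) c_2 widens c_2 and x_1 is known to be 7,
   and 7 fits in c_2's type, then c_2 == 7 holds as well.  */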
221 case NOP_EXPR:
222 case CONVERT_EXPR:
224 tree rhs = gimple_assign_rhs1 (def_stmt);
225 tree rhs_type = TREE_TYPE (rhs);
226 if (INTEGRAL_TYPE_P (rhs_type)
227 && (TYPE_PRECISION (TREE_TYPE (name))
228 >= TYPE_PRECISION (rhs_type))
229 && int_fits_type_p (value, rhs_type))
230 derive_equivalences (rhs,
231 fold_convert (rhs_type, value),
232 recursion_limit - 1);
233 break;
236 /* We can invert the operation of these codes trivially if
237 one of the RHS operands is a constant to produce a known
238 value for the other RHS operand. */
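/* For example, if x_1 = y_2 + 3 and x_1 is known to be 10,
   then y_2 must be 7.  */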
239 case POINTER_PLUS_EXPR:
240 case PLUS_EXPR:
242 tree rhs1 = gimple_assign_rhs1 (def_stmt);
243 tree rhs2 = gimple_assign_rhs2 (def_stmt);
245 /* If either argument is a constant, then we can compute
246 a constant value for the nonconstant argument. */
247 if (TREE_CODE (rhs1) == INTEGER_CST
248 && TREE_CODE (rhs2) == SSA_NAME)
249 derive_equivalences (rhs2,
250 fold_binary (MINUS_EXPR, TREE_TYPE (rhs1),
251 value, rhs1),
252 recursion_limit - 1);
253 else if (TREE_CODE (rhs2) == INTEGER_CST
254 && TREE_CODE (rhs1) == SSA_NAME)
255 derive_equivalences (rhs1,
256 fold_binary (MINUS_EXPR, TREE_TYPE (rhs1),
257 value, rhs2),
258 recursion_limit - 1);
259 break;
262 /* If one of the operands is a constant, then we can compute
263 the value of the other operand. If both operands are
264 SSA_NAMEs, then they must be equal if the result is zero. */
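/* For example, if x_1 = y_2 - z_3 and x_1 is known to be zero,
   then the condition y_2 == z_3 holds on this edge.  */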
265 case MINUS_EXPR:
267 tree rhs1 = gimple_assign_rhs1 (def_stmt);
268 tree rhs2 = gimple_assign_rhs2 (def_stmt);
270 /* If either argument is a constant, then we can compute
271 a constant value for the nonconstant argument. */
272 if (TREE_CODE (rhs1) == INTEGER_CST
273 && TREE_CODE (rhs2) == SSA_NAME)
274 derive_equivalences (rhs2,
275 fold_binary (MINUS_EXPR, TREE_TYPE (rhs1),
276 rhs1, value),
277 recursion_limit - 1);
278 else if (TREE_CODE (rhs2) == INTEGER_CST
279 && TREE_CODE (rhs1) == SSA_NAME)
280 derive_equivalences (rhs1,
281 fold_binary (PLUS_EXPR, TREE_TYPE (rhs1),
282 value, rhs2),
283 recursion_limit - 1);
284 else if (integer_zerop (value))
286 tree cond = build2 (EQ_EXPR, boolean_type_node,
287 gimple_assign_rhs1 (def_stmt),
288 gimple_assign_rhs2 (def_stmt));
289 tree inverted = invert_truthvalue (cond);
290 record_conditions (&this->cond_equivalences, cond, inverted);
292 break;
296 case EQ_EXPR:
297 case NE_EXPR:
299 if ((code == EQ_EXPR && integer_onep (value))
300 || (code == NE_EXPR && integer_zerop (value)))
302 tree rhs1 = gimple_assign_rhs1 (def_stmt);
303 tree rhs2 = gimple_assign_rhs2 (def_stmt);
305 /* If either argument is a constant, then record the
306 other argument as being the same as that constant.
308 If neither operand is a constant, then we have a
309 conditional name == name equivalence. */
310 if (TREE_CODE (rhs1) == INTEGER_CST)
311 derive_equivalences (rhs2, rhs1, recursion_limit - 1);
312 else if (TREE_CODE (rhs2) == INTEGER_CST)
313 derive_equivalences (rhs1, rhs2, recursion_limit - 1);
315 else
317 tree cond = build2 (code, boolean_type_node,
318 gimple_assign_rhs1 (def_stmt),
319 gimple_assign_rhs2 (def_stmt));
320 tree inverted = invert_truthvalue (cond);
321 if (integer_zerop (value))
322 std::swap (cond, inverted);
323 record_conditions (&this->cond_equivalences, cond, inverted);
325 break;
328 /* For BIT_NOT and NEGATE, we can just apply the operation to the
329 VALUE to get the new equivalence. It will always be a constant
330 so we can recurse. */
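/* For example, if x_1 = -y_2 and x_1 is known to be 4,
   then y_2 must be -4.  */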
331 case BIT_NOT_EXPR:
332 case NEGATE_EXPR:
334 tree rhs = gimple_assign_rhs1 (def_stmt);
335 tree res = fold_build1 (code, TREE_TYPE (rhs), value);
336 derive_equivalences (rhs, res, recursion_limit - 1);
337 break;
340 default:
342 if (TREE_CODE_CLASS (code) == tcc_comparison)
344 tree cond = build2 (code, boolean_type_node,
345 gimple_assign_rhs1 (def_stmt),
346 gimple_assign_rhs2 (def_stmt));
347 tree inverted = invert_truthvalue (cond);
348 if (integer_zerop (value))
349 std::swap (cond, inverted);
350 record_conditions (&this->cond_equivalences, cond, inverted);
351 break;
353 break;
359 void
360 edge_info::record_simple_equiv (tree lhs, tree rhs)
362 /* If the RHS is a constant, then we may be able to derive
363 further equivalences. Else just record the name = name
364 equivalence. */
365 if (TREE_CODE (rhs) == INTEGER_CST)
366 derive_equivalences (lhs, rhs, 4);
367 else
368 simple_equivalences.safe_push (equiv_pair (lhs, rhs));
371 /* Free the edge_info data attached to E, if it exists. */
373 void
374 free_dom_edge_info (edge e)
376 class edge_info *edge_info = (struct edge_info *)e->aux;
378 if (edge_info)
379 delete edge_info;
382 /* Free all EDGE_INFO structures associated with edges in the CFG.
383 If a particular edge can be threaded, copy the redirection
384 target from the EDGE_INFO structure into the edge's AUX field
385 as required by code to update the CFG and SSA graph for
386 jump threading. */
388 static void
389 free_all_edge_infos (void)
391 basic_block bb;
392 edge_iterator ei;
393 edge e;
395 FOR_EACH_BB_FN (bb, cfun)
397 FOR_EACH_EDGE (e, ei, bb->preds)
399 free_dom_edge_info (e);
400 e->aux = NULL;
405 /* We have finished optimizing BB, record any information implied by
406 taking a specific outgoing edge from BB. */
408 static void
409 record_edge_info (basic_block bb)
411 gimple_stmt_iterator gsi = gsi_last_bb (bb);
412 class edge_info *edge_info;
414 if (! gsi_end_p (gsi))
416 gimple *stmt = gsi_stmt (gsi);
417 location_t loc = gimple_location (stmt);
419 if (gimple_code (stmt) == GIMPLE_SWITCH)
421 gswitch *switch_stmt = as_a <gswitch *> (stmt);
422 tree index = gimple_switch_index (switch_stmt);
424 if (TREE_CODE (index) == SSA_NAME)
426 int i;
427 int n_labels = gimple_switch_num_labels (switch_stmt);
428 tree *info = XCNEWVEC (tree, last_basic_block_for_fn (cfun));
429 edge e;
430 edge_iterator ei;
432 for (i = 0; i < n_labels; i++)
434 tree label = gimple_switch_label (switch_stmt, i);
435 basic_block target_bb = label_to_block (CASE_LABEL (label));
436 if (CASE_HIGH (label)
437 || !CASE_LOW (label)
438 || info[target_bb->index])
439 info[target_bb->index] = error_mark_node;
440 else
441 info[target_bb->index] = label;
444 FOR_EACH_EDGE (e, ei, bb->succs)
446 basic_block target_bb = e->dest;
447 tree label = info[target_bb->index];
449 if (label != NULL && label != error_mark_node)
451 tree x = fold_convert_loc (loc, TREE_TYPE (index),
452 CASE_LOW (label));
453 edge_info = new class edge_info (e);
454 edge_info->record_simple_equiv (index, x);
457 free (info);
461 /* A COND_EXPR may create equivalences too. */
462 if (gimple_code (stmt) == GIMPLE_COND)
464 edge true_edge;
465 edge false_edge;
467 tree op0 = gimple_cond_lhs (stmt);
468 tree op1 = gimple_cond_rhs (stmt);
469 enum tree_code code = gimple_cond_code (stmt);
471 extract_true_false_edges_from_block (bb, &true_edge, &false_edge);
473 /* Special case comparing booleans against a constant, as we
474 know the value of OP0 on both arms of the branch; i.e., we
475 can record an equivalence for OP0 rather than COND.
477 However, don't do this if the constant isn't zero or one.
478 Such conditionals will get optimized more thoroughly during
479 the domwalk. */
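/* For example, given if (b_1 == 0) where b_1 has a [0, 1] range,
   b_1 must be 0 on the true edge and 1 on the false edge.  */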
480 if ((code == EQ_EXPR || code == NE_EXPR)
481 && TREE_CODE (op0) == SSA_NAME
482 && ssa_name_has_boolean_range (op0)
483 && is_gimple_min_invariant (op1)
484 && (integer_zerop (op1) || integer_onep (op1)))
486 tree true_val = constant_boolean_node (true, TREE_TYPE (op0));
487 tree false_val = constant_boolean_node (false, TREE_TYPE (op0));
489 if (code == EQ_EXPR)
491 edge_info = new class edge_info (true_edge);
492 edge_info->record_simple_equiv (op0,
493 (integer_zerop (op1)
494 ? false_val : true_val));
495 edge_info = new class edge_info (false_edge);
496 edge_info->record_simple_equiv (op0,
497 (integer_zerop (op1)
498 ? true_val : false_val));
500 else
502 edge_info = new class edge_info (true_edge);
503 edge_info->record_simple_equiv (op0,
504 (integer_zerop (op1)
505 ? true_val : false_val));
506 edge_info = new class edge_info (false_edge);
507 edge_info->record_simple_equiv (op0,
508 (integer_zerop (op1)
509 ? false_val : true_val));
512 /* This can show up in the IL as a result of copy propagation;
513 it will eventually be canonicalized, but we have to cope
514 with this case within the pass. */
515 else if (is_gimple_min_invariant (op0)
516 && TREE_CODE (op1) == SSA_NAME)
518 tree cond = build2 (code, boolean_type_node, op0, op1);
519 tree inverted = invert_truthvalue_loc (loc, cond);
520 bool can_infer_simple_equiv
521 = !(HONOR_SIGNED_ZEROS (op0)
522 && real_zerop (op0));
523 struct edge_info *edge_info;
525 edge_info = new class edge_info (true_edge);
526 record_conditions (&edge_info->cond_equivalences, cond, inverted);
528 if (can_infer_simple_equiv && code == EQ_EXPR)
529 edge_info->record_simple_equiv (op1, op0);
531 edge_info = new class edge_info (false_edge);
532 record_conditions (&edge_info->cond_equivalences, inverted, cond);
534 if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
535 edge_info->record_simple_equiv (op1, op0);
538 else if (TREE_CODE (op0) == SSA_NAME
539 && (TREE_CODE (op1) == SSA_NAME
540 || is_gimple_min_invariant (op1)))
542 tree cond = build2 (code, boolean_type_node, op0, op1);
543 tree inverted = invert_truthvalue_loc (loc, cond);
544 bool can_infer_simple_equiv
545 = !(HONOR_SIGNED_ZEROS (op1)
546 && (TREE_CODE (op1) == SSA_NAME || real_zerop (op1)));
547 struct edge_info *edge_info;
549 edge_info = new class edge_info (true_edge);
550 record_conditions (&edge_info->cond_equivalences, cond, inverted);
552 if (can_infer_simple_equiv && code == EQ_EXPR)
553 edge_info->record_simple_equiv (op0, op1);
555 edge_info = new class edge_info (false_edge);
556 record_conditions (&edge_info->cond_equivalences, inverted, cond);
558 if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
559 edge_info->record_simple_equiv (op0, op1);
566 class dom_opt_dom_walker : public dom_walker
568 public:
569 dom_opt_dom_walker (cdi_direction direction,
570 class const_and_copies *const_and_copies,
571 class avail_exprs_stack *avail_exprs_stack,
572 gcond *dummy_cond)
573 : dom_walker (direction, true),
574 m_const_and_copies (const_and_copies),
575 m_avail_exprs_stack (avail_exprs_stack),
576 m_dummy_cond (dummy_cond) { }
578 virtual edge before_dom_children (basic_block);
579 virtual void after_dom_children (basic_block);
581 private:
583 /* Unwindable equivalences, both const/copy and expression varieties. */
584 class const_and_copies *m_const_and_copies;
585 class avail_exprs_stack *m_avail_exprs_stack;
587 /* Dummy condition to avoid creating lots of throw away statements. */
588 gcond *m_dummy_cond;
590 /* Optimize a single statement within a basic block using the
591 various tables maintained by DOM. Returns the taken edge if
592 the statement is a conditional with a statically determined
593 value. */
594 edge optimize_stmt (basic_block, gimple_stmt_iterator);
597 /* Jump threading, redundancy elimination and const/copy propagation.
599 This pass may expose new symbols that need to be renamed into SSA. For
600 every new symbol exposed, its corresponding bit will be set in
601 VARS_TO_RENAME. */
603 namespace {
605 const pass_data pass_data_dominator =
607 GIMPLE_PASS, /* type */
608 "dom", /* name */
609 OPTGROUP_NONE, /* optinfo_flags */
610 TV_TREE_SSA_DOMINATOR_OPTS, /* tv_id */
611 ( PROP_cfg | PROP_ssa ), /* properties_required */
612 0, /* properties_provided */
613 0, /* properties_destroyed */
614 0, /* todo_flags_start */
615 ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
618 class pass_dominator : public gimple_opt_pass
620 public:
621 pass_dominator (gcc::context *ctxt)
622 : gimple_opt_pass (pass_data_dominator, ctxt),
623 may_peel_loop_headers_p (false)
626 /* opt_pass methods: */
627 opt_pass * clone () { return new pass_dominator (m_ctxt); }
628 void set_pass_param (unsigned int n, bool param)
630 gcc_assert (n == 0);
631 may_peel_loop_headers_p = param;
633 virtual bool gate (function *) { return flag_tree_dom != 0; }
634 virtual unsigned int execute (function *);
636 private:
637 /* This flag is used to prevent loops from being peeled repeatedly in jump
638 threading; it will be removed once we preserve loop structures throughout
639 the compilation -- we will be able to mark the affected loops directly in
640 jump threading, and avoid peeling them next time. */
641 bool may_peel_loop_headers_p;
642 }; // class pass_dominator
644 unsigned int
645 pass_dominator::execute (function *fun)
647 memset (&opt_stats, 0, sizeof (opt_stats));
649 /* Create our hash tables. */
650 hash_table<expr_elt_hasher> *avail_exprs
651 = new hash_table<expr_elt_hasher> (1024);
652 class avail_exprs_stack *avail_exprs_stack
653 = new class avail_exprs_stack (avail_exprs);
654 class const_and_copies *const_and_copies = new class const_and_copies ();
655 need_eh_cleanup = BITMAP_ALLOC (NULL);
656 need_noreturn_fixup.create (0);
658 calculate_dominance_info (CDI_DOMINATORS);
659 cfg_altered = false;
661 /* We need to know loop structures in order to avoid destroying them
662 in jump threading. Note that we still can e.g. thread through loop
663 headers to an exit edge, or through loop header to the loop body, assuming
664 that we update the loop info.
666 TODO: We don't need to set LOOPS_HAVE_PREHEADERS generally, but due
667 to several overly conservative bail-outs in jump threading, case
668 gcc.dg/tree-ssa/pr21417.c can't be threaded if loop preheader is
669 missing. We should improve jump threading in the future; then
670 LOOPS_HAVE_PREHEADERS won't be needed here. */
671 loop_optimizer_init (LOOPS_HAVE_PREHEADERS | LOOPS_HAVE_SIMPLE_LATCHES);
673 /* Initialize the value-handle array. */
674 threadedge_initialize_values ();
676 /* We need accurate information regarding back edges in the CFG
677 for jump threading; this may include back edges that are not part of
678 a single loop. */
679 mark_dfs_back_edges ();
681 /* We want to create the edge info structures before the dominator walk
682 so that they'll be in place for the jump threader, particularly when
683 threading through a join block.
685 The conditions will be lazily updated with global equivalences as
686 we reach them during the dominator walk. */
687 basic_block bb;
688 FOR_EACH_BB_FN (bb, fun)
689 record_edge_info (bb);
691 gcond *dummy_cond = gimple_build_cond (NE_EXPR, integer_zero_node,
692 integer_zero_node, NULL, NULL);
694 /* Recursively walk the dominator tree optimizing statements. */
695 dom_opt_dom_walker walker (CDI_DOMINATORS, const_and_copies,
696 avail_exprs_stack, dummy_cond);
697 walker.walk (fun->cfg->x_entry_block_ptr);
699 /* Look for blocks where we cleared EDGE_EXECUTABLE on an outgoing
700 edge. When found, remove jump threads which contain any outgoing
701 edge from the affected block. */
702 if (cfg_altered)
704 FOR_EACH_BB_FN (bb, fun)
706 edge_iterator ei;
707 edge e;
709 /* First see if there are any edges without EDGE_EXECUTABLE
710 set. */
711 bool found = false;
712 FOR_EACH_EDGE (e, ei, bb->succs)
714 if ((e->flags & EDGE_EXECUTABLE) == 0)
716 found = true;
717 break;
721 /* If there were any such edges found, then remove jump threads
722 containing any edge leaving BB. */
723 if (found)
724 FOR_EACH_EDGE (e, ei, bb->succs)
725 remove_jump_threads_including (e);
730 gimple_stmt_iterator gsi;
731 basic_block bb;
732 FOR_EACH_BB_FN (bb, fun)
734 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
735 update_stmt_if_modified (gsi_stmt (gsi));
739 /* If we exposed any new variables, go ahead and put them into
740 SSA form now, before we handle jump threading. This simplifies
741 interactions between rewriting of _DECL nodes into SSA form
742 and rewriting SSA_NAME nodes into SSA form after block
743 duplication and CFG manipulation. */
744 update_ssa (TODO_update_ssa);
746 free_all_edge_infos ();
748 /* Thread jumps, creating duplicate blocks as needed. */
749 cfg_altered |= thread_through_all_blocks (may_peel_loop_headers_p);
751 if (cfg_altered)
752 free_dominance_info (CDI_DOMINATORS);
754 /* Removal of statements may make some EH edges dead. Purge
755 such edges from the CFG as needed. */
756 if (!bitmap_empty_p (need_eh_cleanup))
758 unsigned i;
759 bitmap_iterator bi;
761 /* Jump threading may have created forwarder blocks from blocks
762 needing EH cleanup; the new successor of these blocks, which
763 has inherited from the original block, needs the cleanup.
764 Don't clear bits in the bitmap, as that can break the bitmap
765 iterator. */
766 EXECUTE_IF_SET_IN_BITMAP (need_eh_cleanup, 0, i, bi)
768 basic_block bb = BASIC_BLOCK_FOR_FN (fun, i);
769 if (bb == NULL)
770 continue;
771 while (single_succ_p (bb)
772 && (single_succ_edge (bb)->flags & EDGE_EH) == 0)
773 bb = single_succ (bb);
774 if (bb == EXIT_BLOCK_PTR_FOR_FN (fun))
775 continue;
776 if ((unsigned) bb->index != i)
777 bitmap_set_bit (need_eh_cleanup, bb->index);
780 gimple_purge_all_dead_eh_edges (need_eh_cleanup);
781 bitmap_clear (need_eh_cleanup);
784 /* Fixup stmts that became noreturn calls. This may require splitting
785 blocks and thus isn't possible during the dominator walk or before
786 jump threading finished. Do this in reverse order so we don't
787 inadvertently remove a stmt we want to fix up by visiting a dominating
788 now noreturn call first. */
789 while (!need_noreturn_fixup.is_empty ())
791 gimple *stmt = need_noreturn_fixup.pop ();
792 if (dump_file && dump_flags & TDF_DETAILS)
794 fprintf (dump_file, "Fixing up noreturn call ");
795 print_gimple_stmt (dump_file, stmt, 0);
796 fprintf (dump_file, "\n");
798 fixup_noreturn_call (stmt);
801 statistics_counter_event (fun, "Redundant expressions eliminated",
802 opt_stats.num_re);
803 statistics_counter_event (fun, "Constants propagated",
804 opt_stats.num_const_prop);
805 statistics_counter_event (fun, "Copies propagated",
806 opt_stats.num_copy_prop);
808 /* Debugging dumps. */
809 if (dump_file && (dump_flags & TDF_STATS))
810 dump_dominator_optimization_stats (dump_file, avail_exprs);
812 loop_optimizer_finalize ();
814 /* Delete our main hashtable. */
815 delete avail_exprs;
816 avail_exprs = NULL;
818 /* Free asserted bitmaps and stacks. */
819 BITMAP_FREE (need_eh_cleanup);
820 need_noreturn_fixup.release ();
821 delete avail_exprs_stack;
822 delete const_and_copies;
824 /* Free the value-handle array. */
825 threadedge_finalize_values ();
827 return 0;
830 } // anon namespace
832 gimple_opt_pass *
833 make_pass_dominator (gcc::context *ctxt)
835 return new pass_dominator (ctxt);
839 /* A trivial wrapper so that we can present the generic jump
840 threading code with a simple API for simplifying statements. */
841 static tree
842 simplify_stmt_for_jump_threading (gimple *stmt,
843 gimple *within_stmt ATTRIBUTE_UNUSED,
844 class avail_exprs_stack *avail_exprs_stack,
845 basic_block bb ATTRIBUTE_UNUSED)
847 return avail_exprs_stack->lookup_avail_expr (stmt, false, true);
850 /* Valueize hook for gimple_fold_stmt_to_constant_1. */
852 static tree
853 dom_valueize (tree t)
855 if (TREE_CODE (t) == SSA_NAME)
857 tree tem = SSA_NAME_VALUE (t);
858 if (tem)
859 return tem;
861 return t;
864 /* We have just found an equivalence for LHS on an edge E.
865 Look backwards to other uses of LHS and see if we can derive
866 additional equivalences that are valid on edge E. */
867 static void
868 back_propagate_equivalences (tree lhs, edge e,
869 class const_and_copies *const_and_copies)
871 use_operand_p use_p;
872 imm_use_iterator iter;
873 bitmap domby = NULL;
874 basic_block dest = e->dest;
876 /* Iterate over the uses of LHS to see if any dominate E->dest.
877 If so, they may create useful equivalences too.
879 ??? If the code gets re-organized to a worklist to catch more
880 indirect opportunities and it is made to handle PHIs then this
881 should only consider use_stmts in basic-blocks we have already visited. */
882 FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
884 gimple *use_stmt = USE_STMT (use_p);
886 /* Often the use is in DEST, which we trivially know we can't use.
887 This is cheaper than the dominator set tests below. */
888 if (dest == gimple_bb (use_stmt))
889 continue;
891 /* Filter out statements that can never produce a useful
892 equivalence. */
893 tree lhs2 = gimple_get_lhs (use_stmt);
894 if (!lhs2 || TREE_CODE (lhs2) != SSA_NAME)
895 continue;
897 /* Profiling has shown the domination tests here can be fairly
898 expensive. We get significant improvements by building the
899 set of blocks that dominate DEST. We can then just test
900 for set membership below.
902 We also initialize the set lazily since often the only uses
903 are going to be in the same block as DEST. */
904 if (!domby)
906 domby = BITMAP_ALLOC (NULL);
907 basic_block bb = get_immediate_dominator (CDI_DOMINATORS, dest);
908 while (bb)
910 bitmap_set_bit (domby, bb->index);
911 bb = get_immediate_dominator (CDI_DOMINATORS, bb);
915 /* This tests if USE_STMT does not dominate DEST. */
916 if (!bitmap_bit_p (domby, gimple_bb (use_stmt)->index))
917 continue;
919 /* At this point USE_STMT dominates DEST and may result in a
920 useful equivalence. Try to simplify its RHS to a constant
921 or SSA_NAME. */
922 tree res = gimple_fold_stmt_to_constant_1 (use_stmt, dom_valueize,
923 no_follow_ssa_edges);
924 if (res && (TREE_CODE (res) == SSA_NAME || is_gimple_min_invariant (res)))
925 record_equality (lhs2, res, const_and_copies);
928 if (domby)
929 BITMAP_FREE (domby);
932 /* Record into CONST_AND_COPIES and AVAIL_EXPRS_STACK any equivalences implied
933 by traversing edge E (which are cached in E->aux).
935 Callers are responsible for managing the unwinding markers. */
936 void
937 record_temporary_equivalences (edge e,
938 class const_and_copies *const_and_copies,
939 class avail_exprs_stack *avail_exprs_stack)
941 int i;
942 class edge_info *edge_info = (class edge_info *) e->aux;
944 /* If we have info associated with this edge, record it into
945 our equivalence tables. */
946 if (edge_info)
948 cond_equivalence *eq;
949 /* If we have 0 = COND or 1 = COND equivalences, record them
950 into our expression hash tables. */
951 for (i = 0; edge_info->cond_equivalences.iterate (i, &eq); ++i)
952 avail_exprs_stack->record_cond (eq);
954 edge_info::equiv_pair *seq;
955 for (i = 0; edge_info->simple_equivalences.iterate (i, &seq); ++i)
957 tree lhs = seq->first;
958 if (!lhs || TREE_CODE (lhs) != SSA_NAME)
959 continue;
961 /* Record the simple NAME = VALUE equivalence. */
962 tree rhs = seq->second;
964 /* If this is an SSA_NAME = SSA_NAME equivalence and one operand is
965 cheaper to compute than the other, then set up the equivalence
966 such that we replace the expensive one with the cheap one.
968 If they are the same cost to compute, then do not record
969 anything. */
970 if (TREE_CODE (lhs) == SSA_NAME && TREE_CODE (rhs) == SSA_NAME)
972 gimple *rhs_def = SSA_NAME_DEF_STMT (rhs);
973 int rhs_cost = estimate_num_insns (rhs_def, &eni_size_weights);
975 gimple *lhs_def = SSA_NAME_DEF_STMT (lhs);
976 int lhs_cost = estimate_num_insns (lhs_def, &eni_size_weights);
978 if (rhs_cost > lhs_cost)
979 record_equality (rhs, lhs, const_and_copies);
980 else if (rhs_cost < lhs_cost)
981 record_equality (lhs, rhs, const_and_copies);
983 else
984 record_equality (lhs, rhs, const_and_copies);
987 /* Any equivalence found for LHS may result in additional
988 equivalences for other uses of LHS that we have already
989 processed. */
990 back_propagate_equivalences (lhs, e, const_and_copies);
995 /* PHI nodes can create equivalences too.
997 Ignoring any alternatives which are the same as the result, if
998 all the alternatives are equal, then the PHI node creates an
999 equivalence. */
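/* For example, x_3 = PHI <y_7, y_7, x_3> lets us record the
   equivalence x_3 = y_7.  */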
1001 static void
1002 record_equivalences_from_phis (basic_block bb)
1004 gphi_iterator gsi;
1006 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1008 gphi *phi = gsi.phi ();
1010 tree lhs = gimple_phi_result (phi);
1011 tree rhs = NULL;
1012 size_t i;
1014 for (i = 0; i < gimple_phi_num_args (phi); i++)
1016 tree t = gimple_phi_arg_def (phi, i);
1018 /* Ignore alternatives which are the same as our LHS. Since
1019 LHS is a PHI_RESULT, it is known to be a SSA_NAME, so we
1020 can simply compare pointers. */
1021 if (lhs == t)
1022 continue;
1024 /* If the associated edge is not marked as executable, then it
1025 can be ignored. */
1026 if ((gimple_phi_arg_edge (phi, i)->flags & EDGE_EXECUTABLE) == 0)
1027 continue;
1029 t = dom_valueize (t);
1031 /* If we have not processed an alternative yet, then set
1032 RHS to this alternative. */
1033 if (rhs == NULL)
1034 rhs = t;
1035 /* If we have processed an alternative (stored in RHS), then
1036 see if it is equal to this one. If it isn't, then stop
1037 the search. */
1038 else if (! operand_equal_for_phi_arg_p (rhs, t))
1039 break;
1042 /* If we had no interesting alternatives, then all the RHS alternatives
1043 must have been the same as LHS. */
1044 if (!rhs)
1045 rhs = lhs;
1047 /* If we managed to iterate through each PHI alternative without
1048 breaking out of the loop, then we have a PHI which may create
1049 a useful equivalence. We do not need to record unwind data for
1050 this, since this is a true assignment and not an equivalence
1051 inferred from a comparison. All uses of this ssa name are dominated
1052 by this assignment, so unwinding just costs time and space. */
1053 if (i == gimple_phi_num_args (phi)
1054 && may_propagate_copy (lhs, rhs))
1055 set_ssa_name_value (lhs, rhs);
1059 /* Record any equivalences created by the incoming edge to BB into
1060 CONST_AND_COPIES and AVAIL_EXPRS_STACK. If BB has more than one
1061 incoming edge, then no equivalence is created. */
1063 static void
1064 record_equivalences_from_incoming_edge (basic_block bb,
1065 class const_and_copies *const_and_copies,
1066 class avail_exprs_stack *avail_exprs_stack)
1068 edge e;
1069 basic_block parent;
1071 /* If our parent block ended with a control statement, then we may be
1072 able to record some equivalences based on which outgoing edge from
1073 the parent was followed. */
1074 parent = get_immediate_dominator (CDI_DOMINATORS, bb);
1076 e = single_pred_edge_ignoring_loop_edges (bb, true);
1078 /* If we had a single incoming edge from our parent block, then enter
1079 any data associated with the edge into our tables. */
1080 if (e && e->src == parent)
1081 record_temporary_equivalences (e, const_and_copies, avail_exprs_stack);
1084 /* Dump statistics for the hash table HTAB. */
1086 static void
1087 htab_statistics (FILE *file, const hash_table<expr_elt_hasher> &htab)
1089 fprintf (file, "size %ld, %ld elements, %f collision/search ratio\n",
1090 (long) htab.size (),
1091 (long) htab.elements (),
1092 htab.collisions ());
1095 /* Dump SSA statistics on FILE. */
1097 static void
1098 dump_dominator_optimization_stats (FILE *file,
1099 hash_table<expr_elt_hasher> *avail_exprs)
1101 fprintf (file, "Total number of statements: %6ld\n\n",
1102 opt_stats.num_stmts);
1103 fprintf (file, "Exprs considered for dominator optimizations: %6ld\n",
1104 opt_stats.num_exprs_considered);
1106 fprintf (file, "\nHash table statistics:\n");
1108 fprintf (file, " avail_exprs: ");
1109 htab_statistics (file, *avail_exprs);
1113 /* Similarly, but assume that X and Y are the two operands of an EQ_EXPR.
1114 This constrains the cases in which we may treat this as assignment. */
1116 static void
1117 record_equality (tree x, tree y, class const_and_copies *const_and_copies)
1119 tree prev_x = NULL, prev_y = NULL;
1121 if (tree_swap_operands_p (x, y))
1122 std::swap (x, y);
1124 /* Most of the time tree_swap_operands_p does what we want. But there
1125 are cases where we know one operand is better for copy propagation than
1126 the other. Given no other code cares about ordering of equality
1127 comparison operators for that purpose, we just handle the special cases
1128 here. */
1129 if (TREE_CODE (x) == SSA_NAME && TREE_CODE (y) == SSA_NAME)
1131 /* If one operand is a single use operand, then make it
1132 X. This will preserve its single-use property, and if this
1133 conditional is eliminated, the computation of X can be
1134 eliminated as well. */
1135 if (has_single_use (y) && ! has_single_use (x))
1136 std::swap (x, y);
1138 if (TREE_CODE (x) == SSA_NAME)
1139 prev_x = SSA_NAME_VALUE (x);
1140 if (TREE_CODE (y) == SSA_NAME)
1141 prev_y = SSA_NAME_VALUE (y);
1143 /* If one of the previous values is invariant, or invariant in more loops
1144 (by depth), then use that.
1145 Otherwise it doesn't matter which value we choose, just so
1146 long as we canonicalize on one value. */
1147 if (is_gimple_min_invariant (y))
1149 else if (is_gimple_min_invariant (x))
1150 prev_x = x, x = y, y = prev_x, prev_x = prev_y;
1151 else if (prev_x && is_gimple_min_invariant (prev_x))
1152 x = y, y = prev_x, prev_x = prev_y;
1153 else if (prev_y)
1154 y = prev_y;
1156 /* After the swapping, we must have one SSA_NAME. */
1157 if (TREE_CODE (x) != SSA_NAME)
1158 return;
1160 /* For IEEE, -0.0 == 0.0, so we don't necessarily know the sign of a
1161 variable compared against zero. If we're honoring signed zeros,
1162 then we cannot record this value unless we know that the value is
1163 nonzero. */
1164 if (HONOR_SIGNED_ZEROS (x)
1165 && (TREE_CODE (y) != REAL_CST
1166 || real_equal (&dconst0, &TREE_REAL_CST (y))))
1167 return;
1169 const_and_copies->record_const_or_copy (x, y, prev_x);
1172 /* Returns true when STMT is a simple iv increment. It detects the
1173 following situation:
1175 i_1 = phi (..., i_2)
1176 i_2 = i_1 +/- ... */
1178 bool
1179 simple_iv_increment_p (gimple *stmt)
1181 enum tree_code code;
1182 tree lhs, preinc;
1183 gimple *phi;
1184 size_t i;
1186 if (gimple_code (stmt) != GIMPLE_ASSIGN)
1187 return false;
1189 lhs = gimple_assign_lhs (stmt);
1190 if (TREE_CODE (lhs) != SSA_NAME)
1191 return false;
1193 code = gimple_assign_rhs_code (stmt);
1194 if (code != PLUS_EXPR
1195 && code != MINUS_EXPR
1196 && code != POINTER_PLUS_EXPR)
1197 return false;
1199 preinc = gimple_assign_rhs1 (stmt);
1200 if (TREE_CODE (preinc) != SSA_NAME)
1201 return false;
1203 phi = SSA_NAME_DEF_STMT (preinc);
1204 if (gimple_code (phi) != GIMPLE_PHI)
1205 return false;
1207 for (i = 0; i < gimple_phi_num_args (phi); i++)
1208 if (gimple_phi_arg_def (phi, i) == lhs)
1209 return true;
1211 return false;
1214 /* Propagate known values from SSA_NAME_VALUE into the PHI nodes of the
1215 successors of BB. */
1217 static void
1218 cprop_into_successor_phis (basic_block bb,
1219 class const_and_copies *const_and_copies)
1221 edge e;
1222 edge_iterator ei;
1224 FOR_EACH_EDGE (e, ei, bb->succs)
1226 int indx;
1227 gphi_iterator gsi;
1229 /* If this is an abnormal edge, then we do not want to copy propagate
1230 into the PHI alternative associated with this edge. */
1231 if (e->flags & EDGE_ABNORMAL)
1232 continue;
1234 gsi = gsi_start_phis (e->dest);
1235 if (gsi_end_p (gsi))
1236 continue;
1238 /* We may have an equivalence associated with this edge. While
1239 we cannot propagate it into non-dominated blocks, we can
1240 propagate it into PHIs in non-dominated blocks. */
1242 /* Push the unwind marker so we can reset the const and copies
1243 table back to its original state after processing this edge. */
1244 const_and_copies->push_marker ();
1246 /* Extract and record any simple NAME = VALUE equivalences.
1248 Don't bother with [01] = COND equivalences, they're not useful
1249 here. */
1250 class edge_info *edge_info = (class edge_info *) e->aux;
1252 if (edge_info)
1254 edge_info::equiv_pair *seq;
1255 for (int i = 0; edge_info->simple_equivalences.iterate (i, &seq); ++i)
1257 tree lhs = seq->first;
1258 tree rhs = seq->second;
1260 if (lhs && TREE_CODE (lhs) == SSA_NAME)
1261 const_and_copies->record_const_or_copy (lhs, rhs);
1266 indx = e->dest_idx;
1267 for ( ; !gsi_end_p (gsi); gsi_next (&gsi))
1269 tree new_val;
1270 use_operand_p orig_p;
1271 tree orig_val;
1272 gphi *phi = gsi.phi ();
1274 /* The alternative may be associated with a constant, so verify
1275 it is an SSA_NAME before doing anything with it. */
1276 orig_p = gimple_phi_arg_imm_use_ptr (phi, indx);
1277 orig_val = get_use_from_ptr (orig_p);
1278 if (TREE_CODE (orig_val) != SSA_NAME)
1279 continue;
1281 /* If we have *ORIG_P in our constant/copy table, then replace
1282 ORIG_P with its value in our constant/copy table. */
1283 new_val = SSA_NAME_VALUE (orig_val);
1284 if (new_val
1285 && new_val != orig_val
1286 && may_propagate_copy (orig_val, new_val))
1287 propagate_value (orig_p, new_val);
1290 const_and_copies->pop_to_marker ();
1294 edge
1295 dom_opt_dom_walker::before_dom_children (basic_block bb)
1297 gimple_stmt_iterator gsi;
1299 if (dump_file && (dump_flags & TDF_DETAILS))
1300 fprintf (dump_file, "\n\nOptimizing block #%d\n\n", bb->index);
1302 /* Push a marker on the stacks of local information so that we know how
1303 far to unwind when we finalize this block. */
1304 m_avail_exprs_stack->push_marker ();
1305 m_const_and_copies->push_marker ();
1307 record_equivalences_from_incoming_edge (bb, m_const_and_copies,
1308 m_avail_exprs_stack);
1310 /* PHI nodes can create equivalences too. */
1311 record_equivalences_from_phis (bb);
1313 /* Create equivalences from redundant PHIs. PHIs are only truly
1314 redundant when they exist in the same block, so push another
1315 marker and unwind right afterwards. */
1316 m_avail_exprs_stack->push_marker ();
1317 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1318 eliminate_redundant_computations (&gsi, m_const_and_copies,
1319 m_avail_exprs_stack);
1320 m_avail_exprs_stack->pop_to_marker ();
1322 edge taken_edge = NULL;
1323 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1324 taken_edge = this->optimize_stmt (bb, gsi);
1326 /* Now prepare to process dominated blocks. */
1327 record_edge_info (bb);
1328 cprop_into_successor_phis (bb, m_const_and_copies);
1329 if (taken_edge && !dbg_cnt (dom_unreachable_edges))
1330 return NULL;
1332 return taken_edge;
1335 /* We have finished processing the dominator children of BB, perform
1336 any finalization actions in preparation for leaving this node in
1337 the dominator tree. */
1339 void
1340 dom_opt_dom_walker::after_dom_children (basic_block bb)
1342 thread_outgoing_edges (bb, m_dummy_cond, m_const_and_copies,
1343 m_avail_exprs_stack,
1344 simplify_stmt_for_jump_threading);
1346 /* These remove expressions local to BB from the tables. */
1347 m_avail_exprs_stack->pop_to_marker ();
1348 m_const_and_copies->pop_to_marker ();
1351 /* Search for redundant computations in STMT. If any are found, then
1352 replace them with the variable holding the result of the computation.
1354 If safe, record this expression into AVAIL_EXPRS_STACK and
1355 CONST_AND_COPIES. */
1357 static void
1358 eliminate_redundant_computations (gimple_stmt_iterator* gsi,
1359 class const_and_copies *const_and_copies,
1360 class avail_exprs_stack *avail_exprs_stack)
1362 tree expr_type;
1363 tree cached_lhs;
1364 tree def;
1365 bool insert = true;
1366 bool assigns_var_p = false;
1368 gimple *stmt = gsi_stmt (*gsi);
1370 if (gimple_code (stmt) == GIMPLE_PHI)
1371 def = gimple_phi_result (stmt);
1372 else
1373 def = gimple_get_lhs (stmt);
1375 /* Certain expressions on the RHS can be optimized away, but cannot
1376 themselves be entered into the hash tables. */
1377 if (! def
1378 || TREE_CODE (def) != SSA_NAME
1379 || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (def)
1380 || gimple_vdef (stmt)
1381 /* Do not record equivalences for increments of ivs. This would create
1382 overlapping live ranges for a very questionable gain. */
1383 || simple_iv_increment_p (stmt))
1384 insert = false;
1386 /* Check if the expression has been computed before. */
1387 cached_lhs = avail_exprs_stack->lookup_avail_expr (stmt, insert, true);
1389 opt_stats.num_exprs_considered++;
1391 /* Get the type of the expression we are trying to optimize. */
1392 if (is_gimple_assign (stmt))
1394 expr_type = TREE_TYPE (gimple_assign_lhs (stmt));
1395 assigns_var_p = true;
1397 else if (gimple_code (stmt) == GIMPLE_COND)
1398 expr_type = boolean_type_node;
1399 else if (is_gimple_call (stmt))
1401 gcc_assert (gimple_call_lhs (stmt));
1402 expr_type = TREE_TYPE (gimple_call_lhs (stmt));
1403 assigns_var_p = true;
1405 else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
1406 expr_type = TREE_TYPE (gimple_switch_index (swtch_stmt));
1407 else if (gimple_code (stmt) == GIMPLE_PHI)
1408 /* We can't propagate into a phi, so the logic below doesn't apply.
1409 Instead record an equivalence between the cached LHS and the
1410 PHI result of this statement, provided they are in the same block.
1411 This should be sufficient to kill the redundant phi. */
1413 if (def && cached_lhs)
1414 const_and_copies->record_const_or_copy (def, cached_lhs);
1415 return;
1417 else
1418 gcc_unreachable ();
1420 if (!cached_lhs)
1421 return;
1423 /* It is safe to ignore types here since we have already done
1424 type checking in the hashing and equality routines. In fact
1425 type checking here merely gets in the way of constant
1426 propagation. Also, make sure that it is safe to propagate
1427 CACHED_LHS into the expression in STMT. */
1428 if ((TREE_CODE (cached_lhs) != SSA_NAME
1429 && (assigns_var_p
1430 || useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs))))
1431 || may_propagate_copy_into_stmt (stmt, cached_lhs))
1433 gcc_checking_assert (TREE_CODE (cached_lhs) == SSA_NAME
1434 || is_gimple_min_invariant (cached_lhs));
1436 if (dump_file && (dump_flags & TDF_DETAILS))
1438 fprintf (dump_file, " Replaced redundant expr '");
1439 print_gimple_expr (dump_file, stmt, 0, dump_flags);
1440 fprintf (dump_file, "' with '");
1441 print_generic_expr (dump_file, cached_lhs, dump_flags);
1442 fprintf (dump_file, "'\n");
1445 opt_stats.num_re++;
1447 if (assigns_var_p
1448 && !useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs)))
1449 cached_lhs = fold_convert (expr_type, cached_lhs);
1451 propagate_tree_value_into_stmt (gsi, cached_lhs);
1453 /* Since it is always necessary to mark the result as modified,
1454 perhaps we should move this into propagate_tree_value_into_stmt
1455 itself. */
1456 gimple_set_modified (gsi_stmt (*gsi), true);
1460 /* STMT, a GIMPLE_ASSIGN, may create certain equivalences, in either
1461 the available expressions table or the const_and_copies table.
1462 Detect and record those equivalences into AVAIL_EXPRS_STACK.
1464 We handle only very simple copy equivalences here. The heavy
1465 lifting is done by eliminate_redundant_computations. */
1467 static void
1468 record_equivalences_from_stmt (gimple *stmt, int may_optimize_p,
1469 class avail_exprs_stack *avail_exprs_stack)
1471 tree lhs;
1472 enum tree_code lhs_code;
1474 gcc_assert (is_gimple_assign (stmt));
1476 lhs = gimple_assign_lhs (stmt);
1477 lhs_code = TREE_CODE (lhs);
1479 if (lhs_code == SSA_NAME
1480 && gimple_assign_single_p (stmt))
1482 tree rhs = gimple_assign_rhs1 (stmt);
1484 /* If the RHS of the assignment is a constant or another variable that
1485 may be propagated, register it in the CONST_AND_COPIES table. We
1486 do not need to record unwind data for this, since this is a true
1487 assignment and not an equivalence inferred from a comparison. All
1488 uses of this ssa name are dominated by this assignment, so unwinding
1489 just costs time and space. */
1490 if (may_optimize_p
1491 && (TREE_CODE (rhs) == SSA_NAME
1492 || is_gimple_min_invariant (rhs)))
1494 rhs = dom_valueize (rhs);
1496 if (dump_file && (dump_flags & TDF_DETAILS))
1498 fprintf (dump_file, "==== ASGN ");
1499 print_generic_expr (dump_file, lhs);
1500 fprintf (dump_file, " = ");
1501 print_generic_expr (dump_file, rhs);
1502 fprintf (dump_file, "\n");
1505 set_ssa_name_value (lhs, rhs);
1509 /* Make sure we can propagate &x + CST. */
1510 if (lhs_code == SSA_NAME
1511 && gimple_assign_rhs_code (stmt) == POINTER_PLUS_EXPR
1512 && TREE_CODE (gimple_assign_rhs1 (stmt)) == ADDR_EXPR
1513 && TREE_CODE (gimple_assign_rhs2 (stmt)) == INTEGER_CST)
1515 tree op0 = gimple_assign_rhs1 (stmt);
1516 tree op1 = gimple_assign_rhs2 (stmt);
1517 tree new_rhs
1518 = build_fold_addr_expr (fold_build2 (MEM_REF,
1519 TREE_TYPE (TREE_TYPE (op0)),
1520 unshare_expr (op0),
1521 fold_convert (ptr_type_node,
1522 op1)));
1523 if (dump_file && (dump_flags & TDF_DETAILS))
1525 fprintf (dump_file, "==== ASGN ");
1526 print_generic_expr (dump_file, lhs);
1527 fprintf (dump_file, " = ");
1528 print_generic_expr (dump_file, new_rhs);
1529 fprintf (dump_file, "\n");
1532 set_ssa_name_value (lhs, new_rhs);
1535 /* A memory store, even an aliased store, creates a useful
1536 equivalence. By exchanging the LHS and RHS, creating suitable
1537 vops and recording the result in the available expression table,
1538 we may be able to expose more redundant loads. */
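/* For example, after *p_1 = x_2; a later dominated load y_3 = *p_1;
   can be rewritten as y_3 = x_2;.  */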
1539 if (!gimple_has_volatile_ops (stmt)
1540 && gimple_references_memory_p (stmt)
1541 && gimple_assign_single_p (stmt)
1542 && (TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME
1543 || is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
1544 && !is_gimple_reg (lhs))
1546 tree rhs = gimple_assign_rhs1 (stmt);
1547 gassign *new_stmt;
1549 /* Build a new statement with the RHS and LHS exchanged. */
1550 if (TREE_CODE (rhs) == SSA_NAME)
1552 /* NOTE tuples. The call to gimple_build_assign below replaced
1553 a call to build_gimple_modify_stmt, which did not set the
1554 SSA_NAME_DEF_STMT on the LHS of the assignment. Doing so
1555 may cause an SSA validation failure, as the LHS may be a
1556 default-initialized name and should have no definition. I'm
1557 a bit dubious of this, as the artificial statement that we
1558 generate here may in fact be ill-formed, but it is simply
1559 used as an internal device in this pass, and never becomes
1560 part of the CFG. */
1561 gimple *defstmt = SSA_NAME_DEF_STMT (rhs);
1562 new_stmt = gimple_build_assign (rhs, lhs);
1563 SSA_NAME_DEF_STMT (rhs) = defstmt;
1565 else
1566 new_stmt = gimple_build_assign (rhs, lhs);
1568 gimple_set_vuse (new_stmt, gimple_vdef (stmt));
1570 /* Finally enter the statement into the available expression
1571 table. */
1572 avail_exprs_stack->lookup_avail_expr (new_stmt, true, true);
1576 /* Replace *OP_P in STMT with any known equivalent value for *OP_P from
1577 CONST_AND_COPIES. */
1579 static void
1580 cprop_operand (gimple *stmt, use_operand_p op_p)
1582 tree val;
1583 tree op = USE_FROM_PTR (op_p);
1585 /* If the operand has a known constant value or it is known to be a
1586 copy of some other variable, use the value or copy stored in
1587 CONST_AND_COPIES. */
1588 val = SSA_NAME_VALUE (op);
1589 if (val && val != op)
1591 /* Do not replace hard register operands in asm statements. */
1592 if (gimple_code (stmt) == GIMPLE_ASM
1593 && !may_propagate_copy_into_asm (op))
1594 return;
1596 /* Certain operands are not allowed to be copy propagated due
1597 to their interaction with exception handling and some GCC
1598 extensions. */
1599 if (!may_propagate_copy (op, val))
1600 return;
1602 /* Do not propagate copies into BIVs.
1603 See PR23821 and PR62217 for how this can disturb IV and
1604 number of iteration analysis. */
1605 if (TREE_CODE (val) != INTEGER_CST)
1607 gimple *def = SSA_NAME_DEF_STMT (op);
1608 if (gimple_code (def) == GIMPLE_PHI
1609 && gimple_bb (def)->loop_father->header == gimple_bb (def))
1610 return;
1613 /* Dump details. */
1614 if (dump_file && (dump_flags & TDF_DETAILS))
1616 fprintf (dump_file, " Replaced '");
1617 print_generic_expr (dump_file, op, dump_flags);
1618 fprintf (dump_file, "' with %s '",
1619 (TREE_CODE (val) != SSA_NAME ? "constant" : "variable"));
1620 print_generic_expr (dump_file, val, dump_flags);
1621 fprintf (dump_file, "'\n");
1624 if (TREE_CODE (val) != SSA_NAME)
1625 opt_stats.num_const_prop++;
1626 else
1627 opt_stats.num_copy_prop++;
1629 propagate_value (op_p, val);
1631 /* And note that we modified this statement. This is now
1632 safe, even if we changed virtual operands since we will
1633 rescan the statement and rewrite its operands again. */
1634 gimple_set_modified (stmt, true);
1638 /* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
1639 known value for that SSA_NAME (or NULL if no value is known).
1641 Propagate values from CONST_AND_COPIES into the uses, vuses and
1642 vdef_ops of STMT. */
1644 static void
1645 cprop_into_stmt (gimple *stmt)
1647 use_operand_p op_p;
1648 ssa_op_iter iter;
1649 tree last_copy_propagated_op = NULL;
1651 FOR_EACH_SSA_USE_OPERAND (op_p, stmt, iter, SSA_OP_USE)
1653 tree old_op = USE_FROM_PTR (op_p);
1655 /* If we have A = B and B = A in the copy propagation tables
1656 (due to an equality comparison), avoid substituting B for A
1657 then A for B in the trivially discovered cases. This allows
1658 optimization of statements where A and B appear as input
1659 operands. */
1660 if (old_op != last_copy_propagated_op)
1662 cprop_operand (stmt, op_p);
1664 tree new_op = USE_FROM_PTR (op_p);
1665 if (new_op != old_op && TREE_CODE (new_op) == SSA_NAME)
1666 last_copy_propagated_op = new_op;
1671 /* If STMT contains a relational test, try to convert it into an
1672 equality test if there is only a single value which can ever
1673 make the test true.
1675 For example, if the expression hash table contains:
1677 TRUE = (i <= 1)
1679 and we have a test within a statement of i >= 1, then we can safely
1680 rewrite the test as i == 1, since there is only a single value for
1681 which the test is true.
1683 This is similar to code in VRP. */
1685 static void
1686 test_for_singularity (gimple *stmt, gcond *dummy_cond,
1687 avail_exprs_stack *avail_exprs_stack)
1689 /* We want to support gimple conditionals as well as assignments
1690 where the RHS contains a conditional. */
1691 if (is_gimple_assign (stmt) || gimple_code (stmt) == GIMPLE_COND)
1693 enum tree_code code = ERROR_MARK;
1694 tree lhs, rhs;
1696 /* Extract the condition of interest from both forms we support. */
1697 if (is_gimple_assign (stmt))
1699 code = gimple_assign_rhs_code (stmt);
1700 lhs = gimple_assign_rhs1 (stmt);
1701 rhs = gimple_assign_rhs2 (stmt);
1703 else if (gimple_code (stmt) == GIMPLE_COND)
1705 code = gimple_cond_code (as_a <gcond *> (stmt));
1706 lhs = gimple_cond_lhs (as_a <gcond *> (stmt));
1707 rhs = gimple_cond_rhs (as_a <gcond *> (stmt));
1710 /* We're looking for a relational test using LE/GE. Also note we can
1711 canonicalize LT/GT tests against constants into LE/GE tests. */
1712 if (code == LE_EXPR || code == GE_EXPR
1713 || ((code == LT_EXPR || code == GT_EXPR)
1714 && TREE_CODE (rhs) == INTEGER_CST))
1716 /* For LT_EXPR and GT_EXPR, canonicalize to LE_EXPR and GE_EXPR. */
1717 if (code == LT_EXPR)
1718 rhs = fold_build2 (MINUS_EXPR, TREE_TYPE (rhs),
1719 rhs, build_int_cst (TREE_TYPE (rhs), 1));
1721 if (code == GT_EXPR)
1722 rhs = fold_build2 (PLUS_EXPR, TREE_TYPE (rhs),
1723 rhs, build_int_cst (TREE_TYPE (rhs), 1));
1725 /* Determine the code we want to check for in the hash table. */
1726 enum tree_code test_code;
1727 if (code == GE_EXPR || code == GT_EXPR)
1728 test_code = LE_EXPR;
1729 else
1730 test_code = GE_EXPR;
1732 /* Update the dummy statement so we can query the hash tables. */
1733 gimple_cond_set_code (dummy_cond, test_code);
1734 gimple_cond_set_lhs (dummy_cond, lhs);
1735 gimple_cond_set_rhs (dummy_cond, rhs);
1736 tree cached_lhs
1737 = avail_exprs_stack->lookup_avail_expr (dummy_cond, false, false);
1739 /* If the lookup returned 1 (true), then the expression we
1740 queried was in the hash table. As a result there is only
1741 one value that makes the original conditional true. Update
1742 STMT accordingly. */
1743 if (cached_lhs && integer_onep (cached_lhs))
1745 if (is_gimple_assign (stmt))
1747 gimple_assign_set_rhs_code (stmt, EQ_EXPR);
1748 gimple_assign_set_rhs2 (stmt, rhs);
1749 gimple_set_modified (stmt, true);
1751 else
1753 gimple_set_modified (stmt, true);
1754 gimple_cond_set_code (as_a <gcond *> (stmt), EQ_EXPR);
1755 gimple_cond_set_rhs (as_a <gcond *> (stmt), rhs);
1756 gimple_set_modified (stmt, true);
1763 /* Optimize the statement in block BB pointed to by iterator SI.
1765 We try to perform some simplistic global redundancy elimination and
1766 constant propagation:
1768 1- To detect global redundancy, we keep track of expressions that have
1769 been computed in this block and its dominators. If we find that the
1770 same expression is computed more than once, we eliminate repeated
1771 computations by using the target of the first one.
1773 2- Constant values and copy assignments. This is used to do very
1774 simplistic constant and copy propagation. When a constant or copy
1775 assignment is found, we map the value on the RHS of the assignment to
1776 the variable in the LHS in the CONST_AND_COPIES table.
1778 3- Very simple redundant store elimination is performed.
1780 4- We can simplify a condition to a constant or from a relational
1781 condition to an equality condition. */
1783 edge
1784 dom_opt_dom_walker::optimize_stmt (basic_block bb, gimple_stmt_iterator si)
1786 gimple *stmt, *old_stmt;
1787 bool may_optimize_p;
1788 bool modified_p = false;
1789 bool was_noreturn;
1790 edge retval = NULL;
1792 old_stmt = stmt = gsi_stmt (si);
1793 was_noreturn = is_gimple_call (stmt) && gimple_call_noreturn_p (stmt);
1795 if (dump_file && (dump_flags & TDF_DETAILS))
1797 fprintf (dump_file, "Optimizing statement ");
1798 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
1801 update_stmt_if_modified (stmt);
1802 opt_stats.num_stmts++;
1804 /* Const/copy propagate into USES, VUSES and the RHS of VDEFs. */
1805 cprop_into_stmt (stmt);
1807 /* If the statement has been modified with constant replacements,
1808 fold its RHS before checking for redundant computations. */
1809 if (gimple_modified_p (stmt))
1811 tree rhs = NULL;
1813 /* Try to fold the statement making sure that STMT is kept
1814 up to date. */
1815 if (fold_stmt (&si))
1817 stmt = gsi_stmt (si);
1818 gimple_set_modified (stmt, true);
1820 if (dump_file && (dump_flags & TDF_DETAILS))
1822 fprintf (dump_file, " Folded to: ");
1823 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
1827 /* We only need to consider cases that can yield a gimple operand. */
1828 if (gimple_assign_single_p (stmt))
1829 rhs = gimple_assign_rhs1 (stmt);
1830 else if (gimple_code (stmt) == GIMPLE_GOTO)
1831 rhs = gimple_goto_dest (stmt);
1832 else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
1833 /* This should never be an ADDR_EXPR. */
1834 rhs = gimple_switch_index (swtch_stmt);
1836 if (rhs && TREE_CODE (rhs) == ADDR_EXPR)
1837 recompute_tree_invariant_for_addr_expr (rhs);
1839 /* Indicate that maybe_clean_or_replace_eh_stmt needs to be called,
1840 even if fold_stmt updated the stmt already and thus cleared
1841 gimple_modified_p flag on it. */
1842 modified_p = true;
1845 /* Check for redundant computations. Do this optimization only for
1846 side-effect-free assignments, calls with an LHS, conditionals and switches. */
1847 may_optimize_p = (!gimple_has_side_effects (stmt)
1848 && (is_gimple_assign (stmt)
1849 || (is_gimple_call (stmt)
1850 && gimple_call_lhs (stmt) != NULL_TREE)
1851 || gimple_code (stmt) == GIMPLE_COND
1852 || gimple_code (stmt) == GIMPLE_SWITCH));
1854 if (may_optimize_p)
1856 if (gimple_code (stmt) == GIMPLE_CALL)
1858 /* Resolve __builtin_constant_p. If it hasn't been
1859 folded to integer_one_node by now, it's fairly
1860 certain that the value simply isn't constant. */
1861 tree callee = gimple_call_fndecl (stmt);
1862 if (callee
1863 && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL
1864 && DECL_FUNCTION_CODE (callee) == BUILT_IN_CONSTANT_P)
1866 propagate_tree_value_into_stmt (&si, integer_zero_node);
1867 stmt = gsi_stmt (si);
1871 if (gimple_code (stmt) == GIMPLE_COND)
1873 tree lhs = gimple_cond_lhs (stmt);
1874 tree rhs = gimple_cond_rhs (stmt);
1876 /* If the LHS has a range [0..1] and the RHS has a range ~[0..1],
1877 then this conditional is computable at compile time. We can just
1878 shove either 0 or 1 into the LHS, mark the statement as modified
1879 and all the right things will just happen below.
1881 Note this would apply to any case where LHS has a range
1882 narrower than its type implies and RHS is outside that
1883 narrower range. Future work. */
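/* For example, if (b_1 == 42) with a [0, 1] ranged b_1 can never be
   true; substituting 0 for b_1 lets the folding below resolve the
   conditional.  */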
1884 if (TREE_CODE (lhs) == SSA_NAME
1885 && ssa_name_has_boolean_range (lhs)
1886 && TREE_CODE (rhs) == INTEGER_CST
1887 && ! (integer_zerop (rhs) || integer_onep (rhs)))
1889 gimple_cond_set_lhs (as_a <gcond *> (stmt),
1890 fold_convert (TREE_TYPE (lhs),
1891 integer_zero_node));
1892 gimple_set_modified (stmt, true);
1896 update_stmt_if_modified (stmt);
1897 eliminate_redundant_computations (&si, m_const_and_copies,
1898 m_avail_exprs_stack);
1899 stmt = gsi_stmt (si);
1901 /* Perform simple redundant store elimination. */
1902 if (gimple_assign_single_p (stmt)
1903 && TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
1905 tree lhs = gimple_assign_lhs (stmt);
1906 tree rhs = gimple_assign_rhs1 (stmt);
1907 tree cached_lhs;
1908 gassign *new_stmt;
1909 rhs = dom_valueize (rhs);
1910 /* Build a new statement with the RHS and LHS exchanged. */
1911 if (TREE_CODE (rhs) == SSA_NAME)
1913 gimple *defstmt = SSA_NAME_DEF_STMT (rhs);
1914 new_stmt = gimple_build_assign (rhs, lhs);
1915 SSA_NAME_DEF_STMT (rhs) = defstmt;
1917 else
1918 new_stmt = gimple_build_assign (rhs, lhs);
1919 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
1920 cached_lhs = m_avail_exprs_stack->lookup_avail_expr (new_stmt, false,
1921 false);
1922 if (cached_lhs && operand_equal_p (rhs, cached_lhs, 0))
1924 basic_block bb = gimple_bb (stmt);
1925 unlink_stmt_vdef (stmt);
1926 if (gsi_remove (&si, true))
1928 bitmap_set_bit (need_eh_cleanup, bb->index);
1929 if (dump_file && (dump_flags & TDF_DETAILS))
1930 fprintf (dump_file, " Flagged to clear EH edges.\n");
1932 release_defs (stmt);
1933 return retval;
1937 /* If this statement was not redundant, we may still be able to simplify
1938 it, which may in turn allow other parts of DOM or other passes to do
1939 a better job. */
1940 test_for_singularity (stmt, m_dummy_cond, m_avail_exprs_stack);
1943 /* Record any additional equivalences created by this statement. */
1944 if (is_gimple_assign (stmt))
1945 record_equivalences_from_stmt (stmt, may_optimize_p, m_avail_exprs_stack);
1947 /* If STMT is a COND_EXPR or SWITCH_EXPR and it was modified, then we may
1948 know where it goes. */
1949 if (gimple_modified_p (stmt) || modified_p)
1951 tree val = NULL;
1953 if (gimple_code (stmt) == GIMPLE_COND)
1954 val = fold_binary_loc (gimple_location (stmt),
1955 gimple_cond_code (stmt), boolean_type_node,
1956 gimple_cond_lhs (stmt),
1957 gimple_cond_rhs (stmt));
1958 else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
1959 val = gimple_switch_index (swtch_stmt);
1961 if (val && TREE_CODE (val) == INTEGER_CST)
1963 retval = find_taken_edge (bb, val);
1964 if (retval)
1966 /* Fix the condition to be either true or false. */
1967 if (gimple_code (stmt) == GIMPLE_COND)
1969 if (integer_zerop (val))
1970 gimple_cond_make_false (as_a <gcond *> (stmt));
1971 else if (integer_onep (val))
1972 gimple_cond_make_true (as_a <gcond *> (stmt));
1973 else
1974 gcc_unreachable ();
1976 gimple_set_modified (stmt, true);
1979 /* Further simplifications may be possible. */
1980 cfg_altered = true;
1984 update_stmt_if_modified (stmt);
1986 /* If we simplified a statement in such a way that it can be shown
1987 not to trap, update the eh information and the cfg to match. */
1988 if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt))
1990 bitmap_set_bit (need_eh_cleanup, bb->index);
1991 if (dump_file && (dump_flags & TDF_DETAILS))
1992 fprintf (dump_file, " Flagged to clear EH edges.\n");
1995 if (!was_noreturn
1996 && is_gimple_call (stmt) && gimple_call_noreturn_p (stmt))
1997 need_noreturn_fixup.safe_push (stmt);
1999 return retval;