/* Optimization of PHI nodes by converting them into straightline code.
   Copyright (C) 2004-2013 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "hash-table.h"
#include "tm.h"
#include "ggc.h"
#include "tree.h"
#include "flags.h"
#include "tm_p.h"
#include "basic-block.h"
#include "gimple.h"
#include "gimple-ssa.h"
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "tree-ssanames.h"
#include "tree-dfa.h"
#include "tree-pass.h"
#include "langhooks.h"
#include "pointer-set.h"
#include "domwalk.h"
#include "cfgloop.h"
#include "tree-data-ref.h"
#include "gimple-pretty-print.h"
#include "insn-config.h"
#include "expr.h"
#include "optabs.h"
#include "tree-scalar-evolution.h"

#ifndef HAVE_conditional_move
#define HAVE_conditional_move (0)
#endif

static unsigned int tree_ssa_phiopt (void);
static unsigned int tree_ssa_phiopt_worker (bool, bool);
static bool conditional_replacement (basic_block, basic_block,
                                     edge, edge, gimple, tree, tree);
static int value_replacement (basic_block, basic_block,
                              edge, edge, gimple, tree, tree);
static bool minmax_replacement (basic_block, basic_block,
                                edge, edge, gimple, tree, tree);
static bool abs_replacement (basic_block, basic_block,
                             edge, edge, gimple, tree, tree);
static bool cond_store_replacement (basic_block, basic_block, edge, edge,
                                    struct pointer_set_t *);
static bool cond_if_else_store_replacement (basic_block, basic_block, basic_block);
static struct pointer_set_t * get_non_trapping (void);
static void replace_phi_edge_with_variable (basic_block, edge, gimple, tree);
static void hoist_adjacent_loads (basic_block, basic_block,
                                  basic_block, basic_block);
static bool gate_hoist_loads (void);

/* This pass tries to replace an if-then-else block with an
   assignment.  We have four kinds of transformations.  Some of these
   transformations are also performed by the ifcvt RTL optimizer.

   Conditional Replacement
   -----------------------

   This transformation, implemented in conditional_replacement,
   replaces

     bb0:
      if (cond) goto bb2; else goto bb1;
     bb1:
     bb2:
      x = PHI <0 (bb1), 1 (bb0), ...>;

   with

     bb0:
      x' = cond;
      goto bb2;
     bb2:
      x = PHI <x' (bb0), ...>;

   We remove bb1 as it becomes unreachable.  This occurs often due to
   gimplification of conditionals.
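
   As an illustration (a hypothetical C fragment, not from this file),
   gimplifying

     x = cond ? 1 : 0;

   produces exactly the diamond above with PHI arguments 0 and 1, which
   this transformation collapses back into the single assignment
   x' = cond.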

   Value Replacement
   -----------------

   This transformation, implemented in value_replacement, replaces

     bb0:
      if (a != b) goto bb2; else goto bb1;
     bb1:
     bb2:
      x = PHI <a (bb1), b (bb0), ...>;

   with

     bb0:
     bb2:
      x = PHI <b (bb0), ...>;

   This opportunity can sometimes occur as a result of other
   optimizations.

   Another case caught by value replacement looks like this:

     bb0:
      t1 = a == CONST;
      t2 = b > c;
      t3 = t1 & t2;
      if (t3 != 0) goto bb1; else goto bb2;
     bb1:
     bb2:
      x = PHI (CONST, a)

   Gets replaced with:
     bb0:
     bb2:
      t1 = a == CONST;
      t2 = b > c;
      t3 = t1 & t2;
      x = a;
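
   As an illustration (a hypothetical C fragment, not from this file),
   the first form arises from

     x = (a != b) ? b : a;

   On the path where a == b holds, the two PHI arguments denote the
   same value, so the conditional is redundant and x = b suffices.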

   ABS Replacement
   ---------------

   This transformation, implemented in abs_replacement, replaces

     bb0:
      if (a >= 0) goto bb2; else goto bb1;
     bb1:
      x = -a;
     bb2:
      x = PHI <x (bb1), a (bb0), ...>;

   with

     bb0:
      x' = ABS_EXPR< a >;
     bb2:
      x = PHI <x' (bb0), ...>;
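
   As an illustration (a hypothetical C fragment, not from this file),
   a hand-written absolute value such as

     x = (a >= 0) ? a : -a;

   matches this pattern and becomes a single ABS_EXPR.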

   MIN/MAX Replacement
   -------------------

   This transformation, implemented in minmax_replacement, replaces

     bb0:
      if (a <= b) goto bb2; else goto bb1;
     bb1:
     bb2:
      x = PHI <b (bb1), a (bb0), ...>;

   with

     bb0:
      x' = MIN_EXPR (a, b)
     bb2:
      x = PHI <x' (bb0), ...>;

   A similar transformation is done for MAX_EXPR.
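
   As an illustration (a hypothetical C fragment, not from this file),
   an open-coded minimum such as

     x = (a <= b) ? a : b;

   matches this pattern and becomes a single MIN_EXPR <a, b>.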

   This pass also performs a fifth transformation of a slightly different
   flavor.

   Adjacent Load Hoisting
   ----------------------

   This transformation replaces

     bb0:
      if (...) goto bb2; else goto bb1;
     bb1:
      x1 = (<expr>).field1;
      goto bb3;
     bb2:
      x2 = (<expr>).field2;
     bb3:
      # x = PHI <x1, x2>;

   with

     bb0:
      x1 = (<expr>).field1;
      x2 = (<expr>).field2;
      if (...) goto bb2; else goto bb1;
     bb1:
      goto bb3;
     bb2:
     bb3:
      # x = PHI <x1, x2>;

   The purpose of this transformation is to enable generation of conditional
   move instructions such as Intel CMOVE or PowerPC ISEL.  Because one of
   the loads is speculative, the transformation is restricted to very
   specific cases to avoid introducing a page fault.  We are looking for
   the common idiom:

     if (...)
       x = y->left;
     else
       x = y->right;

   where left and right are typically adjacent pointers in a tree structure.  */

static unsigned int
tree_ssa_phiopt (void)
{
  return tree_ssa_phiopt_worker (false, gate_hoist_loads ());
}

/* This pass tries to transform conditional stores into unconditional
   ones, enabling further simplifications with the simpler then and else
   blocks.  In particular it replaces this:

     bb0:
       if (cond) goto bb2; else goto bb1;
     bb1:
       *p = RHS;
     bb2:

   with

     bb0:
       if (cond) goto bb1; else goto bb2;
     bb1:
       condtmp' = *p;
     bb2:
       condtmp = PHI <RHS, condtmp'>
       *p = condtmp;

   This transformation can only be done under several constraints,
   documented below.  It also replaces:

     bb0:
       if (cond) goto bb2; else goto bb1;
     bb1:
       *p = RHS1;
       goto bb3;
     bb2:
       *p = RHS2;
     bb3:

   with

     bb0:
       if (cond) goto bb3; else goto bb1;
     bb1:
     bb3:
       condtmp = PHI <RHS1, RHS2>
       *p = condtmp;  */
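
/* In source terms (a hypothetical fragment, not from this file), the
   first pattern corresponds to

     if (cond)
       *p = rhs;

   which is rewritten into an unconditional store, roughly

     tmp = cond ? rhs : *p;
     *p = tmp;

   This is only valid when *p is known not to trap, which the
   get_non_trapping machinery below establishes.  */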

static unsigned int
tree_ssa_cs_elim (void)
{
  unsigned todo;
  /* ???  We are not interested in loop related info, but the following
     will create it, ICEing as we didn't init loops with pre-headers.
     An interfacing issue of find_data_references_in_bb.  */
  loop_optimizer_init (LOOPS_NORMAL);
  scev_initialize ();
  todo = tree_ssa_phiopt_worker (true, false);
  scev_finalize ();
  loop_optimizer_finalize ();
  return todo;
}

/* Return the singleton PHI in the SEQ of PHIs for edges E0 and E1.  */

static gimple
single_non_singleton_phi_for_edges (gimple_seq seq, edge e0, edge e1)
{
  gimple_stmt_iterator i;
  gimple phi = NULL;
  if (gimple_seq_singleton_p (seq))
    return gsi_stmt (gsi_start (seq));
  for (i = gsi_start (seq); !gsi_end_p (i); gsi_next (&i))
    {
      gimple p = gsi_stmt (i);
      /* If the PHI arguments are equal then we can skip this PHI.  */
      if (operand_equal_for_phi_arg_p (gimple_phi_arg_def (p, e0->dest_idx),
                                       gimple_phi_arg_def (p, e1->dest_idx)))
        continue;

      /* If we already found a PHI whose arguments for these two edges
         differ, then there is no singleton; return NULL.  */
      if (phi)
        return NULL;

      phi = p;
    }
  return phi;
}

/* The core routine of conditional store replacement and normal
   phi optimizations.  Both share much of the infrastructure in how
   to match applicable basic block patterns.  DO_STORE_ELIM is true
   when we want to do conditional store replacement, false otherwise.
   DO_HOIST_LOADS is true when we want to hoist adjacent loads out
   of diamond control flow patterns, false otherwise.  */
static unsigned int
tree_ssa_phiopt_worker (bool do_store_elim, bool do_hoist_loads)
{
  basic_block bb;
  basic_block *bb_order;
  unsigned n, i;
  bool cfgchanged = false;
  struct pointer_set_t *nontrap = 0;

  if (do_store_elim)
    /* Calculate the set of non-trapping memory accesses.  */
    nontrap = get_non_trapping ();

  /* Search every basic block for COND_EXPR we may be able to optimize.

     We walk the blocks in order that guarantees that a block with
     a single predecessor is processed before the predecessor.
     This ensures that we collapse inner ifs before visiting the
     outer ones, and also that we do not try to visit a removed
     block.  */
  bb_order = single_pred_before_succ_order ();
  n = n_basic_blocks - NUM_FIXED_BLOCKS;

  for (i = 0; i < n; i++)
    {
      gimple cond_stmt, phi;
      basic_block bb1, bb2;
      edge e1, e2;
      tree arg0, arg1;

      bb = bb_order[i];

      cond_stmt = last_stmt (bb);
      /* Check to see if the last statement is a GIMPLE_COND.  */
      if (!cond_stmt
          || gimple_code (cond_stmt) != GIMPLE_COND)
        continue;

      e1 = EDGE_SUCC (bb, 0);
      bb1 = e1->dest;
      e2 = EDGE_SUCC (bb, 1);
      bb2 = e2->dest;

      /* We cannot do the optimization on abnormal edges.  */
      if ((e1->flags & EDGE_ABNORMAL) != 0
          || (e2->flags & EDGE_ABNORMAL) != 0)
        continue;

      /* Punt if bb1 has no successors, bb2 does not exist, or bb2 has
         no successors.  */
      if (EDGE_COUNT (bb1->succs) == 0
          || bb2 == NULL
          || EDGE_COUNT (bb2->succs) == 0)
        continue;

      /* Find the bb which is the fall through to the other.  */
      if (EDGE_SUCC (bb1, 0)->dest == bb2)
        ;
      else if (EDGE_SUCC (bb2, 0)->dest == bb1)
        {
          basic_block bb_tmp = bb1;
          edge e_tmp = e1;
          bb1 = bb2;
          bb2 = bb_tmp;
          e1 = e2;
          e2 = e_tmp;
        }
      else if (do_store_elim
               && EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest)
        {
          basic_block bb3 = EDGE_SUCC (bb1, 0)->dest;

          if (!single_succ_p (bb1)
              || (EDGE_SUCC (bb1, 0)->flags & EDGE_FALLTHRU) == 0
              || !single_succ_p (bb2)
              || (EDGE_SUCC (bb2, 0)->flags & EDGE_FALLTHRU) == 0
              || EDGE_COUNT (bb3->preds) != 2)
            continue;
          if (cond_if_else_store_replacement (bb1, bb2, bb3))
            cfgchanged = true;
          continue;
        }
      else if (do_hoist_loads
               && EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest)
        {
          basic_block bb3 = EDGE_SUCC (bb1, 0)->dest;

          if (!FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (cond_stmt)))
              && single_succ_p (bb1)
              && single_succ_p (bb2)
              && single_pred_p (bb1)
              && single_pred_p (bb2)
              && EDGE_COUNT (bb->succs) == 2
              && EDGE_COUNT (bb3->preds) == 2
              /* If one edge or the other is dominant, a conditional move
                 is likely to perform worse than the well-predicted branch.  */
              && !predictable_edge_p (EDGE_SUCC (bb, 0))
              && !predictable_edge_p (EDGE_SUCC (bb, 1)))
            hoist_adjacent_loads (bb, bb1, bb2, bb3);
          continue;
        }
      else
        continue;

      e1 = EDGE_SUCC (bb1, 0);

      /* Make sure that bb1 is just a fall through.  */
      if (!single_succ_p (bb1)
          || (e1->flags & EDGE_FALLTHRU) == 0)
        continue;

      /* Also make sure that bb1 has only one predecessor and that it
         is bb.  */
      if (!single_pred_p (bb1)
          || single_pred (bb1) != bb)
        continue;

      if (do_store_elim)
        {
          /* bb1 is the middle block, bb2 the join block, bb the split block,
             e1 the fallthrough edge from bb1 to bb2.  We can't do the
             optimization if the join block has more than two predecessors.  */
          if (EDGE_COUNT (bb2->preds) > 2)
            continue;
          if (cond_store_replacement (bb1, bb2, e1, e2, nontrap))
            cfgchanged = true;
        }
      else
        {
          gimple_seq phis = phi_nodes (bb2);
          gimple_stmt_iterator gsi;
          bool candorest = true;

          /* Value replacement can work with more than one PHI
             so try that first.  */
          for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
            {
              phi = gsi_stmt (gsi);
              arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
              arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
              if (value_replacement (bb, bb1, e1, e2, phi, arg0, arg1) == 2)
                {
                  candorest = false;
                  cfgchanged = true;
                  break;
                }
            }

          if (!candorest)
            continue;

          phi = single_non_singleton_phi_for_edges (phis, e1, e2);
          if (!phi)
            continue;

          arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
          arg1 = gimple_phi_arg_def (phi, e2->dest_idx);

          /* Something is wrong if we cannot find the arguments in the PHI
             node.  */
          gcc_assert (arg0 != NULL && arg1 != NULL);

          /* Do the replacement of conditional if it can be done.  */
          if (conditional_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
            cfgchanged = true;
          else if (abs_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
            cfgchanged = true;
          else if (minmax_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
            cfgchanged = true;
        }
    }

  free (bb_order);

  if (do_store_elim)
    pointer_set_destroy (nontrap);
  /* If the CFG has changed, we should cleanup the CFG.  */
  if (cfgchanged && do_store_elim)
    {
      /* In cond-store replacement we have added some loads on edges
         and new VOPS (as we moved the store, and created a load).  */
      gsi_commit_edge_inserts ();
      return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
    }
  else if (cfgchanged)
    return TODO_cleanup_cfg;
  return 0;
}

/* Replace PHI node element whose edge is E in block BB with variable NEW.
   Remove the edge from COND_BLOCK which does not lead to BB (COND_BLOCK
   is known to have two edges, one of which must reach BB).  */

static void
replace_phi_edge_with_variable (basic_block cond_block,
                                edge e, gimple phi, tree new_tree)
{
  basic_block bb = gimple_bb (phi);
  basic_block block_to_remove;
  gimple_stmt_iterator gsi;

  /* Change the PHI argument to new.  */
  SET_USE (PHI_ARG_DEF_PTR (phi, e->dest_idx), new_tree);

  /* Remove the empty basic block.  */
  if (EDGE_SUCC (cond_block, 0)->dest == bb)
    {
      EDGE_SUCC (cond_block, 0)->flags |= EDGE_FALLTHRU;
      EDGE_SUCC (cond_block, 0)->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
      EDGE_SUCC (cond_block, 0)->probability = REG_BR_PROB_BASE;
      EDGE_SUCC (cond_block, 0)->count += EDGE_SUCC (cond_block, 1)->count;

      block_to_remove = EDGE_SUCC (cond_block, 1)->dest;
    }
  else
    {
      EDGE_SUCC (cond_block, 1)->flags |= EDGE_FALLTHRU;
      EDGE_SUCC (cond_block, 1)->flags
        &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
      EDGE_SUCC (cond_block, 1)->probability = REG_BR_PROB_BASE;
      EDGE_SUCC (cond_block, 1)->count += EDGE_SUCC (cond_block, 0)->count;

      block_to_remove = EDGE_SUCC (cond_block, 0)->dest;
    }
  delete_basic_block (block_to_remove);

  /* Eliminate the COND_EXPR at the end of COND_BLOCK.  */
  gsi = gsi_last_bb (cond_block);
  gsi_remove (&gsi, true);

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file,
             "COND_EXPR in block %d and PHI in block %d converted to straightline code.\n",
             cond_block->index,
             bb->index);
}

/* The function conditional_replacement does the main work of doing the
   conditional replacement.  Return true if the replacement is done.
   Otherwise return false.
   BB is the basic block where the replacement is going to be done.  ARG0
   is argument 0 from PHI.  Likewise for ARG1.  */

static bool
conditional_replacement (basic_block cond_bb, basic_block middle_bb,
                         edge e0, edge e1, gimple phi,
                         tree arg0, tree arg1)
{
  tree result;
  gimple stmt, new_stmt;
  tree cond;
  gimple_stmt_iterator gsi;
  edge true_edge, false_edge;
  tree new_var, new_var2;
  bool neg;

  /* FIXME: Gimplification of complex type is too hard for now.  */
  /* We aren't prepared to handle vectors either (and it is a question
     if it would be worthwhile anyway).  */
  if (!(INTEGRAL_TYPE_P (TREE_TYPE (arg0))
        || POINTER_TYPE_P (TREE_TYPE (arg0)))
      || !(INTEGRAL_TYPE_P (TREE_TYPE (arg1))
           || POINTER_TYPE_P (TREE_TYPE (arg1))))
    return false;

  /* If the PHI arguments are the constants 0 and 1, or 0 and -1, then
     convert it to the conditional.  */
  if ((integer_zerop (arg0) && integer_onep (arg1))
      || (integer_zerop (arg1) && integer_onep (arg0)))
    neg = false;
  else if ((integer_zerop (arg0) && integer_all_onesp (arg1))
           || (integer_zerop (arg1) && integer_all_onesp (arg0)))
    neg = true;
  else
    return false;

  if (!empty_block_p (middle_bb))
    return false;

  /* At this point we know we have a GIMPLE_COND with two successors.
     One successor is BB, the other successor is an empty block which
     falls through into BB.

     There is a single PHI node at the join point (BB) and its arguments
     are constants (0, 1) or (0, -1).

     So, given the condition COND, and the two PHI arguments, we can
     rewrite this PHI into non-branching code:

       dest = (COND) or dest = COND'

     We use the condition as-is if the argument associated with the
     true edge has the value one or the argument associated with the
     false edge has the value zero.  Note that those conditions are not
     the same since only one of the outgoing edges from the GIMPLE_COND
     will directly reach BB and thus be associated with an argument.  */

  stmt = last_stmt (cond_bb);
  result = PHI_RESULT (phi);

  /* To handle special cases like floating point comparison, it is easier and
     less error-prone to build a tree and gimplify it on the fly though it is
     less efficient.  */
  cond = fold_build2_loc (gimple_location (stmt),
                          gimple_cond_code (stmt), boolean_type_node,
                          gimple_cond_lhs (stmt), gimple_cond_rhs (stmt));

  /* We need to know which is the true edge and which is the false
     edge so that we know when to invert the condition below.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
  if ((e0 == true_edge && integer_zerop (arg0))
      || (e0 == false_edge && !integer_zerop (arg0))
      || (e1 == true_edge && integer_zerop (arg1))
      || (e1 == false_edge && !integer_zerop (arg1)))
    cond = fold_build1_loc (gimple_location (stmt),
                            TRUTH_NOT_EXPR, TREE_TYPE (cond), cond);

  if (neg)
    {
      cond = fold_convert_loc (gimple_location (stmt),
                               TREE_TYPE (result), cond);
      cond = fold_build1_loc (gimple_location (stmt),
                              NEGATE_EXPR, TREE_TYPE (cond), cond);
    }

  /* Insert our new statements at the end of conditional block before the
     COND_STMT.  */
  gsi = gsi_for_stmt (stmt);
  new_var = force_gimple_operand_gsi (&gsi, cond, true, NULL, true,
                                      GSI_SAME_STMT);

  if (!useless_type_conversion_p (TREE_TYPE (result), TREE_TYPE (new_var)))
    {
      source_location locus_0, locus_1;

      new_var2 = make_ssa_name (TREE_TYPE (result), NULL);
      new_stmt = gimple_build_assign_with_ops (CONVERT_EXPR, new_var2,
                                               new_var, NULL);
      gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
      new_var = new_var2;

      /* Set the locus to the first argument, unless it doesn't have one.  */
      locus_0 = gimple_phi_arg_location (phi, 0);
      locus_1 = gimple_phi_arg_location (phi, 1);
      if (locus_0 == UNKNOWN_LOCATION)
        locus_0 = locus_1;
      gimple_set_location (new_stmt, locus_0);
    }

  replace_phi_edge_with_variable (cond_bb, e1, phi, new_var);

  /* Note that we optimized this PHI.  */
  return true;
}

/* Update *ARG which is defined in STMT so that it contains the
   computed value if that seems profitable.  Return true if the
   statement is made dead by that rewriting.  */

static bool
jump_function_from_stmt (tree *arg, gimple stmt)
{
  enum tree_code code = gimple_assign_rhs_code (stmt);
  if (code == ADDR_EXPR)
    {
      /* For arg = &p->i transform it to p, if possible.  */
      tree rhs1 = gimple_assign_rhs1 (stmt);
      HOST_WIDE_INT offset;
      tree tem = get_addr_base_and_unit_offset (TREE_OPERAND (rhs1, 0),
                                                &offset);
      if (tem
          && TREE_CODE (tem) == MEM_REF
          && (mem_ref_offset (tem) + double_int::from_shwi (offset)).is_zero ())
        {
          *arg = TREE_OPERAND (tem, 0);
          return true;
        }
    }
  /* TODO: Much like IPA-CP jump-functions we want to handle constant
     additions symbolically here, and we'd need to update the comparison
     code that compares the arg + cst tuples in our caller.  For now the
     code above exactly handles the VEC_BASE pattern from vec.h.  */
  return false;
}

/* RHS is a source argument in a BIT_AND_EXPR which feeds a conditional
   of the form SSA_NAME NE 0.

   If RHS is fed by a simple EQ_EXPR comparison of two values, see if
   the two input values of the EQ_EXPR match arg0 and arg1.

   If so update *code and return TRUE.  Otherwise return FALSE.  */

static bool
rhs_is_fed_for_value_replacement (const_tree arg0, const_tree arg1,
                                  enum tree_code *code, const_tree rhs)
{
  /* Obviously if RHS is not an SSA_NAME, we can't look at the defining
     statement.  */
  if (TREE_CODE (rhs) == SSA_NAME)
    {
      gimple def1 = SSA_NAME_DEF_STMT (rhs);

      /* Verify the defining statement has an EQ_EXPR on the RHS.  */
      if (is_gimple_assign (def1) && gimple_assign_rhs_code (def1) == EQ_EXPR)
        {
          /* Finally verify the source operands of the EQ_EXPR are equal
             to arg0 and arg1.  */
          tree op0 = gimple_assign_rhs1 (def1);
          tree op1 = gimple_assign_rhs2 (def1);
          if ((operand_equal_for_phi_arg_p (arg0, op0)
               && operand_equal_for_phi_arg_p (arg1, op1))
              || (operand_equal_for_phi_arg_p (arg0, op1)
                  && operand_equal_for_phi_arg_p (arg1, op0)))
            {
              /* We will perform the optimization.  */
              *code = gimple_assign_rhs_code (def1);
              return true;
            }
        }
    }
  return false;
}

/* Return TRUE if arg0/arg1 are equal to the rhs/lhs or lhs/rhs of COND.

   Also return TRUE if arg0/arg1 are equal to the source arguments of
   an EQ comparison feeding a BIT_AND_EXPR which feeds COND.

   Return FALSE otherwise.  */

static bool
operand_equal_for_value_replacement (const_tree arg0, const_tree arg1,
                                     enum tree_code *code, gimple cond)
{
  gimple def;
  tree lhs = gimple_cond_lhs (cond);
  tree rhs = gimple_cond_rhs (cond);

  if ((operand_equal_for_phi_arg_p (arg0, lhs)
       && operand_equal_for_phi_arg_p (arg1, rhs))
      || (operand_equal_for_phi_arg_p (arg1, lhs)
          && operand_equal_for_phi_arg_p (arg0, rhs)))
    return true;

  /* Now handle more complex case where we have an EQ comparison
     which feeds a BIT_AND_EXPR which feeds COND.

     First verify that COND is of the form SSA_NAME NE 0.  */
  if (*code != NE_EXPR || !integer_zerop (rhs)
      || TREE_CODE (lhs) != SSA_NAME)
    return false;

  /* Now ensure that SSA_NAME is set by a BIT_AND_EXPR.  */
  def = SSA_NAME_DEF_STMT (lhs);
  if (!is_gimple_assign (def) || gimple_assign_rhs_code (def) != BIT_AND_EXPR)
    return false;

  /* Now verify arg0/arg1 correspond to the source arguments of an
     EQ comparison feeding the BIT_AND_EXPR.  */

  tree tmp = gimple_assign_rhs1 (def);
  if (rhs_is_fed_for_value_replacement (arg0, arg1, code, tmp))
    return true;

  tmp = gimple_assign_rhs2 (def);
  if (rhs_is_fed_for_value_replacement (arg0, arg1, code, tmp))
    return true;

  return false;
}

/* The function value_replacement does the main work of doing the value
   replacement.  Return non-zero if the replacement is done.  Otherwise return
   0.  If we remove the middle basic block, return 2.
   BB is the basic block where the replacement is going to be done.  ARG0
   is argument 0 from the PHI.  Likewise for ARG1.  */

static int
value_replacement (basic_block cond_bb, basic_block middle_bb,
                   edge e0, edge e1, gimple phi,
                   tree arg0, tree arg1)
{
  gimple_stmt_iterator gsi;
  gimple cond;
  edge true_edge, false_edge;
  enum tree_code code;
  bool empty_or_with_defined_p = true;

  /* If the type says honor signed zeros we cannot do this
     optimization.  */
  if (HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (arg1))))
    return 0;

  /* If there is a statement in MIDDLE_BB that defines one of the PHI
     arguments, then adjust arg0 or arg1.  */
  gsi = gsi_after_labels (middle_bb);
  if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
    gsi_next_nondebug (&gsi);
  while (!gsi_end_p (gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      tree lhs;
      gsi_next_nondebug (&gsi);
      if (!is_gimple_assign (stmt))
        {
          empty_or_with_defined_p = false;
          continue;
        }
      /* Now try to adjust arg0 or arg1 according to the computation
         in the statement.  */
      lhs = gimple_assign_lhs (stmt);
      if (!(lhs == arg0
            && jump_function_from_stmt (&arg0, stmt))
          || (lhs == arg1
              && jump_function_from_stmt (&arg1, stmt)))
        empty_or_with_defined_p = false;
    }

  cond = last_stmt (cond_bb);
  code = gimple_cond_code (cond);

  /* This transformation is only valid for equality comparisons.  */
  if (code != NE_EXPR && code != EQ_EXPR)
    return 0;

  /* We need to know which is the true edge and which is the false
     edge so that we know which PHI argument goes with which path.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);

  /* At this point we know we have a COND_EXPR with two successors.
     One successor is BB, the other successor is an empty block which
     falls through into BB.

     The condition for the COND_EXPR is known to be NE_EXPR or EQ_EXPR.

     There is a single PHI node at the join point (BB) with two arguments.

     We now need to verify that the two arguments in the PHI node match
     the two arguments to the equality comparison.  */

  if (operand_equal_for_value_replacement (arg0, arg1, &code, cond))
    {
      edge e;
      tree arg;

      /* For NE_EXPR, we want to build an assignment result = arg where
         arg is the PHI argument associated with the true edge.  For
         EQ_EXPR we want the PHI argument associated with the false edge.  */
      e = (code == NE_EXPR ? true_edge : false_edge);

      /* Unfortunately, E may not reach BB (it may instead have gone to
         OTHER_BLOCK).  If that is the case, then we want the single outgoing
         edge from OTHER_BLOCK which reaches BB and represents the desired
         path from COND_BLOCK.  */
      if (e->dest == middle_bb)
        e = single_succ_edge (e->dest);

      /* Now we know the incoming edge to BB that has the argument for the
         RHS of our new assignment statement.  */
      if (e0 == e)
        arg = arg0;
      else
        arg = arg1;

      /* If the middle basic block was empty or is defining the
         PHI arguments and this is a single phi where the args are different
         for the edges e0 and e1 then we can remove the middle basic block.  */
      if (empty_or_with_defined_p
          && single_non_singleton_phi_for_edges (phi_nodes (gimple_bb (phi)),
                                                 e0, e1))
        {
          replace_phi_edge_with_variable (cond_bb, e1, phi, arg);
          /* Note that we optimized this PHI.  */
          return 2;
        }
      else
        {
          /* Replace the PHI arguments with arg.  */
          SET_PHI_ARG_DEF (phi, e0->dest_idx, arg);
          SET_PHI_ARG_DEF (phi, e1->dest_idx, arg);
          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "PHI ");
              print_generic_expr (dump_file, gimple_phi_result (phi), 0);
              fprintf (dump_file, " reduced for COND_EXPR in block %d to ",
                       cond_bb->index);
              print_generic_expr (dump_file, arg, 0);
              fprintf (dump_file, ".\n");
            }
          return 1;
        }
    }
  return 0;
}

/* The function minmax_replacement does the main work of doing the minmax
   replacement.  Return true if the replacement is done.  Otherwise return
   false.
   BB is the basic block where the replacement is going to be done.  ARG0
   is argument 0 from the PHI.  Likewise for ARG1.  */

static bool
minmax_replacement (basic_block cond_bb, basic_block middle_bb,
                    edge e0, edge e1, gimple phi,
                    tree arg0, tree arg1)
{
  tree result, type;
  gimple cond, new_stmt;
  edge true_edge, false_edge;
  enum tree_code cmp, minmax, ass_code;
  tree smaller, larger, arg_true, arg_false;
  gimple_stmt_iterator gsi, gsi_from;

  type = TREE_TYPE (PHI_RESULT (phi));

  /* The optimization may be unsafe due to NaNs.  */
  if (HONOR_NANS (TYPE_MODE (type)))
    return false;

  cond = last_stmt (cond_bb);
  cmp = gimple_cond_code (cond);

  /* This transformation is only valid for order comparisons.  Record which
     operand is smaller/larger if the result of the comparison is true.  */
  if (cmp == LT_EXPR || cmp == LE_EXPR)
    {
      smaller = gimple_cond_lhs (cond);
      larger = gimple_cond_rhs (cond);
    }
  else if (cmp == GT_EXPR || cmp == GE_EXPR)
    {
      smaller = gimple_cond_rhs (cond);
      larger = gimple_cond_lhs (cond);
    }
  else
    return false;

  /* We need to know which is the true edge and which is the false
     edge so that we know which PHI argument goes with which path.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);

  /* Forward the edges over the middle basic block.  */
  if (true_edge->dest == middle_bb)
    true_edge = EDGE_SUCC (true_edge->dest, 0);
  if (false_edge->dest == middle_bb)
    false_edge = EDGE_SUCC (false_edge->dest, 0);

  if (true_edge == e0)
    {
      gcc_assert (false_edge == e1);
      arg_true = arg0;
      arg_false = arg1;
    }
  else
    {
      gcc_assert (false_edge == e0);
      gcc_assert (true_edge == e1);
      arg_true = arg1;
      arg_false = arg0;
    }

  if (empty_block_p (middle_bb))
    {
      if (operand_equal_for_phi_arg_p (arg_true, smaller)
          && operand_equal_for_phi_arg_p (arg_false, larger))
        {
          /* Case

             if (smaller < larger)
               rslt = smaller;
             else
               rslt = larger;  */
          minmax = MIN_EXPR;
        }
      else if (operand_equal_for_phi_arg_p (arg_false, smaller)
               && operand_equal_for_phi_arg_p (arg_true, larger))
        minmax = MAX_EXPR;
      else
        return false;
    }
  else
    {
      /* Recognize the following case, assuming d <= u:

         if (a <= u)
           b = MAX (a, d);
         x = PHI <b, u>

         This is equivalent to

         b = MAX (a, d);
         x = MIN (b, u);  */

      gimple assign = last_and_only_stmt (middle_bb);
      tree lhs, op0, op1, bound;

      if (!assign
          || gimple_code (assign) != GIMPLE_ASSIGN)
        return false;

      lhs = gimple_assign_lhs (assign);
      ass_code = gimple_assign_rhs_code (assign);
      if (ass_code != MAX_EXPR && ass_code != MIN_EXPR)
        return false;
      op0 = gimple_assign_rhs1 (assign);
      op1 = gimple_assign_rhs2 (assign);

      if (true_edge->src == middle_bb)
        {
          /* We got here if the condition is true, i.e., SMALLER < LARGER.  */
          if (!operand_equal_for_phi_arg_p (lhs, arg_true))
            return false;

          if (operand_equal_for_phi_arg_p (arg_false, larger))
            {
              /* Case

                 if (smaller < larger)
                   {
                     r' = MAX_EXPR (smaller, bound)
                   }
                 r = PHI <r', larger>  --> to be turned to MIN_EXPR.  */
              if (ass_code != MAX_EXPR)
                return false;

              minmax = MIN_EXPR;
              if (operand_equal_for_phi_arg_p (op0, smaller))
                bound = op1;
              else if (operand_equal_for_phi_arg_p (op1, smaller))
                bound = op0;
              else
                return false;

              /* We need BOUND <= LARGER.  */
              if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
                                                  bound, larger)))
                return false;
            }
          else if (operand_equal_for_phi_arg_p (arg_false, smaller))
            {
              /* Case

                 if (smaller < larger)
                   {
                     r' = MIN_EXPR (larger, bound)
                   }
                 r = PHI <r', smaller>  --> to be turned to MAX_EXPR.  */
              if (ass_code != MIN_EXPR)
                return false;

              minmax = MAX_EXPR;
              if (operand_equal_for_phi_arg_p (op0, larger))
                bound = op1;
              else if (operand_equal_for_phi_arg_p (op1, larger))
                bound = op0;
              else
                return false;

              /* We need BOUND >= SMALLER.  */
              if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
                                                  bound, smaller)))
                return false;
            }
          else
            return false;
        }
      else
        {
          /* We got here if the condition is false, i.e., SMALLER > LARGER.  */
          if (!operand_equal_for_phi_arg_p (lhs, arg_false))
            return false;

          if (operand_equal_for_phi_arg_p (arg_true, larger))
            {
              /* Case

                 if (smaller > larger)
                   {
                     r' = MIN_EXPR (smaller, bound)
                   }
                 r = PHI <r', larger>  --> to be turned to MAX_EXPR.  */
              if (ass_code != MIN_EXPR)
                return false;

              minmax = MAX_EXPR;
              if (operand_equal_for_phi_arg_p (op0, smaller))
                bound = op1;
              else if (operand_equal_for_phi_arg_p (op1, smaller))
                bound = op0;
              else
                return false;

              /* We need BOUND >= LARGER.  */
              if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
                                                  bound, larger)))
                return false;
            }
          else if (operand_equal_for_phi_arg_p (arg_true, smaller))
            {
              /* Case

                 if (smaller > larger)
                   {
                     r' = MAX_EXPR (larger, bound)
                   }
                 r = PHI <r', smaller>  --> to be turned to MIN_EXPR.  */
              if (ass_code != MAX_EXPR)
                return false;

              minmax = MIN_EXPR;
              if (operand_equal_for_phi_arg_p (op0, larger))
                bound = op1;
              else if (operand_equal_for_phi_arg_p (op1, larger))
                bound = op0;
              else
                return false;

              /* We need BOUND <= SMALLER.  */
              if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
                                                  bound, smaller)))
                return false;
            }
          else
            return false;
        }

      /* Move the statement from the middle block.  */
      gsi = gsi_last_bb (cond_bb);
      gsi_from = gsi_last_nondebug_bb (middle_bb);
      gsi_move_before (&gsi_from, &gsi);
    }

  /* Emit the statement to compute min/max.  */
  result = duplicate_ssa_name (PHI_RESULT (phi), NULL);
  new_stmt = gimple_build_assign_with_ops (minmax, result, arg0, arg1);
  gsi = gsi_last_bb (cond_bb);
  gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);

  replace_phi_edge_with_variable (cond_bb, e1, phi, result);
  return true;
}

/* The function absolute_replacement does the main work of doing the absolute
   replacement.  Return true if the replacement is done.  Otherwise return
   false.
   bb is the basic block where the replacement is going to be done.  arg0
   is argument 0 from the phi.  Likewise for arg1.  */

static bool
abs_replacement (basic_block cond_bb, basic_block middle_bb,
                 edge e0 ATTRIBUTE_UNUSED, edge e1,
                 gimple phi, tree arg0, tree arg1)
{
  tree result;
  gimple new_stmt, cond;
  gimple_stmt_iterator gsi;
  edge true_edge, false_edge;
  gimple assign;
  edge e;
  tree rhs, lhs;
  bool negate;
  enum tree_code cond_code;

  /* If the type says honor signed zeros we cannot do this
     optimization.  */
  if (HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (arg1))))
    return false;

  /* OTHER_BLOCK must have only one executable statement which must have the
     form arg0 = -arg1 or arg1 = -arg0.  */

  assign = last_and_only_stmt (middle_bb);
  /* If we did not find the proper negation assignment, then we cannot
     optimize.  */
  if (assign == NULL)
    return false;

  /* If we got here, then we have found the only executable statement
     in OTHER_BLOCK.  If it is anything other than arg0 = -arg1 or
     arg1 = -arg0, then we cannot optimize.  */
  if (gimple_code (assign) != GIMPLE_ASSIGN)
    return false;

  lhs = gimple_assign_lhs (assign);

  if (gimple_assign_rhs_code (assign) != NEGATE_EXPR)
    return false;

  rhs = gimple_assign_rhs1 (assign);

  /* The assignment has to be arg0 = -arg1 or arg1 = -arg0.  */
  if (!(lhs == arg0 && rhs == arg1)
      && !(lhs == arg1 && rhs == arg0))
    return false;

  cond = last_stmt (cond_bb);
  result = PHI_RESULT (phi);

  /* Only relationals comparing arg[01] against zero are interesting.  */
  cond_code = gimple_cond_code (cond);
  if (cond_code != GT_EXPR && cond_code != GE_EXPR
      && cond_code != LT_EXPR && cond_code != LE_EXPR)
    return false;

  /* Make sure the conditional is arg[01] OP y.  */
  if (gimple_cond_lhs (cond) != rhs)
    return false;

  if (FLOAT_TYPE_P (TREE_TYPE (gimple_cond_rhs (cond)))
      ? real_zerop (gimple_cond_rhs (cond))
      : integer_zerop (gimple_cond_rhs (cond)))
    ;
  else
    return false;

  /* We need to know which is the true edge and which is the false
     edge so that we know if we have abs or negative abs.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);

  /* For GT_EXPR/GE_EXPR, if the true edge goes to OTHER_BLOCK, then we
     will need to negate the result.  Similarly for LT_EXPR/LE_EXPR if
     the false edge goes to OTHER_BLOCK.  */
  if (cond_code == GT_EXPR || cond_code == GE_EXPR)
    e = true_edge;
  else
    e = false_edge;

  if (e->dest == middle_bb)
    negate = true;
  else
    negate = false;

  result = duplicate_ssa_name (result, NULL);

  if (negate)
    lhs = make_ssa_name (TREE_TYPE (result), NULL);
  else
    lhs = result;

  /* Build the modify expression with abs expression.  */
  new_stmt = gimple_build_assign_with_ops (ABS_EXPR, lhs, rhs, NULL);

  gsi = gsi_last_bb (cond_bb);
  gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);

  if (negate)
    {
      /* Get the right GSI.  We want to insert after the recently
         added ABS_EXPR statement (which we know is the first statement
         in the block).  */
      new_stmt = gimple_build_assign_with_ops (NEGATE_EXPR, result, lhs, NULL);

      gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
    }

  replace_phi_edge_with_variable (cond_bb, e1, phi, result);

  /* Note that we optimized this PHI.  */
  return true;
}

/* Auxiliary functions to determine the set of memory accesses which
   can't trap because they are preceded by accesses to the same memory
   portion.  We do that for MEM_REFs, so we only need to track
   the SSA_NAME of the pointer indirectly referenced.  The algorithm
   simply is a walk over all instructions in dominator order.  When
   we see a MEM_REF we determine if we've already seen the same
   ref anywhere up to the root of the dominator tree.  If we have, the
   current access can't trap.  If we don't see any dominating access,
   the current access might trap, but might also make later accesses
   non-trapping, so we remember it.  We need to be careful with loads
   or stores, for instance a load might not trap, while a store would,
   so if we see a dominating read access this doesn't mean that a later
   write access would not trap.  Hence we also need to differentiate the
   type of access(es) seen.

   ??? We currently are very conservative and assume that a load might
   trap even if a store doesn't (write-only memory).  This probably is
   overly conservative.  */
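
/* A small example of the idea (illustrative, not from this file):

     *p = 0;      // may trap; remembered as a seen store through p
     if (cond)
       *p = 1;    // dominated by the store above: provably non-trapping

   The second store is exactly the kind of access that
   cond_store_replacement is later allowed to sink to the join block.  */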

/* A hash-table of SSA_NAMEs, and in which basic block a MEM_REF
   through it was seen, which would constitute a no-trap region for
   same accesses.  */
struct name_to_bb
{
  unsigned int ssa_name_ver;
  unsigned int phase;
  bool store;
  HOST_WIDE_INT offset, size;
  basic_block bb;
};

/* Hashtable helpers.  */

struct ssa_names_hasher : typed_free_remove <name_to_bb>
{
  typedef name_to_bb value_type;
  typedef name_to_bb compare_type;
  static inline hashval_t hash (const value_type *);
  static inline bool equal (const value_type *, const compare_type *);
};

/* Used for quick clearing of the hash-table when we see calls.
   Hash entries with phase < nt_call_phase are invalid.  */
static unsigned int nt_call_phase;

/* The hash function.  */

inline hashval_t
ssa_names_hasher::hash (const value_type *n)
{
  return n->ssa_name_ver ^ (((hashval_t) n->store) << 31)
         ^ (n->offset << 6) ^ (n->size << 3);
}

/* The equality function of *P1 and *P2.  */

inline bool
ssa_names_hasher::equal (const value_type *n1, const compare_type *n2)
{
  return n1->ssa_name_ver == n2->ssa_name_ver
         && n1->store == n2->store
         && n1->offset == n2->offset
         && n1->size == n2->size;
}

/* The hash table for remembering what we've seen.  */
static hash_table <ssa_names_hasher> seen_ssa_names;

/* We see the expression EXP in basic block BB.  If it's an interesting
   expression (a MEM_REF through an SSA_NAME) possibly insert the
   expression into the set NONTRAP or the hash table of seen expressions.
   STORE is true if this expression is on the LHS, otherwise it's on
   the RHS.  */
static void
add_or_mark_expr (basic_block bb, tree exp,
                  struct pointer_set_t *nontrap, bool store)
{
  HOST_WIDE_INT size;

  if (TREE_CODE (exp) == MEM_REF
      && TREE_CODE (TREE_OPERAND (exp, 0)) == SSA_NAME
      && host_integerp (TREE_OPERAND (exp, 1), 0)
      && (size = int_size_in_bytes (TREE_TYPE (exp))) > 0)
    {
      tree name = TREE_OPERAND (exp, 0);
      struct name_to_bb map;
      name_to_bb **slot;
      struct name_to_bb *n2bb;
      basic_block found_bb = 0;

      /* Try to find the last seen MEM_REF through the same
         SSA_NAME, which can trap.  */
      map.ssa_name_ver = SSA_NAME_VERSION (name);
      map.phase = 0;
      map.bb = 0;
      map.store = store;
      map.offset = tree_low_cst (TREE_OPERAND (exp, 1), 0);
      map.size = size;

      slot = seen_ssa_names.find_slot (&map, INSERT);
      n2bb = *slot;
      if (n2bb && n2bb->phase >= nt_call_phase)
        found_bb = n2bb->bb;

      /* If we've found a trapping MEM_REF, _and_ it dominates EXP
         (it's in a basic block on the path from us to the dominator root)
         then we can't trap.  */
      if (found_bb && (((size_t)found_bb->aux) & 1) == 1)
        {
          pointer_set_insert (nontrap, exp);
        }
      else
        {
          /* EXP might trap, so insert it into the hash table.  */
          if (n2bb)
            {
              n2bb->phase = nt_call_phase;
              n2bb->bb = bb;
            }
          else
            {
              n2bb = XNEW (struct name_to_bb);
              n2bb->ssa_name_ver = SSA_NAME_VERSION (name);
              n2bb->phase = nt_call_phase;
              n2bb->bb = bb;
              n2bb->store = store;
              n2bb->offset = map.offset;
              n2bb->size = size;
              *slot = n2bb;
            }
        }
    }
}

class nontrapping_dom_walker : public dom_walker
{
public:
  nontrapping_dom_walker (cdi_direction direction, pointer_set_t *ps)
    : dom_walker (direction), m_nontrapping (ps) {}

  virtual void before_dom_children (basic_block);
  virtual void after_dom_children (basic_block);

private:
  pointer_set_t *m_nontrapping;
};

/* Called by walk_dominator_tree, when entering the block BB.  */
void
nontrapping_dom_walker::before_dom_children (basic_block bb)
{
  edge e;
  edge_iterator ei;
  gimple_stmt_iterator gsi;

  /* If we haven't seen all our predecessors, clear the hash-table.  */
  FOR_EACH_EDGE (e, ei, bb->preds)
    if ((((size_t)e->src->aux) & 2) == 0)
      {
        nt_call_phase++;
        break;
      }

  /* Mark this BB as being on the path to dominator root and as visited.  */
  bb->aux = (void*)(1 | 2);

  /* And walk the statements in order.  */
  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);

      if (is_gimple_call (stmt) && !nonfreeing_call_p (stmt))
        nt_call_phase++;
      else if (gimple_assign_single_p (stmt) && !gimple_has_volatile_ops (stmt))
        {
          add_or_mark_expr (bb, gimple_assign_lhs (stmt), m_nontrapping, true);
          add_or_mark_expr (bb, gimple_assign_rhs1 (stmt), m_nontrapping, false);
        }
    }
}

/* Called by walk_dominator_tree, when basic block BB is exited.  */
void
nontrapping_dom_walker::after_dom_children (basic_block bb)
{
  /* This BB isn't on the path to dominator root anymore.  */
  bb->aux = (void*)2;
}

/* This is the entry point of gathering non trapping memory accesses.
   It will do a dominator walk over the whole function, and it will
   make use of the bb->aux pointers.  It returns a set of trees
   (the MEM_REFs themselves) which can't trap.  */
static struct pointer_set_t *
get_non_trapping (void)
{
  nt_call_phase = 0;
  pointer_set_t *nontrap = pointer_set_create ();
  seen_ssa_names.create (128);
  /* We're going to do a dominator walk, so ensure that we have
     dominance information.  */
  calculate_dominance_info (CDI_DOMINATORS);

  nontrapping_dom_walker (CDI_DOMINATORS, nontrap)
    .walk (cfun->cfg->x_entry_block_ptr);

  seen_ssa_names.dispose ();

  clear_aux_for_blocks ();
  return nontrap;
}

/* Do the main work of conditional store replacement.  We already know
   that the recognized pattern looks like so:

   split:
     if (cond) goto MIDDLE_BB; else goto JOIN_BB (edge E1)
   MIDDLE_BB:
     something
     fallthrough (edge E0)
   JOIN_BB:
     some more

   We check that MIDDLE_BB contains only one store, that that store
   doesn't trap (not via NOTRAP, but via checking if an access to the same
   memory location dominates us) and that the store has a "simple" RHS.  */

static bool
cond_store_replacement (basic_block middle_bb, basic_block join_bb,
                        edge e0, edge e1, struct pointer_set_t *nontrap)
{
  gimple assign = last_and_only_stmt (middle_bb);
  tree lhs, rhs, name, name2;
  gimple newphi, new_stmt;
  gimple_stmt_iterator gsi;
  source_location locus;

  /* Check if middle_bb contains only one store.  */
  if (!assign
      || !gimple_assign_single_p (assign)
      || gimple_has_volatile_ops (assign))
    return false;

  locus = gimple_location (assign);
  lhs = gimple_assign_lhs (assign);
  rhs = gimple_assign_rhs1 (assign);
  if (TREE_CODE (lhs) != MEM_REF
      || TREE_CODE (TREE_OPERAND (lhs, 0)) != SSA_NAME
      || !is_gimple_reg_type (TREE_TYPE (lhs)))
    return false;

  /* Prove that we can move the store down.  We could also check
     TREE_THIS_NOTRAP here, but in that case we also could move stores,
     whose value is not available readily, which we want to avoid.  */
  if (!pointer_set_contains (nontrap, lhs))
    return false;

  /* Now we've checked the constraints, so do the transformation:
     1) Remove the single store.  */
  gsi = gsi_for_stmt (assign);
  unlink_stmt_vdef (assign);
  gsi_remove (&gsi, true);
  release_defs (assign);

  /* 2) Insert a load from the memory of the store to the temporary
        on the edge which did not contain the store.  */
  lhs = unshare_expr (lhs);
  name = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
  new_stmt = gimple_build_assign (name, lhs);
  gimple_set_location (new_stmt, locus);
  gsi_insert_on_edge (e1, new_stmt);

  /* 3) Create a PHI node at the join block, with one argument
        holding the old RHS, and the other holding the temporary
        where we stored the old memory contents.  */
  name2 = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
  newphi = create_phi_node (name2, join_bb);
  add_phi_arg (newphi, rhs, e0, locus);
  add_phi_arg (newphi, name, e1, locus);

  lhs = unshare_expr (lhs);
  new_stmt = gimple_build_assign (lhs, PHI_RESULT (newphi));

  /* 4) Insert that PHI node.  */
  gsi = gsi_after_labels (join_bb);
  if (gsi_end_p (gsi))
    {
      gsi = gsi_last_bb (join_bb);
      gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
    }
  else
    gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);

  return true;
}

/* Do the main work of conditional store replacement.  */

static bool
cond_if_else_store_replacement_1 (basic_block then_bb, basic_block else_bb,
                                  basic_block join_bb, gimple then_assign,
                                  gimple else_assign)
{
  tree lhs_base, lhs, then_rhs, else_rhs, name;
  source_location then_locus, else_locus;
  gimple_stmt_iterator gsi;
  gimple newphi, new_stmt;

  if (then_assign == NULL
      || !gimple_assign_single_p (then_assign)
      || gimple_clobber_p (then_assign)
      || gimple_has_volatile_ops (then_assign)
      || else_assign == NULL
      || !gimple_assign_single_p (else_assign)
      || gimple_clobber_p (else_assign)
      || gimple_has_volatile_ops (else_assign))
    return false;

  lhs = gimple_assign_lhs (then_assign);
  if (!is_gimple_reg_type (TREE_TYPE (lhs))
      || !operand_equal_p (lhs, gimple_assign_lhs (else_assign), 0))
    return false;

  lhs_base = get_base_address (lhs);
  if (lhs_base == NULL_TREE
      || (!DECL_P (lhs_base) && TREE_CODE (lhs_base) != MEM_REF))
    return false;

  then_rhs = gimple_assign_rhs1 (then_assign);
  else_rhs = gimple_assign_rhs1 (else_assign);
  then_locus = gimple_location (then_assign);
  else_locus = gimple_location (else_assign);

  /* Now we've checked the constraints, so do the transformation:
     1) Remove the stores.  */
  gsi = gsi_for_stmt (then_assign);
  unlink_stmt_vdef (then_assign);
  gsi_remove (&gsi, true);
  release_defs (then_assign);

  gsi = gsi_for_stmt (else_assign);
  unlink_stmt_vdef (else_assign);
  gsi_remove (&gsi, true);
  release_defs (else_assign);

  /* 2) Create a PHI node at the join block, with one argument
        holding the old RHS, and the other holding the temporary
        where we stored the old memory contents.  */
  name = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
  newphi = create_phi_node (name, join_bb);
  add_phi_arg (newphi, then_rhs, EDGE_SUCC (then_bb, 0), then_locus);
  add_phi_arg (newphi, else_rhs, EDGE_SUCC (else_bb, 0), else_locus);

  new_stmt = gimple_build_assign (lhs, PHI_RESULT (newphi));

  /* 3) Insert that PHI node.  */
  gsi = gsi_after_labels (join_bb);
  if (gsi_end_p (gsi))
    {
      gsi = gsi_last_bb (join_bb);
      gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
    }
  else
    gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);

  return true;
}

/* Conditional store replacement.  We already know
   that the recognized pattern looks like so:

   split:
     if (cond) goto THEN_BB; else goto ELSE_BB (edge E1)
   THEN_BB:
     ...
     X = Y;
     ...
     goto JOIN_BB;
   ELSE_BB:
     ...
     X = Z;
     ...
     fallthrough (edge E0)
   JOIN_BB:
     some more

   We check that it is safe to sink the store to JOIN_BB by verifying that
   there are no read-after-write or write-after-write dependencies in
   THEN_BB and ELSE_BB.  */

static bool
cond_if_else_store_replacement (basic_block then_bb, basic_block else_bb,
                                basic_block join_bb)
{
  gimple then_assign = last_and_only_stmt (then_bb);
  gimple else_assign = last_and_only_stmt (else_bb);
  vec<data_reference_p> then_datarefs, else_datarefs;
  vec<ddr_p> then_ddrs, else_ddrs;
  gimple then_store, else_store;
  bool found, ok = false, res;
  struct data_dependence_relation *ddr;
  data_reference_p then_dr, else_dr;
  int i, j;
  tree then_lhs, else_lhs;
  basic_block blocks[3];

  if (MAX_STORES_TO_SINK == 0)
    return false;

  /* Handle the case with single statement in THEN_BB and ELSE_BB.  */
  if (then_assign && else_assign)
    return cond_if_else_store_replacement_1 (then_bb, else_bb, join_bb,
                                             then_assign, else_assign);

  /* Find data references.  */
  then_datarefs.create (1);
  else_datarefs.create (1);
  if ((find_data_references_in_bb (NULL, then_bb, &then_datarefs)
       == chrec_dont_know)
      || !then_datarefs.length ()
      || (find_data_references_in_bb (NULL, else_bb, &else_datarefs)
          == chrec_dont_know)
      || !else_datarefs.length ())
    {
      free_data_refs (then_datarefs);
      free_data_refs (else_datarefs);
      return false;
    }

  /* Find pairs of stores with equal LHS.  */
  stack_vec<gimple, 1> then_stores, else_stores;
  FOR_EACH_VEC_ELT (then_datarefs, i, then_dr)
    {
      if (DR_IS_READ (then_dr))
        continue;

      then_store = DR_STMT (then_dr);
      then_lhs = gimple_get_lhs (then_store);
      found = false;

      FOR_EACH_VEC_ELT (else_datarefs, j, else_dr)
        {
          if (DR_IS_READ (else_dr))
            continue;

          else_store = DR_STMT (else_dr);
          else_lhs = gimple_get_lhs (else_store);

          if (operand_equal_p (then_lhs, else_lhs, 0))
            {
              found = true;
              break;
            }
        }

      if (!found)
        continue;

      then_stores.safe_push (then_store);
      else_stores.safe_push (else_store);
    }

  /* No pairs of stores found.  */
  if (!then_stores.length ()
      || then_stores.length () > (unsigned) MAX_STORES_TO_SINK)
    {
      free_data_refs (then_datarefs);
      free_data_refs (else_datarefs);
      return false;
    }

  /* Compute and check data dependencies in both basic blocks.  */
  then_ddrs.create (1);
  else_ddrs.create (1);
  if (!compute_all_dependences (then_datarefs, &then_ddrs,
                                vNULL, false)
      || !compute_all_dependences (else_datarefs, &else_ddrs,
                                   vNULL, false))
    {
      free_dependence_relations (then_ddrs);
      free_dependence_relations (else_ddrs);
      free_data_refs (then_datarefs);
      free_data_refs (else_datarefs);
      return false;
    }
  blocks[0] = then_bb;
  blocks[1] = else_bb;
  blocks[2] = join_bb;
  renumber_gimple_stmt_uids_in_blocks (blocks, 3);

  /* Check that there are no read-after-write or write-after-write dependencies
     in THEN_BB.  */
  FOR_EACH_VEC_ELT (then_ddrs, i, ddr)
    {
      struct data_reference *dra = DDR_A (ddr);
      struct data_reference *drb = DDR_B (ddr);

      if (DDR_ARE_DEPENDENT (ddr) != chrec_known
          && ((DR_IS_READ (dra) && DR_IS_WRITE (drb)
               && gimple_uid (DR_STMT (dra)) > gimple_uid (DR_STMT (drb)))
              || (DR_IS_READ (drb) && DR_IS_WRITE (dra)
                  && gimple_uid (DR_STMT (drb)) > gimple_uid (DR_STMT (dra)))
              || (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))))
        {
          free_dependence_relations (then_ddrs);
          free_dependence_relations (else_ddrs);
          free_data_refs (then_datarefs);
          free_data_refs (else_datarefs);
          return false;
        }
    }

  /* Check that there are no read-after-write or write-after-write dependencies
     in ELSE_BB.  */
  FOR_EACH_VEC_ELT (else_ddrs, i, ddr)
    {
      struct data_reference *dra = DDR_A (ddr);
      struct data_reference *drb = DDR_B (ddr);

      if (DDR_ARE_DEPENDENT (ddr) != chrec_known
          && ((DR_IS_READ (dra) && DR_IS_WRITE (drb)
               && gimple_uid (DR_STMT (dra)) > gimple_uid (DR_STMT (drb)))
              || (DR_IS_READ (drb) && DR_IS_WRITE (dra)
                  && gimple_uid (DR_STMT (drb)) > gimple_uid (DR_STMT (dra)))
              || (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))))
        {
          free_dependence_relations (then_ddrs);
          free_dependence_relations (else_ddrs);
          free_data_refs (then_datarefs);
          free_data_refs (else_datarefs);
          return false;
        }
    }

  /* Sink stores with same LHS.  */
  FOR_EACH_VEC_ELT (then_stores, i, then_store)
    {
      else_store = else_stores[i];
      res = cond_if_else_store_replacement_1 (then_bb, else_bb, join_bb,
                                              then_store, else_store);
      ok = ok || res;
    }

  free_dependence_relations (then_ddrs);
  free_dependence_relations (else_ddrs);
  free_data_refs (then_datarefs);
  free_data_refs (else_datarefs);

  return ok;
}

/* Return TRUE if STMT has a VUSE whose corresponding VDEF is in BB.  */

static bool
local_mem_dependence (gimple stmt, basic_block bb)
{
  tree vuse = gimple_vuse (stmt);
  gimple def;

  if (!vuse)
    return false;

  def = SSA_NAME_DEF_STMT (vuse);
  return (def && gimple_bb (def) == bb);
}

/* Given a "diamond" control-flow pattern where BB0 tests a condition,
   BB1 and BB2 are "then" and "else" blocks dependent on this test,
   and BB3 rejoins control flow following BB1 and BB2, look for
   opportunities to hoist loads as follows.  If BB3 contains a PHI of
   two loads, one each occurring in BB1 and BB2, and the loads are
   provably of adjacent fields in the same structure, then move both
   loads into BB0.  Of course this can only be done if there are no
   dependencies preventing such motion.

   One of the hoisted loads will always be speculative, so the
   transformation is currently conservative:

    - The fields must be strictly adjacent.
    - The two fields must occupy a single memory block that is
      guaranteed to not cross a page boundary.

   The last is difficult to prove, as such memory blocks should be
   aligned on the minimum of the stack alignment boundary and the
   alignment guaranteed by heap allocation interfaces.  Thus we rely
   on a parameter for the alignment value.

   Provided a good value is used for the last case, the first
   restriction could possibly be relaxed.  */
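
/* As an illustration (hypothetical types, not from this file), the
   transformation targets code such as

     struct node { struct node *left; struct node *right; };
     ...
     x = cond ? y->left : y->right;

   left and right are strictly adjacent fields, so if they also fit
   within one cache line both loads can be issued before the branch.  */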

static void
hoist_adjacent_loads (basic_block bb0, basic_block bb1,
                      basic_block bb2, basic_block bb3)
{
  int param_align = PARAM_VALUE (PARAM_L1_CACHE_LINE_SIZE);
  unsigned param_align_bits = (unsigned) (param_align * BITS_PER_UNIT);
  gimple_stmt_iterator gsi;

  /* Walk the phis in bb3 looking for an opportunity.  We are looking
     for phis of two SSA names, one each of which is defined in bb1 and
     bb2.  */
  for (gsi = gsi_start_phis (bb3); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi_stmt = gsi_stmt (gsi);
      gimple def1, def2, defswap;
      tree arg1, arg2, ref1, ref2, field1, field2, fieldswap;
      tree tree_offset1, tree_offset2, tree_size2, next;
      int offset1, offset2, size2;
      unsigned align1;
      gimple_stmt_iterator gsi2;
      basic_block bb_for_def1, bb_for_def2;

      if (gimple_phi_num_args (phi_stmt) != 2
          || virtual_operand_p (gimple_phi_result (phi_stmt)))
        continue;

      arg1 = gimple_phi_arg_def (phi_stmt, 0);
      arg2 = gimple_phi_arg_def (phi_stmt, 1);

      if (TREE_CODE (arg1) != SSA_NAME
          || TREE_CODE (arg2) != SSA_NAME
          || SSA_NAME_IS_DEFAULT_DEF (arg1)
          || SSA_NAME_IS_DEFAULT_DEF (arg2))
        continue;

      def1 = SSA_NAME_DEF_STMT (arg1);
      def2 = SSA_NAME_DEF_STMT (arg2);

      if ((gimple_bb (def1) != bb1 || gimple_bb (def2) != bb2)
          && (gimple_bb (def2) != bb1 || gimple_bb (def1) != bb2))
        continue;

      /* Check the mode of the arguments to be sure a conditional move
         can be generated for it.  */
      if (optab_handler (movcc_optab, TYPE_MODE (TREE_TYPE (arg1)))
          == CODE_FOR_nothing)
        continue;

      /* Both statements must be assignments whose RHS is a COMPONENT_REF.  */
      if (!gimple_assign_single_p (def1)
          || !gimple_assign_single_p (def2)
          || gimple_has_volatile_ops (def1)
          || gimple_has_volatile_ops (def2))
        continue;

      ref1 = gimple_assign_rhs1 (def1);
      ref2 = gimple_assign_rhs1 (def2);

      if (TREE_CODE (ref1) != COMPONENT_REF
          || TREE_CODE (ref2) != COMPONENT_REF)
        continue;

      /* The zeroth operand of the two component references must be
         identical.  It is not sufficient to compare get_base_address of
         the two references, because this could allow for different
         elements of the same array in the two trees.  It is not safe to
         assume that the existence of one array element implies the
         existence of a different one.  */
      if (!operand_equal_p (TREE_OPERAND (ref1, 0), TREE_OPERAND (ref2, 0), 0))
        continue;

      field1 = TREE_OPERAND (ref1, 1);
      field2 = TREE_OPERAND (ref2, 1);

      /* Check for field adjacency, and ensure field1 comes first.  */
      for (next = DECL_CHAIN (field1);
           next && TREE_CODE (next) != FIELD_DECL;
           next = DECL_CHAIN (next))
        ;

      if (next != field2)
        {
          for (next = DECL_CHAIN (field2);
               next && TREE_CODE (next) != FIELD_DECL;
               next = DECL_CHAIN (next))
            ;

          if (next != field1)
            continue;

          fieldswap = field1;
          field1 = field2;
          field2 = fieldswap;
          defswap = def1;
          def1 = def2;
          def2 = defswap;
        }

      bb_for_def1 = gimple_bb (def1);
      bb_for_def2 = gimple_bb (def2);

      /* Check for proper alignment of the first field.  */
      tree_offset1 = bit_position (field1);
      tree_offset2 = bit_position (field2);
      tree_size2 = DECL_SIZE (field2);

      if (!host_integerp (tree_offset1, 1)
          || !host_integerp (tree_offset2, 1)
          || !host_integerp (tree_size2, 1))
        continue;

      offset1 = TREE_INT_CST_LOW (tree_offset1);
      offset2 = TREE_INT_CST_LOW (tree_offset2);
      size2 = TREE_INT_CST_LOW (tree_size2);
      align1 = DECL_ALIGN (field1) % param_align_bits;

      if (offset1 % BITS_PER_UNIT != 0)
        continue;

      /* For profitability, the two field references should fit within
         a single cache line.  */
      if (align1 + offset2 - offset1 + size2 > param_align_bits)
        continue;
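
      /* Worked example with illustrative numbers (not from this file):
         two adjacent 8-byte pointer fields give offset2 - offset1 = 64
         bits and size2 = 64 bits; assuming 64-bit field alignment,
         align1 = 64, so against a 64-byte line (param_align_bits = 512)
         the test is 64 + 64 + 64 = 192 <= 512 and the pair is
         considered profitable.  */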

      /* The two expressions cannot be dependent upon vdefs defined
         in bb1/bb2.  */
      if (local_mem_dependence (def1, bb_for_def1)
          || local_mem_dependence (def2, bb_for_def2))
        continue;

      /* The conditions are satisfied; hoist the loads from bb1 and bb2 into
         bb0.  We hoist the first one first so that a cache miss is handled
         efficiently regardless of hardware cache-fill policy.  */
      gsi2 = gsi_for_stmt (def1);
      gsi_move_to_bb_end (&gsi2, bb0);
      gsi2 = gsi_for_stmt (def2);
      gsi_move_to_bb_end (&gsi2, bb0);

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file,
                   "\nHoisting adjacent loads from %d and %d into %d: \n",
                   bb_for_def1->index, bb_for_def2->index, bb0->index);
          print_gimple_stmt (dump_file, def1, 0, TDF_VOPS|TDF_MEMSYMS);
          print_gimple_stmt (dump_file, def2, 0, TDF_VOPS|TDF_MEMSYMS);
        }
    }
}

/* Determine whether we should attempt to hoist adjacent loads out of
   diamond patterns in pass_phiopt.  Always hoist loads if
   -fhoist-adjacent-loads is specified and the target machine has
   both a conditional move instruction and a defined cache line size.  */

static bool
gate_hoist_loads (void)
{
  return (flag_hoist_adjacent_loads == 1
          && PARAM_VALUE (PARAM_L1_CACHE_LINE_SIZE)
          && HAVE_conditional_move);
}

/* Always do these optimizations if we have SSA
   trees to work on.  */
static bool
gate_phiopt (void)
{
  return 1;
}

namespace {

const pass_data pass_data_phiopt =
{
  GIMPLE_PASS, /* type */
  "phiopt", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  true, /* has_gate */
  true, /* has_execute */
  TV_TREE_PHIOPT, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_verify_ssa | TODO_verify_flow
    | TODO_verify_stmts ), /* todo_flags_finish */
};

class pass_phiopt : public gimple_opt_pass
{
public:
  pass_phiopt (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_phiopt, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_phiopt (m_ctxt); }
  bool gate () { return gate_phiopt (); }
  unsigned int execute () { return tree_ssa_phiopt (); }

}; // class pass_phiopt

} // anon namespace

gimple_opt_pass *
make_pass_phiopt (gcc::context *ctxt)
{
  return new pass_phiopt (ctxt);
}

static bool
gate_cselim (void)
{
  return flag_tree_cselim;
}

namespace {

const pass_data pass_data_cselim =
{
  GIMPLE_PASS, /* type */
  "cselim", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  true, /* has_gate */
  true, /* has_execute */
  TV_TREE_PHIOPT, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_verify_ssa | TODO_verify_flow
    | TODO_verify_stmts ), /* todo_flags_finish */
};

class pass_cselim : public gimple_opt_pass
{
public:
  pass_cselim (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_cselim, ctxt)
  {}

  /* opt_pass methods: */
  bool gate () { return gate_cselim (); }
  unsigned int execute () { return tree_ssa_cs_elim (); }

}; // class pass_cselim

} // anon namespace

gimple_opt_pass *
make_pass_cselim (gcc::context *ctxt)
{
  return new pass_cselim (ctxt);
}