/* Optimization of PHI nodes by converting them into straightline code.
   Copyright (C) 2004-2013 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "hash-table.h"
#include "tm.h"
#include "ggc.h"
#include "tree.h"
#include "flags.h"
#include "tm_p.h"
#include "basic-block.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "tree-ssanames.h"
#include "tree-dfa.h"
#include "tree-pass.h"
#include "langhooks.h"
#include "pointer-set.h"
#include "domwalk.h"
#include "cfgloop.h"
#include "tree-data-ref.h"
#include "gimple-pretty-print.h"
#include "insn-config.h"
#include "expr.h"
#include "optabs.h"
#include "tree-scalar-evolution.h"

#ifndef HAVE_conditional_move
#define HAVE_conditional_move (0)
#endif

static unsigned int tree_ssa_phiopt (void);
static unsigned int tree_ssa_phiopt_worker (bool, bool);
static bool conditional_replacement (basic_block, basic_block,
                                     edge, edge, gimple, tree, tree);
static int value_replacement (basic_block, basic_block,
                              edge, edge, gimple, tree, tree);
static bool minmax_replacement (basic_block, basic_block,
                                edge, edge, gimple, tree, tree);
static bool abs_replacement (basic_block, basic_block,
                             edge, edge, gimple, tree, tree);
static bool cond_store_replacement (basic_block, basic_block, edge, edge,
                                    struct pointer_set_t *);
static bool cond_if_else_store_replacement (basic_block, basic_block, basic_block);
static struct pointer_set_t * get_non_trapping (void);
static void replace_phi_edge_with_variable (basic_block, edge, gimple, tree);
static void hoist_adjacent_loads (basic_block, basic_block,
                                  basic_block, basic_block);
static bool gate_hoist_loads (void);

/* This pass tries to replace an if-then-else block with an
   assignment.  We have four kinds of transformations.  Some of these
   transformations are also performed by the ifcvt RTL optimizer.

   Conditional Replacement
   -----------------------

   This transformation, implemented in conditional_replacement,
   replaces

     bb0:
      if (cond) goto bb2; else goto bb1;
     bb1:
     bb2:
      x = PHI <0 (bb1), 1 (bb0), ...>;

   with

     bb0:
      x' = cond;
      goto bb2;
     bb2:
      x = PHI <x' (bb0), ...>;

   We remove bb1 as it becomes unreachable.  This occurs often due to
   gimplification of conditionals.
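
   For example (a source-level sketch, not code from this file):

     x = (a > b) ? 1 : 0;

   becomes the straight-line equivalent of

     x = a > b;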

   Value Replacement
   -----------------

   This transformation, implemented in value_replacement, replaces

     bb0:
      if (a != b) goto bb2; else goto bb1;
     bb1:
     bb2:
      x = PHI <a (bb1), b (bb0), ...>;

   with

     bb0:
     bb2:
      x = PHI <b (bb0), ...>;

   This opportunity can sometimes occur as a result of other
   optimizations.
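
   For example (a source-level sketch): in

     x = (a != b) ? b : a;

   the else arm only executes when a equals b, so both arms reduce to

     x = b;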

   Another case caught by value replacement looks like this:

     bb0:
       t1 = a == CONST;
       t2 = b > c;
       t3 = t1 & t2;
       if (t3 != 0) goto bb1; else goto bb2;
     bb1:
     bb2:
       x = PHI (CONST, a)

   Gets replaced with:
     bb0:
     bb2:
       t1 = a == CONST;
       t2 = b > c;
       t3 = t1 & t2;
       x = a;
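
   For example (a source-level sketch):

     x = (a == 7 && b > c) ? 7 : a;

   Whenever the condition holds, a is already 7, so the PHI collapses to

     x = a;

   (the comparisons stay behind and may be removed as dead code later).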

   ABS Replacement
   ---------------

   This transformation, implemented in abs_replacement, replaces

     bb0:
      if (a >= 0) goto bb2; else goto bb1;
     bb1:
      x = -a;
     bb2:
      x = PHI <x (bb1), a (bb0), ...>;

   with

     bb0:
      x' = ABS_EXPR< a >;
     bb2:
      x = PHI <x' (bb0), ...>;
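
   For example (a source-level sketch):

     x = (a >= 0) ? a : -a;

   becomes

     x = ABS_EXPR <a>;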

   MIN/MAX Replacement
   -------------------

   This transformation, implemented in minmax_replacement, replaces

     bb0:
      if (a <= b) goto bb2; else goto bb1;
     bb1:
     bb2:
      x = PHI <b (bb1), a (bb0), ...>;

   with

     bb0:
      x' = MIN_EXPR (a, b)
     bb2:
      x = PHI <x' (bb0), ...>;

   A similar transformation is done for MAX_EXPR.
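
   For example (a source-level sketch):

     x = (a <= b) ? a : b;

   becomes

     x = MIN_EXPR <a, b>;

   and the mirrored form with the arms swapped becomes MAX_EXPR.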

   This pass also performs a fifth transformation of a slightly different
   flavor.

   Adjacent Load Hoisting
   ----------------------

   This transformation replaces

     bb0:
       if (...) goto bb2; else goto bb1;
     bb1:
       x1 = (<expr>).field1;
       goto bb3;
     bb2:
       x2 = (<expr>).field2;
     bb3:
       # x = PHI <x1, x2>;

   with

     bb0:
       x1 = (<expr>).field1;
       x2 = (<expr>).field2;
       if (...) goto bb2; else goto bb1;
     bb1:
       goto bb3;
     bb2:
     bb3:
       # x = PHI <x1, x2>;

   The purpose of this transformation is to enable generation of conditional
   move instructions such as Intel CMOVE or PowerPC ISEL.  Because one of
   the loads is speculative, the transformation is restricted to very
   specific cases to avoid introducing a page fault.  We are looking for
   the common idiom:

     if (...)
       x = y->left;
     else
       x = y->right;

   where left and right are typically adjacent pointers in a tree structure.  */

static unsigned int
tree_ssa_phiopt (void)
{
  return tree_ssa_phiopt_worker (false, gate_hoist_loads ());
}

/* This pass tries to transform conditional stores into unconditional
   ones, enabling further simplifications with the simpler then and else
   blocks.  In particular it replaces this:

     bb0:
       if (cond) goto bb2; else goto bb1;
     bb1:
       *p = RHS;
     bb2:

   with

     bb0:
       if (cond) goto bb1; else goto bb2;
     bb1:
       condtmp' = *p;
     bb2:
       condtmp = PHI <RHS, condtmp'>
       *p = condtmp;

   This transformation can only be done under several constraints,
   documented below.  It also replaces:

     bb0:
       if (cond) goto bb2; else goto bb1;
     bb1:
       *p = RHS1;
       goto bb3;
     bb2:
       *p = RHS2;
     bb3:

   with

     bb0:
       if (cond) goto bb3; else goto bb1;
     bb1:
     bb3:
       condtmp = PHI <RHS1, RHS2>
       *p = condtmp;
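
   For example (a source-level sketch, ignoring the safety checks
   documented below):

     if (cond) *p = x;

   becomes, once a dominating access proves that *p cannot trap,

     tmp = cond ? x : *p;
     *p = tmp;

   which exposes the store to further if-conversion.  */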

static unsigned int
tree_ssa_cs_elim (void)
{
  unsigned todo;
  /* ??? We are not interested in loop related info, but the following
     will create it, ICEing as we didn't init loops with pre-headers.
     An interfacing issue of find_data_references_in_bb.  */
  loop_optimizer_init (LOOPS_NORMAL);
  scev_initialize ();
  todo = tree_ssa_phiopt_worker (true, false);
  scev_finalize ();
  loop_optimizer_finalize ();
  return todo;
}

/* Return the single PHI in the SEQ of PHIs whose arguments for edges
   E0 and E1 differ, or NULL if there is none or more than one.  A
   singleton SEQ is returned as-is.  */

static gimple
single_non_singleton_phi_for_edges (gimple_seq seq, edge e0, edge e1)
{
  gimple_stmt_iterator i;
  gimple phi = NULL;
  if (gimple_seq_singleton_p (seq))
    return gsi_stmt (gsi_start (seq));
  for (i = gsi_start (seq); !gsi_end_p (i); gsi_next (&i))
    {
      gimple p = gsi_stmt (i);
      /* If the PHI arguments are equal then we can skip this PHI.  */
      if (operand_equal_for_phi_arg_p (gimple_phi_arg_def (p, e0->dest_idx),
                                       gimple_phi_arg_def (p, e1->dest_idx)))
        continue;

      /* If we already found a PHI whose two edge arguments differ,
         then there is no singleton; return NULL.  */
      if (phi)
        return NULL;

      phi = p;
    }
  return phi;
}

/* The core routine of conditional store replacement and normal
   phi optimizations.  Both share much of the infrastructure in how
   to match applicable basic block patterns.  DO_STORE_ELIM is true
   when we want to do conditional store replacement, false otherwise.
   DO_HOIST_LOADS is true when we want to hoist adjacent loads out
   of diamond control flow patterns, false otherwise.  */
static unsigned int
tree_ssa_phiopt_worker (bool do_store_elim, bool do_hoist_loads)
{
  basic_block bb;
  basic_block *bb_order;
  unsigned n, i;
  bool cfgchanged = false;
  struct pointer_set_t *nontrap = 0;

  if (do_store_elim)
    /* Calculate the set of non-trapping memory accesses.  */
    nontrap = get_non_trapping ();

  /* Search every basic block for COND_EXPRs we may be able to optimize.

     We walk the blocks in an order that guarantees that a block with
     a single predecessor is processed before the predecessor.
     This ensures that we collapse inner ifs before visiting the
     outer ones, and also that we do not try to visit a removed
     block.  */
  bb_order = single_pred_before_succ_order ();
  n = n_basic_blocks - NUM_FIXED_BLOCKS;

  for (i = 0; i < n; i++)
    {
      gimple cond_stmt, phi;
      basic_block bb1, bb2;
      edge e1, e2;
      tree arg0, arg1;

      bb = bb_order[i];

      cond_stmt = last_stmt (bb);
      /* Check to see if the last statement is a GIMPLE_COND.  */
      if (!cond_stmt
          || gimple_code (cond_stmt) != GIMPLE_COND)
        continue;

      e1 = EDGE_SUCC (bb, 0);
      bb1 = e1->dest;
      e2 = EDGE_SUCC (bb, 1);
      bb2 = e2->dest;

      /* We cannot do the optimization on abnormal edges.  */
      if ((e1->flags & EDGE_ABNORMAL) != 0
          || (e2->flags & EDGE_ABNORMAL) != 0)
        continue;

      /* Give up if bb1 has no successors, bb2 is missing, or bb2 has
         no successors.  */
      if (EDGE_COUNT (bb1->succs) == 0
          || bb2 == NULL
          || EDGE_COUNT (bb2->succs) == 0)
        continue;

      /* Find the bb which is the fall through to the other.  */
      if (EDGE_SUCC (bb1, 0)->dest == bb2)
        ;
      else if (EDGE_SUCC (bb2, 0)->dest == bb1)
        {
          basic_block bb_tmp = bb1;
          edge e_tmp = e1;
          bb1 = bb2;
          bb2 = bb_tmp;
          e1 = e2;
          e2 = e_tmp;
        }
      else if (do_store_elim
               && EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest)
        {
          basic_block bb3 = EDGE_SUCC (bb1, 0)->dest;

          if (!single_succ_p (bb1)
              || (EDGE_SUCC (bb1, 0)->flags & EDGE_FALLTHRU) == 0
              || !single_succ_p (bb2)
              || (EDGE_SUCC (bb2, 0)->flags & EDGE_FALLTHRU) == 0
              || EDGE_COUNT (bb3->preds) != 2)
            continue;
          if (cond_if_else_store_replacement (bb1, bb2, bb3))
            cfgchanged = true;
          continue;
        }
      else if (do_hoist_loads
               && EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest)
        {
          basic_block bb3 = EDGE_SUCC (bb1, 0)->dest;

          if (!FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (cond_stmt)))
              && single_succ_p (bb1)
              && single_succ_p (bb2)
              && single_pred_p (bb1)
              && single_pred_p (bb2)
              && EDGE_COUNT (bb->succs) == 2
              && EDGE_COUNT (bb3->preds) == 2
              /* If one edge or the other is dominant, a conditional move
                 is likely to perform worse than the well-predicted branch.  */
              && !predictable_edge_p (EDGE_SUCC (bb, 0))
              && !predictable_edge_p (EDGE_SUCC (bb, 1)))
            hoist_adjacent_loads (bb, bb1, bb2, bb3);
          continue;
        }
      else
        continue;

      e1 = EDGE_SUCC (bb1, 0);

      /* Make sure that bb1 is just a fall through.  */
      if (!single_succ_p (bb1)
          || (e1->flags & EDGE_FALLTHRU) == 0)
        continue;

      /* Also make sure that bb1 has only one predecessor and that it
         is bb.  */
      if (!single_pred_p (bb1)
          || single_pred (bb1) != bb)
        continue;

      if (do_store_elim)
        {
          /* bb1 is the middle block, bb2 the join block, bb the split block,
             e1 the fallthrough edge from bb1 to bb2.  We can't do the
             optimization if the join block has more than two predecessors.  */
          if (EDGE_COUNT (bb2->preds) > 2)
            continue;
          if (cond_store_replacement (bb1, bb2, e1, e2, nontrap))
            cfgchanged = true;
        }
      else
        {
          gimple_seq phis = phi_nodes (bb2);
          gimple_stmt_iterator gsi;
          bool candorest = true;

          /* Value replacement can work with more than one PHI
             so try that first.  */
          for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
            {
              phi = gsi_stmt (gsi);
              arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
              arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
              if (value_replacement (bb, bb1, e1, e2, phi, arg0, arg1) == 2)
                {
                  candorest = false;
                  cfgchanged = true;
                  break;
                }
            }

          if (!candorest)
            continue;

          phi = single_non_singleton_phi_for_edges (phis, e1, e2);
          if (!phi)
            continue;

          arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
          arg1 = gimple_phi_arg_def (phi, e2->dest_idx);

          /* Something is wrong if we cannot find the arguments in the PHI
             node.  */
          gcc_assert (arg0 != NULL && arg1 != NULL);

          /* Do the replacement of conditional if it can be done.  */
          if (conditional_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
            cfgchanged = true;
          else if (abs_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
            cfgchanged = true;
          else if (minmax_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
            cfgchanged = true;
        }
    }

  free (bb_order);

  if (do_store_elim)
    pointer_set_destroy (nontrap);
  /* If the CFG has changed, we should cleanup the CFG.  */
  if (cfgchanged && do_store_elim)
    {
      /* In cond-store replacement we have added some loads on edges
         and new VOPS (as we moved the store, and created a load).  */
      gsi_commit_edge_inserts ();
      return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
    }
  else if (cfgchanged)
    return TODO_cleanup_cfg;
  return 0;
}

/* Replace PHI node element whose edge is E in block BB with variable NEW.
   Remove the edge from COND_BLOCK which does not lead to BB (COND_BLOCK
   is known to have two edges, one of which must reach BB).  */

static void
replace_phi_edge_with_variable (basic_block cond_block,
                                edge e, gimple phi, tree new_tree)
{
  basic_block bb = gimple_bb (phi);
  basic_block block_to_remove;
  gimple_stmt_iterator gsi;

  /* Change the PHI argument to new.  */
  SET_USE (PHI_ARG_DEF_PTR (phi, e->dest_idx), new_tree);

  /* Remove the empty basic block.  */
  if (EDGE_SUCC (cond_block, 0)->dest == bb)
    {
      EDGE_SUCC (cond_block, 0)->flags |= EDGE_FALLTHRU;
      EDGE_SUCC (cond_block, 0)->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
      EDGE_SUCC (cond_block, 0)->probability = REG_BR_PROB_BASE;
      EDGE_SUCC (cond_block, 0)->count += EDGE_SUCC (cond_block, 1)->count;

      block_to_remove = EDGE_SUCC (cond_block, 1)->dest;
    }
  else
    {
      EDGE_SUCC (cond_block, 1)->flags |= EDGE_FALLTHRU;
      EDGE_SUCC (cond_block, 1)->flags
        &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
      EDGE_SUCC (cond_block, 1)->probability = REG_BR_PROB_BASE;
      EDGE_SUCC (cond_block, 1)->count += EDGE_SUCC (cond_block, 0)->count;

      block_to_remove = EDGE_SUCC (cond_block, 0)->dest;
    }
  delete_basic_block (block_to_remove);

  /* Eliminate the COND_EXPR at the end of COND_BLOCK.  */
  gsi = gsi_last_bb (cond_block);
  gsi_remove (&gsi, true);

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file,
             "COND_EXPR in block %d and PHI in block %d converted to straightline code.\n",
             cond_block->index,
             bb->index);
}

/* The function conditional_replacement does the main work of doing the
   conditional replacement.  Return true if the replacement is done.
   Otherwise return false.
   BB is the basic block where the replacement is going to be done.
   ARG0 is argument 0 from PHI.  Likewise for ARG1.  */

static bool
conditional_replacement (basic_block cond_bb, basic_block middle_bb,
                         edge e0, edge e1, gimple phi,
                         tree arg0, tree arg1)
{
  tree result;
  gimple stmt, new_stmt;
  tree cond;
  gimple_stmt_iterator gsi;
  edge true_edge, false_edge;
  tree new_var, new_var2;
  bool neg;

  /* FIXME: Gimplification of complex type is too hard for now.  */
  /* We aren't prepared to handle vectors either (and it is a question
     if it would be worthwhile anyway).  */
  if (!(INTEGRAL_TYPE_P (TREE_TYPE (arg0))
        || POINTER_TYPE_P (TREE_TYPE (arg0)))
      || !(INTEGRAL_TYPE_P (TREE_TYPE (arg1))
           || POINTER_TYPE_P (TREE_TYPE (arg1))))
    return false;

  /* If the PHI arguments are the constants 0 and 1, or 0 and -1, then
     convert the PHI to a use of the conditional.  */
  if ((integer_zerop (arg0) && integer_onep (arg1))
      || (integer_zerop (arg1) && integer_onep (arg0)))
    neg = false;
  else if ((integer_zerop (arg0) && integer_all_onesp (arg1))
           || (integer_zerop (arg1) && integer_all_onesp (arg0)))
    neg = true;
  else
    return false;

  if (!empty_block_p (middle_bb))
    return false;

  /* At this point we know we have a GIMPLE_COND with two successors.
     One successor is BB, the other successor is an empty block which
     falls through into BB.

     There is a single PHI node at the join point (BB) and its arguments
     are constants (0, 1) or (0, -1).

     So, given the condition COND, and the two PHI arguments, we can
     rewrite this PHI into non-branching code:

       dest = (COND) or dest = COND'

     We use the condition as-is if the argument associated with the
     true edge has the value one or the argument associated with the
     false edge has the value zero.  Note that those conditions are not
     the same since only one of the outgoing edges from the GIMPLE_COND
     will directly reach BB and thus be associated with an argument.  */

  stmt = last_stmt (cond_bb);
  result = PHI_RESULT (phi);

  /* To handle special cases like floating point comparison, it is easier and
     less error-prone to build a tree and gimplify it on the fly though it is
     less efficient.  */
  cond = fold_build2_loc (gimple_location (stmt),
                          gimple_cond_code (stmt), boolean_type_node,
                          gimple_cond_lhs (stmt), gimple_cond_rhs (stmt));

  /* We need to know which is the true edge and which is the false
     edge so that we know when to invert the condition below.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
  if ((e0 == true_edge && integer_zerop (arg0))
      || (e0 == false_edge && !integer_zerop (arg0))
      || (e1 == true_edge && integer_zerop (arg1))
      || (e1 == false_edge && !integer_zerop (arg1)))
    cond = fold_build1_loc (gimple_location (stmt),
                            TRUTH_NOT_EXPR, TREE_TYPE (cond), cond);

  if (neg)
    {
      cond = fold_convert_loc (gimple_location (stmt),
                               TREE_TYPE (result), cond);
      cond = fold_build1_loc (gimple_location (stmt),
                              NEGATE_EXPR, TREE_TYPE (cond), cond);
    }

  /* Insert our new statements at the end of conditional block before the
     COND_STMT.  */
  gsi = gsi_for_stmt (stmt);
  new_var = force_gimple_operand_gsi (&gsi, cond, true, NULL, true,
                                      GSI_SAME_STMT);

  if (!useless_type_conversion_p (TREE_TYPE (result), TREE_TYPE (new_var)))
    {
      source_location locus_0, locus_1;

      new_var2 = make_ssa_name (TREE_TYPE (result), NULL);
      new_stmt = gimple_build_assign_with_ops (CONVERT_EXPR, new_var2,
                                               new_var, NULL);
      gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
      new_var = new_var2;

      /* Set the locus to the first argument, unless it doesn't have one.  */
      locus_0 = gimple_phi_arg_location (phi, 0);
      locus_1 = gimple_phi_arg_location (phi, 1);
      if (locus_0 == UNKNOWN_LOCATION)
        locus_0 = locus_1;
      gimple_set_location (new_stmt, locus_0);
    }

  replace_phi_edge_with_variable (cond_bb, e1, phi, new_var);

  /* Note that we optimized this PHI.  */
  return true;
}

/* Update *ARG which is defined in STMT so that it contains the
   computed value if that seems profitable.  Return true if the
   statement is made dead by that rewriting.  */

static bool
jump_function_from_stmt (tree *arg, gimple stmt)
{
  enum tree_code code = gimple_assign_rhs_code (stmt);
  if (code == ADDR_EXPR)
    {
      /* For arg = &p->i transform it to p, if possible.  */
      tree rhs1 = gimple_assign_rhs1 (stmt);
      HOST_WIDE_INT offset;
      tree tem = get_addr_base_and_unit_offset (TREE_OPERAND (rhs1, 0),
                                                &offset);
      if (tem
          && TREE_CODE (tem) == MEM_REF
          && (mem_ref_offset (tem) + double_int::from_shwi (offset)).is_zero ())
        {
          *arg = TREE_OPERAND (tem, 0);
          return true;
        }
    }
  /* TODO: Much like IPA-CP jump-functions we want to handle constant
     additions symbolically here, and we'd need to update the comparison
     code that compares the arg + cst tuples in our caller.  For now the
     code above exactly handles the VEC_BASE pattern from vec.h.  */
  return false;
}

/* RHS is a source argument in a BIT_AND_EXPR which feeds a conditional
   of the form SSA_NAME NE 0.

   If RHS is fed by a simple EQ_EXPR comparison of two values, see if
   the two input values of the EQ_EXPR match arg0 and arg1.

   If so update *code and return TRUE.  Otherwise return FALSE.  */

static bool
rhs_is_fed_for_value_replacement (const_tree arg0, const_tree arg1,
                                  enum tree_code *code, const_tree rhs)
{
  /* Obviously if RHS is not an SSA_NAME, we can't look at the defining
     statement.  */
  if (TREE_CODE (rhs) == SSA_NAME)
    {
      gimple def1 = SSA_NAME_DEF_STMT (rhs);

      /* Verify the defining statement has an EQ_EXPR on the RHS.  */
      if (is_gimple_assign (def1) && gimple_assign_rhs_code (def1) == EQ_EXPR)
        {
          /* Finally verify the source operands of the EQ_EXPR are equal
             to arg0 and arg1.  */
          tree op0 = gimple_assign_rhs1 (def1);
          tree op1 = gimple_assign_rhs2 (def1);
          if ((operand_equal_for_phi_arg_p (arg0, op0)
               && operand_equal_for_phi_arg_p (arg1, op1))
              || (operand_equal_for_phi_arg_p (arg0, op1)
                  && operand_equal_for_phi_arg_p (arg1, op0)))
            {
              /* We will perform the optimization.  */
              *code = gimple_assign_rhs_code (def1);
              return true;
            }
        }
    }
  return false;
}

/* Return TRUE if arg0/arg1 are equal to the rhs/lhs or lhs/rhs of COND.

   Also return TRUE if arg0/arg1 are equal to the source arguments of
   an EQ comparison feeding a BIT_AND_EXPR which feeds COND.

   Return FALSE otherwise.  */

static bool
operand_equal_for_value_replacement (const_tree arg0, const_tree arg1,
                                     enum tree_code *code, gimple cond)
{
  gimple def;
  tree lhs = gimple_cond_lhs (cond);
  tree rhs = gimple_cond_rhs (cond);

  if ((operand_equal_for_phi_arg_p (arg0, lhs)
       && operand_equal_for_phi_arg_p (arg1, rhs))
      || (operand_equal_for_phi_arg_p (arg1, lhs)
          && operand_equal_for_phi_arg_p (arg0, rhs)))
    return true;

  /* Now handle more complex case where we have an EQ comparison
     which feeds a BIT_AND_EXPR which feeds COND.

     First verify that COND is of the form SSA_NAME NE 0.  */
  if (*code != NE_EXPR || !integer_zerop (rhs)
      || TREE_CODE (lhs) != SSA_NAME)
    return false;

  /* Now ensure that SSA_NAME is set by a BIT_AND_EXPR.  */
  def = SSA_NAME_DEF_STMT (lhs);
  if (!is_gimple_assign (def) || gimple_assign_rhs_code (def) != BIT_AND_EXPR)
    return false;

  /* Now verify arg0/arg1 correspond to the source arguments of an
     EQ comparison feeding the BIT_AND_EXPR.  */

  tree tmp = gimple_assign_rhs1 (def);
  if (rhs_is_fed_for_value_replacement (arg0, arg1, code, tmp))
    return true;

  tmp = gimple_assign_rhs2 (def);
  if (rhs_is_fed_for_value_replacement (arg0, arg1, code, tmp))
    return true;

  return false;
}

/* The function value_replacement does the main work of doing the value
   replacement.  Return non-zero if the replacement is done.  Otherwise
   return 0.  If we remove the middle basic block, return 2.
   BB is the basic block where the replacement is going to be done.
   ARG0 is argument 0 from the PHI.  Likewise for ARG1.  */

static int
value_replacement (basic_block cond_bb, basic_block middle_bb,
                   edge e0, edge e1, gimple phi,
                   tree arg0, tree arg1)
{
  gimple_stmt_iterator gsi;
  gimple cond;
  edge true_edge, false_edge;
  enum tree_code code;
  bool empty_or_with_defined_p = true;

  /* If the type says honor signed zeros we cannot do this
     optimization.  */
  if (HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (arg1))))
    return 0;

  /* If there is a statement in MIDDLE_BB that defines one of the PHI
     arguments, then adjust arg0 or arg1.  */
  gsi = gsi_after_labels (middle_bb);
  if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
    gsi_next_nondebug (&gsi);
  while (!gsi_end_p (gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      tree lhs;
      gsi_next_nondebug (&gsi);
      if (!is_gimple_assign (stmt))
        {
          empty_or_with_defined_p = false;
          continue;
        }
      /* Now try to adjust arg0 or arg1 according to the computation
         in the statement.  */
      lhs = gimple_assign_lhs (stmt);
      if (!(lhs == arg0
            && jump_function_from_stmt (&arg0, stmt))
          || (lhs == arg1
              && jump_function_from_stmt (&arg1, stmt)))
        empty_or_with_defined_p = false;
    }

  cond = last_stmt (cond_bb);
  code = gimple_cond_code (cond);

  /* This transformation is only valid for equality comparisons.  */
  if (code != NE_EXPR && code != EQ_EXPR)
    return 0;

  /* We need to know which is the true edge and which is the false
     edge so that we know which PHI argument goes with which edge.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);

  /* At this point we know we have a COND_EXPR with two successors.
     One successor is BB, the other successor is an empty block which
     falls through into BB.

     The condition for the COND_EXPR is known to be NE_EXPR or EQ_EXPR.

     There is a single PHI node at the join point (BB) with two arguments.

     We now need to verify that the two arguments in the PHI node match
     the two arguments to the equality comparison.  */

  if (operand_equal_for_value_replacement (arg0, arg1, &code, cond))
    {
      edge e;
      tree arg;

      /* For NE_EXPR, we want to build an assignment result = arg where
         arg is the PHI argument associated with the true edge.  For
         EQ_EXPR we want the PHI argument associated with the false edge.  */
      e = (code == NE_EXPR ? true_edge : false_edge);

      /* Unfortunately, E may not reach BB (it may instead have gone to
         OTHER_BLOCK).  If that is the case, then we want the single outgoing
         edge from OTHER_BLOCK which reaches BB and represents the desired
         path from COND_BLOCK.  */
      if (e->dest == middle_bb)
        e = single_succ_edge (e->dest);

      /* Now we know the incoming edge to BB that has the argument for the
         RHS of our new assignment statement.  */
      if (e0 == e)
        arg = arg0;
      else
        arg = arg1;

      /* If the middle basic block was empty or is defining the
         PHI arguments and this is a single phi where the args are different
         for the edges e0 and e1 then we can remove the middle basic block.  */
      if (empty_or_with_defined_p
          && single_non_singleton_phi_for_edges (phi_nodes (gimple_bb (phi)),
                                                 e0, e1))
        {
          replace_phi_edge_with_variable (cond_bb, e1, phi, arg);
          /* Note that we optimized this PHI.  */
          return 2;
        }
      else
        {
          /* Replace the PHI arguments with arg.  */
          SET_PHI_ARG_DEF (phi, e0->dest_idx, arg);
          SET_PHI_ARG_DEF (phi, e1->dest_idx, arg);
          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "PHI ");
              print_generic_expr (dump_file, gimple_phi_result (phi), 0);
              fprintf (dump_file, " reduced for COND_EXPR in block %d to ",
                       cond_bb->index);
              print_generic_expr (dump_file, arg, 0);
              fprintf (dump_file, ".\n");
            }
          return 1;
        }
    }
  return 0;
}

/* The function minmax_replacement does the main work of doing the minmax
   replacement.  Return true if the replacement is done.  Otherwise return
   false.
   BB is the basic block where the replacement is going to be done.
   ARG0 is argument 0 from the PHI.  Likewise for ARG1.  */

static bool
minmax_replacement (basic_block cond_bb, basic_block middle_bb,
                    edge e0, edge e1, gimple phi,
                    tree arg0, tree arg1)
{
  tree result, type;
  gimple cond, new_stmt;
  edge true_edge, false_edge;
  enum tree_code cmp, minmax, ass_code;
  tree smaller, larger, arg_true, arg_false;
  gimple_stmt_iterator gsi, gsi_from;

  type = TREE_TYPE (PHI_RESULT (phi));

  /* The optimization may be unsafe due to NaNs.  */
  if (HONOR_NANS (TYPE_MODE (type)))
    return false;

  cond = last_stmt (cond_bb);
  cmp = gimple_cond_code (cond);

  /* This transformation is only valid for order comparisons.  Record which
     operand is smaller/larger if the result of the comparison is true.  */
  if (cmp == LT_EXPR || cmp == LE_EXPR)
    {
      smaller = gimple_cond_lhs (cond);
      larger = gimple_cond_rhs (cond);
    }
  else if (cmp == GT_EXPR || cmp == GE_EXPR)
    {
      smaller = gimple_cond_rhs (cond);
      larger = gimple_cond_lhs (cond);
    }
  else
    return false;

  /* We need to know which is the true edge and which is the false
     edge so that we can match the PHI arguments with the comparison
     operands.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);

  /* Forward the edges over the middle basic block.  */
  if (true_edge->dest == middle_bb)
    true_edge = EDGE_SUCC (true_edge->dest, 0);
  if (false_edge->dest == middle_bb)
    false_edge = EDGE_SUCC (false_edge->dest, 0);

  if (true_edge == e0)
    {
      gcc_assert (false_edge == e1);
      arg_true = arg0;
      arg_false = arg1;
    }
  else
    {
      gcc_assert (false_edge == e0);
      gcc_assert (true_edge == e1);
      arg_true = arg1;
      arg_false = arg0;
    }

  if (empty_block_p (middle_bb))
    {
      if (operand_equal_for_phi_arg_p (arg_true, smaller)
          && operand_equal_for_phi_arg_p (arg_false, larger))
        {
          /* Case

             if (smaller < larger)
               rslt = smaller;
             else
               rslt = larger;  */
          minmax = MIN_EXPR;
        }
      else if (operand_equal_for_phi_arg_p (arg_false, smaller)
               && operand_equal_for_phi_arg_p (arg_true, larger))
        minmax = MAX_EXPR;
      else
        return false;
    }
  else
    {
      /* Recognize the following case, assuming d <= u:

         if (a <= u)
           b = MAX (a, d);
         x = PHI <b, u>

         This is equivalent to

         b = MAX (a, d);
         x = MIN (b, u);  */

      gimple assign = last_and_only_stmt (middle_bb);
      tree lhs, op0, op1, bound;

      if (!assign
          || gimple_code (assign) != GIMPLE_ASSIGN)
        return false;

      lhs = gimple_assign_lhs (assign);
      ass_code = gimple_assign_rhs_code (assign);
      if (ass_code != MAX_EXPR && ass_code != MIN_EXPR)
        return false;
      op0 = gimple_assign_rhs1 (assign);
      op1 = gimple_assign_rhs2 (assign);

      if (true_edge->src == middle_bb)
        {
          /* We got here if the condition is true, i.e., SMALLER < LARGER.  */
          if (!operand_equal_for_phi_arg_p (lhs, arg_true))
            return false;

          if (operand_equal_for_phi_arg_p (arg_false, larger))
            {
              /* Case

                 if (smaller < larger)
                   {
                     r' = MAX_EXPR (smaller, bound)
                   }
                 r = PHI <r', larger>  --> to be turned to MIN_EXPR.  */
              if (ass_code != MAX_EXPR)
                return false;

              minmax = MIN_EXPR;
              if (operand_equal_for_phi_arg_p (op0, smaller))
                bound = op1;
              else if (operand_equal_for_phi_arg_p (op1, smaller))
                bound = op0;
              else
                return false;

              /* We need BOUND <= LARGER.  */
              if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
                                                  bound, larger)))
                return false;
            }
          else if (operand_equal_for_phi_arg_p (arg_false, smaller))
            {
              /* Case

                 if (smaller < larger)
                   {
                     r' = MIN_EXPR (larger, bound)
                   }
                 r = PHI <r', smaller>  --> to be turned to MAX_EXPR.  */
              if (ass_code != MIN_EXPR)
                return false;

              minmax = MAX_EXPR;
              if (operand_equal_for_phi_arg_p (op0, larger))
                bound = op1;
              else if (operand_equal_for_phi_arg_p (op1, larger))
                bound = op0;
              else
                return false;

              /* We need BOUND >= SMALLER.  */
              if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
                                                  bound, smaller)))
                return false;
            }
          else
            return false;
        }
      else
        {
          /* We got here if the condition is false, i.e., SMALLER > LARGER.  */
          if (!operand_equal_for_phi_arg_p (lhs, arg_false))
            return false;

          if (operand_equal_for_phi_arg_p (arg_true, larger))
            {
              /* Case

                 if (smaller > larger)
                   {
                     r' = MIN_EXPR (smaller, bound)
                   }
                 r = PHI <r', larger>  --> to be turned to MAX_EXPR.  */
              if (ass_code != MIN_EXPR)
                return false;

              minmax = MAX_EXPR;
              if (operand_equal_for_phi_arg_p (op0, smaller))
                bound = op1;
              else if (operand_equal_for_phi_arg_p (op1, smaller))
                bound = op0;
              else
                return false;

              /* We need BOUND >= LARGER.  */
              if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
                                                  bound, larger)))
                return false;
            }
          else if (operand_equal_for_phi_arg_p (arg_true, smaller))
            {
              /* Case

                 if (smaller > larger)
                   {
                     r' = MAX_EXPR (larger, bound)
                   }
                 r = PHI <r', smaller>  --> to be turned to MIN_EXPR.  */
              if (ass_code != MAX_EXPR)
                return false;

              minmax = MIN_EXPR;
              if (operand_equal_for_phi_arg_p (op0, larger))
                bound = op1;
              else if (operand_equal_for_phi_arg_p (op1, larger))
                bound = op0;
              else
                return false;

              /* We need BOUND <= SMALLER.  */
              if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
                                                  bound, smaller)))
                return false;
            }
          else
            return false;
        }

      /* Move the statement from the middle block.  */
      gsi = gsi_last_bb (cond_bb);
      gsi_from = gsi_last_nondebug_bb (middle_bb);
      gsi_move_before (&gsi_from, &gsi);
    }

  /* Emit the statement to compute min/max.  */
  result = duplicate_ssa_name (PHI_RESULT (phi), NULL);
  new_stmt = gimple_build_assign_with_ops (minmax, result, arg0, arg1);
  gsi = gsi_last_bb (cond_bb);
  gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);

  replace_phi_edge_with_variable (cond_bb, e1, phi, result);
  return true;
}

/* The function abs_replacement does the main work of doing the absolute
   value replacement.  Return true if the replacement is done.  Otherwise
   return false.
   BB is the basic block where the replacement is going to be done.
   ARG0 is argument 0 from the PHI.  Likewise for ARG1.  */

static bool
abs_replacement (basic_block cond_bb, basic_block middle_bb,
                 edge e0 ATTRIBUTE_UNUSED, edge e1,
                 gimple phi, tree arg0, tree arg1)
{
  tree result;
  gimple new_stmt, cond;
  gimple_stmt_iterator gsi;
  edge true_edge, false_edge;
  gimple assign;
  edge e;
  tree rhs, lhs;
  bool negate;
  enum tree_code cond_code;

  /* If the type says honor signed zeros we cannot do this
     optimization.  */
  if (HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (arg1))))
    return false;

  /* OTHER_BLOCK must have only one executable statement which must have the
     form arg0 = -arg1 or arg1 = -arg0.  */

  assign = last_and_only_stmt (middle_bb);
  /* If we did not find the proper negation assignment, then we cannot
     optimize.  */
  if (assign == NULL)
    return false;

  /* If we got here, then we have found the only executable statement
     in OTHER_BLOCK.  If it is anything other than arg0 = -arg1 or
     arg1 = -arg0, then we cannot optimize.  */
  if (gimple_code (assign) != GIMPLE_ASSIGN)
    return false;

  lhs = gimple_assign_lhs (assign);

  if (gimple_assign_rhs_code (assign) != NEGATE_EXPR)
    return false;

  rhs = gimple_assign_rhs1 (assign);

  /* The assignment has to be arg0 = -arg1 or arg1 = -arg0.  */
  if (!(lhs == arg0 && rhs == arg1)
      && !(lhs == arg1 && rhs == arg0))
    return false;

  cond = last_stmt (cond_bb);
  result = PHI_RESULT (phi);

  /* Only relationals comparing arg[01] against zero are interesting.  */
  cond_code = gimple_cond_code (cond);
  if (cond_code != GT_EXPR && cond_code != GE_EXPR
      && cond_code != LT_EXPR && cond_code != LE_EXPR)
    return false;

  /* Make sure the conditional is arg[01] OP y.  */
  if (gimple_cond_lhs (cond) != rhs)
    return false;

  if (FLOAT_TYPE_P (TREE_TYPE (gimple_cond_rhs (cond)))
      ? real_zerop (gimple_cond_rhs (cond))
      : integer_zerop (gimple_cond_rhs (cond)))
    ;
  else
    return false;

  /* We need to know which is the true edge and which is the false
     edge so that we know if we have abs or negative abs.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);

  /* For GT_EXPR/GE_EXPR, if the true edge goes to OTHER_BLOCK, then we
     will need to negate the result.  Similarly for LT_EXPR/LE_EXPR if
     the false edge goes to OTHER_BLOCK.  */
  if (cond_code == GT_EXPR || cond_code == GE_EXPR)
    e = true_edge;
  else
    e = false_edge;

  if (e->dest == middle_bb)
    negate = true;
  else
    negate = false;

  result = duplicate_ssa_name (result, NULL);

  if (negate)
    lhs = make_ssa_name (TREE_TYPE (result), NULL);
  else
    lhs = result;

  /* Build the modify expression with abs expression.  */
  new_stmt = gimple_build_assign_with_ops (ABS_EXPR, lhs, rhs, NULL);

  gsi = gsi_last_bb (cond_bb);
  gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);

  if (negate)
    {
      /* Get the right GSI.  We want to insert after the recently
         added ABS_EXPR statement (which we know is the first statement
         in the block).  */
      new_stmt = gimple_build_assign_with_ops (NEGATE_EXPR, result, lhs, NULL);

      gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
    }

  replace_phi_edge_with_variable (cond_bb, e1, phi, result);

  /* Note that we optimized this PHI.  */
  return true;
}

/* Auxiliary functions to determine the set of memory accesses which
   can't trap because they are preceded by accesses to the same memory
   portion.  We do that for MEM_REFs, so we only need to track
   the SSA_NAME of the pointer indirectly referenced.  The algorithm
   simply is a walk over all instructions in dominator order.  When
   we see a MEM_REF we determine if we've already seen a same
   ref anywhere up to the root of the dominator tree.  If we have, the
   current access can't trap.  If we don't see any dominating access
   the current access might trap, but might also make later accesses
   non-trapping, so we remember it.  We need to be careful with loads
   or stores, for instance a load might not trap, while a store would,
   so if we see a dominating read access this doesn't mean that a later
   write access would not trap.  Hence we also need to differentiate the
   type of access(es) seen.

   ??? We currently are very conservative and assume that a load might
   trap even if a store doesn't (write-only memory).  This probably is
   overly conservative.  */
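
/* For example (a sketch, not code from this pass):

     *p = 0;       <-- may trap; remember that a store through p was seen
     if (cond)
       *p = x;     <-- dominated by the store above, so it cannot trap

   whereas a dominating load of *p alone would not prove that a later
   store through p is safe.  */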

/* A hash-table of SSA_NAMEs, and in which basic block a MEM_REF
   through it was seen, which would constitute a no-trap region for
   same accesses.  */
struct name_to_bb
{
  unsigned int ssa_name_ver;
  unsigned int phase;
  bool store;
  HOST_WIDE_INT offset, size;
  basic_block bb;
};

/* Hashtable helpers.  */

struct ssa_names_hasher : typed_free_remove <name_to_bb>
{
  typedef name_to_bb value_type;
  typedef name_to_bb compare_type;
  static inline hashval_t hash (const value_type *);
  static inline bool equal (const value_type *, const compare_type *);
};

/* Used for quick clearing of the hash-table when we see calls.
   Hash entries with phase < nt_call_phase are invalid.  */
static unsigned int nt_call_phase;

/* The hash function.  */

inline hashval_t
ssa_names_hasher::hash (const value_type *n)
{
  return n->ssa_name_ver ^ (((hashval_t) n->store) << 31)
         ^ (n->offset << 6) ^ (n->size << 3);
}

/* The equality function of *P1 and *P2.  */

inline bool
ssa_names_hasher::equal (const value_type *n1, const compare_type *n2)
{
  return n1->ssa_name_ver == n2->ssa_name_ver
         && n1->store == n2->store
         && n1->offset == n2->offset
         && n1->size == n2->size;
}

/* The hash table for remembering what we've seen.  */
static hash_table <ssa_names_hasher> seen_ssa_names;

/* We see the expression EXP in basic block BB.  If it's an interesting
   expression (a MEM_REF through an SSA_NAME) possibly insert the
   expression into the set NONTRAP or the hash table of seen expressions.
   STORE is true if this expression is on the LHS, otherwise it's on
   the RHS.  */
static void
add_or_mark_expr (basic_block bb, tree exp,
                  struct pointer_set_t *nontrap, bool store)
{
  HOST_WIDE_INT size;

  if (TREE_CODE (exp) == MEM_REF
      && TREE_CODE (TREE_OPERAND (exp, 0)) == SSA_NAME
      && host_integerp (TREE_OPERAND (exp, 1), 0)
      && (size = int_size_in_bytes (TREE_TYPE (exp))) > 0)
    {
      tree name = TREE_OPERAND (exp, 0);
      struct name_to_bb map;
      name_to_bb **slot;
      struct name_to_bb *n2bb;
      basic_block found_bb = 0;

      /* Try to find the last seen MEM_REF through the same
         SSA_NAME, which can trap.  */
      map.ssa_name_ver = SSA_NAME_VERSION (name);
      map.phase = 0;
      map.bb = 0;
      map.store = store;
      map.offset = tree_low_cst (TREE_OPERAND (exp, 1), 0);
      map.size = size;

      slot = seen_ssa_names.find_slot (&map, INSERT);
      n2bb = *slot;
      if (n2bb && n2bb->phase >= nt_call_phase)
        found_bb = n2bb->bb;

      /* If we've found a trapping MEM_REF, _and_ it dominates EXP
         (it's in a basic block on the path from us to the dominator root)
         then we can't trap.  */
      if (found_bb && (((size_t)found_bb->aux) & 1) == 1)
        {
          pointer_set_insert (nontrap, exp);
        }
      else
        {
          /* EXP might trap, so insert it into the hash table.  */
          if (n2bb)
            {
              n2bb->phase = nt_call_phase;
              n2bb->bb = bb;
            }
          else
            {
              n2bb = XNEW (struct name_to_bb);
              n2bb->ssa_name_ver = SSA_NAME_VERSION (name);
              n2bb->phase = nt_call_phase;
              n2bb->bb = bb;
              n2bb->store = store;
              n2bb->offset = map.offset;
              n2bb->size = size;
              *slot = n2bb;
            }
        }
    }
}

class nontrapping_dom_walker : public dom_walker
{
public:
  nontrapping_dom_walker (cdi_direction direction, pointer_set_t *ps)
    : dom_walker (direction), m_nontrapping (ps) {}

  virtual void before_dom_children (basic_block);
  virtual void after_dom_children (basic_block);

private:
  pointer_set_t *m_nontrapping;
};

/* Called by walk_dominator_tree, when entering the block BB.  */
void
nontrapping_dom_walker::before_dom_children (basic_block bb)
{
  edge e;
  edge_iterator ei;
  gimple_stmt_iterator gsi;

  /* If we haven't seen all our predecessors, clear the hash-table.  */
  FOR_EACH_EDGE (e, ei, bb->preds)
    if ((((size_t)e->src->aux) & 2) == 0)
      {
        nt_call_phase++;
        break;
      }

  /* Mark this BB as being on the path to dominator root and as visited.  */
  bb->aux = (void*)(1 | 2);

  /* And walk the statements in order.  */
  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);

      if (is_gimple_call (stmt) && !nonfreeing_call_p (stmt))
        nt_call_phase++;
      else if (gimple_assign_single_p (stmt) && !gimple_has_volatile_ops (stmt))
        {
          add_or_mark_expr (bb, gimple_assign_lhs (stmt), m_nontrapping, true);
          add_or_mark_expr (bb, gimple_assign_rhs1 (stmt), m_nontrapping, false);
        }
    }
}

/* Called by walk_dominator_tree, when basic block BB is exited.  */
void
nontrapping_dom_walker::after_dom_children (basic_block bb)
{
  /* This BB isn't on the path to dominator root anymore.  */
  bb->aux = (void*)2;
}

/* This is the entry point of gathering non-trapping memory accesses.
   It will do a dominator walk over the whole function, and it will
   make use of the bb->aux pointers.  It returns a set of trees
   (the MEM_REFs themselves) which can't trap.  */
static struct pointer_set_t *
get_non_trapping (void)
{
  nt_call_phase = 0;
  pointer_set_t *nontrap = pointer_set_create ();
  seen_ssa_names.create (128);
  /* We're going to do a dominator walk, so ensure that we have
     dominance information.  */
  calculate_dominance_info (CDI_DOMINATORS);

  nontrapping_dom_walker (CDI_DOMINATORS, nontrap)
    .walk (cfun->cfg->x_entry_block_ptr);

  seen_ssa_names.dispose ();

  clear_aux_for_blocks ();
  return nontrap;
}

/* Do the main work of conditional store replacement.  We already know
   that the recognized pattern looks like so:

   split:
     if (cond) goto MIDDLE_BB; else goto JOIN_BB (edge E1)
   MIDDLE_BB:
     something
     fallthrough (edge E0)
   JOIN_BB:
     some more

   We check that MIDDLE_BB contains only one store, that the store
   doesn't trap (not via NOTRAP, but via checking if an access to the same
   memory location dominates us), and that the store has a "simple" RHS.  */

static bool
cond_store_replacement (basic_block middle_bb, basic_block join_bb,
                        edge e0, edge e1, struct pointer_set_t *nontrap)
{
  gimple assign = last_and_only_stmt (middle_bb);
  tree lhs, rhs, name, name2;
  gimple newphi, new_stmt;
  gimple_stmt_iterator gsi;
  source_location locus;

  /* Check if middle_bb contains only one store.  */
  if (!assign
      || !gimple_assign_single_p (assign)
      || gimple_has_volatile_ops (assign))
    return false;

  locus = gimple_location (assign);
  lhs = gimple_assign_lhs (assign);
  rhs = gimple_assign_rhs1 (assign);
  if (TREE_CODE (lhs) != MEM_REF
      || TREE_CODE (TREE_OPERAND (lhs, 0)) != SSA_NAME
      || !is_gimple_reg_type (TREE_TYPE (lhs)))
    return false;

  /* Prove that we can move the store down.  We could also check
     TREE_THIS_NOTRAP here, but in that case we also could move stores,
     whose value is not available readily, which we want to avoid.  */
  if (!pointer_set_contains (nontrap, lhs))
    return false;

  /* Now we've checked the constraints, so do the transformation:
     1) Remove the single store.  */
  gsi = gsi_for_stmt (assign);
  unlink_stmt_vdef (assign);
  gsi_remove (&gsi, true);
  release_defs (assign);

  /* 2) Insert a load from the memory of the store to the temporary
        on the edge which did not contain the store.  */
  lhs = unshare_expr (lhs);
  name = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
  new_stmt = gimple_build_assign (name, lhs);
  gimple_set_location (new_stmt, locus);
  gsi_insert_on_edge (e1, new_stmt);

  /* 3) Create a PHI node at the join block, with one argument
        holding the old RHS, and the other holding the temporary
        where we stored the old memory contents.  */
  name2 = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
  newphi = create_phi_node (name2, join_bb);
  add_phi_arg (newphi, rhs, e0, locus);
  add_phi_arg (newphi, name, e1, locus);

  lhs = unshare_expr (lhs);
  new_stmt = gimple_build_assign (lhs, PHI_RESULT (newphi));

  /* 4) Insert that PHI node.  */
  gsi = gsi_after_labels (join_bb);
  if (gsi_end_p (gsi))
    {
      gsi = gsi_last_bb (join_bb);
      gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
    }
  else
    gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);

  return true;
}

/* Do the main work of conditional store replacement.  */

static bool
cond_if_else_store_replacement_1 (basic_block then_bb, basic_block else_bb,
                                  basic_block join_bb, gimple then_assign,
                                  gimple else_assign)
{
  tree lhs_base, lhs, then_rhs, else_rhs, name;
  source_location then_locus, else_locus;
  gimple_stmt_iterator gsi;
  gimple newphi, new_stmt;

  if (then_assign == NULL
      || !gimple_assign_single_p (then_assign)
      || gimple_clobber_p (then_assign)
      || gimple_has_volatile_ops (then_assign)
      || else_assign == NULL
      || !gimple_assign_single_p (else_assign)
      || gimple_clobber_p (else_assign)
      || gimple_has_volatile_ops (else_assign))
    return false;

  lhs = gimple_assign_lhs (then_assign);
  if (!is_gimple_reg_type (TREE_TYPE (lhs))
      || !operand_equal_p (lhs, gimple_assign_lhs (else_assign), 0))
    return false;

  lhs_base = get_base_address (lhs);
  if (lhs_base == NULL_TREE
      || (!DECL_P (lhs_base) && TREE_CODE (lhs_base) != MEM_REF))
    return false;

  then_rhs = gimple_assign_rhs1 (then_assign);
  else_rhs = gimple_assign_rhs1 (else_assign);
  then_locus = gimple_location (then_assign);
  else_locus = gimple_location (else_assign);

  /* Now we've checked the constraints, so do the transformation:
     1) Remove the stores.  */
  gsi = gsi_for_stmt (then_assign);
  unlink_stmt_vdef (then_assign);
  gsi_remove (&gsi, true);
  release_defs (then_assign);

  gsi = gsi_for_stmt (else_assign);
  unlink_stmt_vdef (else_assign);
  gsi_remove (&gsi, true);
  release_defs (else_assign);

  /* 2) Create a PHI node at the join block, with one argument
        holding THEN_RHS and the other holding ELSE_RHS.  */
  name = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
  newphi = create_phi_node (name, join_bb);
  add_phi_arg (newphi, then_rhs, EDGE_SUCC (then_bb, 0), then_locus);
  add_phi_arg (newphi, else_rhs, EDGE_SUCC (else_bb, 0), else_locus);

  new_stmt = gimple_build_assign (lhs, PHI_RESULT (newphi));

  /* 3) Insert that PHI node.  */
  gsi = gsi_after_labels (join_bb);
  if (gsi_end_p (gsi))
    {
      gsi = gsi_last_bb (join_bb);
      gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
    }
  else
    gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);

  return true;
}

/* Conditional store replacement.  We already know
   that the recognized pattern looks like so:

   split:
     if (cond) goto THEN_BB; else goto ELSE_BB (edge E1)
   THEN_BB:
     ...
     X = Y;
     ...
     goto JOIN_BB;
   ELSE_BB:
     ...
     X = Z;
     ...
     fallthrough (edge E0)
   JOIN_BB:
     some more

   We check that it is safe to sink the store to JOIN_BB by verifying that
   there are no read-after-write or write-after-write dependencies in
   THEN_BB and ELSE_BB.
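
   For example (a source-level sketch, assuming the dependence checks
   pass):

     if (cond) *p = y; else *p = z;

   becomes

     tmp = cond ? y : z;
     *p = tmp;  */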

static bool
cond_if_else_store_replacement (basic_block then_bb, basic_block else_bb,
                                basic_block join_bb)
{
  gimple then_assign = last_and_only_stmt (then_bb);
  gimple else_assign = last_and_only_stmt (else_bb);
  vec<data_reference_p> then_datarefs, else_datarefs;
  vec<ddr_p> then_ddrs, else_ddrs;
  gimple then_store, else_store;
  bool found, ok = false, res;
  struct data_dependence_relation *ddr;
  data_reference_p then_dr, else_dr;
  int i, j;
  tree then_lhs, else_lhs;
  basic_block blocks[3];

  if (MAX_STORES_TO_SINK == 0)
    return false;

  /* Handle the case with single statement in THEN_BB and ELSE_BB.  */
  if (then_assign && else_assign)
    return cond_if_else_store_replacement_1 (then_bb, else_bb, join_bb,
                                             then_assign, else_assign);

  /* Find data references.  */
  then_datarefs.create (1);
  else_datarefs.create (1);
  if ((find_data_references_in_bb (NULL, then_bb, &then_datarefs)
       == chrec_dont_know)
      || !then_datarefs.length ()
      || (find_data_references_in_bb (NULL, else_bb, &else_datarefs)
          == chrec_dont_know)
      || !else_datarefs.length ())
    {
      free_data_refs (then_datarefs);
      free_data_refs (else_datarefs);
      return false;
    }

  /* Find pairs of stores with equal LHS.  */
  stack_vec<gimple, 1> then_stores, else_stores;
  FOR_EACH_VEC_ELT (then_datarefs, i, then_dr)
    {
      if (DR_IS_READ (then_dr))
        continue;

      then_store = DR_STMT (then_dr);
      then_lhs = gimple_get_lhs (then_store);
      found = false;

      FOR_EACH_VEC_ELT (else_datarefs, j, else_dr)
        {
          if (DR_IS_READ (else_dr))
            continue;

          else_store = DR_STMT (else_dr);
          else_lhs = gimple_get_lhs (else_store);

          if (operand_equal_p (then_lhs, else_lhs, 0))
            {
              found = true;
              break;
            }
        }

      if (!found)
        continue;

      then_stores.safe_push (then_store);
      else_stores.safe_push (else_store);
    }

  /* No pairs of stores found.  */
  if (!then_stores.length ()
      || then_stores.length () > (unsigned) MAX_STORES_TO_SINK)
    {
      free_data_refs (then_datarefs);
      free_data_refs (else_datarefs);
      return false;
    }

  /* Compute and check data dependencies in both basic blocks.  */
  then_ddrs.create (1);
  else_ddrs.create (1);
  if (!compute_all_dependences (then_datarefs, &then_ddrs,
                                vNULL, false)
      || !compute_all_dependences (else_datarefs, &else_ddrs,
                                   vNULL, false))
    {
      free_dependence_relations (then_ddrs);
      free_dependence_relations (else_ddrs);
      free_data_refs (then_datarefs);
      free_data_refs (else_datarefs);
      return false;
    }
  blocks[0] = then_bb;
  blocks[1] = else_bb;
  blocks[2] = join_bb;
  renumber_gimple_stmt_uids_in_blocks (blocks, 3);

  /* Check that there are no read-after-write or write-after-write
     dependencies in THEN_BB.  */
  FOR_EACH_VEC_ELT (then_ddrs, i, ddr)
    {
      struct data_reference *dra = DDR_A (ddr);
      struct data_reference *drb = DDR_B (ddr);

      if (DDR_ARE_DEPENDENT (ddr) != chrec_known
          && ((DR_IS_READ (dra) && DR_IS_WRITE (drb)
               && gimple_uid (DR_STMT (dra)) > gimple_uid (DR_STMT (drb)))
              || (DR_IS_READ (drb) && DR_IS_WRITE (dra)
                  && gimple_uid (DR_STMT (drb)) > gimple_uid (DR_STMT (dra)))
              || (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))))
        {
          free_dependence_relations (then_ddrs);
          free_dependence_relations (else_ddrs);
          free_data_refs (then_datarefs);
          free_data_refs (else_datarefs);
          return false;
        }
    }

  /* Check that there are no read-after-write or write-after-write
     dependencies in ELSE_BB.  */
  FOR_EACH_VEC_ELT (else_ddrs, i, ddr)
    {
      struct data_reference *dra = DDR_A (ddr);
      struct data_reference *drb = DDR_B (ddr);

      if (DDR_ARE_DEPENDENT (ddr) != chrec_known
          && ((DR_IS_READ (dra) && DR_IS_WRITE (drb)
               && gimple_uid (DR_STMT (dra)) > gimple_uid (DR_STMT (drb)))
              || (DR_IS_READ (drb) && DR_IS_WRITE (dra)
                  && gimple_uid (DR_STMT (drb)) > gimple_uid (DR_STMT (dra)))
              || (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))))
        {
          free_dependence_relations (then_ddrs);
          free_dependence_relations (else_ddrs);
          free_data_refs (then_datarefs);
          free_data_refs (else_datarefs);
          return false;
        }
    }

  /* Sink stores with same LHS.  */
  FOR_EACH_VEC_ELT (then_stores, i, then_store)
    {
      else_store = else_stores[i];
      res = cond_if_else_store_replacement_1 (then_bb, else_bb, join_bb,
                                              then_store, else_store);
      ok = ok || res;
    }

  free_dependence_relations (then_ddrs);
  free_dependence_relations (else_ddrs);
  free_data_refs (then_datarefs);
  free_data_refs (else_datarefs);

  return ok;
}

/* Return TRUE if STMT has a VUSE whose corresponding VDEF is in BB.  */

static bool
local_mem_dependence (gimple stmt, basic_block bb)
{
  tree vuse = gimple_vuse (stmt);
  gimple def;

  if (!vuse)
    return false;

  def = SSA_NAME_DEF_STMT (vuse);
  return (def && gimple_bb (def) == bb);
}

/* Given a "diamond" control-flow pattern where BB0 tests a condition,
   BB1 and BB2 are "then" and "else" blocks dependent on this test,
   and BB3 rejoins control flow following BB1 and BB2, look for
   opportunities to hoist loads as follows.  If BB3 contains a PHI of
   two loads, one each occurring in BB1 and BB2, and the loads are
   provably of adjacent fields in the same structure, then move both
   loads into BB0.  Of course this can only be done if there are no
   dependencies preventing such motion.

   One of the hoisted loads will always be speculative, so the
   transformation is currently conservative:

    - The fields must be strictly adjacent.
    - The two fields must occupy a single memory block that is
      guaranteed to not cross a page boundary.

   The last is difficult to prove, as such memory blocks should be
   aligned on the minimum of the stack alignment boundary and the
   alignment guaranteed by heap allocation interfaces.  Thus we rely
   on a parameter for the alignment value.

   Provided a good value is used for the last case, the first
   restriction could possibly be relaxed.
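
   For example (a source-level sketch, assuming both fields fit in one
   suitably aligned block):

     struct node { struct node *left, *right; };
     x = cond ? n->left : n->right;

   Both loads are moved into BB0, leaving a PHI in BB3 that can be
   turned into a conditional move.  */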

static void
hoist_adjacent_loads (basic_block bb0, basic_block bb1,
                      basic_block bb2, basic_block bb3)
{
  int param_align = PARAM_VALUE (PARAM_L1_CACHE_LINE_SIZE);
  unsigned param_align_bits = (unsigned) (param_align * BITS_PER_UNIT);
  gimple_stmt_iterator gsi;

  /* Walk the phis in bb3 looking for an opportunity.  We are looking
     for phis of two SSA names, one each of which is defined in bb1 and
     bb2.  */
  for (gsi = gsi_start_phis (bb3); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi_stmt = gsi_stmt (gsi);
      gimple def1, def2, defswap;
      tree arg1, arg2, ref1, ref2, field1, field2, fieldswap;
      tree tree_offset1, tree_offset2, tree_size2, next;
      int offset1, offset2, size2;
      unsigned align1;
      gimple_stmt_iterator gsi2;
      basic_block bb_for_def1, bb_for_def2;

      if (gimple_phi_num_args (phi_stmt) != 2
          || virtual_operand_p (gimple_phi_result (phi_stmt)))
        continue;

      arg1 = gimple_phi_arg_def (phi_stmt, 0);
      arg2 = gimple_phi_arg_def (phi_stmt, 1);

      if (TREE_CODE (arg1) != SSA_NAME
          || TREE_CODE (arg2) != SSA_NAME
          || SSA_NAME_IS_DEFAULT_DEF (arg1)
          || SSA_NAME_IS_DEFAULT_DEF (arg2))
        continue;

      def1 = SSA_NAME_DEF_STMT (arg1);
      def2 = SSA_NAME_DEF_STMT (arg2);

      if ((gimple_bb (def1) != bb1 || gimple_bb (def2) != bb2)
          && (gimple_bb (def2) != bb1 || gimple_bb (def1) != bb2))
        continue;

      /* Check the mode of the arguments to be sure a conditional move
         can be generated for it.  */
      if (optab_handler (movcc_optab, TYPE_MODE (TREE_TYPE (arg1)))
          == CODE_FOR_nothing)
        continue;

      /* Both statements must be assignments whose RHS is a COMPONENT_REF.  */
      if (!gimple_assign_single_p (def1)
          || !gimple_assign_single_p (def2)
          || gimple_has_volatile_ops (def1)
          || gimple_has_volatile_ops (def2))
        continue;

      ref1 = gimple_assign_rhs1 (def1);
      ref2 = gimple_assign_rhs1 (def2);

      if (TREE_CODE (ref1) != COMPONENT_REF
          || TREE_CODE (ref2) != COMPONENT_REF)
        continue;

      /* The zeroth operand of the two component references must be
         identical.  It is not sufficient to compare get_base_address of
         the two references, because this could allow for different
         elements of the same array in the two trees.  It is not safe to
         assume that the existence of one array element implies the
         existence of a different one.  */
      if (!operand_equal_p (TREE_OPERAND (ref1, 0), TREE_OPERAND (ref2, 0), 0))
        continue;

      field1 = TREE_OPERAND (ref1, 1);
      field2 = TREE_OPERAND (ref2, 1);

      /* Check for field adjacency, and ensure field1 comes first.  */
      for (next = DECL_CHAIN (field1);
           next && TREE_CODE (next) != FIELD_DECL;
           next = DECL_CHAIN (next))
        ;

      if (next != field2)
        {
          for (next = DECL_CHAIN (field2);
               next && TREE_CODE (next) != FIELD_DECL;
               next = DECL_CHAIN (next))
            ;

          if (next != field1)
            continue;

          fieldswap = field1;
          field1 = field2;
          field2 = fieldswap;
          defswap = def1;
          def1 = def2;
          def2 = defswap;
        }

      bb_for_def1 = gimple_bb (def1);
      bb_for_def2 = gimple_bb (def2);

      /* Check for proper alignment of the first field.  */
      tree_offset1 = bit_position (field1);
      tree_offset2 = bit_position (field2);
      tree_size2 = DECL_SIZE (field2);

      if (!host_integerp (tree_offset1, 1)
          || !host_integerp (tree_offset2, 1)
          || !host_integerp (tree_size2, 1))
        continue;

      offset1 = TREE_INT_CST_LOW (tree_offset1);
      offset2 = TREE_INT_CST_LOW (tree_offset2);
      size2 = TREE_INT_CST_LOW (tree_size2);
      align1 = DECL_ALIGN (field1) % param_align_bits;

      if (offset1 % BITS_PER_UNIT != 0)
        continue;

      /* For profitability, the two field references should fit within
         a single cache line.  */
      if (align1 + offset2 - offset1 + size2 > param_align_bits)
        continue;

      /* The two expressions cannot be dependent upon vdefs defined
         in bb1/bb2.  */
      if (local_mem_dependence (def1, bb_for_def1)
          || local_mem_dependence (def2, bb_for_def2))
        continue;

      /* The conditions are satisfied; hoist the loads from bb1 and bb2 into
         bb0.  We hoist the first one first so that a cache miss is handled
         efficiently regardless of hardware cache-fill policy.  */
      gsi2 = gsi_for_stmt (def1);
      gsi_move_to_bb_end (&gsi2, bb0);
      gsi2 = gsi_for_stmt (def2);
      gsi_move_to_bb_end (&gsi2, bb0);

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file,
                   "\nHoisting adjacent loads from %d and %d into %d: \n",
                   bb_for_def1->index, bb_for_def2->index, bb0->index);
          print_gimple_stmt (dump_file, def1, 0, TDF_VOPS|TDF_MEMSYMS);
          print_gimple_stmt (dump_file, def2, 0, TDF_VOPS|TDF_MEMSYMS);
        }
    }
}

/* Determine whether we should attempt to hoist adjacent loads out of
   diamond patterns in pass_phiopt.  Always hoist loads if
   -fhoist-adjacent-loads is specified and the target machine has
   both a conditional move instruction and a defined cache line size.  */

static bool
gate_hoist_loads (void)
{
  return (flag_hoist_adjacent_loads == 1
          && PARAM_VALUE (PARAM_L1_CACHE_LINE_SIZE)
          && HAVE_conditional_move);
}

/* Always do these optimizations if we have SSA
   trees to work on.  */
static bool
gate_phiopt (void)
{
  return 1;
}

namespace {

const pass_data pass_data_phiopt =
{
  GIMPLE_PASS, /* type */
  "phiopt", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  true, /* has_gate */
  true, /* has_execute */
  TV_TREE_PHIOPT, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_verify_ssa | TODO_verify_flow
    | TODO_verify_stmts ), /* todo_flags_finish */
};

class pass_phiopt : public gimple_opt_pass
{
public:
  pass_phiopt (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_phiopt, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_phiopt (m_ctxt); }
  bool gate () { return gate_phiopt (); }
  unsigned int execute () { return tree_ssa_phiopt (); }

}; // class pass_phiopt

} // anon namespace

gimple_opt_pass *
make_pass_phiopt (gcc::context *ctxt)
{
  return new pass_phiopt (ctxt);
}

static bool
gate_cselim (void)
{
  return flag_tree_cselim;
}

namespace {

const pass_data pass_data_cselim =
{
  GIMPLE_PASS, /* type */
  "cselim", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  true, /* has_gate */
  true, /* has_execute */
  TV_TREE_PHIOPT, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_verify_ssa | TODO_verify_flow
    | TODO_verify_stmts ), /* todo_flags_finish */
};

class pass_cselim : public gimple_opt_pass
{
public:
  pass_cselim (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_cselim, ctxt)
  {}

  /* opt_pass methods: */
  bool gate () { return gate_cselim (); }
  unsigned int execute () { return tree_ssa_cs_elim (); }

}; // class pass_cselim

} // anon namespace

gimple_opt_pass *
make_pass_cselim (gcc::context *ctxt)
{
  return new pass_cselim (ctxt);
}