gcc/tree-ssa-phiopt.c
1 /* Optimization of PHI nodes by converting them into straightline code.
2 Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
3 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by the
9 Free Software Foundation; either version 3, or (at your option) any
10 later version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "ggc.h"
26 #include "tree.h"
27 #include "flags.h"
28 #include "tm_p.h"
29 #include "basic-block.h"
30 #include "tree-flow.h"
31 #include "tree-pass.h"
32 #include "langhooks.h"
33 #include "pointer-set.h"
34 #include "domwalk.h"
35 #include "cfgloop.h"
36 #include "tree-data-ref.h"
37 #include "gimple-pretty-print.h"
38 #include "insn-config.h"
39 #include "expr.h"
40 #include "optabs.h"
42 #ifndef HAVE_conditional_move
43 #define HAVE_conditional_move (0)
44 #endif
46 static unsigned int tree_ssa_phiopt (void);
47 static unsigned int tree_ssa_phiopt_worker (bool, bool);
48 static bool conditional_replacement (basic_block, basic_block,
49 edge, edge, gimple, tree, tree);
50 static int value_replacement (basic_block, basic_block,
51 edge, edge, gimple, tree, tree);
52 static bool minmax_replacement (basic_block, basic_block,
53 edge, edge, gimple, tree, tree);
54 static bool abs_replacement (basic_block, basic_block,
55 edge, edge, gimple, tree, tree);
56 static bool cond_store_replacement (basic_block, basic_block, edge, edge,
57 struct pointer_set_t *);
58 static bool cond_if_else_store_replacement (basic_block, basic_block, basic_block);
59 static struct pointer_set_t * get_non_trapping (void);
60 static void replace_phi_edge_with_variable (basic_block, edge, gimple, tree);
61 static void hoist_adjacent_loads (basic_block, basic_block,
62 basic_block, basic_block);
63 static bool gate_hoist_loads (void);
65 /* This pass tries to replace an if-then-else block with an
66 assignment. We have four kinds of transformations. Some of these
67 transformations are also performed by the ifcvt RTL optimizer.
69 Conditional Replacement
70 -----------------------
72 This transformation, implemented in conditional_replacement,
73 replaces
75 bb0:
76 if (cond) goto bb2; else goto bb1;
77 bb1:
78 bb2:
79 x = PHI <0 (bb1), 1 (bb0), ...>;
81 with
83 bb0:
84 x' = cond;
85 goto bb2;
86 bb2:
87 x = PHI <x' (bb0), ...>;
89 We remove bb1 as it becomes unreachable. This occurs often due to
90 gimplification of conditionals.
92 Value Replacement
93 -----------------
95 This transformation, implemented in value_replacement, replaces
97 bb0:
98 if (a != b) goto bb2; else goto bb1;
99 bb1:
100 bb2:
101 x = PHI <a (bb1), b (bb0), ...>;
103 with
105 bb0:
106 bb2:
107 x = PHI <b (bb0), ...>;
109 This opportunity can sometimes occur as a result of other
110 optimizations.
112 ABS Replacement
113 ---------------
115 This transformation, implemented in abs_replacement, replaces
117 bb0:
118 if (a >= 0) goto bb2; else goto bb1;
119 bb1:
120 x = -a;
121 bb2:
122 x = PHI <x (bb1), a (bb0), ...>;
124 with
126 bb0:
127 x' = ABS_EXPR< a >;
128 bb2:
129 x = PHI <x' (bb0), ...>;
131 MIN/MAX Replacement
132 -------------------
134 This transformation, implemented in minmax_replacement, replaces
136 bb0:
137 if (a <= b) goto bb2; else goto bb1;
138 bb1:
139 bb2:
140 x = PHI <b (bb1), a (bb0), ...>;
142 with
144 bb0:
145 x' = MIN_EXPR (a, b)
146 bb2:
147 x = PHI <x' (bb0), ...>;
149 A similar transformation is done for MAX_EXPR.
152 This pass also performs a fifth transformation of a slightly different
153 flavor.
155 Adjacent Load Hoisting
156 ----------------------
158 This transformation replaces
160 bb0:
161 if (...) goto bb2; else goto bb1;
162 bb1:
163 x1 = (<expr>).field1;
164 goto bb3;
165 bb2:
166 x2 = (<expr>).field2;
167 bb3:
168 # x = PHI <x1, x2>;
170 with
172 bb0:
173 x1 = (<expr>).field1;
174 x2 = (<expr>).field2;
175 if (...) goto bb2; else goto bb1;
176 bb1:
177 goto bb3;
178 bb2:
179 bb3:
180 # x = PHI <x1, x2>;
182 The purpose of this transformation is to enable generation of conditional
183 move instructions such as Intel CMOVE or PowerPC ISEL. Because one of
184 the loads is speculative, the transformation is restricted to very
185 specific cases to avoid introducing a page fault. We are looking for
186 the common idiom:
188 if (...)
189 x = y->left;
190 else
191 x = y->right;
193 where left and right are typically adjacent pointers in a tree structure. */
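/* For illustration only, C source along these general lines tends to
   gimplify into the patterns handled above (variable names are just
   examples):

     x = cond ? 1 : 0;         becomes   x = cond;             (conditional)
     x = (a != b) ? b : a;     becomes   x = b;                (value)
     x = (a >= 0) ? a : -a;    becomes   x = ABS_EXPR <a>;     (abs)
     x = (a <= b) ? a : b;     becomes   x = MIN_EXPR <a, b>;  (min/max)

   In each case the if-then-else diamond and its PHI collapse into a
   single straightline assignment.  */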
195 static unsigned int
196 tree_ssa_phiopt (void)
198 return tree_ssa_phiopt_worker (false, gate_hoist_loads ());
201 /* This pass tries to transform conditional stores into unconditional
202 ones, enabling further simplifications with the simpler then and else
203 blocks. In particular it replaces this:
205 bb0:
206 if (cond) goto bb2; else goto bb1;
207 bb1:
208 *p = RHS;
209 bb2:
211 with
213 bb0:
214 if (cond) goto bb1; else goto bb2;
215 bb1:
216 condtmp' = *p;
217 bb2:
218 condtmp = PHI <RHS, condtmp'>
219 *p = condtmp;
221 This transformation can only be done under several constraints,
222 documented below. It also replaces:
224 bb0:
225 if (cond) goto bb2; else goto bb1;
226 bb1:
227 *p = RHS1;
228 goto bb3;
229 bb2:
230 *p = RHS2;
231 bb3:
233 with
235 bb0:
236 if (cond) goto bb3; else goto bb1;
237 bb1:
238 bb3:
239 condtmp = PHI <RHS1, RHS2>
240 *p = condtmp; */
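/* At the source level the two shapes above roughly correspond to
   (names are illustrative only):

     if (cond)             and      if (cond)
       *p = v;                        *p = v1;
                                    else
                                      *p = v2;

   In the first case the store may only be made unconditional when a
   dominating access proves *p cannot trap; the value stored is a PHI
   between v and the reloaded old contents of *p.  In the second case
   the two stores are sunk to the join block and the value stored is a
   PHI of v1 and v2.  */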
242 static unsigned int
243 tree_ssa_cs_elim (void)
245 return tree_ssa_phiopt_worker (true, false);
248 /* Return the single PHI in SEQ whose arguments for edges E0 and E1 differ; if SEQ contains only one PHI, return it unconditionally. Return NULL otherwise. */
250 static gimple
251 single_non_singleton_phi_for_edges (gimple_seq seq, edge e0, edge e1)
253 gimple_stmt_iterator i;
254 gimple phi = NULL;
255 if (gimple_seq_singleton_p (seq))
256 return gsi_stmt (gsi_start (seq));
257 for (i = gsi_start (seq); !gsi_end_p (i); gsi_next (&i))
259 gimple p = gsi_stmt (i);
260 /* If the PHI arguments are equal then we can skip this PHI. */
261 if (operand_equal_for_phi_arg_p (gimple_phi_arg_def (p, e0->dest_idx),
262 gimple_phi_arg_def (p, e1->dest_idx)))
263 continue;
265 /* If we have already found a PHI whose arguments for these two
266 edges differ, then there is no single such PHI; return NULL. */
267 if (phi)
268 return NULL;
270 phi = p;
272 return phi;
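/* For example, with two PHIs in the join block (purely illustrative)

     x_1 = PHI <a_2 (E0), a_2 (E1)>
     y_3 = PHI <b_4 (E0), c_5 (E1)>

   the first PHI has equal arguments on E0 and E1 and is skipped, so
   y_3's PHI is returned; if both PHIs had differing arguments, NULL
   would be returned instead.  */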
275 /* The core routine of conditional store replacement and normal
276 phi optimizations. Both share much of the infrastructure in how
277 to match applicable basic block patterns. DO_STORE_ELIM is true
278 when we want to do conditional store replacement, false otherwise.
279 DO_HOIST_LOADS is true when we want to hoist adjacent loads out
280 of diamond control flow patterns, false otherwise. */
281 static unsigned int
282 tree_ssa_phiopt_worker (bool do_store_elim, bool do_hoist_loads)
284 basic_block bb;
285 basic_block *bb_order;
286 unsigned n, i;
287 bool cfgchanged = false;
288 struct pointer_set_t *nontrap = 0;
290 if (do_store_elim)
291 /* Calculate the set of non-trapping memory accesses. */
292 nontrap = get_non_trapping ();
294 /* Search every basic block for COND_EXPRs we may be able to optimize.
296 We walk the blocks in an order that guarantees that a block with
297 a single predecessor is processed before the predecessor.
298 This ensures that we collapse inner ifs before visiting the
299 outer ones, and also that we do not try to visit a removed
300 block. */
301 bb_order = blocks_in_phiopt_order ();
302 n = n_basic_blocks - NUM_FIXED_BLOCKS;
304 for (i = 0; i < n; i++)
306 gimple cond_stmt, phi;
307 basic_block bb1, bb2;
308 edge e1, e2;
309 tree arg0, arg1;
311 bb = bb_order[i];
313 cond_stmt = last_stmt (bb);
314 /* Check to see if the last statement is a GIMPLE_COND. */
315 if (!cond_stmt
316 || gimple_code (cond_stmt) != GIMPLE_COND)
317 continue;
319 e1 = EDGE_SUCC (bb, 0);
320 bb1 = e1->dest;
321 e2 = EDGE_SUCC (bb, 1);
322 bb2 = e2->dest;
324 /* We cannot do the optimization on abnormal edges. */
325 if ((e1->flags & EDGE_ABNORMAL) != 0
326 || (e2->flags & EDGE_ABNORMAL) != 0)
327 continue;
329 /* If bb1 has no successors, bb2 is missing, or bb2 has no successors, there is nothing to do. */
330 if (EDGE_COUNT (bb1->succs) == 0
331 || bb2 == NULL
332 || EDGE_COUNT (bb2->succs) == 0)
333 continue;
335 /* Find the bb which is the fall through to the other. */
336 if (EDGE_SUCC (bb1, 0)->dest == bb2)
338 else if (EDGE_SUCC (bb2, 0)->dest == bb1)
340 basic_block bb_tmp = bb1;
341 edge e_tmp = e1;
342 bb1 = bb2;
343 bb2 = bb_tmp;
344 e1 = e2;
345 e2 = e_tmp;
347 else if (do_store_elim
348 && EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest)
350 basic_block bb3 = EDGE_SUCC (bb1, 0)->dest;
352 if (!single_succ_p (bb1)
353 || (EDGE_SUCC (bb1, 0)->flags & EDGE_FALLTHRU) == 0
354 || !single_succ_p (bb2)
355 || (EDGE_SUCC (bb2, 0)->flags & EDGE_FALLTHRU) == 0
356 || EDGE_COUNT (bb3->preds) != 2)
357 continue;
358 if (cond_if_else_store_replacement (bb1, bb2, bb3))
359 cfgchanged = true;
360 continue;
362 else if (do_hoist_loads
363 && EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest)
365 basic_block bb3 = EDGE_SUCC (bb1, 0)->dest;
367 if (!FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (cond_stmt)))
368 && single_succ_p (bb1)
369 && single_succ_p (bb2)
370 && single_pred_p (bb1)
371 && single_pred_p (bb2)
372 && EDGE_COUNT (bb->succs) == 2
373 && EDGE_COUNT (bb3->preds) == 2
374 /* If one edge or the other is dominant, a conditional move
375 is likely to perform worse than the well-predicted branch. */
376 && !predictable_edge_p (EDGE_SUCC (bb, 0))
377 && !predictable_edge_p (EDGE_SUCC (bb, 1)))
378 hoist_adjacent_loads (bb, bb1, bb2, bb3);
379 continue;
381 else
382 continue;
384 e1 = EDGE_SUCC (bb1, 0);
386 /* Make sure that bb1 is just a fall through. */
387 if (!single_succ_p (bb1)
388 || (e1->flags & EDGE_FALLTHRU) == 0)
389 continue;
391 /* Also make sure that bb1 has only one predecessor and that it
392 is bb. */
393 if (!single_pred_p (bb1)
394 || single_pred (bb1) != bb)
395 continue;
397 if (do_store_elim)
399 /* bb1 is the middle block, bb2 the join block, bb the split block,
400 e1 the fallthrough edge from bb1 to bb2. We can't do the
401 optimization if the join block has more than two predecessors. */
402 if (EDGE_COUNT (bb2->preds) > 2)
403 continue;
404 if (cond_store_replacement (bb1, bb2, e1, e2, nontrap))
405 cfgchanged = true;
407 else
409 gimple_seq phis = phi_nodes (bb2);
410 gimple_stmt_iterator gsi;
411 bool candorest = true;
413 /* Value replacement can work with more than one PHI
414 so try that first. */
415 for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
417 phi = gsi_stmt (gsi);
418 arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
419 arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
420 if (value_replacement (bb, bb1, e1, e2, phi, arg0, arg1) == 2)
422 candorest = false;
423 cfgchanged = true;
424 break;
428 if (!candorest)
429 continue;
431 phi = single_non_singleton_phi_for_edges (phis, e1, e2);
432 if (!phi)
433 continue;
435 arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
436 arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
438 /* Something is wrong if we cannot find the arguments in the PHI
439 node. */
440 gcc_assert (arg0 != NULL && arg1 != NULL);
442 /* Do one of the remaining replacements if it can be done. */
443 if (conditional_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
444 cfgchanged = true;
445 else if (abs_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
446 cfgchanged = true;
447 else if (minmax_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
448 cfgchanged = true;
452 free (bb_order);
454 if (do_store_elim)
455 pointer_set_destroy (nontrap);
456 /* If the CFG has changed, we should cleanup the CFG. */
457 if (cfgchanged && do_store_elim)
459 /* In cond-store replacement we have added some loads on edges
460 and new VOPS (as we moved the store, and created a load). */
461 gsi_commit_edge_inserts ();
462 return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
464 else if (cfgchanged)
465 return TODO_cleanup_cfg;
466 return 0;
469 /* Returns the list of basic blocks in the function in an order that guarantees
470 that if a block X has just a single predecessor Y, then Y is after X in the
471 ordering. */
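/* For instance (block numbers are illustrative), given nested
   conditions

     bb2:  if (a) ...        outer condition
     bb3:  if (b) ...        inner condition, single predecessor bb2

   bb3 appears before bb2 in the returned order, so the worker above
   collapses the inner if before it visits the outer one and never
   visits a block removed by that collapse.  */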
473 basic_block *
474 blocks_in_phiopt_order (void)
476 basic_block x, y;
477 basic_block *order = XNEWVEC (basic_block, n_basic_blocks);
478 unsigned n = n_basic_blocks - NUM_FIXED_BLOCKS;
479 unsigned np, i;
480 sbitmap visited = sbitmap_alloc (last_basic_block);
482 #define MARK_VISITED(BB) (SET_BIT (visited, (BB)->index))
483 #define VISITED_P(BB) (TEST_BIT (visited, (BB)->index))
485 sbitmap_zero (visited);
487 MARK_VISITED (ENTRY_BLOCK_PTR);
488 FOR_EACH_BB (x)
490 if (VISITED_P (x))
491 continue;
493 /* Walk the predecessors of x as long as they have precisely one
494 predecessor and add them to the list, so that they get stored
495 after x. */
496 for (y = x, np = 1;
497 single_pred_p (y) && !VISITED_P (single_pred (y));
498 y = single_pred (y))
499 np++;
500 for (y = x, i = n - np;
501 single_pred_p (y) && !VISITED_P (single_pred (y));
502 y = single_pred (y), i++)
504 order[i] = y;
505 MARK_VISITED (y);
507 order[i] = y;
508 MARK_VISITED (y);
510 gcc_assert (i == n - 1);
511 n -= np;
514 sbitmap_free (visited);
515 gcc_assert (n == 0);
516 return order;
518 #undef MARK_VISITED
519 #undef VISITED_P
523 /* Return TRUE if block BB has no executable statements, otherwise return
524 FALSE. */
526 bool
527 empty_block_p (basic_block bb)
529 /* BB must have no executable statements. */
530 gimple_stmt_iterator gsi = gsi_after_labels (bb);
531 if (phi_nodes (bb))
532 return false;
533 if (gsi_end_p (gsi))
534 return true;
535 if (is_gimple_debug (gsi_stmt (gsi)))
536 gsi_next_nondebug (&gsi);
537 return gsi_end_p (gsi);
540 /* Replace PHI node element whose edge is E in block BB with variable NEW.
541 Remove the edge from COND_BLOCK which does not lead to BB (COND_BLOCK
542 is known to have two edges, one of which must reach BB). */
544 static void
545 replace_phi_edge_with_variable (basic_block cond_block,
546 edge e, gimple phi, tree new_tree)
548 basic_block bb = gimple_bb (phi);
549 basic_block block_to_remove;
550 gimple_stmt_iterator gsi;
552 /* Change the PHI argument to new. */
553 SET_USE (PHI_ARG_DEF_PTR (phi, e->dest_idx), new_tree);
555 /* Remove the empty basic block. */
556 if (EDGE_SUCC (cond_block, 0)->dest == bb)
558 EDGE_SUCC (cond_block, 0)->flags |= EDGE_FALLTHRU;
559 EDGE_SUCC (cond_block, 0)->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
560 EDGE_SUCC (cond_block, 0)->probability = REG_BR_PROB_BASE;
561 EDGE_SUCC (cond_block, 0)->count += EDGE_SUCC (cond_block, 1)->count;
563 block_to_remove = EDGE_SUCC (cond_block, 1)->dest;
565 else
567 EDGE_SUCC (cond_block, 1)->flags |= EDGE_FALLTHRU;
568 EDGE_SUCC (cond_block, 1)->flags
569 &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
570 EDGE_SUCC (cond_block, 1)->probability = REG_BR_PROB_BASE;
571 EDGE_SUCC (cond_block, 1)->count += EDGE_SUCC (cond_block, 0)->count;
573 block_to_remove = EDGE_SUCC (cond_block, 0)->dest;
575 delete_basic_block (block_to_remove);
577 /* Eliminate the COND_EXPR at the end of COND_BLOCK. */
578 gsi = gsi_last_bb (cond_block);
579 gsi_remove (&gsi, true);
581 if (dump_file && (dump_flags & TDF_DETAILS))
582 fprintf (dump_file,
583 "COND_EXPR in block %d and PHI in block %d converted to straightline code.\n",
584 cond_block->index,
585 bb->index);
588 /* The function conditional_replacement does the main work of doing the
589 conditional replacement. Return true if the replacement is done.
590 Otherwise return false.
591 BB is the basic block where the replacement is going to be done. ARG0
592 is argument 0 from PHI. Likewise for ARG1. */
594 static bool
595 conditional_replacement (basic_block cond_bb, basic_block middle_bb,
596 edge e0, edge e1, gimple phi,
597 tree arg0, tree arg1)
599 tree result;
600 gimple stmt, new_stmt;
601 tree cond;
602 gimple_stmt_iterator gsi;
603 edge true_edge, false_edge;
604 tree new_var, new_var2;
605 bool neg;
607 /* FIXME: Gimplification of complex type is too hard for now. */
608 /* We aren't prepared to handle vectors either (and it is a question
609 if it would be worthwhile anyway). */
610 if (!(INTEGRAL_TYPE_P (TREE_TYPE (arg0))
611 || POINTER_TYPE_P (TREE_TYPE (arg0)))
612 || !(INTEGRAL_TYPE_P (TREE_TYPE (arg1))
613 || POINTER_TYPE_P (TREE_TYPE (arg1))))
614 return false;
616 /* If the PHI arguments are the constants 0 and 1, or 0 and -1, then
617 we can convert the PHI into a use of the condition. */
618 if ((integer_zerop (arg0) && integer_onep (arg1))
619 || (integer_zerop (arg1) && integer_onep (arg0)))
620 neg = false;
621 else if ((integer_zerop (arg0) && integer_all_onesp (arg1))
622 || (integer_zerop (arg1) && integer_all_onesp (arg0)))
623 neg = true;
624 else
625 return false;
627 if (!empty_block_p (middle_bb))
628 return false;
630 /* At this point we know we have a GIMPLE_COND with two successors.
631 One successor is BB, the other successor is an empty block which
632 falls through into BB.
634 There is a single PHI node at the join point (BB) and its arguments
635 are constants (0, 1) or (0, -1).
637 So, given the condition COND, and the two PHI arguments, we can
638 rewrite this PHI into non-branching code:
640 dest = (COND) or dest = COND'
642 We use the condition as-is if the argument associated with the
643 true edge has the value one or the argument associated with the
644 false edge has the value zero. Note that those conditions are not
645 the same since only one of the outgoing edges from the GIMPLE_COND
646 will directly reach BB and thus be associated with an argument. */
648 stmt = last_stmt (cond_bb);
649 result = PHI_RESULT (phi);
651 /* To handle special cases like floating point comparison, it is easier and
652 less error-prone to build a tree and gimplify it on the fly though it is
653 less efficient. */
654 cond = fold_build2_loc (gimple_location (stmt),
655 gimple_cond_code (stmt), boolean_type_node,
656 gimple_cond_lhs (stmt), gimple_cond_rhs (stmt));
658 /* We need to know which is the true edge and which is the false
659 edge so that we know when to invert the condition below. */
660 extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
661 if ((e0 == true_edge && integer_zerop (arg0))
662 || (e0 == false_edge && !integer_zerop (arg0))
663 || (e1 == true_edge && integer_zerop (arg1))
664 || (e1 == false_edge && !integer_zerop (arg1)))
665 cond = fold_build1_loc (gimple_location (stmt),
666 TRUTH_NOT_EXPR, TREE_TYPE (cond), cond);
668 if (neg)
670 cond = fold_convert_loc (gimple_location (stmt),
671 TREE_TYPE (result), cond);
672 cond = fold_build1_loc (gimple_location (stmt),
673 NEGATE_EXPR, TREE_TYPE (cond), cond);
676 /* Insert our new statements at the end of conditional block before the
677 COND_STMT. */
678 gsi = gsi_for_stmt (stmt);
679 new_var = force_gimple_operand_gsi (&gsi, cond, true, NULL, true,
680 GSI_SAME_STMT);
682 if (!useless_type_conversion_p (TREE_TYPE (result), TREE_TYPE (new_var)))
684 source_location locus_0, locus_1;
686 new_var2 = make_ssa_name (TREE_TYPE (result), NULL);
687 new_stmt = gimple_build_assign_with_ops (CONVERT_EXPR, new_var2,
688 new_var, NULL);
689 gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
690 new_var = new_var2;
692 /* Set the locus to the first argument, unless it doesn't have one. */
693 locus_0 = gimple_phi_arg_location (phi, 0);
694 locus_1 = gimple_phi_arg_location (phi, 1);
695 if (locus_0 == UNKNOWN_LOCATION)
696 locus_0 = locus_1;
697 gimple_set_location (new_stmt, locus_0);
700 replace_phi_edge_with_variable (cond_bb, e1, phi, new_var);
702 /* Note that we optimized this PHI. */
703 return true;
706 /* Update *ARG which is defined in STMT so that it contains the
707 computed value if that seems profitable. Return true if the
708 statement is made dead by that rewriting. */
710 static bool
711 jump_function_from_stmt (tree *arg, gimple stmt)
713 enum tree_code code = gimple_assign_rhs_code (stmt);
714 if (code == ADDR_EXPR)
716 /* For arg = &p->i transform it to p, if possible. */
717 tree rhs1 = gimple_assign_rhs1 (stmt);
718 HOST_WIDE_INT offset;
719 tree tem = get_addr_base_and_unit_offset (TREE_OPERAND (rhs1, 0),
720 &offset);
721 if (tem
722 && TREE_CODE (tem) == MEM_REF
723 && double_int_zero_p
724 (double_int_add (mem_ref_offset (tem),
725 shwi_to_double_int (offset))))
727 *arg = TREE_OPERAND (tem, 0);
728 return true;
731 /* TODO: Much like IPA-CP jump-functions we want to handle constant
732 additions symbolically here, and we'd need to update the comparison
733 code that compares the arg + cst tuples in our caller. For now the
734 code above exactly handles the VEC_BASE pattern from vec.h. */
735 return false;
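/* As a sketch of the ADDR_EXPR case above (names are illustrative):
   for a middle-block statement

     arg_1 = &p_2->first;

   where FIRST lives at offset zero, the base is a MEM_REF of p_2 with
   a zero offset, so *ARG is rewritten to p_2 and the caller can
   compare it directly against the operands of the condition.  */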
738 /* The function value_replacement does the main work of doing the value
739 replacement. Return non-zero if the replacement is done. Otherwise return
740 0. If we remove the middle basic block, return 2.
741 BB is the basic block where the replacement is going to be done. ARG0
742 is argument 0 from the PHI. Likewise for ARG1. */
744 static int
745 value_replacement (basic_block cond_bb, basic_block middle_bb,
746 edge e0, edge e1, gimple phi,
747 tree arg0, tree arg1)
749 gimple_stmt_iterator gsi;
750 gimple cond;
751 edge true_edge, false_edge;
752 enum tree_code code;
753 bool empty_or_with_defined_p = true;
755 /* If the type says honor signed zeros we cannot do this
756 optimization. */
757 if (HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (arg1))))
758 return 0;
760 /* If there is a statement in MIDDLE_BB that defines one of the PHI
761 arguments, then adjust arg0 or arg1. */
762 gsi = gsi_after_labels (middle_bb);
763 if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
764 gsi_next_nondebug (&gsi);
765 while (!gsi_end_p (gsi))
767 gimple stmt = gsi_stmt (gsi);
768 tree lhs;
769 gsi_next_nondebug (&gsi);
770 if (!is_gimple_assign (stmt))
772 empty_or_with_defined_p = false;
773 continue;
775 /* Now try to adjust arg0 or arg1 according to the computation
776 in the statement. */
777 lhs = gimple_assign_lhs (stmt);
778 if (!(lhs == arg0
779 && jump_function_from_stmt (&arg0, stmt))
780 || (lhs == arg1
781 && jump_function_from_stmt (&arg1, stmt)))
782 empty_or_with_defined_p = false;
785 cond = last_stmt (cond_bb);
786 code = gimple_cond_code (cond);
788 /* This transformation is only valid for equality comparisons. */
789 if (code != NE_EXPR && code != EQ_EXPR)
790 return 0;
792 /* We need to know which is the true edge and which is the false
793 edge so that we know which PHI argument is associated with which edge. */
794 extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
796 /* At this point we know we have a COND_EXPR with two successors.
797 One successor is BB, the other successor is an empty block which
798 falls through into BB.
800 The condition for the COND_EXPR is known to be NE_EXPR or EQ_EXPR.
802 There is a single PHI node at the join point (BB) with two arguments.
804 We now need to verify that the two arguments in the PHI node match
805 the two arguments to the equality comparison. */
807 if ((operand_equal_for_phi_arg_p (arg0, gimple_cond_lhs (cond))
808 && operand_equal_for_phi_arg_p (arg1, gimple_cond_rhs (cond)))
809 || (operand_equal_for_phi_arg_p (arg1, gimple_cond_lhs (cond))
810 && operand_equal_for_phi_arg_p (arg0, gimple_cond_rhs (cond))))
812 edge e;
813 tree arg;
815 /* For NE_EXPR, we want to build an assignment result = arg where
816 arg is the PHI argument associated with the true edge. For
817 EQ_EXPR we want the PHI argument associated with the false edge. */
818 e = (code == NE_EXPR ? true_edge : false_edge);
820 /* Unfortunately, E may not reach BB (it may instead have gone to
821 OTHER_BLOCK). If that is the case, then we want the single outgoing
822 edge from OTHER_BLOCK which reaches BB and represents the desired
823 path from COND_BLOCK. */
824 if (e->dest == middle_bb)
825 e = single_succ_edge (e->dest);
827 /* Now we know the incoming edge to BB that has the argument for the
828 RHS of our new assignment statement. */
829 if (e0 == e)
830 arg = arg0;
831 else
832 arg = arg1;
834 /* If the middle basic block was empty or is defining the
835 PHI arguments and this is a single phi where the args are different
836 for the edges e0 and e1 then we can remove the middle basic block. */
837 if (empty_or_with_defined_p
838 && single_non_singleton_phi_for_edges (phi_nodes (gimple_bb (phi)),
839 e0, e1))
841 replace_phi_edge_with_variable (cond_bb, e1, phi, arg);
842 /* Note that we optimized this PHI. */
843 return 2;
845 else
847 /* Replace the PHI arguments with arg. */
848 SET_PHI_ARG_DEF (phi, e0->dest_idx, arg);
849 SET_PHI_ARG_DEF (phi, e1->dest_idx, arg);
850 if (dump_file && (dump_flags & TDF_DETAILS))
852 fprintf (dump_file, "PHI ");
853 print_generic_expr (dump_file, gimple_phi_result (phi), 0);
854 fprintf (dump_file, " reduced for COND_EXPR in block %d to ",
855 cond_bb->index);
856 print_generic_expr (dump_file, arg, 0);
857 fprintf (dump_file, ".\n");
859 return 1;
863 return 0;
866 /* The function minmax_replacement does the main work of doing the minmax
867 replacement. Return true if the replacement is done. Otherwise return
868 false.
869 BB is the basic block where the replacement is going to be done. ARG0
870 is argument 0 from the PHI. Likewise for ARG1. */
872 static bool
873 minmax_replacement (basic_block cond_bb, basic_block middle_bb,
874 edge e0, edge e1, gimple phi,
875 tree arg0, tree arg1)
877 tree result, type;
878 gimple cond, new_stmt;
879 edge true_edge, false_edge;
880 enum tree_code cmp, minmax, ass_code;
881 tree smaller, larger, arg_true, arg_false;
882 gimple_stmt_iterator gsi, gsi_from;
884 type = TREE_TYPE (PHI_RESULT (phi));
886 /* The optimization may be unsafe due to NaNs. */
887 if (HONOR_NANS (TYPE_MODE (type)))
888 return false;
890 cond = last_stmt (cond_bb);
891 cmp = gimple_cond_code (cond);
893 /* This transformation is only valid for order comparisons. Record which
894 operand is smaller/larger if the result of the comparison is true. */
895 if (cmp == LT_EXPR || cmp == LE_EXPR)
897 smaller = gimple_cond_lhs (cond);
898 larger = gimple_cond_rhs (cond);
900 else if (cmp == GT_EXPR || cmp == GE_EXPR)
902 smaller = gimple_cond_rhs (cond);
903 larger = gimple_cond_lhs (cond);
905 else
906 return false;
908 /* We need to know which is the true edge and which is the false
909 edge so that we know which PHI argument is taken when the comparison is true. */
910 extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
912 /* Forward the edges over the middle basic block. */
913 if (true_edge->dest == middle_bb)
914 true_edge = EDGE_SUCC (true_edge->dest, 0);
915 if (false_edge->dest == middle_bb)
916 false_edge = EDGE_SUCC (false_edge->dest, 0);
918 if (true_edge == e0)
920 gcc_assert (false_edge == e1);
921 arg_true = arg0;
922 arg_false = arg1;
924 else
926 gcc_assert (false_edge == e0);
927 gcc_assert (true_edge == e1);
928 arg_true = arg1;
929 arg_false = arg0;
932 if (empty_block_p (middle_bb))
934 if (operand_equal_for_phi_arg_p (arg_true, smaller)
935 && operand_equal_for_phi_arg_p (arg_false, larger))
937 /* Case
939 if (smaller < larger)
940 rslt = smaller;
941 else
942 rslt = larger; */
943 minmax = MIN_EXPR;
945 else if (operand_equal_for_phi_arg_p (arg_false, smaller)
946 && operand_equal_for_phi_arg_p (arg_true, larger))
947 minmax = MAX_EXPR;
948 else
949 return false;
951 else
953 /* Recognize the following case, assuming d <= u:
955 if (a <= u)
956 b = MAX (a, d);
957 x = PHI <b, u>
959 This is equivalent to
961 b = MAX (a, d);
962 x = MIN (b, u); */
964 gimple assign = last_and_only_stmt (middle_bb);
965 tree lhs, op0, op1, bound;
967 if (!assign
968 || gimple_code (assign) != GIMPLE_ASSIGN)
969 return false;
971 lhs = gimple_assign_lhs (assign);
972 ass_code = gimple_assign_rhs_code (assign);
973 if (ass_code != MAX_EXPR && ass_code != MIN_EXPR)
974 return false;
975 op0 = gimple_assign_rhs1 (assign);
976 op1 = gimple_assign_rhs2 (assign);
978 if (true_edge->src == middle_bb)
980 /* We got here if the condition is true, i.e., SMALLER < LARGER. */
981 if (!operand_equal_for_phi_arg_p (lhs, arg_true))
982 return false;
984 if (operand_equal_for_phi_arg_p (arg_false, larger))
986 /* Case
988 if (smaller < larger)
990 r' = MAX_EXPR (smaller, bound)
992 r = PHI <r', larger> --> to be turned to MIN_EXPR. */
993 if (ass_code != MAX_EXPR)
994 return false;
996 minmax = MIN_EXPR;
997 if (operand_equal_for_phi_arg_p (op0, smaller))
998 bound = op1;
999 else if (operand_equal_for_phi_arg_p (op1, smaller))
1000 bound = op0;
1001 else
1002 return false;
1004 /* We need BOUND <= LARGER. */
1005 if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
1006 bound, larger)))
1007 return false;
1009 else if (operand_equal_for_phi_arg_p (arg_false, smaller))
1011 /* Case
1013 if (smaller < larger)
1015 r' = MIN_EXPR (larger, bound)
1017 r = PHI <r', smaller> --> to be turned to MAX_EXPR. */
1018 if (ass_code != MIN_EXPR)
1019 return false;
1021 minmax = MAX_EXPR;
1022 if (operand_equal_for_phi_arg_p (op0, larger))
1023 bound = op1;
1024 else if (operand_equal_for_phi_arg_p (op1, larger))
1025 bound = op0;
1026 else
1027 return false;
1029 /* We need BOUND >= SMALLER. */
1030 if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
1031 bound, smaller)))
1032 return false;
1034 else
1035 return false;
1037 else
1039 /* We got here if the condition is false, i.e., SMALLER > LARGER. */
1040 if (!operand_equal_for_phi_arg_p (lhs, arg_false))
1041 return false;
1043 if (operand_equal_for_phi_arg_p (arg_true, larger))
1045 /* Case
1047 if (smaller > larger)
1049 r' = MIN_EXPR (smaller, bound)
1051 r = PHI <r', larger> --> to be turned to MAX_EXPR. */
1052 if (ass_code != MIN_EXPR)
1053 return false;
1055 minmax = MAX_EXPR;
1056 if (operand_equal_for_phi_arg_p (op0, smaller))
1057 bound = op1;
1058 else if (operand_equal_for_phi_arg_p (op1, smaller))
1059 bound = op0;
1060 else
1061 return false;
1063 /* We need BOUND >= LARGER. */
1064 if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
1065 bound, larger)))
1066 return false;
1068 else if (operand_equal_for_phi_arg_p (arg_true, smaller))
1070 /* Case
1072 if (smaller > larger)
1074 r' = MAX_EXPR (larger, bound)
1076 r = PHI <r', smaller> --> to be turned to MIN_EXPR. */
1077 if (ass_code != MAX_EXPR)
1078 return false;
1080 minmax = MIN_EXPR;
1081 if (operand_equal_for_phi_arg_p (op0, larger))
1082 bound = op1;
1083 else if (operand_equal_for_phi_arg_p (op1, larger))
1084 bound = op0;
1085 else
1086 return false;
1088 /* We need BOUND <= SMALLER. */
1089 if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
1090 bound, smaller)))
1091 return false;
1093 else
1094 return false;
1097 /* Move the statement from the middle block. */
1098 gsi = gsi_last_bb (cond_bb);
1099 gsi_from = gsi_last_nondebug_bb (middle_bb);
1100 gsi_move_before (&gsi_from, &gsi);
1103 /* Emit the statement to compute min/max. */
1104 result = duplicate_ssa_name (PHI_RESULT (phi), NULL);
1105 new_stmt = gimple_build_assign_with_ops (minmax, result, arg0, arg1);
1106 gsi = gsi_last_bb (cond_bb);
1107 gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
1109 replace_phi_edge_with_variable (cond_bb, e1, phi, result);
1110 return true;
1113 /* The function abs_replacement does the main work of doing the absolute
1114 replacement. Return true if the replacement is done. Otherwise return
1115 false.
1116 bb is the basic block where the replacement is going to be done. arg0
1117 is argument 0 from the phi. Likewise for arg1. */
1119 static bool
1120 abs_replacement (basic_block cond_bb, basic_block middle_bb,
1121 edge e0 ATTRIBUTE_UNUSED, edge e1,
1122 gimple phi, tree arg0, tree arg1)
1124 tree result;
1125 gimple new_stmt, cond;
1126 gimple_stmt_iterator gsi;
1127 edge true_edge, false_edge;
1128 gimple assign;
1129 edge e;
1130 tree rhs, lhs;
1131 bool negate;
1132 enum tree_code cond_code;
1134 /* If the type says honor signed zeros we cannot do this
1135 optimization. */
1136 if (HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (arg1))))
1137 return false;
1139 /* OTHER_BLOCK must have only one executable statement which must have the
1140 form arg0 = -arg1 or arg1 = -arg0. */
1142 assign = last_and_only_stmt (middle_bb);
1143 /* If we did not find the proper negation assignment, then we can not
1144 optimize. */
1145 if (assign == NULL)
1146 return false;
1148 /* If we got here, then we have found the only executable statement
1149 in OTHER_BLOCK. If it is anything other than arg0 = -arg1 or
1150 arg1 = -arg0, then we can not optimize. */
1151 if (gimple_code (assign) != GIMPLE_ASSIGN)
1152 return false;
1154 lhs = gimple_assign_lhs (assign);
1156 if (gimple_assign_rhs_code (assign) != NEGATE_EXPR)
1157 return false;
1159 rhs = gimple_assign_rhs1 (assign);
1161 /* The assignment has to be arg0 = -arg1 or arg1 = -arg0. */
1162 if (!(lhs == arg0 && rhs == arg1)
1163 && !(lhs == arg1 && rhs == arg0))
1164 return false;
1166 cond = last_stmt (cond_bb);
1167 result = PHI_RESULT (phi);
1169 /* Only relationals comparing arg[01] against zero are interesting. */
1170 cond_code = gimple_cond_code (cond);
1171 if (cond_code != GT_EXPR && cond_code != GE_EXPR
1172 && cond_code != LT_EXPR && cond_code != LE_EXPR)
1173 return false;
1175 /* Make sure the conditional is arg[01] OP y. */
1176 if (gimple_cond_lhs (cond) != rhs)
1177 return false;
1179 if (FLOAT_TYPE_P (TREE_TYPE (gimple_cond_rhs (cond)))
1180 ? real_zerop (gimple_cond_rhs (cond))
1181 : integer_zerop (gimple_cond_rhs (cond)))
1183 else
1184 return false;
1186 /* We need to know which is the true edge and which is the false
1187 edge so that we know if we have abs or negative abs. */
1188 extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
1190 /* For GT_EXPR/GE_EXPR, if the true edge goes to OTHER_BLOCK, then we
1191 will need to negate the result. Similarly for LT_EXPR/LE_EXPR if
1192 the false edge goes to OTHER_BLOCK. */
1193 if (cond_code == GT_EXPR || cond_code == GE_EXPR)
1194 e = true_edge;
1195 else
1196 e = false_edge;
1198 if (e->dest == middle_bb)
1199 negate = true;
1200 else
1201 negate = false;
1203 result = duplicate_ssa_name (result, NULL);
1205 if (negate)
1206 lhs = make_ssa_name (TREE_TYPE (result), NULL);
1207 else
1208 lhs = result;
1210 /* Build the modify expression with abs expression. */
1211 new_stmt = gimple_build_assign_with_ops (ABS_EXPR, lhs, rhs, NULL);
1213 gsi = gsi_last_bb (cond_bb);
1214 gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
1216 if (negate)
1218 /* Get the right GSI. We want to insert after the recently
1219 added ABS_EXPR statement (which we know is the first statement
1220 in the block). */
1221 new_stmt = gimple_build_assign_with_ops (NEGATE_EXPR, result, lhs, NULL);
1223 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
1226 replace_phi_edge_with_variable (cond_bb, e1, phi, result);
1228 /* Note that we optimized this PHI. */
1229 return true;
1232 /* Auxiliary functions to determine the set of memory accesses which
1233 can't trap because they are preceded by accesses to the same memory
1234 portion. We do that for MEM_REFs, so we only need to track
1235 the SSA_NAME of the pointer indirectly referenced. The algorithm
1236 simply is a walk over all instructions in dominator order. When
1237 we see a MEM_REF we determine if we've already seen the same
1238 ref anywhere up to the root of the dominator tree. If we have, the
1239 current access can't trap. If we don't see any dominating access,
1240 the current access might trap, but might also make later accesses
1241 non-trapping, so we remember it. We need to be careful with loads
1242 or stores, for instance a load might not trap, while a store would,
1243 so if we see a dominating read access this doesn't mean that a later
1244 write access would not trap. Hence we also need to differentiate the
1245 type of access(es) seen.
1247 ??? We currently are very conservative and assume that a load might
1248 trap even if a store doesn't (write-only memory). This probably is
1249 overly conservative. */
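/* As a concrete illustration (names are examples only), in

     *p_1 = 0;
     ...
     if (cond)
       *p_1 = x_2;

   the conditional store is dominated by a store through the same
   SSA_NAME with the same offset and size, so it is recorded as
   non-trapping and may later be sunk by cond_store_replacement.  */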
1251 /* A hash-table of SSA_NAMEs, and in which basic block an MEM_REF
1252 through it was seen, which would constitute a no-trap region for
1253 same accesses. */
1254 struct name_to_bb
1256 unsigned int ssa_name_ver;
1257 bool store;
1258 HOST_WIDE_INT offset, size;
1259 basic_block bb;
1262 /* The hash table for remembering what we've seen. */
1263 static htab_t seen_ssa_names;
1265 /* The set of MEM_REFs which can't trap. */
1266 static struct pointer_set_t *nontrap_set;
1268 /* The hash function. */
1269 static hashval_t
1270 name_to_bb_hash (const void *p)
1272 const struct name_to_bb *n = (const struct name_to_bb *) p;
1273 return n->ssa_name_ver ^ (((hashval_t) n->store) << 31)
1274 ^ (n->offset << 6) ^ (n->size << 3);
1277 /* The equality function of *P1 and *P2. */
1278 static int
1279 name_to_bb_eq (const void *p1, const void *p2)
1281 const struct name_to_bb *n1 = (const struct name_to_bb *)p1;
1282 const struct name_to_bb *n2 = (const struct name_to_bb *)p2;
1284 return n1->ssa_name_ver == n2->ssa_name_ver
1285 && n1->store == n2->store
1286 && n1->offset == n2->offset
1287 && n1->size == n2->size;
1290 /* We see the expression EXP in basic block BB. If it's an interesting
1291 expression (an MEM_REF through an SSA_NAME) possibly insert the
1292 expression into the set NONTRAP or the hash table of seen expressions.
1293 STORE is true if this expression is on the LHS, otherwise it's on
1294 the RHS. */
1295 static void
1296 add_or_mark_expr (basic_block bb, tree exp,
1297 struct pointer_set_t *nontrap, bool store)
1299 HOST_WIDE_INT size;
1301 if (TREE_CODE (exp) == MEM_REF
1302 && TREE_CODE (TREE_OPERAND (exp, 0)) == SSA_NAME
1303 && host_integerp (TREE_OPERAND (exp, 1), 0)
1304 && (size = int_size_in_bytes (TREE_TYPE (exp))) > 0)
1306 tree name = TREE_OPERAND (exp, 0);
1307 struct name_to_bb map;
1308 void **slot;
1309 struct name_to_bb *n2bb;
1310 basic_block found_bb = 0;
1312 /* Try to find the last seen MEM_REF through the same
1313 SSA_NAME, which can trap. */
1314 map.ssa_name_ver = SSA_NAME_VERSION (name);
1315 map.bb = 0;
1316 map.store = store;
1317 map.offset = tree_low_cst (TREE_OPERAND (exp, 1), 0);
1318 map.size = size;
1320 slot = htab_find_slot (seen_ssa_names, &map, INSERT);
1321 n2bb = (struct name_to_bb *) *slot;
1322 if (n2bb)
1323 found_bb = n2bb->bb;
1325 /* If we've found a trapping MEM_REF, _and_ it dominates EXP
1326 (it's in a basic block on the path from us to the dominator root)
1327 then we can't trap. */
1328 if (found_bb && found_bb->aux == (void *)1)
1330 pointer_set_insert (nontrap, exp);
1332 else
1334 /* EXP might trap, so insert it into the hash table. */
1335 if (n2bb)
1337 n2bb->bb = bb;
1339 else
1341 n2bb = XNEW (struct name_to_bb);
1342 n2bb->ssa_name_ver = SSA_NAME_VERSION (name);
1343 n2bb->bb = bb;
1344 n2bb->store = store;
1345 n2bb->offset = map.offset;
1346 n2bb->size = size;
1347 *slot = n2bb;
1353 /* Called by walk_dominator_tree, when entering the block BB. */
1354 static void
1355 nt_init_block (struct dom_walk_data *data ATTRIBUTE_UNUSED, basic_block bb)
1357 gimple_stmt_iterator gsi;
1358 /* Mark this BB as being on the path to dominator root. */
1359 bb->aux = (void*)1;
1361 /* And walk the statements in order. */
1362 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1364 gimple stmt = gsi_stmt (gsi);
1366 if (gimple_assign_single_p (stmt))
1368 add_or_mark_expr (bb, gimple_assign_lhs (stmt), nontrap_set, true);
1369 add_or_mark_expr (bb, gimple_assign_rhs1 (stmt), nontrap_set, false);
1374 /* Called by walk_dominator_tree, when basic block BB is exited. */
1375 static void
1376 nt_fini_block (struct dom_walk_data *data ATTRIBUTE_UNUSED, basic_block bb)
1378 /* This BB isn't on the path to dominator root anymore. */
1379 bb->aux = NULL;
1382 /* This is the entry point of gathering non-trapping memory accesses.
1383 It will do a dominator walk over the whole function, and it will
1384 make use of the bb->aux pointers. It returns a set of trees
1385 (the MEM_REFs themselves) which can't trap. */
1386 static struct pointer_set_t *
1387 get_non_trapping (void)
1389 struct pointer_set_t *nontrap;
1390 struct dom_walk_data walk_data;
1392 nontrap = pointer_set_create ();
1393 seen_ssa_names = htab_create (128, name_to_bb_hash, name_to_bb_eq,
1394 free);
1395 /* We're going to do a dominator walk, so ensure that we have
1396 dominance information. */
1397 calculate_dominance_info (CDI_DOMINATORS);
1399 /* Setup callbacks for the generic dominator tree walker. */
1400 nontrap_set = nontrap;
1401 walk_data.dom_direction = CDI_DOMINATORS;
1402 walk_data.initialize_block_local_data = NULL;
1403 walk_data.before_dom_children = nt_init_block;
1404 walk_data.after_dom_children = nt_fini_block;
1405 walk_data.global_data = NULL;
1406 walk_data.block_local_data_size = 0;
1408 init_walk_dominator_tree (&walk_data);
1409 walk_dominator_tree (&walk_data, ENTRY_BLOCK_PTR);
1410 fini_walk_dominator_tree (&walk_data);
1411 htab_delete (seen_ssa_names);
1413 return nontrap;
1416 /* Do the main work of conditional store replacement. We already know
1417 that the recognized pattern looks like so:
1419 split:
1420 if (cond) goto MIDDLE_BB; else goto JOIN_BB (edge E1)
1421 MIDDLE_BB:
1422 something
1423 fallthrough (edge E0)
1424 JOIN_BB:
1425 some more
1427 We check that MIDDLE_BB contains only one store, that that store
1428 doesn't trap (not via NOTRAP, but via checking if an access to the same
1429 memory location dominates us) and that the store has a "simple" RHS. */
1431 static bool
1432 cond_store_replacement (basic_block middle_bb, basic_block join_bb,
1433 edge e0, edge e1, struct pointer_set_t *nontrap)
1435 gimple assign = last_and_only_stmt (middle_bb);
1436 tree lhs, rhs, name, name2;
1437 gimple newphi, new_stmt;
1438 gimple_stmt_iterator gsi;
1439 source_location locus;
1441 /* Check if middle_bb contains only one store. */
1442 if (!assign
1443 || !gimple_assign_single_p (assign))
1444 return false;
1446 locus = gimple_location (assign);
1447 lhs = gimple_assign_lhs (assign);
1448 rhs = gimple_assign_rhs1 (assign);
1449 if (TREE_CODE (lhs) != MEM_REF
1450 || TREE_CODE (TREE_OPERAND (lhs, 0)) != SSA_NAME
1451 || !is_gimple_reg_type (TREE_TYPE (lhs)))
1452 return false;
1454 /* Prove that we can move the store down. We could also check
1455 TREE_THIS_NOTRAP here, but in that case we could also move stores
1456 whose value is not readily available, which we want to avoid. */
1457 if (!pointer_set_contains (nontrap, lhs))
1458 return false;
1460 /* Now we've checked the constraints, so do the transformation:
1461 1) Remove the single store. */
1462 gsi = gsi_for_stmt (assign);
1463 unlink_stmt_vdef (assign);
1464 gsi_remove (&gsi, true);
1465 release_defs (assign);
1467 /* 2) Insert a load from the memory of the store to the temporary
1468 on the edge which did not contain the store. */
1469 lhs = unshare_expr (lhs);
1470 name = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
1471 new_stmt = gimple_build_assign (name, lhs);
1472 gimple_set_location (new_stmt, locus);
1473 gsi_insert_on_edge (e1, new_stmt);
1475 /* 3) Create a PHI node at the join block, with one argument
1476 holding the old RHS, and the other holding the temporary
1477 where we stored the old memory contents. */
1478 name2 = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
1479 newphi = create_phi_node (name2, join_bb);
1480 add_phi_arg (newphi, rhs, e0, locus);
1481 add_phi_arg (newphi, name, e1, locus);
1483 lhs = unshare_expr (lhs);
1484 new_stmt = gimple_build_assign (lhs, PHI_RESULT (newphi));
1486 /* 4) Insert that PHI node. */
1487 gsi = gsi_after_labels (join_bb);
1488 if (gsi_end_p (gsi))
1490 gsi = gsi_last_bb (join_bb);
1491 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
1493 else
1494 gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
1496 return true;
1499 /* Do the main work of conditional store replacement. */
1501 static bool
1502 cond_if_else_store_replacement_1 (basic_block then_bb, basic_block else_bb,
1503 basic_block join_bb, gimple then_assign,
1504 gimple else_assign)
1506 tree lhs_base, lhs, then_rhs, else_rhs, name;
1507 source_location then_locus, else_locus;
1508 gimple_stmt_iterator gsi;
1509 gimple newphi, new_stmt;
1511 if (then_assign == NULL
1512 || !gimple_assign_single_p (then_assign)
1513 || gimple_clobber_p (then_assign)
1514 || else_assign == NULL
1515 || !gimple_assign_single_p (else_assign)
1516 || gimple_clobber_p (else_assign))
1517 return false;
1519 lhs = gimple_assign_lhs (then_assign);
1520 if (!is_gimple_reg_type (TREE_TYPE (lhs))
1521 || !operand_equal_p (lhs, gimple_assign_lhs (else_assign), 0))
1522 return false;
1524 lhs_base = get_base_address (lhs);
1525 if (lhs_base == NULL_TREE
1526 || (!DECL_P (lhs_base) && TREE_CODE (lhs_base) != MEM_REF))
1527 return false;
1529 then_rhs = gimple_assign_rhs1 (then_assign);
1530 else_rhs = gimple_assign_rhs1 (else_assign);
1531 then_locus = gimple_location (then_assign);
1532 else_locus = gimple_location (else_assign);
1534 /* Now we've checked the constraints, so do the transformation:
1535 1) Remove the stores. */
1536 gsi = gsi_for_stmt (then_assign);
1537 unlink_stmt_vdef (then_assign);
1538 gsi_remove (&gsi, true);
1539 release_defs (then_assign);
1541 gsi = gsi_for_stmt (else_assign);
1542 unlink_stmt_vdef (else_assign);
1543 gsi_remove (&gsi, true);
1544 release_defs (else_assign);
1546 /* 2) Create a PHI node at the join block, with one argument
1547 holding the RHS of the THEN store and the other holding the
1548 RHS of the ELSE store. */
1549 name = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
1550 newphi = create_phi_node (name, join_bb);
1551 add_phi_arg (newphi, then_rhs, EDGE_SUCC (then_bb, 0), then_locus);
1552 add_phi_arg (newphi, else_rhs, EDGE_SUCC (else_bb, 0), else_locus);
1554 new_stmt = gimple_build_assign (lhs, PHI_RESULT (newphi));
1556 /* 3) Insert that PHI node. */
1557 gsi = gsi_after_labels (join_bb);
1558 if (gsi_end_p (gsi))
1560 gsi = gsi_last_bb (join_bb);
1561 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
1563 else
1564 gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
1566 return true;
1569 /* Conditional store replacement. We already know
1570 that the recognized pattern looks like so:
1572 split:
1573 if (cond) goto THEN_BB; else goto ELSE_BB (edge E1)
1574 THEN_BB:
1576 X = Y;
1578 goto JOIN_BB;
1579 ELSE_BB:
1581 X = Z;
1583 fallthrough (edge E0)
1584 JOIN_BB:
1585 some more
1587 We check that it is safe to sink the store to JOIN_BB by verifying that
1588 there are no read-after-write or write-after-write dependencies in
1589 THEN_BB and ELSE_BB. */
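/* For example (illustrative only), a read-after-write dependence like

     if (cond)
       {
         *p = v;
         t = *p;
       }
     else
       *p = w;

   blocks the transformation: sinking the store of *p to the join block
   would move it past the read in THEN_BB.  */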
1591 static bool
1592 cond_if_else_store_replacement (basic_block then_bb, basic_block else_bb,
1593 basic_block join_bb)
1595 gimple then_assign = last_and_only_stmt (then_bb);
1596 gimple else_assign = last_and_only_stmt (else_bb);
1597 VEC (data_reference_p, heap) *then_datarefs, *else_datarefs;
1598 VEC (ddr_p, heap) *then_ddrs, *else_ddrs;
1599 gimple then_store, else_store;
1600 bool found, ok = false, res;
1601 struct data_dependence_relation *ddr;
1602 data_reference_p then_dr, else_dr;
1603 int i, j;
1604 tree then_lhs, else_lhs;
1605 VEC (gimple, heap) *then_stores, *else_stores;
1606 basic_block blocks[3];
1608 if (MAX_STORES_TO_SINK == 0)
1609 return false;
1611 /* Handle the case with a single statement in THEN_BB and ELSE_BB. */
1612 if (then_assign && else_assign)
1613 return cond_if_else_store_replacement_1 (then_bb, else_bb, join_bb,
1614 then_assign, else_assign);
1616 /* Find data references. */
1617 then_datarefs = VEC_alloc (data_reference_p, heap, 1);
1618 else_datarefs = VEC_alloc (data_reference_p, heap, 1);
1619 if ((find_data_references_in_bb (NULL, then_bb, &then_datarefs)
1620 == chrec_dont_know)
1621 || !VEC_length (data_reference_p, then_datarefs)
1622 || (find_data_references_in_bb (NULL, else_bb, &else_datarefs)
1623 == chrec_dont_know)
1624 || !VEC_length (data_reference_p, else_datarefs))
1626 free_data_refs (then_datarefs);
1627 free_data_refs (else_datarefs);
1628 return false;
1631 /* Find pairs of stores with equal LHS. */
1632 then_stores = VEC_alloc (gimple, heap, 1);
1633 else_stores = VEC_alloc (gimple, heap, 1);
1634 FOR_EACH_VEC_ELT (data_reference_p, then_datarefs, i, then_dr)
1636 if (DR_IS_READ (then_dr))
1637 continue;
1639 then_store = DR_STMT (then_dr);
1640 then_lhs = gimple_get_lhs (then_store);
1641 found = false;
1643 FOR_EACH_VEC_ELT (data_reference_p, else_datarefs, j, else_dr)
1645 if (DR_IS_READ (else_dr))
1646 continue;
1648 else_store = DR_STMT (else_dr);
1649 else_lhs = gimple_get_lhs (else_store);
1651 if (operand_equal_p (then_lhs, else_lhs, 0))
1653 found = true;
1654 break;
1658 if (!found)
1659 continue;
1661 VEC_safe_push (gimple, heap, then_stores, then_store);
1662 VEC_safe_push (gimple, heap, else_stores, else_store);
1665 /* No pairs of stores found, or too many stores to sink. */
1666 if (!VEC_length (gimple, then_stores)
1667 || VEC_length (gimple, then_stores) > (unsigned) MAX_STORES_TO_SINK)
1669 free_data_refs (then_datarefs);
1670 free_data_refs (else_datarefs);
1671 VEC_free (gimple, heap, then_stores);
1672 VEC_free (gimple, heap, else_stores);
1673 return false;
1676 /* Compute and check data dependencies in both basic blocks. */
1677 then_ddrs = VEC_alloc (ddr_p, heap, 1);
1678 else_ddrs = VEC_alloc (ddr_p, heap, 1);
1679 if (!compute_all_dependences (then_datarefs, &then_ddrs, NULL, false)
1680 || !compute_all_dependences (else_datarefs, &else_ddrs, NULL, false))
1682 free_dependence_relations (then_ddrs);
1683 free_dependence_relations (else_ddrs);
1684 free_data_refs (then_datarefs);
1685 free_data_refs (else_datarefs);
1686 VEC_free (gimple, heap, then_stores);
1687 VEC_free (gimple, heap, else_stores);
1688 return false;
1690 blocks[0] = then_bb;
1691 blocks[1] = else_bb;
1692 blocks[2] = join_bb;
1693 renumber_gimple_stmt_uids_in_blocks (blocks, 3);
1695 /* Check that there are no read-after-write or write-after-write dependencies
1696 in THEN_BB. */
1697 FOR_EACH_VEC_ELT (ddr_p, then_ddrs, i, ddr)
1699 struct data_reference *dra = DDR_A (ddr);
1700 struct data_reference *drb = DDR_B (ddr);
1702 if (DDR_ARE_DEPENDENT (ddr) != chrec_known
1703 && ((DR_IS_READ (dra) && DR_IS_WRITE (drb)
1704 && gimple_uid (DR_STMT (dra)) > gimple_uid (DR_STMT (drb)))
1705 || (DR_IS_READ (drb) && DR_IS_WRITE (dra)
1706 && gimple_uid (DR_STMT (drb)) > gimple_uid (DR_STMT (dra)))
1707 || (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))))
1709 free_dependence_relations (then_ddrs);
1710 free_dependence_relations (else_ddrs);
1711 free_data_refs (then_datarefs);
1712 free_data_refs (else_datarefs);
1713 VEC_free (gimple, heap, then_stores);
1714 VEC_free (gimple, heap, else_stores);
1715 return false;
1719 /* Check that there are no read-after-write or write-after-write dependencies
1720 in ELSE_BB. */
1721 FOR_EACH_VEC_ELT (ddr_p, else_ddrs, i, ddr)
1723 struct data_reference *dra = DDR_A (ddr);
1724 struct data_reference *drb = DDR_B (ddr);
1726 if (DDR_ARE_DEPENDENT (ddr) != chrec_known
1727 && ((DR_IS_READ (dra) && DR_IS_WRITE (drb)
1728 && gimple_uid (DR_STMT (dra)) > gimple_uid (DR_STMT (drb)))
1729 || (DR_IS_READ (drb) && DR_IS_WRITE (dra)
1730 && gimple_uid (DR_STMT (drb)) > gimple_uid (DR_STMT (dra)))
1731 || (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))))
1733 free_dependence_relations (then_ddrs);
1734 free_dependence_relations (else_ddrs);
1735 free_data_refs (then_datarefs);
1736 free_data_refs (else_datarefs);
1737 VEC_free (gimple, heap, then_stores);
1738 VEC_free (gimple, heap, else_stores);
1739 return false;
1743 /* Sink stores with same LHS. */
1744 FOR_EACH_VEC_ELT (gimple, then_stores, i, then_store)
1746 else_store = VEC_index (gimple, else_stores, i);
1747 res = cond_if_else_store_replacement_1 (then_bb, else_bb, join_bb,
1748 then_store, else_store);
1749 ok = ok || res;
1752 free_dependence_relations (then_ddrs);
1753 free_dependence_relations (else_ddrs);
1754 free_data_refs (then_datarefs);
1755 free_data_refs (else_datarefs);
1756 VEC_free (gimple, heap, then_stores);
1757 VEC_free (gimple, heap, else_stores);
1759 return ok;
1762 /* Return TRUE if STMT has a VUSE whose corresponding VDEF is in BB. */
1764 static bool
1765 local_mem_dependence (gimple stmt, basic_block bb)
1767 tree vuse = gimple_vuse (stmt);
1768 gimple def;
1770 if (!vuse)
1771 return false;
1773 def = SSA_NAME_DEF_STMT (vuse);
1774 return (def && gimple_bb (def) == bb);
1777 /* Given a "diamond" control-flow pattern where BB0 tests a condition,
1778 BB1 and BB2 are "then" and "else" blocks dependent on this test,
1779 and BB3 rejoins control flow following BB1 and BB2, look for
1780 opportunities to hoist loads as follows. If BB3 contains a PHI of
1781 two loads, one each occurring in BB1 and BB2, and the loads are
1782 provably of adjacent fields in the same structure, then move both
1783 loads into BB0. Of course this can only be done if there are no
1784 dependencies preventing such motion.
1786 One of the hoisted loads will always be speculative, so the
1787 transformation is currently conservative:
1789 - The fields must be strictly adjacent.
1790 - The two fields must occupy a single memory block that is
1791 guaranteed to not cross a page boundary.
1793 The last is difficult to prove, as such memory blocks should be
1794 aligned on the minimum of the stack alignment boundary and the
1795 alignment guaranteed by heap allocation interfaces. Thus we rely
1796 on a parameter for the alignment value.
1798 Provided a good value is used for the last case, the first
1799 restriction could possibly be relaxed. */
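/* A sketch of the idiom at the source level (type and field names are
   illustrative only):

     struct node { struct node *left, *right; };
     ...
     x = cond ? y->left : y->right;

   When left and right are adjacent and fit in one cache line, both
   loads are hoisted into BB0 and the selection can become a
   conditional move.  */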
1801 static void
1802 hoist_adjacent_loads (basic_block bb0, basic_block bb1,
1803 basic_block bb2, basic_block bb3)
1805 int param_align = PARAM_VALUE (PARAM_L1_CACHE_LINE_SIZE);
1806 unsigned param_align_bits = (unsigned) (param_align * BITS_PER_UNIT);
1807 gimple_stmt_iterator gsi;
1809 /* Walk the phis in bb3 looking for an opportunity. We are looking
1810 for phis of two SSA names, one each of which is defined in bb1 and
1811 bb2. */
1812 for (gsi = gsi_start_phis (bb3); !gsi_end_p (gsi); gsi_next (&gsi))
1814 gimple phi_stmt = gsi_stmt (gsi);
1815 gimple def1, def2, defswap;
1816 tree arg1, arg2, ref1, ref2, field1, field2, fieldswap;
1817 tree tree_offset1, tree_offset2, tree_size2, next;
1818 int offset1, offset2, size2;
1819 unsigned align1;
1820 gimple_stmt_iterator gsi2;
1821 basic_block bb_for_def1, bb_for_def2;
1823 if (gimple_phi_num_args (phi_stmt) != 2
1824 || virtual_operand_p (gimple_phi_result (phi_stmt)))
1825 continue;
1827 arg1 = gimple_phi_arg_def (phi_stmt, 0);
1828 arg2 = gimple_phi_arg_def (phi_stmt, 1);
1830 if (TREE_CODE (arg1) != SSA_NAME
1831 || TREE_CODE (arg2) != SSA_NAME
1832 || SSA_NAME_IS_DEFAULT_DEF (arg1)
1833 || SSA_NAME_IS_DEFAULT_DEF (arg2))
1834 continue;
1836 def1 = SSA_NAME_DEF_STMT (arg1);
1837 def2 = SSA_NAME_DEF_STMT (arg2);
1839 if ((gimple_bb (def1) != bb1 || gimple_bb (def2) != bb2)
1840 && (gimple_bb (def2) != bb1 || gimple_bb (def1) != bb2))
1841 continue;
1843 /* Check the mode of the arguments to be sure a conditional move
1844 can be generated for it. */
1845 if (optab_handler (movcc_optab, TYPE_MODE (TREE_TYPE (arg1)))
1846 == CODE_FOR_nothing)
1847 continue;
1849 /* Both statements must be assignments whose RHS is a COMPONENT_REF. */
1850 if (!gimple_assign_single_p (def1)
1851 || !gimple_assign_single_p (def2))
1852 continue;
1854 ref1 = gimple_assign_rhs1 (def1);
1855 ref2 = gimple_assign_rhs1 (def2);
1857 if (TREE_CODE (ref1) != COMPONENT_REF
1858 || TREE_CODE (ref2) != COMPONENT_REF)
1859 continue;
1861 /* The zeroth operand of the two component references must be
1862 identical. It is not sufficient to compare get_base_address of
1863 the two references, because this could allow for different
1864 elements of the same array in the two trees. It is not safe to
1865 assume that the existence of one array element implies the
1866 existence of a different one. */
1867 if (!operand_equal_p (TREE_OPERAND (ref1, 0), TREE_OPERAND (ref2, 0), 0))
1868 continue;
1870 field1 = TREE_OPERAND (ref1, 1);
1871 field2 = TREE_OPERAND (ref2, 1);
1873 /* Check for field adjacency, and ensure field1 comes first. */
1874 for (next = DECL_CHAIN (field1);
1875 next && TREE_CODE (next) != FIELD_DECL;
1876 next = DECL_CHAIN (next))
1879 if (next != field2)
1881 for (next = DECL_CHAIN (field2);
1882 next && TREE_CODE (next) != FIELD_DECL;
1883 next = DECL_CHAIN (next))
1886 if (next != field1)
1887 continue;
1889 fieldswap = field1;
1890 field1 = field2;
1891 field2 = fieldswap;
1892 defswap = def1;
1893 def1 = def2;
1894 def2 = defswap;
1897 bb_for_def1 = gimple_bb (def1);
1898 bb_for_def2 = gimple_bb (def2);
1900 /* Check for proper alignment of the first field. */
1901 tree_offset1 = bit_position (field1);
1902 tree_offset2 = bit_position (field2);
1903 tree_size2 = DECL_SIZE (field2);
1905 if (!host_integerp (tree_offset1, 1)
1906 || !host_integerp (tree_offset2, 1)
1907 || !host_integerp (tree_size2, 1))
1908 continue;
1910 offset1 = TREE_INT_CST_LOW (tree_offset1);
1911 offset2 = TREE_INT_CST_LOW (tree_offset2);
1912 size2 = TREE_INT_CST_LOW (tree_size2);
1913 align1 = DECL_ALIGN (field1) % param_align_bits;
1915 if (offset1 % BITS_PER_UNIT != 0)
1916 continue;
1918 /* For profitability, the two field references should fit within
1919 a single cache line. */
1920 if (align1 + offset2 - offset1 + size2 > param_align_bits)
1921 continue;
1923 /* The two expressions cannot be dependent upon vdefs defined
1924 in bb1/bb2. */
1925 if (local_mem_dependence (def1, bb_for_def1)
1926 || local_mem_dependence (def2, bb_for_def2))
1927 continue;
1929 /* The conditions are satisfied; hoist the loads from bb1 and bb2 into
1930 bb0. We hoist the first one first so that a cache miss is handled
1931 efficiently regardless of hardware cache-fill policy. */
1932 gsi2 = gsi_for_stmt (def1);
1933 gsi_move_to_bb_end (&gsi2, bb0);
1934 gsi2 = gsi_for_stmt (def2);
1935 gsi_move_to_bb_end (&gsi2, bb0);
1937 if (dump_file && (dump_flags & TDF_DETAILS))
1939 fprintf (dump_file,
1940 "\nHoisting adjacent loads from %d and %d into %d: \n",
1941 bb_for_def1->index, bb_for_def2->index, bb0->index);
1942 print_gimple_stmt (dump_file, def1, 0, TDF_VOPS|TDF_MEMSYMS);
1943 print_gimple_stmt (dump_file, def2, 0, TDF_VOPS|TDF_MEMSYMS);
1948 /* Determine whether we should attempt to hoist adjacent loads out of
1949 diamond patterns in pass_phiopt. Always hoist loads if
1950 -fhoist-adjacent-loads is specified and the target machine has
1951 both a conditional move instruction and a defined cache line size. */
1953 static bool
1954 gate_hoist_loads (void)
1956 return (flag_hoist_adjacent_loads == 1
1957 && PARAM_VALUE (PARAM_L1_CACHE_LINE_SIZE)
1958 && HAVE_conditional_move);
1961 /* Always do these optimizations if we have SSA
1962 trees to work on. */
1963 static bool
1964 gate_phiopt (void)
1966 return 1;
1969 struct gimple_opt_pass pass_phiopt =
1972 GIMPLE_PASS,
1973 "phiopt", /* name */
1974 gate_phiopt, /* gate */
1975 tree_ssa_phiopt, /* execute */
1976 NULL, /* sub */
1977 NULL, /* next */
1978 0, /* static_pass_number */
1979 TV_TREE_PHIOPT, /* tv_id */
1980 PROP_cfg | PROP_ssa, /* properties_required */
1981 0, /* properties_provided */
1982 0, /* properties_destroyed */
1983 0, /* todo_flags_start */
1984 TODO_ggc_collect
1985 | TODO_verify_ssa
1986 | TODO_verify_flow
1987 | TODO_verify_stmts /* todo_flags_finish */
1991 static bool
1992 gate_cselim (void)
1994 return flag_tree_cselim;
1997 struct gimple_opt_pass pass_cselim =
2000 GIMPLE_PASS,
2001 "cselim", /* name */
2002 gate_cselim, /* gate */
2003 tree_ssa_cs_elim, /* execute */
2004 NULL, /* sub */
2005 NULL, /* next */
2006 0, /* static_pass_number */
2007 TV_TREE_PHIOPT, /* tv_id */
2008 PROP_cfg | PROP_ssa, /* properties_required */
2009 0, /* properties_provided */
2010 0, /* properties_destroyed */
2011 0, /* todo_flags_start */
2012 TODO_ggc_collect
2013 | TODO_verify_ssa
2014 | TODO_verify_flow
2015 | TODO_verify_stmts /* todo_flags_finish */