Fixups after merge
[official-gcc.git] / gcc / tree-ssa-phiopt.c
blob 191845e0cdfd4b69f6bf60e9cfcd8e88580b6baa
1 /* Optimization of PHI nodes by converting them into straightline code.
2 Copyright (C) 2004-2014 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 3, or (at your option) any
9 later version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "hash-table.h"
24 #include "tm.h"
25 #include "tree.h"
26 #include "stor-layout.h"
27 #include "flags.h"
28 #include "tm_p.h"
29 #include "predict.h"
30 #include "vec.h"
31 #include "hashtab.h"
32 #include "hash-set.h"
33 #include "machmode.h"
34 #include "hard-reg-set.h"
35 #include "input.h"
36 #include "function.h"
37 #include "dominance.h"
38 #include "cfg.h"
39 #include "cfganal.h"
40 #include "basic-block.h"
41 #include "tree-ssa-alias.h"
42 #include "internal-fn.h"
43 #include "gimple-expr.h"
44 #include "is-a.h"
45 #include "gimple.h"
46 #include "gimplify.h"
47 #include "gimple-iterator.h"
48 #include "gimplify-me.h"
49 #include "gimple-ssa.h"
50 #include "tree-cfg.h"
51 #include "tree-phinodes.h"
52 #include "ssa-iterators.h"
53 #include "stringpool.h"
54 #include "tree-ssanames.h"
55 #include "expr.h"
56 #include "tree-dfa.h"
57 #include "tree-pass.h"
58 #include "langhooks.h"
59 #include "domwalk.h"
60 #include "cfgloop.h"
61 #include "tree-data-ref.h"
62 #include "gimple-pretty-print.h"
63 #include "insn-config.h"
64 #include "expr.h"
65 #include "insn-codes.h"
66 #include "optabs.h"
67 #include "tree-scalar-evolution.h"
68 #include "tree-inline.h"
70 #ifndef HAVE_conditional_move
71 #define HAVE_conditional_move (0)
72 #endif
74 static unsigned int tree_ssa_phiopt_worker (bool, bool);
75 static bool conditional_replacement (basic_block, basic_block,
76 edge, edge, gphi *, tree, tree);
77 static int value_replacement (basic_block, basic_block,
78 edge, edge, gimple, tree, tree);
79 static bool minmax_replacement (basic_block, basic_block,
80 edge, edge, gimple, tree, tree);
81 static bool abs_replacement (basic_block, basic_block,
82 edge, edge, gimple, tree, tree);
83 static bool neg_replacement (basic_block, basic_block,
84 edge, edge, gimple, tree, tree);
85 static bool cond_store_replacement (basic_block, basic_block, edge, edge,
86 hash_set<tree> *);
87 static bool cond_if_else_store_replacement (basic_block, basic_block, basic_block);
88 static hash_set<tree> * get_non_trapping ();
89 static void replace_phi_edge_with_variable (basic_block, edge, gimple, tree);
90 static void hoist_adjacent_loads (basic_block, basic_block,
91 basic_block, basic_block);
92 static bool gate_hoist_loads (void);
94 /* This pass tries to transform conditional stores into unconditional
95 ones, enabling further simplifications with the simpler then and else
96 blocks. In particular it replaces this:
98 bb0:
99 if (cond) goto bb2; else goto bb1;
100 bb1:
101 *p = RHS;
102 bb2:
104 with
106 bb0:
107 if (cond) goto bb1; else goto bb2;
108 bb1:
109 condtmp' = *p;
110 bb2:
111 condtmp = PHI <RHS, condtmp'>
112 *p = condtmp;
114 This transformation can only be done under several constraints,
115 documented below. It also replaces:
117 bb0:
118 if (cond) goto bb2; else goto bb1;
119 bb1:
120 *p = RHS1;
121 goto bb3;
122 bb2:
123 *p = RHS2;
124 bb3:
126 with
128 bb0:
129 if (cond) goto bb3; else goto bb1;
130 bb1:
131 bb3:
132 condtmp = PHI <RHS1, RHS2>
133 *p = condtmp; */
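/* For illustration (a rough source-level sketch, not the exact GIMPLE),
   the first transformation turns

     if (cond)
       *p = rhs;

   into

     tmp = cond ? rhs : *p;
     *p = tmp;

   i.e. the store becomes unconditional, and a load of the old value
   feeds the PHI on the path that did not store.  */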
135 static unsigned int
136 tree_ssa_cs_elim (void)
138 unsigned todo;
139 /* ??? We are not interested in loop related info, but the following
140 will create it, ICEing as we didn't init loops with pre-headers.
141 An interfacing issue of find_data_references_in_bb. */
142 loop_optimizer_init (LOOPS_NORMAL);
143 scev_initialize ();
144 todo = tree_ssa_phiopt_worker (true, false);
145 scev_finalize ();
146 loop_optimizer_finalize ();
147 return todo;
 150 /* Return the single PHI in SEQ whose arguments for edges E0 and E1 differ (or the sole PHI if SEQ is a singleton); return NULL if more than one PHI qualifies. */
152 static gphi *
153 single_non_singleton_phi_for_edges (gimple_seq seq, edge e0, edge e1)
155 gimple_stmt_iterator i;
156 gphi *phi = NULL;
157 if (gimple_seq_singleton_p (seq))
158 return as_a <gphi *> (gsi_stmt (gsi_start (seq)));
159 for (i = gsi_start (seq); !gsi_end_p (i); gsi_next (&i))
161 gphi *p = as_a <gphi *> (gsi_stmt (i));
162 /* If the PHI arguments are equal then we can skip this PHI. */
163 if (operand_equal_for_phi_arg_p (gimple_phi_arg_def (p, e0->dest_idx),
164 gimple_phi_arg_def (p, e1->dest_idx)))
165 continue;
 167 /* If we already found a PHI whose arguments for these two edges
 168 differ, then there is no singleton; return NULL. */
169 if (phi)
170 return NULL;
172 phi = p;
174 return phi;
177 /* The core routine of conditional store replacement and normal
178 phi optimizations. Both share much of the infrastructure in how
179 to match applicable basic block patterns. DO_STORE_ELIM is true
180 when we want to do conditional store replacement, false otherwise.
181 DO_HOIST_LOADS is true when we want to hoist adjacent loads out
182 of diamond control flow patterns, false otherwise. */
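/* Both variants match one of two CFG shapes hanging off a block that
   ends in a GIMPLE_COND: a triangle, where one successor falls through
   into the other,

	 bb0
	/   \
      bb1    |
	\   /
	 bb2

   or, for conditional store replacement and load hoisting, a diamond
   where both successors meet in a common join block,

	 bb0
	/   \
      bb1   bb2
	\   /
	 bb3  */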
183 static unsigned int
184 tree_ssa_phiopt_worker (bool do_store_elim, bool do_hoist_loads)
186 basic_block bb;
187 basic_block *bb_order;
188 unsigned n, i;
189 bool cfgchanged = false;
190 hash_set<tree> *nontrap = 0;
192 if (do_store_elim)
193 /* Calculate the set of non-trapping memory accesses. */
194 nontrap = get_non_trapping ();
196 /* The replacement of conditional negation with a non-branching
197 sequence is really only a win when optimizing for speed and we
198 can avoid transformations by gimple if-conversion that result
199 in poor RTL generation.
201 Ideally either gimple if-conversion or the RTL expanders will
202 be improved and the code to emit branchless conditional negation
203 can be removed. */
204 bool replace_conditional_negation = false;
205 if (!do_store_elim)
206 replace_conditional_negation
207 = ((!optimize_size && optimize >= 2)
208 || (((flag_tree_loop_vectorize || cfun->has_force_vectorize_loops)
209 && flag_tree_loop_if_convert != 0)
210 || flag_tree_loop_if_convert == 1
211 || flag_tree_loop_if_convert_stores == 1));
 213 /* Search every basic block for COND_EXPRs we may be able to optimize.
215 We walk the blocks in order that guarantees that a block with
216 a single predecessor is processed before the predecessor.
217 This ensures that we collapse inner ifs before visiting the
218 outer ones, and also that we do not try to visit a removed
219 block. */
220 bb_order = single_pred_before_succ_order ();
221 n = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS;
223 for (i = 0; i < n; i++)
225 gimple cond_stmt;
226 gphi *phi;
227 basic_block bb1, bb2;
228 edge e1, e2;
229 tree arg0, arg1;
231 bb = bb_order[i];
233 cond_stmt = last_stmt (bb);
234 /* Check to see if the last statement is a GIMPLE_COND. */
235 if (!cond_stmt
236 || gimple_code (cond_stmt) != GIMPLE_COND)
237 continue;
239 e1 = EDGE_SUCC (bb, 0);
240 bb1 = e1->dest;
241 e2 = EDGE_SUCC (bb, 1);
242 bb2 = e2->dest;
244 /* We cannot do the optimization on abnormal edges. */
245 if ((e1->flags & EDGE_ABNORMAL) != 0
246 || (e2->flags & EDGE_ABNORMAL) != 0)
247 continue;
 249 /* Skip if bb1 has no successors, bb2 is missing, or bb2 has no successors. */
250 if (EDGE_COUNT (bb1->succs) == 0
251 || bb2 == NULL
252 || EDGE_COUNT (bb2->succs) == 0)
253 continue;
255 /* Find the bb which is the fall through to the other. */
256 if (EDGE_SUCC (bb1, 0)->dest == bb2)
258 else if (EDGE_SUCC (bb2, 0)->dest == bb1)
260 basic_block bb_tmp = bb1;
261 edge e_tmp = e1;
262 bb1 = bb2;
263 bb2 = bb_tmp;
264 e1 = e2;
265 e2 = e_tmp;
267 else if (do_store_elim
268 && EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest)
270 basic_block bb3 = EDGE_SUCC (bb1, 0)->dest;
272 if (!single_succ_p (bb1)
273 || (EDGE_SUCC (bb1, 0)->flags & EDGE_FALLTHRU) == 0
274 || !single_succ_p (bb2)
275 || (EDGE_SUCC (bb2, 0)->flags & EDGE_FALLTHRU) == 0
276 || EDGE_COUNT (bb3->preds) != 2)
277 continue;
278 if (cond_if_else_store_replacement (bb1, bb2, bb3))
279 cfgchanged = true;
280 continue;
282 else if (do_hoist_loads
283 && EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest)
285 basic_block bb3 = EDGE_SUCC (bb1, 0)->dest;
287 if (!FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (cond_stmt)))
288 && single_succ_p (bb1)
289 && single_succ_p (bb2)
290 && single_pred_p (bb1)
291 && single_pred_p (bb2)
292 && EDGE_COUNT (bb->succs) == 2
293 && EDGE_COUNT (bb3->preds) == 2
294 /* If one edge or the other is dominant, a conditional move
295 is likely to perform worse than the well-predicted branch. */
296 && !predictable_edge_p (EDGE_SUCC (bb, 0))
297 && !predictable_edge_p (EDGE_SUCC (bb, 1)))
298 hoist_adjacent_loads (bb, bb1, bb2, bb3);
299 continue;
301 else
302 continue;
304 e1 = EDGE_SUCC (bb1, 0);
306 /* Make sure that bb1 is just a fall through. */
307 if (!single_succ_p (bb1)
308 || (e1->flags & EDGE_FALLTHRU) == 0)
309 continue;
 311 /* Also make sure that bb1 has only one predecessor and that it
 312 is bb. */
313 if (!single_pred_p (bb1)
314 || single_pred (bb1) != bb)
315 continue;
317 if (do_store_elim)
319 /* bb1 is the middle block, bb2 the join block, bb the split block,
320 e1 the fallthrough edge from bb1 to bb2. We can't do the
321 optimization if the join block has more than two predecessors. */
322 if (EDGE_COUNT (bb2->preds) > 2)
323 continue;
324 if (cond_store_replacement (bb1, bb2, e1, e2, nontrap))
325 cfgchanged = true;
327 else
329 gimple_seq phis = phi_nodes (bb2);
330 gimple_stmt_iterator gsi;
331 bool candorest = true;
333 /* Value replacement can work with more than one PHI
334 so try that first. */
335 for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
337 phi = as_a <gphi *> (gsi_stmt (gsi));
338 arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
339 arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
340 if (value_replacement (bb, bb1, e1, e2, phi, arg0, arg1) == 2)
342 candorest = false;
343 cfgchanged = true;
344 break;
348 if (!candorest)
349 continue;
351 phi = single_non_singleton_phi_for_edges (phis, e1, e2);
352 if (!phi)
353 continue;
355 arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
356 arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
358 /* Something is wrong if we cannot find the arguments in the PHI
359 node. */
360 gcc_assert (arg0 != NULL && arg1 != NULL);
362 /* Do the replacement of conditional if it can be done. */
363 if (conditional_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
364 cfgchanged = true;
365 else if (abs_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
366 cfgchanged = true;
367 else if (replace_conditional_negation
368 && neg_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
369 cfgchanged = true;
370 else if (minmax_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
371 cfgchanged = true;
375 free (bb_order);
377 if (do_store_elim)
378 delete nontrap;
379 /* If the CFG has changed, we should cleanup the CFG. */
380 if (cfgchanged && do_store_elim)
382 /* In cond-store replacement we have added some loads on edges
383 and new VOPS (as we moved the store, and created a load). */
384 gsi_commit_edge_inserts ();
385 return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
387 else if (cfgchanged)
388 return TODO_cleanup_cfg;
389 return 0;
392 /* Replace PHI node element whose edge is E in block BB with variable NEW.
393 Remove the edge from COND_BLOCK which does not lead to BB (COND_BLOCK
394 is known to have two edges, one of which must reach BB). */
396 static void
397 replace_phi_edge_with_variable (basic_block cond_block,
398 edge e, gimple phi, tree new_tree)
400 basic_block bb = gimple_bb (phi);
401 basic_block block_to_remove;
402 gimple_stmt_iterator gsi;
404 /* Change the PHI argument to new. */
405 SET_USE (PHI_ARG_DEF_PTR (phi, e->dest_idx), new_tree);
407 /* Remove the empty basic block. */
408 if (EDGE_SUCC (cond_block, 0)->dest == bb)
410 EDGE_SUCC (cond_block, 0)->flags |= EDGE_FALLTHRU;
411 EDGE_SUCC (cond_block, 0)->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
412 EDGE_SUCC (cond_block, 0)->probability = REG_BR_PROB_BASE;
413 EDGE_SUCC (cond_block, 0)->count += EDGE_SUCC (cond_block, 1)->count;
415 block_to_remove = EDGE_SUCC (cond_block, 1)->dest;
417 else
419 EDGE_SUCC (cond_block, 1)->flags |= EDGE_FALLTHRU;
420 EDGE_SUCC (cond_block, 1)->flags
421 &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
422 EDGE_SUCC (cond_block, 1)->probability = REG_BR_PROB_BASE;
423 EDGE_SUCC (cond_block, 1)->count += EDGE_SUCC (cond_block, 0)->count;
425 block_to_remove = EDGE_SUCC (cond_block, 0)->dest;
427 delete_basic_block (block_to_remove);
429 /* Eliminate the COND_EXPR at the end of COND_BLOCK. */
430 gsi = gsi_last_bb (cond_block);
431 gsi_remove (&gsi, true);
433 if (dump_file && (dump_flags & TDF_DETAILS))
434 fprintf (dump_file,
435 "COND_EXPR in block %d and PHI in block %d converted to straightline code.\n",
436 cond_block->index,
437 bb->index);
440 /* The function conditional_replacement does the main work of doing the
441 conditional replacement. Return true if the replacement is done.
442 Otherwise return false.
 443 BB is the basic block where the replacement is going to be done. ARG0
444 is argument 0 from PHI. Likewise for ARG1. */
446 static bool
447 conditional_replacement (basic_block cond_bb, basic_block middle_bb,
448 edge e0, edge e1, gphi *phi,
449 tree arg0, tree arg1)
451 tree result;
452 gimple stmt;
453 gassign *new_stmt;
454 tree cond;
455 gimple_stmt_iterator gsi;
456 edge true_edge, false_edge;
457 tree new_var, new_var2;
458 bool neg;
460 /* FIXME: Gimplification of complex type is too hard for now. */
461 /* We aren't prepared to handle vectors either (and it is a question
462 if it would be worthwhile anyway). */
463 if (!(INTEGRAL_TYPE_P (TREE_TYPE (arg0))
464 || POINTER_TYPE_P (TREE_TYPE (arg0)))
465 || !(INTEGRAL_TYPE_P (TREE_TYPE (arg1))
466 || POINTER_TYPE_P (TREE_TYPE (arg1))))
467 return false;
 469 /* If the PHI arguments have the constants 0 and 1, or 0 and -1,
 470 convert it to the conditional. */
471 if ((integer_zerop (arg0) && integer_onep (arg1))
472 || (integer_zerop (arg1) && integer_onep (arg0)))
473 neg = false;
474 else if ((integer_zerop (arg0) && integer_all_onesp (arg1))
475 || (integer_zerop (arg1) && integer_all_onesp (arg0)))
476 neg = true;
477 else
478 return false;
480 if (!empty_block_p (middle_bb))
481 return false;
483 /* At this point we know we have a GIMPLE_COND with two successors.
484 One successor is BB, the other successor is an empty block which
485 falls through into BB.
487 There is a single PHI node at the join point (BB) and its arguments
488 are constants (0, 1) or (0, -1).
490 So, given the condition COND, and the two PHI arguments, we can
491 rewrite this PHI into non-branching code:
493 dest = (COND) or dest = COND'
 495 We use the condition as-is if the argument associated with the
 496 true edge has the value one or the argument associated with the
 497 false edge has the value zero. Note that those conditions are not
498 the same since only one of the outgoing edges from the GIMPLE_COND
499 will directly reach BB and thus be associated with an argument. */
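/* For example, assuming the edge carrying the argument 1 is the true
   edge,

     x = PHI <1, 0>  guarded by  if (a < b)   becomes   x = a < b;

   and with the arguments 0 and -1 the converted condition is negated:

     x = PHI <-1, 0>                          becomes   x = -(a < b);  */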
501 stmt = last_stmt (cond_bb);
502 result = PHI_RESULT (phi);
504 /* To handle special cases like floating point comparison, it is easier and
505 less error-prone to build a tree and gimplify it on the fly though it is
506 less efficient. */
507 cond = fold_build2_loc (gimple_location (stmt),
508 gimple_cond_code (stmt), boolean_type_node,
509 gimple_cond_lhs (stmt), gimple_cond_rhs (stmt));
511 /* We need to know which is the true edge and which is the false
512 edge so that we know when to invert the condition below. */
513 extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
514 if ((e0 == true_edge && integer_zerop (arg0))
515 || (e0 == false_edge && !integer_zerop (arg0))
516 || (e1 == true_edge && integer_zerop (arg1))
517 || (e1 == false_edge && !integer_zerop (arg1)))
518 cond = fold_build1_loc (gimple_location (stmt),
519 TRUTH_NOT_EXPR, TREE_TYPE (cond), cond);
521 if (neg)
523 cond = fold_convert_loc (gimple_location (stmt),
524 TREE_TYPE (result), cond);
525 cond = fold_build1_loc (gimple_location (stmt),
526 NEGATE_EXPR, TREE_TYPE (cond), cond);
529 /* Insert our new statements at the end of conditional block before the
530 COND_STMT. */
531 gsi = gsi_for_stmt (stmt);
532 new_var = force_gimple_operand_gsi (&gsi, cond, true, NULL, true,
533 GSI_SAME_STMT);
535 if (!useless_type_conversion_p (TREE_TYPE (result), TREE_TYPE (new_var)))
537 source_location locus_0, locus_1;
539 new_var2 = make_ssa_name (TREE_TYPE (result), NULL);
540 new_stmt = gimple_build_assign_with_ops (CONVERT_EXPR, new_var2,
541 new_var, NULL);
542 gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
543 new_var = new_var2;
 545 /* Set the locus to the first argument, unless it doesn't have one. */
546 locus_0 = gimple_phi_arg_location (phi, 0);
547 locus_1 = gimple_phi_arg_location (phi, 1);
548 if (locus_0 == UNKNOWN_LOCATION)
549 locus_0 = locus_1;
550 gimple_set_location (new_stmt, locus_0);
553 replace_phi_edge_with_variable (cond_bb, e1, phi, new_var);
555 /* Note that we optimized this PHI. */
556 return true;
559 /* Update *ARG which is defined in STMT so that it contains the
560 computed value if that seems profitable. Return true if the
561 statement is made dead by that rewriting. */
563 static bool
564 jump_function_from_stmt (tree *arg, gimple stmt)
566 enum tree_code code = gimple_assign_rhs_code (stmt);
567 if (code == ADDR_EXPR)
569 /* For arg = &p->i transform it to p, if possible. */
570 tree rhs1 = gimple_assign_rhs1 (stmt);
571 HOST_WIDE_INT offset;
572 tree tem = get_addr_base_and_unit_offset (TREE_OPERAND (rhs1, 0),
573 &offset);
574 if (tem
575 && TREE_CODE (tem) == MEM_REF
576 && (mem_ref_offset (tem) + offset) == 0)
578 *arg = TREE_OPERAND (tem, 0);
579 return true;
582 /* TODO: Much like IPA-CP jump-functions we want to handle constant
583 additions symbolically here, and we'd need to update the comparison
584 code that compares the arg + cst tuples in our caller. For now the
585 code above exactly handles the VEC_BASE pattern from vec.h. */
586 return false;
589 /* RHS is a source argument in a BIT_AND_EXPR which feeds a conditional
590 of the form SSA_NAME NE 0.
592 If RHS is fed by a simple EQ_EXPR comparison of two values, see if
593 the two input values of the EQ_EXPR match arg0 and arg1.
595 If so update *code and return TRUE. Otherwise return FALSE. */
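/* An illustrative sketch of the pattern this matches:

     t1 = a == b;
     t2 = t1 & t3;
     if (t2 != 0) ...

   Here RHS is t1, its defining statement is the EQ_EXPR, and we check
   that a and b match the PHI arguments ARG0 and ARG1, updating *CODE
   to EQ_EXPR on success.  */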
597 static bool
598 rhs_is_fed_for_value_replacement (const_tree arg0, const_tree arg1,
599 enum tree_code *code, const_tree rhs)
601 /* Obviously if RHS is not an SSA_NAME, we can't look at the defining
602 statement. */
603 if (TREE_CODE (rhs) == SSA_NAME)
605 gimple def1 = SSA_NAME_DEF_STMT (rhs);
607 /* Verify the defining statement has an EQ_EXPR on the RHS. */
608 if (is_gimple_assign (def1) && gimple_assign_rhs_code (def1) == EQ_EXPR)
610 /* Finally verify the source operands of the EQ_EXPR are equal
611 to arg0 and arg1. */
612 tree op0 = gimple_assign_rhs1 (def1);
613 tree op1 = gimple_assign_rhs2 (def1);
614 if ((operand_equal_for_phi_arg_p (arg0, op0)
615 && operand_equal_for_phi_arg_p (arg1, op1))
616 || (operand_equal_for_phi_arg_p (arg0, op1)
617 && operand_equal_for_phi_arg_p (arg1, op0)))
619 /* We will perform the optimization. */
620 *code = gimple_assign_rhs_code (def1);
621 return true;
625 return false;
628 /* Return TRUE if arg0/arg1 are equal to the rhs/lhs or lhs/rhs of COND.
 630 Also return TRUE if arg0/arg1 are equal to the source arguments of
 631 an EQ comparison feeding a BIT_AND_EXPR which feeds COND.
633 Return FALSE otherwise. */
635 static bool
636 operand_equal_for_value_replacement (const_tree arg0, const_tree arg1,
637 enum tree_code *code, gimple cond)
639 gimple def;
640 tree lhs = gimple_cond_lhs (cond);
641 tree rhs = gimple_cond_rhs (cond);
643 if ((operand_equal_for_phi_arg_p (arg0, lhs)
644 && operand_equal_for_phi_arg_p (arg1, rhs))
645 || (operand_equal_for_phi_arg_p (arg1, lhs)
646 && operand_equal_for_phi_arg_p (arg0, rhs)))
647 return true;
649 /* Now handle more complex case where we have an EQ comparison
650 which feeds a BIT_AND_EXPR which feeds COND.
652 First verify that COND is of the form SSA_NAME NE 0. */
653 if (*code != NE_EXPR || !integer_zerop (rhs)
654 || TREE_CODE (lhs) != SSA_NAME)
655 return false;
657 /* Now ensure that SSA_NAME is set by a BIT_AND_EXPR. */
658 def = SSA_NAME_DEF_STMT (lhs);
659 if (!is_gimple_assign (def) || gimple_assign_rhs_code (def) != BIT_AND_EXPR)
660 return false;
662 /* Now verify arg0/arg1 correspond to the source arguments of an
663 EQ comparison feeding the BIT_AND_EXPR. */
665 tree tmp = gimple_assign_rhs1 (def);
666 if (rhs_is_fed_for_value_replacement (arg0, arg1, code, tmp))
667 return true;
669 tmp = gimple_assign_rhs2 (def);
670 if (rhs_is_fed_for_value_replacement (arg0, arg1, code, tmp))
671 return true;
673 return false;
 676 /* Returns true if ARG is a neutral element for operation CODE;
 677 RIGHT is true when ARG is the right-hand operand. */
679 static bool
680 neutral_element_p (tree_code code, tree arg, bool right)
682 switch (code)
684 case PLUS_EXPR:
685 case BIT_IOR_EXPR:
686 case BIT_XOR_EXPR:
687 return integer_zerop (arg);
689 case LROTATE_EXPR:
690 case RROTATE_EXPR:
691 case LSHIFT_EXPR:
692 case RSHIFT_EXPR:
693 case MINUS_EXPR:
694 case POINTER_PLUS_EXPR:
695 return right && integer_zerop (arg);
697 case MULT_EXPR:
698 return integer_onep (arg);
700 case TRUNC_DIV_EXPR:
701 case CEIL_DIV_EXPR:
702 case FLOOR_DIV_EXPR:
703 case ROUND_DIV_EXPR:
704 case EXACT_DIV_EXPR:
705 return right && integer_onep (arg);
707 case BIT_AND_EXPR:
708 return integer_all_onesp (arg);
710 default:
711 return false;
715 /* Returns true if ARG is an absorbing element for operation CODE. */
717 static bool
718 absorbing_element_p (tree_code code, tree arg)
720 switch (code)
722 case BIT_IOR_EXPR:
723 return integer_all_onesp (arg);
725 case MULT_EXPR:
726 case BIT_AND_EXPR:
727 return integer_zerop (arg);
729 default:
730 return false;
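/* Concretely: 0 is neutral for x + 0, x | 0 and x ^ 0 on either side,
   and on the right only for x - 0 and the shifts and rotates; 1 is
   neutral for x * 1 and, on the right, for the divisions; all-ones is
   neutral for x & -1.  For the absorbing case, x * 0 and x & 0 yield
   0, and x | -1 yields -1.  */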
734 /* The function value_replacement does the main work of doing the value
735 replacement. Return non-zero if the replacement is done. Otherwise return
736 0. If we remove the middle basic block, return 2.
 737 BB is the basic block where the replacement is going to be done. ARG0
738 is argument 0 from the PHI. Likewise for ARG1. */
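/* For example,

     x = (a != b) ? a : b;

   can be replaced by plain

     x = a;

   since on the path where the condition is false a == b holds, so the
   two PHI arguments have the same value there.  */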
740 static int
741 value_replacement (basic_block cond_bb, basic_block middle_bb,
742 edge e0, edge e1, gimple phi,
743 tree arg0, tree arg1)
745 gimple_stmt_iterator gsi;
746 gimple cond;
747 edge true_edge, false_edge;
748 enum tree_code code;
 749 bool empty_or_with_defined_p = true;
751 /* If the type says honor signed zeros we cannot do this
752 optimization. */
753 if (HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (arg1))))
754 return 0;
756 /* If there is a statement in MIDDLE_BB that defines one of the PHI
757 arguments, then adjust arg0 or arg1. */
758 gsi = gsi_start_nondebug_after_labels_bb (middle_bb);
759 while (!gsi_end_p (gsi))
761 gimple stmt = gsi_stmt (gsi);
762 tree lhs;
763 gsi_next_nondebug (&gsi);
764 if (!is_gimple_assign (stmt))
 766 empty_or_with_defined_p = false;
767 continue;
769 /* Now try to adjust arg0 or arg1 according to the computation
770 in the statement. */
771 lhs = gimple_assign_lhs (stmt);
772 if (!(lhs == arg0
773 && jump_function_from_stmt (&arg0, stmt))
774 || (lhs == arg1
775 && jump_function_from_stmt (&arg1, stmt)))
 776 empty_or_with_defined_p = false;
779 cond = last_stmt (cond_bb);
780 code = gimple_cond_code (cond);
782 /* This transformation is only valid for equality comparisons. */
783 if (code != NE_EXPR && code != EQ_EXPR)
784 return 0;
 786 /* We need to know which is the true edge and which is the false
 787 edge so that we know which PHI argument corresponds to which path. */
788 extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
790 /* At this point we know we have a COND_EXPR with two successors.
791 One successor is BB, the other successor is an empty block which
792 falls through into BB.
794 The condition for the COND_EXPR is known to be NE_EXPR or EQ_EXPR.
796 There is a single PHI node at the join point (BB) with two arguments.
798 We now need to verify that the two arguments in the PHI node match
799 the two arguments to the equality comparison. */
801 if (operand_equal_for_value_replacement (arg0, arg1, &code, cond))
803 edge e;
804 tree arg;
806 /* For NE_EXPR, we want to build an assignment result = arg where
807 arg is the PHI argument associated with the true edge. For
808 EQ_EXPR we want the PHI argument associated with the false edge. */
809 e = (code == NE_EXPR ? true_edge : false_edge);
811 /* Unfortunately, E may not reach BB (it may instead have gone to
812 OTHER_BLOCK). If that is the case, then we want the single outgoing
813 edge from OTHER_BLOCK which reaches BB and represents the desired
814 path from COND_BLOCK. */
815 if (e->dest == middle_bb)
816 e = single_succ_edge (e->dest);
818 /* Now we know the incoming edge to BB that has the argument for the
819 RHS of our new assignment statement. */
820 if (e0 == e)
821 arg = arg0;
822 else
823 arg = arg1;
 825 /* If the middle basic block was empty or only defines the
 826 PHI arguments, and this is the single PHI whose args are different
 827 for the edges e0 and e1, then we can remove the middle basic block. */
 828 if (empty_or_with_defined_p
829 && single_non_singleton_phi_for_edges (phi_nodes (gimple_bb (phi)),
830 e0, e1) == phi)
832 replace_phi_edge_with_variable (cond_bb, e1, phi, arg);
833 /* Note that we optimized this PHI. */
834 return 2;
836 else
838 /* Replace the PHI arguments with arg. */
839 SET_PHI_ARG_DEF (phi, e0->dest_idx, arg);
840 SET_PHI_ARG_DEF (phi, e1->dest_idx, arg);
841 if (dump_file && (dump_flags & TDF_DETAILS))
843 fprintf (dump_file, "PHI ");
844 print_generic_expr (dump_file, gimple_phi_result (phi), 0);
845 fprintf (dump_file, " reduced for COND_EXPR in block %d to ",
846 cond_bb->index);
847 print_generic_expr (dump_file, arg, 0);
848 fprintf (dump_file, ".\n");
850 return 1;
 855 /* Now optimize (x != 0) ? x + y : y to just x + y.
856 The following condition is too restrictive, there can easily be another
857 stmt in middle_bb, for instance a CONVERT_EXPR for the second argument. */
858 gimple assign = last_and_only_stmt (middle_bb);
859 if (!assign || gimple_code (assign) != GIMPLE_ASSIGN
860 || gimple_assign_rhs_class (assign) != GIMPLE_BINARY_RHS
861 || (!INTEGRAL_TYPE_P (TREE_TYPE (arg0))
862 && !POINTER_TYPE_P (TREE_TYPE (arg0))))
863 return 0;
865 /* Punt if there are (degenerate) PHIs in middle_bb, there should not be. */
866 if (!gimple_seq_empty_p (phi_nodes (middle_bb)))
867 return 0;
869 /* Only transform if it removes the condition. */
870 if (!single_non_singleton_phi_for_edges (phi_nodes (gimple_bb (phi)), e0, e1))
871 return 0;
873 /* Size-wise, this is always profitable. */
874 if (optimize_bb_for_speed_p (cond_bb)
875 /* The special case is useless if it has a low probability. */
876 && profile_status_for_fn (cfun) != PROFILE_ABSENT
877 && EDGE_PRED (middle_bb, 0)->probability < PROB_EVEN
878 /* If assign is cheap, there is no point avoiding it. */
879 && estimate_num_insns (assign, &eni_time_weights)
880 >= 3 * estimate_num_insns (cond, &eni_time_weights))
881 return 0;
883 tree lhs = gimple_assign_lhs (assign);
884 tree rhs1 = gimple_assign_rhs1 (assign);
885 tree rhs2 = gimple_assign_rhs2 (assign);
886 enum tree_code code_def = gimple_assign_rhs_code (assign);
887 tree cond_lhs = gimple_cond_lhs (cond);
888 tree cond_rhs = gimple_cond_rhs (cond);
890 if (((code == NE_EXPR && e1 == false_edge)
891 || (code == EQ_EXPR && e1 == true_edge))
892 && arg0 == lhs
893 && ((arg1 == rhs1
894 && operand_equal_for_phi_arg_p (rhs2, cond_lhs)
895 && neutral_element_p (code_def, cond_rhs, true))
896 || (arg1 == rhs2
897 && operand_equal_for_phi_arg_p (rhs1, cond_lhs)
898 && neutral_element_p (code_def, cond_rhs, false))
899 || (operand_equal_for_phi_arg_p (arg1, cond_rhs)
900 && (operand_equal_for_phi_arg_p (rhs2, cond_lhs)
901 || operand_equal_for_phi_arg_p (rhs1, cond_lhs))
902 && absorbing_element_p (code_def, cond_rhs))))
904 gsi = gsi_for_stmt (cond);
905 gimple_stmt_iterator gsi_from = gsi_for_stmt (assign);
906 gsi_move_before (&gsi_from, &gsi);
907 replace_phi_edge_with_variable (cond_bb, e1, phi, lhs);
908 return 2;
911 return 0;
914 /* The function minmax_replacement does the main work of doing the minmax
915 replacement. Return true if the replacement is done. Otherwise return
916 false.
 917 BB is the basic block where the replacement is going to be done. ARG0
918 is argument 0 from the PHI. Likewise for ARG1. */
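/* In the simplest (empty MIDDLE_BB) form this turns

     if (a < b)
       x = a;
     else
       x = b;

   into  x = MIN_EXPR <a, b>;  and the mirrored shape into MAX_EXPR.  */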
920 static bool
921 minmax_replacement (basic_block cond_bb, basic_block middle_bb,
922 edge e0, edge e1, gimple phi,
923 tree arg0, tree arg1)
925 tree result, type;
926 gcond *cond;
927 gassign *new_stmt;
928 edge true_edge, false_edge;
929 enum tree_code cmp, minmax, ass_code;
930 tree smaller, larger, arg_true, arg_false;
931 gimple_stmt_iterator gsi, gsi_from;
933 type = TREE_TYPE (PHI_RESULT (phi));
935 /* The optimization may be unsafe due to NaNs. */
936 if (HONOR_NANS (TYPE_MODE (type)))
937 return false;
939 cond = as_a <gcond *> (last_stmt (cond_bb));
940 cmp = gimple_cond_code (cond);
942 /* This transformation is only valid for order comparisons. Record which
943 operand is smaller/larger if the result of the comparison is true. */
944 if (cmp == LT_EXPR || cmp == LE_EXPR)
946 smaller = gimple_cond_lhs (cond);
947 larger = gimple_cond_rhs (cond);
949 else if (cmp == GT_EXPR || cmp == GE_EXPR)
951 smaller = gimple_cond_rhs (cond);
952 larger = gimple_cond_lhs (cond);
954 else
955 return false;
 957 /* We need to know which is the true edge and which is the false
 958 edge so that we know which PHI argument corresponds to which path. */
959 extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
961 /* Forward the edges over the middle basic block. */
962 if (true_edge->dest == middle_bb)
963 true_edge = EDGE_SUCC (true_edge->dest, 0);
964 if (false_edge->dest == middle_bb)
965 false_edge = EDGE_SUCC (false_edge->dest, 0);
967 if (true_edge == e0)
969 gcc_assert (false_edge == e1);
970 arg_true = arg0;
971 arg_false = arg1;
973 else
975 gcc_assert (false_edge == e0);
976 gcc_assert (true_edge == e1);
977 arg_true = arg1;
978 arg_false = arg0;
981 if (empty_block_p (middle_bb))
983 if (operand_equal_for_phi_arg_p (arg_true, smaller)
984 && operand_equal_for_phi_arg_p (arg_false, larger))
986 /* Case
988 if (smaller < larger)
989 rslt = smaller;
990 else
991 rslt = larger; */
992 minmax = MIN_EXPR;
994 else if (operand_equal_for_phi_arg_p (arg_false, smaller)
995 && operand_equal_for_phi_arg_p (arg_true, larger))
996 minmax = MAX_EXPR;
997 else
998 return false;
1000 else
1002 /* Recognize the following case, assuming d <= u:
1004 if (a <= u)
1005 b = MAX (a, d);
1006 x = PHI <b, u>
1008 This is equivalent to
1010 b = MAX (a, d);
1011 x = MIN (b, u); */
1013 gimple assign = last_and_only_stmt (middle_bb);
1014 tree lhs, op0, op1, bound;
1016 if (!assign
1017 || gimple_code (assign) != GIMPLE_ASSIGN)
1018 return false;
1020 lhs = gimple_assign_lhs (assign);
1021 ass_code = gimple_assign_rhs_code (assign);
1022 if (ass_code != MAX_EXPR && ass_code != MIN_EXPR)
1023 return false;
1024 op0 = gimple_assign_rhs1 (assign);
1025 op1 = gimple_assign_rhs2 (assign);
1027 if (true_edge->src == middle_bb)
1029 /* We got here if the condition is true, i.e., SMALLER < LARGER. */
1030 if (!operand_equal_for_phi_arg_p (lhs, arg_true))
1031 return false;
1033 if (operand_equal_for_phi_arg_p (arg_false, larger))
1035 /* Case
1037 if (smaller < larger)
1039 r' = MAX_EXPR (smaller, bound)
1041 r = PHI <r', larger> --> to be turned to MIN_EXPR. */
1042 if (ass_code != MAX_EXPR)
1043 return false;
1045 minmax = MIN_EXPR;
1046 if (operand_equal_for_phi_arg_p (op0, smaller))
1047 bound = op1;
1048 else if (operand_equal_for_phi_arg_p (op1, smaller))
1049 bound = op0;
1050 else
1051 return false;
1053 /* We need BOUND <= LARGER. */
1054 if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
1055 bound, larger)))
1056 return false;
1058 else if (operand_equal_for_phi_arg_p (arg_false, smaller))
1060 /* Case
1062 if (smaller < larger)
1064 r' = MIN_EXPR (larger, bound)
1066 r = PHI <r', smaller> --> to be turned to MAX_EXPR. */
1067 if (ass_code != MIN_EXPR)
1068 return false;
1070 minmax = MAX_EXPR;
1071 if (operand_equal_for_phi_arg_p (op0, larger))
1072 bound = op1;
1073 else if (operand_equal_for_phi_arg_p (op1, larger))
1074 bound = op0;
1075 else
1076 return false;
1078 /* We need BOUND >= SMALLER. */
1079 if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
1080 bound, smaller)))
1081 return false;
1083 else
1084 return false;
1086 else
1088 /* We got here if the condition is false, i.e., SMALLER > LARGER. */
1089 if (!operand_equal_for_phi_arg_p (lhs, arg_false))
1090 return false;
1092 if (operand_equal_for_phi_arg_p (arg_true, larger))
1094 /* Case
1096 if (smaller > larger)
1098 r' = MIN_EXPR (smaller, bound)
1100 r = PHI <r', larger> --> to be turned to MAX_EXPR. */
1101 if (ass_code != MIN_EXPR)
1102 return false;
1104 minmax = MAX_EXPR;
1105 if (operand_equal_for_phi_arg_p (op0, smaller))
1106 bound = op1;
1107 else if (operand_equal_for_phi_arg_p (op1, smaller))
1108 bound = op0;
1109 else
1110 return false;
1112 /* We need BOUND >= LARGER. */
1113 if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
1114 bound, larger)))
1115 return false;
1117 else if (operand_equal_for_phi_arg_p (arg_true, smaller))
1119 /* Case
1121 if (smaller > larger)
1123 r' = MAX_EXPR (larger, bound)
1125 r = PHI <r', smaller> --> to be turned to MIN_EXPR. */
1126 if (ass_code != MAX_EXPR)
1127 return false;
1129 minmax = MIN_EXPR;
1130 if (operand_equal_for_phi_arg_p (op0, larger))
1131 bound = op1;
1132 else if (operand_equal_for_phi_arg_p (op1, larger))
1133 bound = op0;
1134 else
1135 return false;
1137 /* We need BOUND <= SMALLER. */
1138 if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
1139 bound, smaller)))
1140 return false;
1142 else
1143 return false;
1146 /* Move the statement from the middle block. */
1147 gsi = gsi_last_bb (cond_bb);
1148 gsi_from = gsi_last_nondebug_bb (middle_bb);
1149 gsi_move_before (&gsi_from, &gsi);
1152 /* Emit the statement to compute min/max. */
1153 result = duplicate_ssa_name (PHI_RESULT (phi), NULL);
1154 new_stmt = gimple_build_assign_with_ops (minmax, result, arg0, arg1);
1155 gsi = gsi_last_bb (cond_bb);
1156 gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
1158 replace_phi_edge_with_variable (cond_bb, e1, phi, result);
1159 return true;
 1162 /* The function abs_replacement does the main work of doing the absolute
1163 replacement. Return true if the replacement is done. Otherwise return
1164 false.
 1165 bb is the basic block where the replacement is going to be done. arg0
1166 is argument 0 from the phi. Likewise for arg1. */
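/* For example,

     if (a < 0)
       a_neg = -a;
     x = PHI <a_neg, a>

   becomes  x = ABS_EXPR <a>;  with an extra NEGATE_EXPR appended when
   the branch shape asks for -ABS instead.  */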
1168 static bool
1169 abs_replacement (basic_block cond_bb, basic_block middle_bb,
1170 edge e0 ATTRIBUTE_UNUSED, edge e1,
1171 gimple phi, tree arg0, tree arg1)
1173 tree result;
1174 gassign *new_stmt;
1175 gimple cond;
1176 gimple_stmt_iterator gsi;
1177 edge true_edge, false_edge;
1178 gimple assign;
1179 edge e;
1180 tree rhs, lhs;
1181 bool negate;
1182 enum tree_code cond_code;
1184 /* If the type says honor signed zeros we cannot do this
1185 optimization. */
1186 if (HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (arg1))))
1187 return false;
1189 /* OTHER_BLOCK must have only one executable statement which must have the
1190 form arg0 = -arg1 or arg1 = -arg0. */
1192 assign = last_and_only_stmt (middle_bb);
 1193 /* If we did not find the proper negation assignment, then we cannot
 1194 optimize. */
1195 if (assign == NULL)
1196 return false;
 1198 /* If we got here, then we have found the only executable statement
 1199 in OTHER_BLOCK. If it is anything other than arg0 = -arg1 or
 1200 arg1 = -arg0, then we cannot optimize. */
1201 if (gimple_code (assign) != GIMPLE_ASSIGN)
1202 return false;
1204 lhs = gimple_assign_lhs (assign);
1206 if (gimple_assign_rhs_code (assign) != NEGATE_EXPR)
1207 return false;
1209 rhs = gimple_assign_rhs1 (assign);
1211 /* The assignment has to be arg0 = -arg1 or arg1 = -arg0. */
1212 if (!(lhs == arg0 && rhs == arg1)
1213 && !(lhs == arg1 && rhs == arg0))
1214 return false;
1216 cond = last_stmt (cond_bb);
1217 result = PHI_RESULT (phi);
1219 /* Only relationals comparing arg[01] against zero are interesting. */
1220 cond_code = gimple_cond_code (cond);
1221 if (cond_code != GT_EXPR && cond_code != GE_EXPR
1222 && cond_code != LT_EXPR && cond_code != LE_EXPR)
1223 return false;
1225 /* Make sure the conditional is arg[01] OP y. */
1226 if (gimple_cond_lhs (cond) != rhs)
1227 return false;
1229 if (FLOAT_TYPE_P (TREE_TYPE (gimple_cond_rhs (cond)))
1230 ? real_zerop (gimple_cond_rhs (cond))
1231 : integer_zerop (gimple_cond_rhs (cond)))
1233 else
1234 return false;
 1236 /* We need to know which is the true edge and which is the false
 1237 edge so that we know if we have abs or negative abs. */
1238 extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
1240 /* For GT_EXPR/GE_EXPR, if the true edge goes to OTHER_BLOCK, then we
1241 will need to negate the result. Similarly for LT_EXPR/LE_EXPR if
1242 the false edge goes to OTHER_BLOCK. */
1243 if (cond_code == GT_EXPR || cond_code == GE_EXPR)
1244 e = true_edge;
1245 else
1246 e = false_edge;
1248 if (e->dest == middle_bb)
1249 negate = true;
1250 else
1251 negate = false;
1253 result = duplicate_ssa_name (result, NULL);
1255 if (negate)
1256 lhs = make_ssa_name (TREE_TYPE (result), NULL);
1257 else
1258 lhs = result;
1260 /* Build the modify expression with abs expression. */
1261 new_stmt = gimple_build_assign_with_ops (ABS_EXPR, lhs, rhs, NULL);
1263 gsi = gsi_last_bb (cond_bb);
1264 gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
1266 if (negate)
 1268 /* Get the right GSI. We want to insert after the recently
 1269 added ABS_EXPR statement (which we know is the first statement
 1270 in the block). */
1271 new_stmt = gimple_build_assign_with_ops (NEGATE_EXPR, result, lhs, NULL);
1273 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
1276 replace_phi_edge_with_variable (cond_bb, e1, phi, result);
1278 /* Note that we optimized this PHI. */
1279 return true;
1282 /* The function neg_replacement replaces conditional negation with
1283 equivalent straight line code. Returns TRUE if replacement is done,
1284 otherwise returns FALSE.
 1286 COND_BB branches around negation occurring in MIDDLE_BB.
1288 E0 and E1 are edges out of COND_BB. E0 reaches MIDDLE_BB and
1289 E1 reaches the other successor which should contain PHI with
1290 arguments ARG0 and ARG1.
1292 Assuming negation is to occur when the condition is true,
1293 then the non-branching sequence is:
1295 result = (rhs ^ -cond) + cond
1297 Inverting the condition or its result gives us negation
1298 when the original condition is false. */
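/* To see why this works, recall that true is 1 and that -1 is all ones
   in two's complement: with cond == 1 the sequence computes
   (rhs ^ -1) + 1 = ~rhs + 1 = -rhs, and with cond == 0 it computes
   (rhs ^ 0) + 0 = rhs.  */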
1300 static bool
1301 neg_replacement (basic_block cond_bb, basic_block middle_bb,
1302 edge e0 ATTRIBUTE_UNUSED, edge e1,
1303 gimple phi, tree arg0, tree arg1)
1305 gimple new_stmt, cond;
1306 gimple_stmt_iterator gsi;
1307 gimple assign;
1308 edge true_edge, false_edge;
1309 tree rhs, lhs;
1310 enum tree_code cond_code;
1311 bool invert = false;
1313 /* This transformation performs logical operations on the
1314 incoming arguments. So force them to be integral types. */
1315 if (!INTEGRAL_TYPE_P (TREE_TYPE (arg0)))
1316 return false;
1318 /* OTHER_BLOCK must have only one executable statement which must have the
1319 form arg0 = -arg1 or arg1 = -arg0. */
1321 assign = last_and_only_stmt (middle_bb);
 1322 /* If we did not find the proper negation assignment, then we cannot
 1323 optimize. */
1324 if (assign == NULL)
1325 return false;
1327 /* If we got here, then we have found the only executable statement
1328 in OTHER_BLOCK. If it is anything other than arg0 = -arg1 or
 1329 arg1 = -arg0, then we cannot optimize. */
1330 if (gimple_code (assign) != GIMPLE_ASSIGN)
1331 return false;
1333 lhs = gimple_assign_lhs (assign);
1335 if (gimple_assign_rhs_code (assign) != NEGATE_EXPR)
1336 return false;
1338 rhs = gimple_assign_rhs1 (assign);
1340 /* The assignment has to be arg0 = -arg1 or arg1 = -arg0. */
1341 if (!(lhs == arg0 && rhs == arg1)
1342 && !(lhs == arg1 && rhs == arg0))
1343 return false;
1345 /* The basic sequence assumes we negate when the condition is true.
1346 If we need the opposite, then we will either need to invert the
1347 condition or its result. */
1348 extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
1349 invert = false_edge->dest == middle_bb;
1351 /* Unlike abs_replacement, we can handle arbitrary conditionals here. */
1352 cond = last_stmt (cond_bb);
1353 cond_code = gimple_cond_code (cond);
1355 /* If inversion is needed, first try to invert the test since
1356 that's cheapest. */
1357 if (invert)
1359 bool honor_nans
1360 = HONOR_NANS (TYPE_MODE (TREE_TYPE (gimple_cond_lhs (cond))));
1361 enum tree_code new_code = invert_tree_comparison (cond_code, honor_nans);
1363 /* If invert_tree_comparison was successful, then use its return
1364 value as the new code and note that inversion is no longer
1365 needed. */
1366 if (new_code != ERROR_MARK)
1368 cond_code = new_code;
1369 invert = false;
1373 tree cond_val = make_ssa_name (boolean_type_node, NULL);
1374 new_stmt = gimple_build_assign_with_ops (cond_code, cond_val,
1375 gimple_cond_lhs (cond),
1376 gimple_cond_rhs (cond));
1377 gsi = gsi_last_bb (cond_bb);
1378 gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
1380 /* If we still need inversion, then invert the result of the
1381 condition. */
1382 if (invert)
1384 tree tmp = make_ssa_name (boolean_type_node, NULL);
1385 new_stmt = gimple_build_assign_with_ops (BIT_XOR_EXPR, tmp,
1386 cond_val, boolean_true_node);
1387 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
1388 cond_val = tmp;
1391 /* Get the condition in the right type so that we can perform
1392 logical and arithmetic operations on it. */
1393 tree cond_val_converted = make_ssa_name (TREE_TYPE (rhs), NULL);
1394 new_stmt = gimple_build_assign_with_ops (NOP_EXPR, cond_val_converted,
1395 cond_val, NULL_TREE);
1396 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
1398 tree neg_cond_val_converted = make_ssa_name (TREE_TYPE (rhs), NULL);
1399 new_stmt = gimple_build_assign_with_ops (NEGATE_EXPR, neg_cond_val_converted,
1400 cond_val_converted, NULL_TREE);
1401 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
1403 tree tmp = make_ssa_name (TREE_TYPE (rhs), NULL);
1404 new_stmt = gimple_build_assign_with_ops (BIT_XOR_EXPR, tmp,
1405 rhs, neg_cond_val_converted);
1406 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
1408 tree new_lhs = make_ssa_name (TREE_TYPE (rhs), NULL);
1409 new_stmt = gimple_build_assign_with_ops (PLUS_EXPR, new_lhs,
1410 tmp, cond_val_converted);
1411 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
1413 replace_phi_edge_with_variable (cond_bb, e1, phi, new_lhs);
1415 /* Note that we optimized this PHI. */
1416 return true;
1419 /* Auxiliary functions to determine the set of memory accesses which
1420 can't trap because they are preceded by accesses to the same memory
1421 portion. We do that for MEM_REFs, so we only need to track
1422 the SSA_NAME of the pointer indirectly referenced. The algorithm
1423 simply is a walk over all instructions in dominator order. When
1424 we see an MEM_REF we determine if we've already seen a same
1425 ref anywhere up to the root of the dominator tree. If we do the
1426 current access can't trap. If we don't see any dominating access
1427 the current access might trap, but might also make later accesses
1428 non-trapping, so we remember it. We need to be careful with loads
1429 or stores, for instance a load might not trap, while a store would,
1430 so if we see a dominating read access this doesn't mean that a later
1431 write access would not trap. Hence we also need to differentiate the
1432 type of access(es) seen.
1434 ??? We currently are very conservative and assume that a load might
1435 trap even if a store doesn't (write-only memory). This probably is
1436 overly conservative. */
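/* For example, in

     *p_1 = 0;
     if (cond)
       *p_1 = 1;

   the conditional store cannot trap, because the dominating store
   through the same SSA_NAME (with the same offset and size) already
   accessed that memory.  */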
1438 /* A hash-table of SSA_NAMEs, and in which basic block an MEM_REF
1439 through it was seen, which would constitute a no-trap region for
1440 same accesses. */
1441 struct name_to_bb
1443 unsigned int ssa_name_ver;
1444 unsigned int phase;
1445 bool store;
1446 HOST_WIDE_INT offset, size;
1447 basic_block bb;
1450 /* Hashtable helpers. */
1452 struct ssa_names_hasher : typed_free_remove <name_to_bb>
1454 typedef name_to_bb value_type;
1455 typedef name_to_bb compare_type;
1456 static inline hashval_t hash (const value_type *);
1457 static inline bool equal (const value_type *, const compare_type *);
1460 /* Used for quick clearing of the hash-table when we see calls.
1461 Hash entries with phase < nt_call_phase are invalid. */
1462 static unsigned int nt_call_phase;
1464 /* The hash function. */
1466 inline hashval_t
1467 ssa_names_hasher::hash (const value_type *n)
1469 return n->ssa_name_ver ^ (((hashval_t) n->store) << 31)
1470 ^ (n->offset << 6) ^ (n->size << 3);
1473 /* The equality function of *P1 and *P2. */
1475 inline bool
1476 ssa_names_hasher::equal (const value_type *n1, const compare_type *n2)
1478 return n1->ssa_name_ver == n2->ssa_name_ver
1479 && n1->store == n2->store
1480 && n1->offset == n2->offset
1481 && n1->size == n2->size;
1484 class nontrapping_dom_walker : public dom_walker
1486 public:
1487 nontrapping_dom_walker (cdi_direction direction, hash_set<tree> *ps)
1488 : dom_walker (direction), m_nontrapping (ps), m_seen_ssa_names (128) {}
1490 virtual void before_dom_children (basic_block);
1491 virtual void after_dom_children (basic_block);
1493 private:
1495 /* We see the expression EXP in basic block BB. If it's an interesting
1496 expression (an MEM_REF through an SSA_NAME) possibly insert the
1497 expression into the set NONTRAP or the hash table of seen expressions.
1498 STORE is true if this expression is on the LHS, otherwise it's on
1499 the RHS. */
1500 void add_or_mark_expr (basic_block, tree, bool);
1502 hash_set<tree> *m_nontrapping;
1504 /* The hash table for remembering what we've seen. */
1505 hash_table<ssa_names_hasher> m_seen_ssa_names;
1508 /* Called by walk_dominator_tree, when entering the block BB. */
1509 void
1510 nontrapping_dom_walker::before_dom_children (basic_block bb)
1512 edge e;
1513 edge_iterator ei;
1514 gimple_stmt_iterator gsi;
1516 /* If we haven't seen all our predecessors, clear the hash-table. */
1517 FOR_EACH_EDGE (e, ei, bb->preds)
1518 if ((((size_t)e->src->aux) & 2) == 0)
1520 nt_call_phase++;
1521 break;
1524 /* Mark this BB as being on the path to dominator root and as visited. */
1525 bb->aux = (void*)(1 | 2);
1527 /* And walk the statements in order. */
1528 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1530 gimple stmt = gsi_stmt (gsi);
1532 if (is_gimple_call (stmt) && !nonfreeing_call_p (stmt))
1533 nt_call_phase++;
1534 else if (gimple_assign_single_p (stmt) && !gimple_has_volatile_ops (stmt))
1536 add_or_mark_expr (bb, gimple_assign_lhs (stmt), true);
1537 add_or_mark_expr (bb, gimple_assign_rhs1 (stmt), false);
1542 /* Called by walk_dominator_tree, when basic block BB is exited. */
1543 void
1544 nontrapping_dom_walker::after_dom_children (basic_block bb)
1546 /* This BB isn't on the path to dominator root anymore. */
1547 bb->aux = (void*)2;
1550 /* We see the expression EXP in basic block BB. If it's an interesting
1551 expression (an MEM_REF through an SSA_NAME) possibly insert the
1552 expression into the set NONTRAP or the hash table of seen expressions.
1553 STORE is true if this expression is on the LHS, otherwise it's on
1554 the RHS. */
1555 void
1556 nontrapping_dom_walker::add_or_mark_expr (basic_block bb, tree exp, bool store)
1558 HOST_WIDE_INT size;
1560 if (TREE_CODE (exp) == MEM_REF
1561 && TREE_CODE (TREE_OPERAND (exp, 0)) == SSA_NAME
1562 && tree_fits_shwi_p (TREE_OPERAND (exp, 1))
1563 && (size = int_size_in_bytes (TREE_TYPE (exp))) > 0)
1565 tree name = TREE_OPERAND (exp, 0);
1566 struct name_to_bb map;
1567 name_to_bb **slot;
1568 struct name_to_bb *n2bb;
1569 basic_block found_bb = 0;
1571 /* Try to find the last seen MEM_REF through the same
1572 SSA_NAME, which can trap. */
1573 map.ssa_name_ver = SSA_NAME_VERSION (name);
1574 map.phase = 0;
1575 map.bb = 0;
1576 map.store = store;
1577 map.offset = tree_to_shwi (TREE_OPERAND (exp, 1));
1578 map.size = size;
1580 slot = m_seen_ssa_names.find_slot (&map, INSERT);
1581 n2bb = *slot;
1582 if (n2bb && n2bb->phase >= nt_call_phase)
1583 found_bb = n2bb->bb;
1585 /* If we've found a trapping MEM_REF, _and_ it dominates EXP
1586 (it's in a basic block on the path from us to the dominator root)
1587 then we can't trap. */
1588 if (found_bb && (((size_t)found_bb->aux) & 1) == 1)
1590 m_nontrapping->add (exp);
1592 else
1594 /* EXP might trap, so insert it into the hash table. */
1595 if (n2bb)
1597 n2bb->phase = nt_call_phase;
1598 n2bb->bb = bb;
1600 else
1602 n2bb = XNEW (struct name_to_bb);
1603 n2bb->ssa_name_ver = SSA_NAME_VERSION (name);
1604 n2bb->phase = nt_call_phase;
1605 n2bb->bb = bb;
1606 n2bb->store = store;
1607 n2bb->offset = map.offset;
1608 n2bb->size = size;
1609 *slot = n2bb;
 1615 /* This is the entry point of gathering non-trapping memory accesses.
 1616 It will do a dominator walk over the whole function, and it will
 1617 make use of the bb->aux pointers. It returns a set of trees
 1618 (the MEM_REFs themselves) which can't trap. */
1619 static hash_set<tree> *
1620 get_non_trapping (void)
1622 nt_call_phase = 0;
1623 hash_set<tree> *nontrap = new hash_set<tree>;
1624 /* We're going to do a dominator walk, so ensure that we have
1625 dominance information. */
1626 calculate_dominance_info (CDI_DOMINATORS);
1628 nontrapping_dom_walker (CDI_DOMINATORS, nontrap)
1629 .walk (cfun->cfg->x_entry_block_ptr);
1631 clear_aux_for_blocks ();
1632 return nontrap;
1635 /* Do the main work of conditional store replacement. We already know
1636 that the recognized pattern looks like so:
1638 split:
1639 if (cond) goto MIDDLE_BB; else goto JOIN_BB (edge E1)
1640 MIDDLE_BB:
1641 something
1642 fallthrough (edge E0)
1643 JOIN_BB:
1644 some more
1646 We check that MIDDLE_BB contains only one store, that that store
1647 doesn't trap (not via NOTRAP, but via checking if an access to the same
1648 memory location dominates us) and that the store has a "simple" RHS. */
1650 static bool
1651 cond_store_replacement (basic_block middle_bb, basic_block join_bb,
1652 edge e0, edge e1, hash_set<tree> *nontrap)
1654 gimple assign = last_and_only_stmt (middle_bb);
1655 tree lhs, rhs, name, name2;
1656 gphi *newphi;
1657 gassign *new_stmt;
1658 gimple_stmt_iterator gsi;
1659 source_location locus;
 1661 /* Check if middle_bb contains only one store. */
1662 if (!assign
1663 || !gimple_assign_single_p (assign)
1664 || gimple_has_volatile_ops (assign))
1665 return false;
1667 locus = gimple_location (assign);
1668 lhs = gimple_assign_lhs (assign);
1669 rhs = gimple_assign_rhs1 (assign);
1670 if (TREE_CODE (lhs) != MEM_REF
1671 || TREE_CODE (TREE_OPERAND (lhs, 0)) != SSA_NAME
1672 || !is_gimple_reg_type (TREE_TYPE (lhs)))
1673 return false;
 1675 /* Prove that we can move the store down. We could also check
 1676 TREE_THIS_NOTRAP here, but in that case we also could move stores
 1677 whose value is not readily available, which we want to avoid. */
1678 if (!nontrap->contains (lhs))
1679 return false;
1681 /* Now we've checked the constraints, so do the transformation:
1682 1) Remove the single store. */
1683 gsi = gsi_for_stmt (assign);
1684 unlink_stmt_vdef (assign);
1685 gsi_remove (&gsi, true);
1686 release_defs (assign);
1688 /* 2) Insert a load from the memory of the store to the temporary
1689 on the edge which did not contain the store. */
1690 lhs = unshare_expr (lhs);
1691 name = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
1692 new_stmt = gimple_build_assign (name, lhs);
1693 gimple_set_location (new_stmt, locus);
1694 gsi_insert_on_edge (e1, new_stmt);
1696 /* 3) Create a PHI node at the join block, with one argument
1697 holding the old RHS, and the other holding the temporary
1698 where we stored the old memory contents. */
1699 name2 = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
1700 newphi = create_phi_node (name2, join_bb);
1701 add_phi_arg (newphi, rhs, e0, locus);
1702 add_phi_arg (newphi, name, e1, locus);
1704 lhs = unshare_expr (lhs);
1705 new_stmt = gimple_build_assign (lhs, PHI_RESULT (newphi));
 1707 /* 4) Insert that store statement. */
1708 gsi = gsi_after_labels (join_bb);
1709 if (gsi_end_p (gsi))
1711 gsi = gsi_last_bb (join_bb);
1712 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
1714 else
1715 gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
1717 return true;
1720 /* Do the main work of conditional store replacement. */
1722 static bool
1723 cond_if_else_store_replacement_1 (basic_block then_bb, basic_block else_bb,
1724 basic_block join_bb, gimple then_assign,
1725 gimple else_assign)
1727 tree lhs_base, lhs, then_rhs, else_rhs, name;
1728 source_location then_locus, else_locus;
1729 gimple_stmt_iterator gsi;
1730 gphi *newphi;
1731 gassign *new_stmt;
1733 if (then_assign == NULL
1734 || !gimple_assign_single_p (then_assign)
1735 || gimple_clobber_p (then_assign)
1736 || gimple_has_volatile_ops (then_assign)
1737 || else_assign == NULL
1738 || !gimple_assign_single_p (else_assign)
1739 || gimple_clobber_p (else_assign)
1740 || gimple_has_volatile_ops (else_assign))
1741 return false;
1743 lhs = gimple_assign_lhs (then_assign);
1744 if (!is_gimple_reg_type (TREE_TYPE (lhs))
1745 || !operand_equal_p (lhs, gimple_assign_lhs (else_assign), 0))
1746 return false;
1748 lhs_base = get_base_address (lhs);
1749 if (lhs_base == NULL_TREE
1750 || (!DECL_P (lhs_base) && TREE_CODE (lhs_base) != MEM_REF))
1751 return false;
1753 then_rhs = gimple_assign_rhs1 (then_assign);
1754 else_rhs = gimple_assign_rhs1 (else_assign);
1755 then_locus = gimple_location (then_assign);
1756 else_locus = gimple_location (else_assign);
1758 /* Now we've checked the constraints, so do the transformation:
1759 1) Remove the stores. */
1760 gsi = gsi_for_stmt (then_assign);
1761 unlink_stmt_vdef (then_assign);
1762 gsi_remove (&gsi, true);
1763 release_defs (then_assign);
1765 gsi = gsi_for_stmt (else_assign);
1766 unlink_stmt_vdef (else_assign);
1767 gsi_remove (&gsi, true);
1768 release_defs (else_assign);
 1770 /* 2) Create a PHI node at the join block, with one argument
 1771 holding the RHS stored in THEN_BB, and the other holding the
 1772 RHS stored in ELSE_BB. */
1773 name = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
1774 newphi = create_phi_node (name, join_bb);
1775 add_phi_arg (newphi, then_rhs, EDGE_SUCC (then_bb, 0), then_locus);
1776 add_phi_arg (newphi, else_rhs, EDGE_SUCC (else_bb, 0), else_locus);
1778 new_stmt = gimple_build_assign (lhs, PHI_RESULT (newphi));
 1780 /* 3) Insert that store statement. */
1781 gsi = gsi_after_labels (join_bb);
1782 if (gsi_end_p (gsi))
1784 gsi = gsi_last_bb (join_bb);
1785 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
1787 else
1788 gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
1790 return true;
1793 /* Conditional store replacement. We already know
1794 that the recognized pattern looks like so:
1796 split:
1797 if (cond) goto THEN_BB; else goto ELSE_BB (edge E1)
1798 THEN_BB:
1800 X = Y;
1802 goto JOIN_BB;
1803 ELSE_BB:
1805 X = Z;
1807 fallthrough (edge E0)
1808 JOIN_BB:
1809 some more
1811 We check that it is safe to sink the store to JOIN_BB by verifying that
1812 there are no read-after-write or write-after-write dependencies in
1813 THEN_BB and ELSE_BB. */
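/* E.g. for a matching pair of stores (SSA names illustrative; the
   temporary really is named "cstore" by make_temp_ssa_name below),

     if (cond)
       *p_1 = a;
     else
       *p_1 = b;

   the stores are sunk into JOIN_BB as

     cstore_2 = PHI <a, b>
     *p_1 = cstore_2;  */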

static bool
cond_if_else_store_replacement (basic_block then_bb, basic_block else_bb,
				basic_block join_bb)
{
  gimple then_assign = last_and_only_stmt (then_bb);
  gimple else_assign = last_and_only_stmt (else_bb);
  vec<data_reference_p> then_datarefs, else_datarefs;
  vec<ddr_p> then_ddrs, else_ddrs;
  gimple then_store, else_store;
  bool found, ok = false, res;
  struct data_dependence_relation *ddr;
  data_reference_p then_dr, else_dr;
  int i, j;
  tree then_lhs, else_lhs;
  basic_block blocks[3];

  if (MAX_STORES_TO_SINK == 0)
    return false;

  /* Handle the case with a single statement in THEN_BB and ELSE_BB.  */
  if (then_assign && else_assign)
    return cond_if_else_store_replacement_1 (then_bb, else_bb, join_bb,
					     then_assign, else_assign);

  /* Find data references.  */
  then_datarefs.create (1);
  else_datarefs.create (1);
  if ((find_data_references_in_bb (NULL, then_bb, &then_datarefs)
       == chrec_dont_know)
      || !then_datarefs.length ()
      || (find_data_references_in_bb (NULL, else_bb, &else_datarefs)
	  == chrec_dont_know)
      || !else_datarefs.length ())
    {
      free_data_refs (then_datarefs);
      free_data_refs (else_datarefs);
      return false;
    }

  /* Find pairs of stores with equal LHS.  */
  auto_vec<gimple, 1> then_stores, else_stores;
  FOR_EACH_VEC_ELT (then_datarefs, i, then_dr)
    {
      if (DR_IS_READ (then_dr))
	continue;

      then_store = DR_STMT (then_dr);
      then_lhs = gimple_get_lhs (then_store);
      if (then_lhs == NULL_TREE)
	continue;
      found = false;

      FOR_EACH_VEC_ELT (else_datarefs, j, else_dr)
	{
	  if (DR_IS_READ (else_dr))
	    continue;

	  else_store = DR_STMT (else_dr);
	  else_lhs = gimple_get_lhs (else_store);
	  if (else_lhs == NULL_TREE)
	    continue;

	  if (operand_equal_p (then_lhs, else_lhs, 0))
	    {
	      found = true;
	      break;
	    }
	}

      if (!found)
	continue;

      then_stores.safe_push (then_store);
      else_stores.safe_push (else_store);
    }

  /* No pairs of stores found.  */
  if (!then_stores.length ()
      || then_stores.length () > (unsigned) MAX_STORES_TO_SINK)
    {
      free_data_refs (then_datarefs);
      free_data_refs (else_datarefs);
      return false;
    }

  /* Compute and check data dependencies in both basic blocks.  */
  then_ddrs.create (1);
  else_ddrs.create (1);
  if (!compute_all_dependences (then_datarefs, &then_ddrs,
				vNULL, false)
      || !compute_all_dependences (else_datarefs, &else_ddrs,
				   vNULL, false))
    {
      free_dependence_relations (then_ddrs);
      free_dependence_relations (else_ddrs);
      free_data_refs (then_datarefs);
      free_data_refs (else_datarefs);
      return false;
    }
  blocks[0] = then_bb;
  blocks[1] = else_bb;
  blocks[2] = join_bb;
  renumber_gimple_stmt_uids_in_blocks (blocks, 3);

  /* Check that there are no read-after-write or write-after-write dependencies
     in THEN_BB.  */
  FOR_EACH_VEC_ELT (then_ddrs, i, ddr)
    {
      struct data_reference *dra = DDR_A (ddr);
      struct data_reference *drb = DDR_B (ddr);

      if (DDR_ARE_DEPENDENT (ddr) != chrec_known
	  && ((DR_IS_READ (dra) && DR_IS_WRITE (drb)
	       && gimple_uid (DR_STMT (dra)) > gimple_uid (DR_STMT (drb)))
	      || (DR_IS_READ (drb) && DR_IS_WRITE (dra)
		  && gimple_uid (DR_STMT (drb)) > gimple_uid (DR_STMT (dra)))
	      || (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))))
	{
	  free_dependence_relations (then_ddrs);
	  free_dependence_relations (else_ddrs);
	  free_data_refs (then_datarefs);
	  free_data_refs (else_datarefs);
	  return false;
	}
    }

  /* Check that there are no read-after-write or write-after-write dependencies
     in ELSE_BB.  */
  FOR_EACH_VEC_ELT (else_ddrs, i, ddr)
    {
      struct data_reference *dra = DDR_A (ddr);
      struct data_reference *drb = DDR_B (ddr);

      if (DDR_ARE_DEPENDENT (ddr) != chrec_known
	  && ((DR_IS_READ (dra) && DR_IS_WRITE (drb)
	       && gimple_uid (DR_STMT (dra)) > gimple_uid (DR_STMT (drb)))
	      || (DR_IS_READ (drb) && DR_IS_WRITE (dra)
		  && gimple_uid (DR_STMT (drb)) > gimple_uid (DR_STMT (dra)))
	      || (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))))
	{
	  free_dependence_relations (then_ddrs);
	  free_dependence_relations (else_ddrs);
	  free_data_refs (then_datarefs);
	  free_data_refs (else_datarefs);
	  return false;
	}
    }

  /* Sink stores with same LHS.  */
  FOR_EACH_VEC_ELT (then_stores, i, then_store)
    {
      else_store = else_stores[i];
      res = cond_if_else_store_replacement_1 (then_bb, else_bb, join_bb,
					      then_store, else_store);
      ok = ok || res;
    }

  free_dependence_relations (then_ddrs);
  free_dependence_relations (else_ddrs);
  free_data_refs (then_datarefs);
  free_data_refs (else_datarefs);

  return ok;
}

/* Return TRUE if STMT has a VUSE whose corresponding VDEF is in BB.  */

static bool
local_mem_dependence (gimple stmt, basic_block bb)
{
  tree vuse = gimple_vuse (stmt);
  gimple def;

  if (!vuse)
    return false;

  def = SSA_NAME_DEF_STMT (vuse);
  return (def && gimple_bb (def) == bb);
}

/* Given a "diamond" control-flow pattern where BB0 tests a condition,
   BB1 and BB2 are "then" and "else" blocks dependent on this test,
   and BB3 rejoins control flow following BB1 and BB2, look for
   opportunities to hoist loads as follows.  If BB3 contains a PHI of
   two loads, one each occurring in BB1 and BB2, and the loads are
   provably of adjacent fields in the same structure, then move both
   loads into BB0.  Of course this can only be done if there are no
   dependencies preventing such motion.

   One of the hoisted loads will always be speculative, so the
   transformation is currently conservative:

    - The fields must be strictly adjacent.
    - The two fields must occupy a single memory block that is
      guaranteed to not cross a page boundary.

   The last is difficult to prove, as such memory blocks should be
   aligned on the minimum of the stack alignment boundary and the
   alignment guaranteed by heap allocation interfaces.  Thus we rely
   on a parameter for the alignment value.

   Provided a good value is used for the last case, the first
   restriction could possibly be relaxed.  */
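
/* As an illustrative source example (not taken from this file), consider

     struct node { struct node *left; struct node *right; };
     ...
     x = c ? n->left : n->right;

   The two loads read strictly adjacent fields of the same object, so
   when the pair also fits within one suitably aligned block (see the
   cache-line parameter used below), both loads may be hoisted above
   the branch.  */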

static void
hoist_adjacent_loads (basic_block bb0, basic_block bb1,
		      basic_block bb2, basic_block bb3)
{
  int param_align = PARAM_VALUE (PARAM_L1_CACHE_LINE_SIZE);
  unsigned param_align_bits = (unsigned) (param_align * BITS_PER_UNIT);
  gphi_iterator gsi;

  /* Walk the phis in bb3 looking for an opportunity.  We are looking
     for phis of two SSA names, one each of which is defined in bb1 and
     bb2.  */
  for (gsi = gsi_start_phis (bb3); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gphi *phi_stmt = gsi.phi ();
      gimple def1, def2, defswap;
      tree arg1, arg2, ref1, ref2, field1, field2, fieldswap;
      tree tree_offset1, tree_offset2, tree_size2, next;
      int offset1, offset2, size2;
      unsigned align1;
      gimple_stmt_iterator gsi2;
      basic_block bb_for_def1, bb_for_def2;

      if (gimple_phi_num_args (phi_stmt) != 2
	  || virtual_operand_p (gimple_phi_result (phi_stmt)))
	continue;

      arg1 = gimple_phi_arg_def (phi_stmt, 0);
      arg2 = gimple_phi_arg_def (phi_stmt, 1);

      if (TREE_CODE (arg1) != SSA_NAME
	  || TREE_CODE (arg2) != SSA_NAME
	  || SSA_NAME_IS_DEFAULT_DEF (arg1)
	  || SSA_NAME_IS_DEFAULT_DEF (arg2))
	continue;

      def1 = SSA_NAME_DEF_STMT (arg1);
      def2 = SSA_NAME_DEF_STMT (arg2);

      if ((gimple_bb (def1) != bb1 || gimple_bb (def2) != bb2)
	  && (gimple_bb (def2) != bb1 || gimple_bb (def1) != bb2))
	continue;

      /* Check the mode of the arguments to be sure a conditional move
	 can be generated for it.  */
      if (optab_handler (movcc_optab, TYPE_MODE (TREE_TYPE (arg1)))
	  == CODE_FOR_nothing)
	continue;

      /* Both statements must be assignments whose RHS is a COMPONENT_REF.  */
      if (!gimple_assign_single_p (def1)
	  || !gimple_assign_single_p (def2)
	  || gimple_has_volatile_ops (def1)
	  || gimple_has_volatile_ops (def2))
	continue;

      ref1 = gimple_assign_rhs1 (def1);
      ref2 = gimple_assign_rhs1 (def2);

      if (TREE_CODE (ref1) != COMPONENT_REF
	  || TREE_CODE (ref2) != COMPONENT_REF)
	continue;

      /* The zeroth operand of the two component references must be
	 identical.  It is not sufficient to compare get_base_address of
	 the two references, because this could allow for different
	 elements of the same array in the two trees.  It is not safe to
	 assume that the existence of one array element implies the
	 existence of a different one.  */
      if (!operand_equal_p (TREE_OPERAND (ref1, 0), TREE_OPERAND (ref2, 0), 0))
	continue;

      field1 = TREE_OPERAND (ref1, 1);
      field2 = TREE_OPERAND (ref2, 1);

      /* Check for field adjacency, and ensure field1 comes first.  */
      for (next = DECL_CHAIN (field1);
	   next && TREE_CODE (next) != FIELD_DECL;
	   next = DECL_CHAIN (next))
	;

      if (next != field2)
	{
	  for (next = DECL_CHAIN (field2);
	       next && TREE_CODE (next) != FIELD_DECL;
	       next = DECL_CHAIN (next))
	    ;

	  if (next != field1)
	    continue;

	  fieldswap = field1;
	  field1 = field2;
	  field2 = fieldswap;
	  defswap = def1;
	  def1 = def2;
	  def2 = defswap;
	}

      bb_for_def1 = gimple_bb (def1);
      bb_for_def2 = gimple_bb (def2);

      /* Check for proper alignment of the first field.  */
      tree_offset1 = bit_position (field1);
      tree_offset2 = bit_position (field2);
      tree_size2 = DECL_SIZE (field2);

      if (!tree_fits_uhwi_p (tree_offset1)
	  || !tree_fits_uhwi_p (tree_offset2)
	  || !tree_fits_uhwi_p (tree_size2))
	continue;

      offset1 = tree_to_uhwi (tree_offset1);
      offset2 = tree_to_uhwi (tree_offset2);
      size2 = tree_to_uhwi (tree_size2);
      align1 = DECL_ALIGN (field1) % param_align_bits;

      if (offset1 % BITS_PER_UNIT != 0)
	continue;

      /* For profitability, the two field references should fit within
	 a single cache line.  */
      if (align1 + offset2 - offset1 + size2 > param_align_bits)
	continue;

      /* The two expressions cannot be dependent upon vdefs defined
	 in bb1/bb2.  */
      if (local_mem_dependence (def1, bb_for_def1)
	  || local_mem_dependence (def2, bb_for_def2))
	continue;

      /* The conditions are satisfied; hoist the loads from bb1 and bb2 into
	 bb0.  We hoist the first one first so that a cache miss is handled
	 efficiently regardless of hardware cache-fill policy.  */
      gsi2 = gsi_for_stmt (def1);
      gsi_move_to_bb_end (&gsi2, bb0);
      gsi2 = gsi_for_stmt (def2);
      gsi_move_to_bb_end (&gsi2, bb0);

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file,
		   "\nHoisting adjacent loads from %d and %d into %d: \n",
		   bb_for_def1->index, bb_for_def2->index, bb0->index);
	  print_gimple_stmt (dump_file, def1, 0, TDF_VOPS|TDF_MEMSYMS);
	  print_gimple_stmt (dump_file, def2, 0, TDF_VOPS|TDF_MEMSYMS);
	}
    }
}

/* Determine whether we should attempt to hoist adjacent loads out of
   diamond patterns in pass_phiopt.  Always hoist loads if
   -fhoist-adjacent-loads is specified and the target machine has
   both a conditional move instruction and a defined cache line size.  */

static bool
gate_hoist_loads (void)
{
  return (flag_hoist_adjacent_loads == 1
	  && PARAM_VALUE (PARAM_L1_CACHE_LINE_SIZE)
	  && HAVE_conditional_move);
}

/* This pass tries to replace an if-then-else block with an
   assignment.  We have four kinds of transformations.  Some of these
   transformations are also performed by the ifcvt RTL optimizer.

   Conditional Replacement
   -----------------------

   This transformation, implemented in conditional_replacement,
   replaces

     bb0:
      if (cond) goto bb2; else goto bb1;
     bb1:
     bb2:
      x = PHI <0 (bb1), 1 (bb0), ...>;

   with

     bb0:
      x' = cond;
      goto bb2;
     bb2:
      x = PHI <x' (bb0), ...>;

   We remove bb1 as it becomes unreachable.  This occurs often due to
   gimplification of conditionals.
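
   For instance (an illustrative source example, not taken from this
   file), the gimplified form of

     x = (a < b) ? 1 : 0;

   matches this pattern and is reduced to the single assignment
   x = a < b.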

   Value Replacement
   -----------------

   This transformation, implemented in value_replacement, replaces

     bb0:
      if (a != b) goto bb2; else goto bb1;
     bb1:
     bb2:
      x = PHI <a (bb1), b (bb0), ...>;

   with

     bb0:
     bb2:
      x = PHI <b (bb0), ...>;

   This opportunity can sometimes occur as a result of other
   optimizations.
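
   The replacement is valid because a == b holds on the path through
   bb1, so both PHI arguments carry the value b.  As an illustrative
   source example (not taken from this file), x = (a != b) ? b : a;
   simplifies to x = b.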

   Another case caught by value replacement looks like this:

     bb0:
       t1 = a == CONST;
       t2 = b > c;
       t3 = t1 & t2;
       if (t3 != 0) goto bb1; else goto bb2;
     bb1:
     bb2:
       x = PHI <CONST (bb1), a (bb0)>;

   Gets replaced with:

     bb0:
     bb2:
       t1 = a == CONST;
       t2 = b > c;
       t3 = t1 & t2;
       x = a;

   ABS Replacement
   ---------------

   This transformation, implemented in abs_replacement, replaces

     bb0:
      if (a >= 0) goto bb2; else goto bb1;
     bb1:
      x = -a;
     bb2:
      x = PHI <x (bb1), a (bb0), ...>;

   with

     bb0:
      x' = ABS_EXPR <a>;
     bb2:
      x = PHI <x' (bb0), ...>;
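
   As an illustrative source example (not taken from this file),
   x = (a >= 0) ? a : -a; gimplifies to exactly this shape and
   becomes x = ABS_EXPR <a>.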

   MIN/MAX Replacement
   -------------------

   This transformation, implemented in minmax_replacement, replaces

     bb0:
      if (a <= b) goto bb2; else goto bb1;
     bb1:
     bb2:
      x = PHI <b (bb1), a (bb0), ...>;

   with

     bb0:
      x' = MIN_EXPR <a, b>;
     bb2:
      x = PHI <x' (bb0), ...>;

   A similar transformation is done for MAX_EXPR.
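
   As an illustrative source example (not taken from this file),
   x = (a <= b) ? a : b; matches this pattern and becomes
   x = MIN_EXPR <a, b>.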

   This pass also performs a fifth transformation of a slightly different
   flavor.

   Adjacent Load Hoisting
   ----------------------

   This transformation replaces

     bb0:
       if (...) goto bb2; else goto bb1;
     bb1:
       x1 = (<expr>).field1;
       goto bb3;
     bb2:
       x2 = (<expr>).field2;
     bb3:
       # x = PHI <x1, x2>;

   with

     bb0:
       x1 = (<expr>).field1;
       x2 = (<expr>).field2;
       if (...) goto bb2; else goto bb1;
     bb1:
       goto bb3;
     bb2:
     bb3:
       # x = PHI <x1, x2>;

   The purpose of this transformation is to enable generation of conditional
   move instructions such as Intel CMOVE or PowerPC ISEL.  Because one of
   the loads is speculative, the transformation is restricted to very
   specific cases to avoid introducing a page fault.  We are looking for
   the common idiom:

     if (...)
       x = y->left;
     else
       x = y->right;

   where left and right are typically adjacent pointers in a tree
   structure.  */

namespace {

const pass_data pass_data_phiopt =
{
  GIMPLE_PASS, /* type */
  "phiopt", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_PHIOPT, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_phiopt : public gimple_opt_pass
{
public:
  pass_phiopt (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_phiopt, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_phiopt (m_ctxt); }
  virtual bool gate (function *) { return flag_ssa_phiopt; }
  virtual unsigned int execute (function *)
    {
      return tree_ssa_phiopt_worker (false, gate_hoist_loads ());
    }

}; // class pass_phiopt

} // anon namespace

gimple_opt_pass *
make_pass_phiopt (gcc::context *ctxt)
{
  return new pass_phiopt (ctxt);
}

namespace {

const pass_data pass_data_cselim =
{
  GIMPLE_PASS, /* type */
  "cselim", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_PHIOPT, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_cselim : public gimple_opt_pass
{
public:
  pass_cselim (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_cselim, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return flag_tree_cselim; }
  virtual unsigned int execute (function *) { return tree_ssa_cs_elim (); }

}; // class pass_cselim

} // anon namespace

gimple_opt_pass *
make_pass_cselim (gcc::context *ctxt)
{
  return new pass_cselim (ctxt);
}