gcc/tree-ssa-phiopt.c
/* Optimization of PHI nodes by converting them into straightline code.
   Copyright (C) 2004-2015 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "hash-table.h"
#include "tm.h"
#include "hash-set.h"
#include "machmode.h"
#include "vec.h"
#include "double-int.h"
#include "input.h"
#include "alias.h"
#include "symtab.h"
#include "wide-int.h"
#include "inchash.h"
#include "tree.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "flags.h"
#include "tm_p.h"
#include "predict.h"
#include "hard-reg-set.h"
#include "function.h"
#include "dominance.h"
#include "cfg.h"
#include "cfganal.h"
#include "basic-block.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "gimple-ssa.h"
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "hashtab.h"
#include "rtl.h"
#include "statistics.h"
#include "real.h"
#include "fixed-value.h"
#include "insn-config.h"
#include "expmed.h"
#include "dojump.h"
#include "explow.h"
#include "calls.h"
#include "emit-rtl.h"
#include "varasm.h"
#include "stmt.h"
#include "expr.h"
#include "tree-dfa.h"
#include "tree-pass.h"
#include "langhooks.h"
#include "domwalk.h"
#include "cfgloop.h"
#include "tree-data-ref.h"
#include "gimple-pretty-print.h"
#include "insn-codes.h"
#include "optabs.h"
#include "tree-scalar-evolution.h"
#include "tree-inline.h"

#ifndef HAVE_conditional_move
#define HAVE_conditional_move (0)
#endif
static unsigned int tree_ssa_phiopt_worker (bool, bool);
static bool conditional_replacement (basic_block, basic_block,
                                     edge, edge, gphi *, tree, tree);
static int value_replacement (basic_block, basic_block,
                              edge, edge, gimple, tree, tree);
static bool minmax_replacement (basic_block, basic_block,
                                edge, edge, gimple, tree, tree);
static bool abs_replacement (basic_block, basic_block,
                             edge, edge, gimple, tree, tree);
static bool neg_replacement (basic_block, basic_block,
                             edge, edge, gimple, tree, tree);
static bool cond_store_replacement (basic_block, basic_block, edge, edge,
                                    hash_set<tree> *);
static bool cond_if_else_store_replacement (basic_block, basic_block, basic_block);
static hash_set<tree> * get_non_trapping ();
static void replace_phi_edge_with_variable (basic_block, edge, gimple, tree);
static void hoist_adjacent_loads (basic_block, basic_block,
                                  basic_block, basic_block);
static bool gate_hoist_loads (void);
/* This pass tries to transform conditional stores into unconditional
   ones, enabling further simplifications with the simpler then and else
   blocks.  In particular it replaces this:

     bb0:
       if (cond) goto bb2; else goto bb1;
     bb1:
       *p = RHS;
     bb2:

   with

     bb0:
       if (cond) goto bb1; else goto bb2;
     bb1:
       condtmp' = *p;
     bb2:
       condtmp = PHI <RHS, condtmp'>
       *p = condtmp;

   This transformation can only be done under several constraints,
   documented below.  It also replaces:

     bb0:
       if (cond) goto bb2; else goto bb1;
     bb1:
       *p = RHS1;
       goto bb3;
     bb2:
       *p = RHS2;
     bb3:

   with

     bb0:
       if (cond) goto bb3; else goto bb1;
     bb1:
     bb3:
       condtmp = PHI <RHS1, RHS2>
       *p = condtmp;  */
static unsigned int
tree_ssa_cs_elim (void)
{
  unsigned todo;
  /* ??? We are not interested in loop related info, but the following
     will create it, ICEing as we didn't init loops with pre-headers.
     An interfacing issue of find_data_references_in_bb.  */
  loop_optimizer_init (LOOPS_NORMAL);
  scev_initialize ();
  todo = tree_ssa_phiopt_worker (true, false);
  scev_finalize ();
  loop_optimizer_finalize ();
  return todo;
}
/* Return the singleton PHI in SEQ if there is exactly one; otherwise
   return the single PHI whose arguments for edges E0 and E1 differ, or
   NULL if there is no such PHI or more than one.  */

static gphi *
single_non_singleton_phi_for_edges (gimple_seq seq, edge e0, edge e1)
{
  gimple_stmt_iterator i;
  gphi *phi = NULL;
  if (gimple_seq_singleton_p (seq))
    return as_a <gphi *> (gsi_stmt (gsi_start (seq)));
  for (i = gsi_start (seq); !gsi_end_p (i); gsi_next (&i))
    {
      gphi *p = as_a <gphi *> (gsi_stmt (i));
      /* If the PHI arguments are equal then we can skip this PHI.  */
      if (operand_equal_for_phi_arg_p (gimple_phi_arg_def (p, e0->dest_idx),
                                       gimple_phi_arg_def (p, e1->dest_idx)))
        continue;

      /* If we already found a PHI whose two edge arguments differ, then
         there is no singleton for these PHIs; return NULL.  */
      if (phi)
        return NULL;

      phi = p;
    }
  return phi;
}
/* The core routine of conditional store replacement and normal
   phi optimizations.  Both share much of the infrastructure in how
   to match applicable basic block patterns.  DO_STORE_ELIM is true
   when we want to do conditional store replacement, false otherwise.
   DO_HOIST_LOADS is true when we want to hoist adjacent loads out
   of diamond control flow patterns, false otherwise.  */
static unsigned int
tree_ssa_phiopt_worker (bool do_store_elim, bool do_hoist_loads)
{
  basic_block bb;
  basic_block *bb_order;
  unsigned n, i;
  bool cfgchanged = false;
  hash_set<tree> *nontrap = 0;

  if (do_store_elim)
    /* Calculate the set of non-trapping memory accesses.  */
    nontrap = get_non_trapping ();

  /* The replacement of conditional negation with a non-branching
     sequence is really only a win when optimizing for speed and we
     can avoid transformations by gimple if-conversion that result
     in poor RTL generation.

     Ideally either gimple if-conversion or the RTL expanders will
     be improved and the code to emit branchless conditional negation
     can be removed.  */
  bool replace_conditional_negation = false;
  if (!do_store_elim)
    replace_conditional_negation
      = ((!optimize_size && optimize >= 2)
         || (((flag_tree_loop_vectorize || cfun->has_force_vectorize_loops)
              && flag_tree_loop_if_convert != 0)
             || flag_tree_loop_if_convert == 1
             || flag_tree_loop_if_convert_stores == 1));

  /* Search every basic block for COND_EXPRs we may be able to optimize.

     We walk the blocks in an order that guarantees that a block with
     a single predecessor is processed before the predecessor.
     This ensures that we collapse inner ifs before visiting the
     outer ones, and also that we do not try to visit a removed
     block.  */
  bb_order = single_pred_before_succ_order ();
  n = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS;

  for (i = 0; i < n; i++)
    {
      gimple cond_stmt;
      gphi *phi;
      basic_block bb1, bb2;
      edge e1, e2;
      tree arg0, arg1;

      bb = bb_order[i];

      cond_stmt = last_stmt (bb);
      /* Check to see if the last statement is a GIMPLE_COND.  */
      if (!cond_stmt
          || gimple_code (cond_stmt) != GIMPLE_COND)
        continue;

      e1 = EDGE_SUCC (bb, 0);
      bb1 = e1->dest;
      e2 = EDGE_SUCC (bb, 1);
      bb2 = e2->dest;

      /* We cannot do the optimization on abnormal edges.  */
      if ((e1->flags & EDGE_ABNORMAL) != 0
          || (e2->flags & EDGE_ABNORMAL) != 0)
        continue;

      /* Punt if bb1 has no successors, or if bb2 is missing or has no
         successors.  */
      if (EDGE_COUNT (bb1->succs) == 0
          || bb2 == NULL
          || EDGE_COUNT (bb2->succs) == 0)
        continue;

      /* Find the bb which is the fall through to the other.  */
      if (EDGE_SUCC (bb1, 0)->dest == bb2)
        ;
      else if (EDGE_SUCC (bb2, 0)->dest == bb1)
        {
          basic_block bb_tmp = bb1;
          edge e_tmp = e1;
          bb1 = bb2;
          bb2 = bb_tmp;
          e1 = e2;
          e2 = e_tmp;
        }
      else if (do_store_elim
               && EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest)
        {
          basic_block bb3 = EDGE_SUCC (bb1, 0)->dest;

          if (!single_succ_p (bb1)
              || (EDGE_SUCC (bb1, 0)->flags & EDGE_FALLTHRU) == 0
              || !single_succ_p (bb2)
              || (EDGE_SUCC (bb2, 0)->flags & EDGE_FALLTHRU) == 0
              || EDGE_COUNT (bb3->preds) != 2)
            continue;
          if (cond_if_else_store_replacement (bb1, bb2, bb3))
            cfgchanged = true;
          continue;
        }
      else if (do_hoist_loads
               && EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest)
        {
          basic_block bb3 = EDGE_SUCC (bb1, 0)->dest;

          if (!FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (cond_stmt)))
              && single_succ_p (bb1)
              && single_succ_p (bb2)
              && single_pred_p (bb1)
              && single_pred_p (bb2)
              && EDGE_COUNT (bb->succs) == 2
              && EDGE_COUNT (bb3->preds) == 2
              /* If one edge or the other is dominant, a conditional move
                 is likely to perform worse than the well-predicted branch.  */
              && !predictable_edge_p (EDGE_SUCC (bb, 0))
              && !predictable_edge_p (EDGE_SUCC (bb, 1)))
            hoist_adjacent_loads (bb, bb1, bb2, bb3);
          continue;
        }
      else
        continue;

      e1 = EDGE_SUCC (bb1, 0);

      /* Make sure that bb1 is just a fall through.  */
      if (!single_succ_p (bb1)
          || (e1->flags & EDGE_FALLTHRU) == 0)
        continue;

      /* Also make sure that bb1 only has one predecessor and that it
         is bb.  */
      if (!single_pred_p (bb1)
          || single_pred (bb1) != bb)
        continue;

      if (do_store_elim)
        {
          /* bb1 is the middle block, bb2 the join block, bb the split block,
             e1 the fallthrough edge from bb1 to bb2.  We can't do the
             optimization if the join block has more than two predecessors.  */
          if (EDGE_COUNT (bb2->preds) > 2)
            continue;
          if (cond_store_replacement (bb1, bb2, e1, e2, nontrap))
            cfgchanged = true;
        }
      else
        {
          gimple_seq phis = phi_nodes (bb2);
          gimple_stmt_iterator gsi;
          bool candorest = true;

          /* Value replacement can work with more than one PHI
             so try that first.  */
          for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
            {
              phi = as_a <gphi *> (gsi_stmt (gsi));
              arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
              arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
              if (value_replacement (bb, bb1, e1, e2, phi, arg0, arg1) == 2)
                {
                  candorest = false;
                  cfgchanged = true;
                  break;
                }
            }

          if (!candorest)
            continue;

          phi = single_non_singleton_phi_for_edges (phis, e1, e2);
          if (!phi)
            continue;

          arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
          arg1 = gimple_phi_arg_def (phi, e2->dest_idx);

          /* Something is wrong if we cannot find the arguments in the PHI
             node.  */
          gcc_assert (arg0 != NULL && arg1 != NULL);

          /* Do the replacement of the conditional if it can be done.  */
          if (conditional_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
            cfgchanged = true;
          else if (abs_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
            cfgchanged = true;
          else if (replace_conditional_negation
                   && neg_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
            cfgchanged = true;
          else if (minmax_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
            cfgchanged = true;
        }
    }

  free (bb_order);

  if (do_store_elim)
    delete nontrap;
  /* If the CFG has changed, we should cleanup the CFG.  */
  if (cfgchanged && do_store_elim)
    {
      /* In cond-store replacement we have added some loads on edges
         and new VOPS (as we moved the store, and created a load).  */
      gsi_commit_edge_inserts ();
      return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
    }
  else if (cfgchanged)
    return TODO_cleanup_cfg;
  return 0;
}
/* Replace PHI node element whose edge is E in block BB with variable NEW.
   Remove the edge from COND_BLOCK which does not lead to BB (COND_BLOCK
   is known to have two edges, one of which must reach BB).  */

static void
replace_phi_edge_with_variable (basic_block cond_block,
                                edge e, gimple phi, tree new_tree)
{
  basic_block bb = gimple_bb (phi);
  basic_block block_to_remove;
  gimple_stmt_iterator gsi;

  /* Change the PHI argument to new.  */
  SET_USE (PHI_ARG_DEF_PTR (phi, e->dest_idx), new_tree);

  /* Remove the empty basic block.  */
  if (EDGE_SUCC (cond_block, 0)->dest == bb)
    {
      EDGE_SUCC (cond_block, 0)->flags |= EDGE_FALLTHRU;
      EDGE_SUCC (cond_block, 0)->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
      EDGE_SUCC (cond_block, 0)->probability = REG_BR_PROB_BASE;
      EDGE_SUCC (cond_block, 0)->count += EDGE_SUCC (cond_block, 1)->count;

      block_to_remove = EDGE_SUCC (cond_block, 1)->dest;
    }
  else
    {
      EDGE_SUCC (cond_block, 1)->flags |= EDGE_FALLTHRU;
      EDGE_SUCC (cond_block, 1)->flags
        &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
      EDGE_SUCC (cond_block, 1)->probability = REG_BR_PROB_BASE;
      EDGE_SUCC (cond_block, 1)->count += EDGE_SUCC (cond_block, 0)->count;

      block_to_remove = EDGE_SUCC (cond_block, 0)->dest;
    }
  delete_basic_block (block_to_remove);

  /* Eliminate the COND_EXPR at the end of COND_BLOCK.  */
  gsi = gsi_last_bb (cond_block);
  gsi_remove (&gsi, true);

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file,
             "COND_EXPR in block %d and PHI in block %d converted to straightline code.\n",
             cond_block->index,
             bb->index);
}
/* The function conditional_replacement does the main work of doing the
   conditional replacement.  Return true if the replacement is done.
   Otherwise return false.
   BB is the basic block where the replacement is going to be done.  ARG0
   is argument 0 from PHI.  Likewise for ARG1.  */
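/* For illustration (hand-written GIMPLE, not from an actual dump):

     if (a_1 > b_2) goto bb2; else goto bb1;
   bb1:
   bb2:
     # r_3 = PHI <1 (bb0), 0 (bb1)>

   becomes

     t_4 = a_1 > b_2;
     r_3 = (int) t_4;

   and the (0, -1) variant additionally negates the converted value.  */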
static bool
conditional_replacement (basic_block cond_bb, basic_block middle_bb,
                         edge e0, edge e1, gphi *phi,
                         tree arg0, tree arg1)
{
  tree result;
  gimple stmt;
  gassign *new_stmt;
  tree cond;
  gimple_stmt_iterator gsi;
  edge true_edge, false_edge;
  tree new_var, new_var2;
  bool neg;

  /* FIXME: Gimplification of complex type is too hard for now.  */
  /* We aren't prepared to handle vectors either (and it is a question
     if it would be worthwhile anyway).  */
  if (!(INTEGRAL_TYPE_P (TREE_TYPE (arg0))
        || POINTER_TYPE_P (TREE_TYPE (arg0)))
      || !(INTEGRAL_TYPE_P (TREE_TYPE (arg1))
           || POINTER_TYPE_P (TREE_TYPE (arg1))))
    return false;

  /* If the PHI arguments are the constants 0 and 1, or 0 and -1, then
     convert the PHI into a use of the condition.  */
  if ((integer_zerop (arg0) && integer_onep (arg1))
      || (integer_zerop (arg1) && integer_onep (arg0)))
    neg = false;
  else if ((integer_zerop (arg0) && integer_all_onesp (arg1))
           || (integer_zerop (arg1) && integer_all_onesp (arg0)))
    neg = true;
  else
    return false;

  if (!empty_block_p (middle_bb))
    return false;

  /* At this point we know we have a GIMPLE_COND with two successors.
     One successor is BB, the other successor is an empty block which
     falls through into BB.

     There is a single PHI node at the join point (BB) and its arguments
     are constants (0, 1) or (0, -1).

     So, given the condition COND, and the two PHI arguments, we can
     rewrite this PHI into non-branching code:

       dest = (COND) or dest = COND'

     We use the condition as-is if the argument associated with the
     true edge has the value one or the argument associated with the
     false edge has the value zero.  Note that those conditions are not
     the same since only one of the outgoing edges from the GIMPLE_COND
     will directly reach BB and thus be associated with an argument.  */

  stmt = last_stmt (cond_bb);
  result = PHI_RESULT (phi);

  /* To handle special cases like floating point comparison, it is easier and
     less error-prone to build a tree and gimplify it on the fly though it is
     less efficient.  */
  cond = fold_build2_loc (gimple_location (stmt),
                          gimple_cond_code (stmt), boolean_type_node,
                          gimple_cond_lhs (stmt), gimple_cond_rhs (stmt));

  /* We need to know which is the true edge and which is the false
     edge so that we know when to invert the condition below.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
  if ((e0 == true_edge && integer_zerop (arg0))
      || (e0 == false_edge && !integer_zerop (arg0))
      || (e1 == true_edge && integer_zerop (arg1))
      || (e1 == false_edge && !integer_zerop (arg1)))
    cond = fold_build1_loc (gimple_location (stmt),
                            TRUTH_NOT_EXPR, TREE_TYPE (cond), cond);

  if (neg)
    {
      cond = fold_convert_loc (gimple_location (stmt),
                               TREE_TYPE (result), cond);
      cond = fold_build1_loc (gimple_location (stmt),
                              NEGATE_EXPR, TREE_TYPE (cond), cond);
    }

  /* Insert our new statements at the end of conditional block before the
     COND_STMT.  */
  gsi = gsi_for_stmt (stmt);
  new_var = force_gimple_operand_gsi (&gsi, cond, true, NULL, true,
                                      GSI_SAME_STMT);

  if (!useless_type_conversion_p (TREE_TYPE (result), TREE_TYPE (new_var)))
    {
      source_location locus_0, locus_1;

      new_var2 = make_ssa_name (TREE_TYPE (result));
      new_stmt = gimple_build_assign (new_var2, CONVERT_EXPR, new_var);
      gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
      new_var = new_var2;

      /* Set the locus to the first argument, unless it doesn't have one.  */
      locus_0 = gimple_phi_arg_location (phi, 0);
      locus_1 = gimple_phi_arg_location (phi, 1);
      if (locus_0 == UNKNOWN_LOCATION)
        locus_0 = locus_1;
      gimple_set_location (new_stmt, locus_0);
    }

  replace_phi_edge_with_variable (cond_bb, e1, phi, new_var);

  /* Note that we optimized this PHI.  */
  return true;
}
/* Update *ARG which is defined in STMT so that it contains the
   computed value if that seems profitable.  Return true if the
   statement is made dead by that rewriting.  */

static bool
jump_function_from_stmt (tree *arg, gimple stmt)
{
  enum tree_code code = gimple_assign_rhs_code (stmt);
  if (code == ADDR_EXPR)
    {
      /* For arg = &p->i transform it to p, if possible.  */
      tree rhs1 = gimple_assign_rhs1 (stmt);
      HOST_WIDE_INT offset;
      tree tem = get_addr_base_and_unit_offset (TREE_OPERAND (rhs1, 0),
                                                &offset);
      if (tem
          && TREE_CODE (tem) == MEM_REF
          && (mem_ref_offset (tem) + offset) == 0)
        {
          *arg = TREE_OPERAND (tem, 0);
          return true;
        }
    }
  /* TODO: Much like IPA-CP jump-functions we want to handle constant
     additions symbolically here, and we'd need to update the comparison
     code that compares the arg + cst tuples in our caller.  For now the
     code above exactly handles the VEC_BASE pattern from vec.h.  */
  return false;
}
/* RHS is a source argument in a BIT_AND_EXPR which feeds a conditional
   of the form SSA_NAME NE 0.

   If RHS is fed by a simple EQ_EXPR comparison of two values, see if
   the two input values of the EQ_EXPR match arg0 and arg1.

   If so update *code and return TRUE.  Otherwise return FALSE.  */

static bool
rhs_is_fed_for_value_replacement (const_tree arg0, const_tree arg1,
                                  enum tree_code *code, const_tree rhs)
{
  /* Obviously if RHS is not an SSA_NAME, we can't look at the defining
     statement.  */
  if (TREE_CODE (rhs) == SSA_NAME)
    {
      gimple def1 = SSA_NAME_DEF_STMT (rhs);

      /* Verify the defining statement has an EQ_EXPR on the RHS.  */
      if (is_gimple_assign (def1) && gimple_assign_rhs_code (def1) == EQ_EXPR)
        {
          /* Finally verify the source operands of the EQ_EXPR are equal
             to arg0 and arg1.  */
          tree op0 = gimple_assign_rhs1 (def1);
          tree op1 = gimple_assign_rhs2 (def1);
          if ((operand_equal_for_phi_arg_p (arg0, op0)
               && operand_equal_for_phi_arg_p (arg1, op1))
              || (operand_equal_for_phi_arg_p (arg0, op1)
                  && operand_equal_for_phi_arg_p (arg1, op0)))
            {
              /* We will perform the optimization.  */
              *code = gimple_assign_rhs_code (def1);
              return true;
            }
        }
    }
  return false;
}
/* Return TRUE if arg0/arg1 are equal to the rhs/lhs or lhs/rhs of COND.

   Also return TRUE if arg0/arg1 are equal to the source arguments of an
   EQ comparison feeding a BIT_AND_EXPR which feeds COND.

   Return FALSE otherwise.  */
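/* The BIT_AND_EXPR form looks like this (illustrative, hypothetical
   SSA names):

     t1_4 = a_1 == b_2;
     t2_5 = t1_4 & c_3;
     if (t2_5 != 0) ...

   Here ARG0/ARG1 matching a_1/b_2 also counts, with *CODE updated to
   the EQ_EXPR that feeds the BIT_AND_EXPR.  */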
static bool
operand_equal_for_value_replacement (const_tree arg0, const_tree arg1,
                                     enum tree_code *code, gimple cond)
{
  gimple def;
  tree lhs = gimple_cond_lhs (cond);
  tree rhs = gimple_cond_rhs (cond);

  if ((operand_equal_for_phi_arg_p (arg0, lhs)
       && operand_equal_for_phi_arg_p (arg1, rhs))
      || (operand_equal_for_phi_arg_p (arg1, lhs)
          && operand_equal_for_phi_arg_p (arg0, rhs)))
    return true;

  /* Now handle more complex case where we have an EQ comparison
     which feeds a BIT_AND_EXPR which feeds COND.

     First verify that COND is of the form SSA_NAME NE 0.  */
  if (*code != NE_EXPR || !integer_zerop (rhs)
      || TREE_CODE (lhs) != SSA_NAME)
    return false;

  /* Now ensure that SSA_NAME is set by a BIT_AND_EXPR.  */
  def = SSA_NAME_DEF_STMT (lhs);
  if (!is_gimple_assign (def) || gimple_assign_rhs_code (def) != BIT_AND_EXPR)
    return false;

  /* Now verify arg0/arg1 correspond to the source arguments of an
     EQ comparison feeding the BIT_AND_EXPR.  */

  tree tmp = gimple_assign_rhs1 (def);
  if (rhs_is_fed_for_value_replacement (arg0, arg1, code, tmp))
    return true;

  tmp = gimple_assign_rhs2 (def);
  if (rhs_is_fed_for_value_replacement (arg0, arg1, code, tmp))
    return true;

  return false;
}
/* Returns true if ARG is a neutral element for operation CODE
   on the RIGHT side.  */
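/* For instance, x + 0, x | 0, x ^ 0, x * 1 and x & -1 all yield x for
   either operand order, while x - 0, x << 0, x >> 0 and x / 1 only do
   so when the candidate element is the RIGHT operand.  */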
static bool
neutral_element_p (tree_code code, tree arg, bool right)
{
  switch (code)
    {
    case PLUS_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
      return integer_zerop (arg);

    case LROTATE_EXPR:
    case RROTATE_EXPR:
    case LSHIFT_EXPR:
    case RSHIFT_EXPR:
    case MINUS_EXPR:
    case POINTER_PLUS_EXPR:
      return right && integer_zerop (arg);

    case MULT_EXPR:
      return integer_onep (arg);

    case TRUNC_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case EXACT_DIV_EXPR:
      return right && integer_onep (arg);

    case BIT_AND_EXPR:
      return integer_all_onesp (arg);

    default:
      return false;
    }
}
/* Returns true if ARG is an absorbing element for operation CODE.  */
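/* For instance, x | -1 is always -1, and x * 0 and x & 0 are always 0,
   whatever the value of x.  */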
static bool
absorbing_element_p (tree_code code, tree arg)
{
  switch (code)
    {
    case BIT_IOR_EXPR:
      return integer_all_onesp (arg);

    case MULT_EXPR:
    case BIT_AND_EXPR:
      return integer_zerop (arg);

    default:
      return false;
    }
}
/* The function value_replacement does the main work of doing the value
   replacement.  Return non-zero if the replacement is done.  Otherwise return
   0.  If we remove the middle basic block, return 2.
   BB is the basic block where the replacement is going to be done.  ARG0
   is argument 0 from the PHI.  Likewise for ARG1.  */
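/* An illustrative (hand-written) example of the simplest shape handled
   here:

     if (a_1 == 0) goto bb2; else goto bb1;
   bb1:
   bb2:
     # r_2 = PHI <a_1 (bb1), 0 (bb0)>

   On the edge from bb0 we know a_1 == 0, so both arguments equal a_1
   and the PHI can be replaced by a_1 outright.  */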
static int
value_replacement (basic_block cond_bb, basic_block middle_bb,
                   edge e0, edge e1, gimple phi,
                   tree arg0, tree arg1)
{
  gimple_stmt_iterator gsi;
  gimple cond;
  edge true_edge, false_edge;
  enum tree_code code;
  bool empty_or_with_defined_p = true;

  /* If the type says honor signed zeros we cannot do this
     optimization.  */
  if (HONOR_SIGNED_ZEROS (arg1))
    return 0;

  /* If there is a statement in MIDDLE_BB that defines one of the PHI
     arguments, then adjust arg0 or arg1.  */
  gsi = gsi_start_nondebug_after_labels_bb (middle_bb);
  while (!gsi_end_p (gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      tree lhs;
      gsi_next_nondebug (&gsi);
      if (!is_gimple_assign (stmt))
        {
          empty_or_with_defined_p = false;
          continue;
        }
      /* Now try to adjust arg0 or arg1 according to the computation
         in the statement.  */
      lhs = gimple_assign_lhs (stmt);
      if (!(lhs == arg0
            && jump_function_from_stmt (&arg0, stmt))
          || (lhs == arg1
              && jump_function_from_stmt (&arg1, stmt)))
        empty_or_with_defined_p = false;
    }

  cond = last_stmt (cond_bb);
  code = gimple_cond_code (cond);

  /* This transformation is only valid for equality comparisons.  */
  if (code != NE_EXPR && code != EQ_EXPR)
    return 0;

  /* We need to know which is the true edge and which is the false
     edge so that we know which PHI argument is associated with each
     edge.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);

  /* At this point we know we have a COND_EXPR with two successors.
     One successor is BB, the other successor is an empty block which
     falls through into BB.

     The condition for the COND_EXPR is known to be NE_EXPR or EQ_EXPR.

     There is a single PHI node at the join point (BB) with two arguments.

     We now need to verify that the two arguments in the PHI node match
     the two arguments to the equality comparison.  */

  if (operand_equal_for_value_replacement (arg0, arg1, &code, cond))
    {
      edge e;
      tree arg;

      /* For NE_EXPR, we want to build an assignment result = arg where
         arg is the PHI argument associated with the true edge.  For
         EQ_EXPR we want the PHI argument associated with the false edge.  */
      e = (code == NE_EXPR ? true_edge : false_edge);

      /* Unfortunately, E may not reach BB (it may instead have gone to
         OTHER_BLOCK).  If that is the case, then we want the single outgoing
         edge from OTHER_BLOCK which reaches BB and represents the desired
         path from COND_BLOCK.  */
      if (e->dest == middle_bb)
        e = single_succ_edge (e->dest);

      /* Now we know the incoming edge to BB that has the argument for the
         RHS of our new assignment statement.  */
      if (e0 == e)
        arg = arg0;
      else
        arg = arg1;

      /* If the middle basic block was empty or only defines the PHI
         arguments, and this is the single PHI whose arguments differ
         for the edges e0 and e1, then we can remove the middle basic
         block.  */
      if (empty_or_with_defined_p
          && single_non_singleton_phi_for_edges (phi_nodes (gimple_bb (phi)),
                                                 e0, e1) == phi)
        {
          replace_phi_edge_with_variable (cond_bb, e1, phi, arg);
          /* Note that we optimized this PHI.  */
          return 2;
        }
      else
        {
          /* Replace the PHI arguments with arg.  */
          SET_PHI_ARG_DEF (phi, e0->dest_idx, arg);
          SET_PHI_ARG_DEF (phi, e1->dest_idx, arg);
          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "PHI ");
              print_generic_expr (dump_file, gimple_phi_result (phi), 0);
              fprintf (dump_file, " reduced for COND_EXPR in block %d to ",
                       cond_bb->index);
              print_generic_expr (dump_file, arg, 0);
              fprintf (dump_file, ".\n");
            }
          return 1;
        }
    }

  /* Now optimize (x != 0) ? x + y : y to just x + y.
     The following condition is too restrictive, there can easily be another
     stmt in middle_bb, for instance a CONVERT_EXPR for the second argument.  */
  gimple assign = last_and_only_stmt (middle_bb);
  if (!assign || gimple_code (assign) != GIMPLE_ASSIGN
      || gimple_assign_rhs_class (assign) != GIMPLE_BINARY_RHS
      || (!INTEGRAL_TYPE_P (TREE_TYPE (arg0))
          && !POINTER_TYPE_P (TREE_TYPE (arg0))))
    return 0;

  /* Punt if there are (degenerate) PHIs in middle_bb, there should not be.  */
  if (!gimple_seq_empty_p (phi_nodes (middle_bb)))
    return 0;

  /* Only transform if it removes the condition.  */
  if (!single_non_singleton_phi_for_edges (phi_nodes (gimple_bb (phi)), e0, e1))
    return 0;

  /* Size-wise, this is always profitable.  */
  if (optimize_bb_for_speed_p (cond_bb)
      /* The special case is useless if it has a low probability.  */
      && profile_status_for_fn (cfun) != PROFILE_ABSENT
      && EDGE_PRED (middle_bb, 0)->probability < PROB_EVEN
      /* If assign is cheap, there is no point avoiding it.  */
      && estimate_num_insns (assign, &eni_time_weights)
         >= 3 * estimate_num_insns (cond, &eni_time_weights))
    return 0;

  tree lhs = gimple_assign_lhs (assign);
  tree rhs1 = gimple_assign_rhs1 (assign);
  tree rhs2 = gimple_assign_rhs2 (assign);
  enum tree_code code_def = gimple_assign_rhs_code (assign);
  tree cond_lhs = gimple_cond_lhs (cond);
  tree cond_rhs = gimple_cond_rhs (cond);

  if (((code == NE_EXPR && e1 == false_edge)
       || (code == EQ_EXPR && e1 == true_edge))
      && arg0 == lhs
      && ((arg1 == rhs1
           && operand_equal_for_phi_arg_p (rhs2, cond_lhs)
           && neutral_element_p (code_def, cond_rhs, true))
          || (arg1 == rhs2
              && operand_equal_for_phi_arg_p (rhs1, cond_lhs)
              && neutral_element_p (code_def, cond_rhs, false))
          || (operand_equal_for_phi_arg_p (arg1, cond_rhs)
              && (operand_equal_for_phi_arg_p (rhs2, cond_lhs)
                  || operand_equal_for_phi_arg_p (rhs1, cond_lhs))
              && absorbing_element_p (code_def, cond_rhs))))
    {
      gsi = gsi_for_stmt (cond);
      if (INTEGRAL_TYPE_P (TREE_TYPE (lhs)))
        {
          /* Moving ASSIGN might change VR of lhs, e.g. when moving u_6
             def-stmt in:
             if (n_5 != 0)
               goto <bb 3>;
             else
               goto <bb 4>;

             <bb 3>:
             # RANGE [0, 4294967294]
             u_6 = n_5 + 4294967295;

             <bb 4>:
             # u_3 = PHI <u_6(3), 4294967295(2)>  */
          SSA_NAME_RANGE_INFO (lhs) = NULL;
          SSA_NAME_ANTI_RANGE_P (lhs) = 0;
          /* If available, we can use VR of phi result at least.  */
          tree phires = gimple_phi_result (phi);
          struct range_info_def *phires_range_info
            = SSA_NAME_RANGE_INFO (phires);
          if (phires_range_info)
            duplicate_ssa_name_range_info (lhs, SSA_NAME_RANGE_TYPE (phires),
                                           phires_range_info);
        }
      gimple_stmt_iterator gsi_from = gsi_for_stmt (assign);
      gsi_move_before (&gsi_from, &gsi);
      replace_phi_edge_with_variable (cond_bb, e1, phi, lhs);
      return 2;
    }

  return 0;
}
/* The function minmax_replacement does the main work of doing the minmax
   replacement.  Return true if the replacement is done.  Otherwise return
   false.
   BB is the basic block where the replacement is going to be done.  ARG0
   is argument 0 from the PHI.  Likewise for ARG1.  */
static bool
minmax_replacement (basic_block cond_bb, basic_block middle_bb,
                    edge e0, edge e1, gimple phi,
                    tree arg0, tree arg1)
{
  tree result, type;
  gcond *cond;
  gassign *new_stmt;
  edge true_edge, false_edge;
  enum tree_code cmp, minmax, ass_code;
  tree smaller, larger, arg_true, arg_false;
  gimple_stmt_iterator gsi, gsi_from;

  type = TREE_TYPE (PHI_RESULT (phi));

  /* The optimization may be unsafe due to NaNs.  */
  if (HONOR_NANS (type))
    return false;

  cond = as_a <gcond *> (last_stmt (cond_bb));
  cmp = gimple_cond_code (cond);

  /* This transformation is only valid for order comparisons.  Record which
     operand is smaller/larger if the result of the comparison is true.  */
  if (cmp == LT_EXPR || cmp == LE_EXPR)
    {
      smaller = gimple_cond_lhs (cond);
      larger = gimple_cond_rhs (cond);
    }
  else if (cmp == GT_EXPR || cmp == GE_EXPR)
    {
      smaller = gimple_cond_rhs (cond);
      larger = gimple_cond_lhs (cond);
    }
  else
    return false;

  /* We need to know which is the true edge and which is the false
     edge so that we can match the PHI arguments with the operands of
     the comparison.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);

  /* Forward the edges over the middle basic block.  */
  if (true_edge->dest == middle_bb)
    true_edge = EDGE_SUCC (true_edge->dest, 0);
  if (false_edge->dest == middle_bb)
    false_edge = EDGE_SUCC (false_edge->dest, 0);

  if (true_edge == e0)
    {
      gcc_assert (false_edge == e1);
      arg_true = arg0;
      arg_false = arg1;
    }
  else
    {
      gcc_assert (false_edge == e0);
      gcc_assert (true_edge == e1);
      arg_true = arg1;
      arg_false = arg0;
    }

  if (empty_block_p (middle_bb))
    {
      if (operand_equal_for_phi_arg_p (arg_true, smaller)
          && operand_equal_for_phi_arg_p (arg_false, larger))
        {
          /* Case

             if (smaller < larger)
               rslt = smaller;
             else
               rslt = larger;  */
          minmax = MIN_EXPR;
        }
      else if (operand_equal_for_phi_arg_p (arg_false, smaller)
               && operand_equal_for_phi_arg_p (arg_true, larger))
        minmax = MAX_EXPR;
      else
        return false;
    }
  else
    {
      /* Recognize the following case, assuming d <= u:

         if (a <= u)
           b = MAX (a, d);
         x = PHI <b, u>

         This is equivalent to

         b = MAX (a, d);
         x = MIN (b, u);  */

      gimple assign = last_and_only_stmt (middle_bb);
      tree lhs, op0, op1, bound;

      if (!assign
          || gimple_code (assign) != GIMPLE_ASSIGN)
        return false;

      lhs = gimple_assign_lhs (assign);
      ass_code = gimple_assign_rhs_code (assign);
      if (ass_code != MAX_EXPR && ass_code != MIN_EXPR)
        return false;
      op0 = gimple_assign_rhs1 (assign);
      op1 = gimple_assign_rhs2 (assign);

      if (true_edge->src == middle_bb)
        {
          /* We got here if the condition is true, i.e., SMALLER < LARGER.  */
          if (!operand_equal_for_phi_arg_p (lhs, arg_true))
            return false;

          if (operand_equal_for_phi_arg_p (arg_false, larger))
            {
              /* Case

                 if (smaller < larger)
                   {
                     r' = MAX_EXPR (smaller, bound)
                   }
                 r = PHI <r', larger>  --> to be turned to MIN_EXPR.  */
              if (ass_code != MAX_EXPR)
                return false;

              minmax = MIN_EXPR;
              if (operand_equal_for_phi_arg_p (op0, smaller))
                bound = op1;
              else if (operand_equal_for_phi_arg_p (op1, smaller))
                bound = op0;
              else
                return false;

              /* We need BOUND <= LARGER.  */
              if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
                                                  bound, larger)))
                return false;
            }
          else if (operand_equal_for_phi_arg_p (arg_false, smaller))
            {
              /* Case

                 if (smaller < larger)
                   {
                     r' = MIN_EXPR (larger, bound)
                   }
                 r = PHI <r', smaller>  --> to be turned to MAX_EXPR.  */
              if (ass_code != MIN_EXPR)
                return false;

              minmax = MAX_EXPR;
              if (operand_equal_for_phi_arg_p (op0, larger))
                bound = op1;
              else if (operand_equal_for_phi_arg_p (op1, larger))
                bound = op0;
              else
                return false;

              /* We need BOUND >= SMALLER.  */
              if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
                                                  bound, smaller)))
                return false;
            }
          else
            return false;
        }
      else
        {
          /* We got here if the condition is false, i.e., SMALLER > LARGER.  */
          if (!operand_equal_for_phi_arg_p (lhs, arg_false))
            return false;

          if (operand_equal_for_phi_arg_p (arg_true, larger))
            {
              /* Case

                 if (smaller > larger)
                   {
                     r' = MIN_EXPR (smaller, bound)
                   }
                 r = PHI <r', larger>  --> to be turned to MAX_EXPR.  */
              if (ass_code != MIN_EXPR)
                return false;

              minmax = MAX_EXPR;
              if (operand_equal_for_phi_arg_p (op0, smaller))
                bound = op1;
              else if (operand_equal_for_phi_arg_p (op1, smaller))
                bound = op0;
              else
                return false;

              /* We need BOUND >= LARGER.  */
              if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
                                                  bound, larger)))
                return false;
            }
          else if (operand_equal_for_phi_arg_p (arg_true, smaller))
            {
              /* Case

                 if (smaller > larger)
                   {
                     r' = MAX_EXPR (larger, bound)
                   }
                 r = PHI <r', smaller>  --> to be turned to MIN_EXPR.  */
              if (ass_code != MAX_EXPR)
                return false;

              minmax = MIN_EXPR;
              if (operand_equal_for_phi_arg_p (op0, larger))
                bound = op1;
              else if (operand_equal_for_phi_arg_p (op1, larger))
                bound = op0;
              else
                return false;

              /* We need BOUND <= SMALLER.  */
              if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
                                                  bound, smaller)))
                return false;
            }
          else
            return false;
        }

      /* Move the statement from the middle block.  */
      gsi = gsi_last_bb (cond_bb);
      gsi_from = gsi_last_nondebug_bb (middle_bb);
      gsi_move_before (&gsi_from, &gsi);
    }

  /* Emit the statement to compute min/max.  */
  result = duplicate_ssa_name (PHI_RESULT (phi), NULL);
  new_stmt = gimple_build_assign (result, minmax, arg0, arg1);
  gsi = gsi_last_bb (cond_bb);
  gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);

  replace_phi_edge_with_variable (cond_bb, e1, phi, result);
  return true;
}
/* The function abs_replacement does the main work of doing the absolute
   replacement.  Return true if the replacement is done.  Otherwise return
   false.
   bb is the basic block where the replacement is going to be done.  arg0
   is argument 0 from the phi.  Likewise for arg1.  */
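/* An illustrative (hand-written) example of the matched pattern:

     if (x_1 < 0) goto bb1; else goto bb2;
   bb1:
     x_2 = -x_1;
   bb2:
     # x_3 = PHI <x_2 (bb1), x_1 (bb0)>

   which this function turns into x_3 = ABS_EXPR <x_1>.  */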
static bool
abs_replacement (basic_block cond_bb, basic_block middle_bb,
                 edge e0 ATTRIBUTE_UNUSED, edge e1,
                 gimple phi, tree arg0, tree arg1)
{
  tree result;
  gassign *new_stmt;
  gimple cond;
  gimple_stmt_iterator gsi;
  edge true_edge, false_edge;
  gimple assign;
  edge e;
  tree rhs, lhs;
  bool negate;
  enum tree_code cond_code;

  /* If the type says honor signed zeros we cannot do this
     optimization.  */
  if (HONOR_SIGNED_ZEROS (arg1))
    return false;

  /* OTHER_BLOCK must have only one executable statement which must have the
     form arg0 = -arg1 or arg1 = -arg0.  */

  assign = last_and_only_stmt (middle_bb);
  /* If we did not find the proper negation assignment, then we cannot
     optimize.  */
  if (assign == NULL)
    return false;

  /* If we got here, then we have found the only executable statement
     in OTHER_BLOCK.  If it is anything other than arg0 = -arg1 or
     arg1 = -arg0, then we cannot optimize.  */
  if (gimple_code (assign) != GIMPLE_ASSIGN)
    return false;

  lhs = gimple_assign_lhs (assign);

  if (gimple_assign_rhs_code (assign) != NEGATE_EXPR)
    return false;

  rhs = gimple_assign_rhs1 (assign);

  /* The assignment has to be arg0 = -arg1 or arg1 = -arg0.  */
  if (!(lhs == arg0 && rhs == arg1)
      && !(lhs == arg1 && rhs == arg0))
    return false;

  cond = last_stmt (cond_bb);
  result = PHI_RESULT (phi);

  /* Only relationals comparing arg[01] against zero are interesting.  */
  cond_code = gimple_cond_code (cond);
  if (cond_code != GT_EXPR && cond_code != GE_EXPR
      && cond_code != LT_EXPR && cond_code != LE_EXPR)
    return false;

  /* Make sure the conditional is arg[01] OP y.  */
  if (gimple_cond_lhs (cond) != rhs)
    return false;

  if (FLOAT_TYPE_P (TREE_TYPE (gimple_cond_rhs (cond)))
      ? real_zerop (gimple_cond_rhs (cond))
      : integer_zerop (gimple_cond_rhs (cond)))
    ;
  else
    return false;

  /* We need to know which is the true edge and which is the false
     edge so that we know if we have abs or negative abs.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);

  /* For GT_EXPR/GE_EXPR, if the true edge goes to OTHER_BLOCK, then we
     will need to negate the result.  Similarly for LT_EXPR/LE_EXPR if
     the false edge goes to OTHER_BLOCK.  */
  if (cond_code == GT_EXPR || cond_code == GE_EXPR)
    e = true_edge;
  else
    e = false_edge;

  if (e->dest == middle_bb)
    negate = true;
  else
    negate = false;

  result = duplicate_ssa_name (result, NULL);

  if (negate)
    lhs = make_ssa_name (TREE_TYPE (result));
  else
    lhs = result;

  /* Build the modify expression with abs expression.  */
  new_stmt = gimple_build_assign (lhs, ABS_EXPR, rhs);

  gsi = gsi_last_bb (cond_bb);
  gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);

  if (negate)
    {
      /* Get the right GSI.  We want to insert after the recently
         added ABS_EXPR statement (which we know is the first statement
         in the block).  */
      new_stmt = gimple_build_assign (result, NEGATE_EXPR, lhs);

      gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
    }

  replace_phi_edge_with_variable (cond_bb, e1, phi, result);

  /* Note that we optimized this PHI.  */
  return true;
}
/* The function neg_replacement replaces conditional negation with
   equivalent straight line code.  Returns TRUE if replacement is done,
   otherwise returns FALSE.

   COND_BB branches around negation occurring in MIDDLE_BB.

   E0 and E1 are edges out of COND_BB.  E0 reaches MIDDLE_BB and
   E1 reaches the other successor which should contain PHI with
   arguments ARG0 and ARG1.

   Assuming negation is to occur when the condition is true,
   then the non-branching sequence is:

     result = (rhs ^ -cond) + cond

   Inverting the condition or its result gives us negation
   when the original condition is false.  */
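/* To see why the sequence works, recall that in two's complement
   arithmetic -x == (x ^ -1) + 1.  So when cond is 1 the sequence
   computes (rhs ^ -1) + 1 == -rhs, and when cond is 0 it computes
   (rhs ^ 0) + 0 == rhs.  */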
static bool
neg_replacement (basic_block cond_bb, basic_block middle_bb,
                 edge e0 ATTRIBUTE_UNUSED, edge e1,
                 gimple phi, tree arg0, tree arg1)
{
  gimple new_stmt, cond;
  gimple_stmt_iterator gsi;
  gimple assign;
  edge true_edge, false_edge;
  tree rhs, lhs;
  enum tree_code cond_code;
  bool invert = false;

  /* This transformation performs logical operations on the
     incoming arguments, so they must have integral types.  */
  if (!INTEGRAL_TYPE_P (TREE_TYPE (arg0)))
    return false;

  /* OTHER_BLOCK must have only one executable statement which must have the
     form arg0 = -arg1 or arg1 = -arg0.  */

  assign = last_and_only_stmt (middle_bb);
  /* If we did not find the proper negation assignment, then we cannot
     optimize.  */
  if (assign == NULL)
    return false;

  /* If we got here, then we have found the only executable statement
     in OTHER_BLOCK.  If it is anything other than arg0 = -arg1 or
     arg1 = -arg0, then we cannot optimize.  */
  if (gimple_code (assign) != GIMPLE_ASSIGN)
    return false;

  lhs = gimple_assign_lhs (assign);

  if (gimple_assign_rhs_code (assign) != NEGATE_EXPR)
    return false;

  rhs = gimple_assign_rhs1 (assign);

  /* The assignment has to be arg0 = -arg1 or arg1 = -arg0.  */
  if (!(lhs == arg0 && rhs == arg1)
      && !(lhs == arg1 && rhs == arg0))
    return false;

  /* The basic sequence assumes we negate when the condition is true.
     If we need the opposite, then we will either need to invert the
     condition or its result.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
  invert = false_edge->dest == middle_bb;

  /* Unlike abs_replacement, we can handle arbitrary conditionals here.  */
  cond = last_stmt (cond_bb);
  cond_code = gimple_cond_code (cond);

  /* If inversion is needed, first try to invert the test since
     that's cheapest.  */
  if (invert)
    {
      bool honor_nans = HONOR_NANS (gimple_cond_lhs (cond));
      enum tree_code new_code = invert_tree_comparison (cond_code, honor_nans);

      /* If invert_tree_comparison was successful, then use its return
         value as the new code and note that inversion is no longer
         needed.  */
      if (new_code != ERROR_MARK)
        {
          cond_code = new_code;
          invert = false;
        }
    }

  tree cond_val = make_ssa_name (boolean_type_node);
  new_stmt = gimple_build_assign (cond_val, cond_code,
                                  gimple_cond_lhs (cond),
                                  gimple_cond_rhs (cond));
  gsi = gsi_last_bb (cond_bb);
  gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);

  /* If we still need inversion, then invert the result of the
     condition.  */
  if (invert)
    {
      tree tmp = make_ssa_name (boolean_type_node);
      new_stmt = gimple_build_assign (tmp, BIT_XOR_EXPR, cond_val,
                                      boolean_true_node);
      gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
      cond_val = tmp;
    }

  /* Get the condition in the right type so that we can perform
     logical and arithmetic operations on it.  */
  tree cond_val_converted = make_ssa_name (TREE_TYPE (rhs));
  new_stmt = gimple_build_assign (cond_val_converted, NOP_EXPR, cond_val);
  gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);

  tree neg_cond_val_converted = make_ssa_name (TREE_TYPE (rhs));
  new_stmt = gimple_build_assign (neg_cond_val_converted, NEGATE_EXPR,
                                  cond_val_converted);
  gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);

  tree tmp = make_ssa_name (TREE_TYPE (rhs));
  new_stmt = gimple_build_assign (tmp, BIT_XOR_EXPR, rhs,
                                  neg_cond_val_converted);
  gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);

  tree new_lhs = make_ssa_name (TREE_TYPE (rhs));
  new_stmt = gimple_build_assign (new_lhs, PLUS_EXPR, tmp, cond_val_converted);
  gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);

  replace_phi_edge_with_variable (cond_bb, e1, phi, new_lhs);

  /* Note that we optimized this PHI.  */
  return true;
}
/* Auxiliary functions to determine the set of memory accesses which
   can't trap because they are preceded by accesses to the same memory
   portion.  We do that for MEM_REFs, so we only need to track
   the SSA_NAME of the pointer indirectly referenced.  The algorithm
   simply is a walk over all instructions in dominator order.  When
   we see a MEM_REF we determine if we've already seen the same
   ref anywhere up to the root of the dominator tree.  If we have,
   the current access can't trap.  If we don't see any dominating
   access, the current access might trap, but might also make later
   accesses non-trapping, so we remember it.  We need to be careful
   with loads or stores, for instance a load might not trap, while
   a store would, so if we see a dominating read access this doesn't
   mean that a later write access would not trap.  Hence we also need
   to differentiate the type of access(es) seen.

   ??? We currently are very conservative and assume that a load might
   trap even if a store doesn't (write-only memory).  This probably is
   overly conservative.  */
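/* As a small illustration of the idea:

     *p_1 = 0;
     if (cond_2 != 0)
       *p_1 = 1;

   The conditional store through p_1 cannot trap, because the dominating
   store to the same location would already have trapped.  */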
/* A hash-table of SSA_NAMEs, and in which basic block a MEM_REF
   through it was seen, which would constitute a no-trap region for
   same accesses.  */
struct name_to_bb
{
  unsigned int ssa_name_ver;
  unsigned int phase;
  bool store;
  HOST_WIDE_INT offset, size;
  basic_block bb;
};

/* Hashtable helpers.  */

struct ssa_names_hasher : typed_free_remove <name_to_bb>
{
  typedef name_to_bb value_type;
  typedef name_to_bb compare_type;
  static inline hashval_t hash (const value_type *);
  static inline bool equal (const value_type *, const compare_type *);
};

/* Used for quick clearing of the hash-table when we see calls.
   Hash entries with phase < nt_call_phase are invalid.  */
static unsigned int nt_call_phase;
/* The hash function.  */

inline hashval_t
ssa_names_hasher::hash (const value_type *n)
{
  return n->ssa_name_ver ^ (((hashval_t) n->store) << 31)
         ^ (n->offset << 6) ^ (n->size << 3);
}

/* The equality function of *N1 and *N2.  */

inline bool
ssa_names_hasher::equal (const value_type *n1, const compare_type *n2)
{
  return n1->ssa_name_ver == n2->ssa_name_ver
         && n1->store == n2->store
         && n1->offset == n2->offset
         && n1->size == n2->size;
}
class nontrapping_dom_walker : public dom_walker
{
public:
  nontrapping_dom_walker (cdi_direction direction, hash_set<tree> *ps)
    : dom_walker (direction), m_nontrapping (ps), m_seen_ssa_names (128) {}

  virtual void before_dom_children (basic_block);
  virtual void after_dom_children (basic_block);

private:

  /* We see the expression EXP in basic block BB.  If it's an interesting
     expression (a MEM_REF through an SSA_NAME) possibly insert the
     expression into the set NONTRAP or the hash table of seen expressions.
     STORE is true if this expression is on the LHS, otherwise it's on
     the RHS.  */
  void add_or_mark_expr (basic_block, tree, bool);

  hash_set<tree> *m_nontrapping;

  /* The hash table for remembering what we've seen.  */
  hash_table<ssa_names_hasher> m_seen_ssa_names;
};
/* Called by walk_dominator_tree, when entering the block BB.  */
void
nontrapping_dom_walker::before_dom_children (basic_block bb)
{
  edge e;
  edge_iterator ei;
  gimple_stmt_iterator gsi;

  /* If we haven't seen all our predecessors, clear the hash-table.  */
  FOR_EACH_EDGE (e, ei, bb->preds)
    if ((((size_t)e->src->aux) & 2) == 0)
      {
        nt_call_phase++;
        break;
      }

  /* Mark this BB as being on the path to dominator root and as visited.  */
  bb->aux = (void*)(1 | 2);

  /* And walk the statements in order.  */
  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);

      if (is_gimple_call (stmt) && !nonfreeing_call_p (stmt))
        nt_call_phase++;
      else if (gimple_assign_single_p (stmt) && !gimple_has_volatile_ops (stmt))
        {
          add_or_mark_expr (bb, gimple_assign_lhs (stmt), true);
          add_or_mark_expr (bb, gimple_assign_rhs1 (stmt), false);
        }
    }
}
/* Called by walk_dominator_tree, when basic block BB is exited.  */
void
nontrapping_dom_walker::after_dom_children (basic_block bb)
{
  /* This BB isn't on the path to dominator root anymore.  */
  bb->aux = (void*)2;
}
/* We see the expression EXP in basic block BB.  If it's an interesting
   expression (a MEM_REF through an SSA_NAME) possibly insert the
   expression into the set NONTRAP or the hash table of seen expressions.
   STORE is true if this expression is on the LHS, otherwise it's on
   the RHS.  */
void
nontrapping_dom_walker::add_or_mark_expr (basic_block bb, tree exp, bool store)
{
  HOST_WIDE_INT size;

  if (TREE_CODE (exp) == MEM_REF
      && TREE_CODE (TREE_OPERAND (exp, 0)) == SSA_NAME
      && tree_fits_shwi_p (TREE_OPERAND (exp, 1))
      && (size = int_size_in_bytes (TREE_TYPE (exp))) > 0)
    {
      tree name = TREE_OPERAND (exp, 0);
      struct name_to_bb map;
      name_to_bb **slot;
      struct name_to_bb *n2bb;
      basic_block found_bb = 0;

      /* Try to find the last seen MEM_REF through the same
         SSA_NAME, which can trap.  */
      map.ssa_name_ver = SSA_NAME_VERSION (name);
      map.phase = 0;
      map.bb = 0;
      map.store = store;
      map.offset = tree_to_shwi (TREE_OPERAND (exp, 1));
      map.size = size;

      slot = m_seen_ssa_names.find_slot (&map, INSERT);
      n2bb = *slot;
      if (n2bb && n2bb->phase >= nt_call_phase)
        found_bb = n2bb->bb;

      /* If we've found a trapping MEM_REF, _and_ it dominates EXP
         (it's in a basic block on the path from us to the dominator root),
         then we can't trap.  */
      if (found_bb && (((size_t)found_bb->aux) & 1) == 1)
        {
          m_nontrapping->add (exp);
        }
      else
        {
          /* EXP might trap, so insert it into the hash table.  */
          if (n2bb)
            {
              n2bb->phase = nt_call_phase;
              n2bb->bb = bb;
            }
          else
            {
              n2bb = XNEW (struct name_to_bb);
              n2bb->ssa_name_ver = SSA_NAME_VERSION (name);
              n2bb->phase = nt_call_phase;
              n2bb->bb = bb;
              n2bb->store = store;
              n2bb->offset = map.offset;
              n2bb->size = size;
              *slot = n2bb;
            }
        }
    }
}
/* This is the entry point of gathering non-trapping memory accesses.
   It will do a dominator walk over the whole function, and it will
   make use of the bb->aux pointers.  It returns a set of trees
   (the MEM_REFs themselves) which can't trap.  */
static hash_set<tree> *
get_non_trapping (void)
{
  nt_call_phase = 0;
  hash_set<tree> *nontrap = new hash_set<tree>;
  /* We're going to do a dominator walk, so ensure that we have
     dominance information.  */
  calculate_dominance_info (CDI_DOMINATORS);

  nontrapping_dom_walker (CDI_DOMINATORS, nontrap)
    .walk (cfun->cfg->x_entry_block_ptr);

  clear_aux_for_blocks ();
  return nontrap;
}
/* Do the main work of conditional store replacement.  We already know
   that the recognized pattern looks like so:

   split:
     if (cond) goto MIDDLE_BB; else goto JOIN_BB (edge E1)
   MIDDLE_BB:
     something
     fallthrough (edge E0)
   JOIN_BB:
     some more

   We check that MIDDLE_BB contains only one store, that that store
   doesn't trap (not via NOTRAP, but via checking if an access to the same
   memory location dominates us) and that the store has a "simple" RHS.  */
static bool
cond_store_replacement (basic_block middle_bb, basic_block join_bb,
                        edge e0, edge e1, hash_set<tree> *nontrap)
{
  gimple assign = last_and_only_stmt (middle_bb);
  tree lhs, rhs, name, name2;
  gphi *newphi;
  gassign *new_stmt;
  gimple_stmt_iterator gsi;
  source_location locus;

  /* Check if middle_bb contains only one store.  */
  if (!assign
      || !gimple_assign_single_p (assign)
      || gimple_has_volatile_ops (assign))
    return false;

  locus = gimple_location (assign);
  lhs = gimple_assign_lhs (assign);
  rhs = gimple_assign_rhs1 (assign);
  if (TREE_CODE (lhs) != MEM_REF
      || TREE_CODE (TREE_OPERAND (lhs, 0)) != SSA_NAME
      || !is_gimple_reg_type (TREE_TYPE (lhs)))
    return false;

  /* Prove that we can move the store down.  We could also check
     TREE_THIS_NOTRAP here, but in that case we also could move stores,
     whose value is not available readily, which we want to avoid.  */
  if (!nontrap->contains (lhs))
    return false;

  /* Now we've checked the constraints, so do the transformation:
     1) Remove the single store.  */
  gsi = gsi_for_stmt (assign);
  unlink_stmt_vdef (assign);
  gsi_remove (&gsi, true);
  release_defs (assign);

  /* 2) Insert a load from the memory of the store to the temporary
        on the edge which did not contain the store.  */
  lhs = unshare_expr (lhs);
  name = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
  new_stmt = gimple_build_assign (name, lhs);
  gimple_set_location (new_stmt, locus);
  gsi_insert_on_edge (e1, new_stmt);

  /* 3) Create a PHI node at the join block, with one argument
        holding the old RHS, and the other holding the temporary
        where we stored the old memory contents.  */
  name2 = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
  newphi = create_phi_node (name2, join_bb);
  add_phi_arg (newphi, rhs, e0, locus);
  add_phi_arg (newphi, name, e1, locus);

  lhs = unshare_expr (lhs);
  new_stmt = gimple_build_assign (lhs, PHI_RESULT (newphi));

  /* 4) Insert that PHI node.  */
  gsi = gsi_after_labels (join_bb);
  if (gsi_end_p (gsi))
    {
      gsi = gsi_last_bb (join_bb);
      gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
    }
  else
    gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);

  return true;
}
/* Do the main work of conditional store replacement.  */

static bool
cond_if_else_store_replacement_1 (basic_block then_bb, basic_block else_bb,
                                  basic_block join_bb, gimple then_assign,
                                  gimple else_assign)
{
  tree lhs_base, lhs, then_rhs, else_rhs, name;
  source_location then_locus, else_locus;
  gimple_stmt_iterator gsi;
  gphi *newphi;
  gassign *new_stmt;

  if (then_assign == NULL
      || !gimple_assign_single_p (then_assign)
      || gimple_clobber_p (then_assign)
      || gimple_has_volatile_ops (then_assign)
      || else_assign == NULL
      || !gimple_assign_single_p (else_assign)
      || gimple_clobber_p (else_assign)
      || gimple_has_volatile_ops (else_assign))
    return false;

  lhs = gimple_assign_lhs (then_assign);
  if (!is_gimple_reg_type (TREE_TYPE (lhs))
      || !operand_equal_p (lhs, gimple_assign_lhs (else_assign), 0))
    return false;

  lhs_base = get_base_address (lhs);
  if (lhs_base == NULL_TREE
      || (!DECL_P (lhs_base) && TREE_CODE (lhs_base) != MEM_REF))
    return false;

  then_rhs = gimple_assign_rhs1 (then_assign);
  else_rhs = gimple_assign_rhs1 (else_assign);
  then_locus = gimple_location (then_assign);
  else_locus = gimple_location (else_assign);

  /* Now we've checked the constraints, so do the transformation:
     1) Remove the stores.  */
  gsi = gsi_for_stmt (then_assign);
  unlink_stmt_vdef (then_assign);
  gsi_remove (&gsi, true);
  release_defs (then_assign);

  gsi = gsi_for_stmt (else_assign);
  unlink_stmt_vdef (else_assign);
  gsi_remove (&gsi, true);
  release_defs (else_assign);

  /* 2) Create a PHI node at the join block, with one argument
        holding the RHS of the then-store and the other holding
        the RHS of the else-store.  */
  name = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
  newphi = create_phi_node (name, join_bb);
  add_phi_arg (newphi, then_rhs, EDGE_SUCC (then_bb, 0), then_locus);
  add_phi_arg (newphi, else_rhs, EDGE_SUCC (else_bb, 0), else_locus);

  new_stmt = gimple_build_assign (lhs, PHI_RESULT (newphi));

  /* 3) Insert that PHI node.  */
  gsi = gsi_after_labels (join_bb);
  if (gsi_end_p (gsi))
    {
      gsi = gsi_last_bb (join_bb);
      gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
    }
  else
    gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);

  return true;
}

/* Conditional store replacement.  We already know
   that the recognized pattern looks like so:

   split:
     if (cond) goto THEN_BB; else goto ELSE_BB (edge E1)
   THEN_BB:
     ...
     X = Y;
     ...
     goto JOIN_BB;
   ELSE_BB:
     ...
     X = Z;
     ...
     fallthrough (edge E0)
   JOIN_BB:
     some more

   We check that it is safe to sink the store to JOIN_BB by verifying that
   there are no read-after-write or write-after-write dependencies in
   THEN_BB and ELSE_BB.  */
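
/* For instance (source-level sketch; names are illustrative):

     if (cond)
       g = a;
     else
       g = b;

   is rewritten so that only the value is computed conditionally and
   the store to g is issued once in JOIN_BB:

     tmp = PHI <a, b>;
     g = tmp;  */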

static bool
cond_if_else_store_replacement (basic_block then_bb, basic_block else_bb,
                                basic_block join_bb)
{
  gimple then_assign = last_and_only_stmt (then_bb);
  gimple else_assign = last_and_only_stmt (else_bb);
  vec<data_reference_p> then_datarefs, else_datarefs;
  vec<ddr_p> then_ddrs, else_ddrs;
  gimple then_store, else_store;
  bool found, ok = false, res;
  struct data_dependence_relation *ddr;
  data_reference_p then_dr, else_dr;
  int i, j;
  tree then_lhs, else_lhs;
  basic_block blocks[3];

  if (MAX_STORES_TO_SINK == 0)
    return false;

  /* Handle the case with a single statement in THEN_BB and ELSE_BB.  */
  if (then_assign && else_assign)
    return cond_if_else_store_replacement_1 (then_bb, else_bb, join_bb,
                                             then_assign, else_assign);

  /* Find data references.  */
  then_datarefs.create (1);
  else_datarefs.create (1);
  if ((find_data_references_in_bb (NULL, then_bb, &then_datarefs)
       == chrec_dont_know)
      || !then_datarefs.length ()
      || (find_data_references_in_bb (NULL, else_bb, &else_datarefs)
          == chrec_dont_know)
      || !else_datarefs.length ())
    {
      free_data_refs (then_datarefs);
      free_data_refs (else_datarefs);
      return false;
    }

  /* Find pairs of stores with an equal LHS.  */
  auto_vec<gimple, 1> then_stores, else_stores;
  FOR_EACH_VEC_ELT (then_datarefs, i, then_dr)
    {
      if (DR_IS_READ (then_dr))
        continue;

      then_store = DR_STMT (then_dr);
      then_lhs = gimple_get_lhs (then_store);
      if (then_lhs == NULL_TREE)
        continue;
      found = false;

      FOR_EACH_VEC_ELT (else_datarefs, j, else_dr)
        {
          if (DR_IS_READ (else_dr))
            continue;

          else_store = DR_STMT (else_dr);
          else_lhs = gimple_get_lhs (else_store);
          if (else_lhs == NULL_TREE)
            continue;

          if (operand_equal_p (then_lhs, else_lhs, 0))
            {
              found = true;
              break;
            }
        }

      if (!found)
        continue;

      then_stores.safe_push (then_store);
      else_stores.safe_push (else_store);
    }

  /* No pairs of stores found.  */
  if (!then_stores.length ()
      || then_stores.length () > (unsigned) MAX_STORES_TO_SINK)
    {
      free_data_refs (then_datarefs);
      free_data_refs (else_datarefs);
      return false;
    }

  /* Compute and check data dependencies in both basic blocks.  */
  then_ddrs.create (1);
  else_ddrs.create (1);
  if (!compute_all_dependences (then_datarefs, &then_ddrs,
                                vNULL, false)
      || !compute_all_dependences (else_datarefs, &else_ddrs,
                                   vNULL, false))
    {
      free_dependence_relations (then_ddrs);
      free_dependence_relations (else_ddrs);
      free_data_refs (then_datarefs);
      free_data_refs (else_datarefs);
      return false;
    }
  blocks[0] = then_bb;
  blocks[1] = else_bb;
  blocks[2] = join_bb;
  renumber_gimple_stmt_uids_in_blocks (blocks, 3);

  /* Check that there are no read-after-write or write-after-write
     dependencies in THEN_BB.  */
  FOR_EACH_VEC_ELT (then_ddrs, i, ddr)
    {
      struct data_reference *dra = DDR_A (ddr);
      struct data_reference *drb = DDR_B (ddr);

      if (DDR_ARE_DEPENDENT (ddr) != chrec_known
          && ((DR_IS_READ (dra) && DR_IS_WRITE (drb)
               && gimple_uid (DR_STMT (dra)) > gimple_uid (DR_STMT (drb)))
              || (DR_IS_READ (drb) && DR_IS_WRITE (dra)
                  && gimple_uid (DR_STMT (drb)) > gimple_uid (DR_STMT (dra)))
              || (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))))
        {
          free_dependence_relations (then_ddrs);
          free_dependence_relations (else_ddrs);
          free_data_refs (then_datarefs);
          free_data_refs (else_datarefs);
          return false;
        }
    }
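
  /* For instance (illustrative): a THEN_BB containing

       *p = 1;
       x = *q;

     must be rejected when *q may alias *p, since sinking the store to
     the join block would move it past the read.  */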

  /* Check that there are no read-after-write or write-after-write
     dependencies in ELSE_BB.  */
  FOR_EACH_VEC_ELT (else_ddrs, i, ddr)
    {
      struct data_reference *dra = DDR_A (ddr);
      struct data_reference *drb = DDR_B (ddr);

      if (DDR_ARE_DEPENDENT (ddr) != chrec_known
          && ((DR_IS_READ (dra) && DR_IS_WRITE (drb)
               && gimple_uid (DR_STMT (dra)) > gimple_uid (DR_STMT (drb)))
              || (DR_IS_READ (drb) && DR_IS_WRITE (dra)
                  && gimple_uid (DR_STMT (drb)) > gimple_uid (DR_STMT (dra)))
              || (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))))
        {
          free_dependence_relations (then_ddrs);
          free_dependence_relations (else_ddrs);
          free_data_refs (then_datarefs);
          free_data_refs (else_datarefs);
          return false;
        }
    }

  /* Sink stores with the same LHS.  */
  FOR_EACH_VEC_ELT (then_stores, i, then_store)
    {
      else_store = else_stores[i];
      res = cond_if_else_store_replacement_1 (then_bb, else_bb, join_bb,
                                              then_store, else_store);
      ok = ok || res;
    }

  free_dependence_relations (then_ddrs);
  free_dependence_relations (else_ddrs);
  free_data_refs (then_datarefs);
  free_data_refs (else_datarefs);

  return ok;
}

/* Return TRUE if STMT has a VUSE whose corresponding VDEF is in BB.  */
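
/* For instance (illustrative): if BB contains "*p = 1" producing
   virtual definition .MEM_2 and STMT is "x_3 = *q" carrying a VUSE of
   .MEM_2, the result is true, and STMT may depend on a store local to
   BB.  */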

static bool
local_mem_dependence (gimple stmt, basic_block bb)
{
  tree vuse = gimple_vuse (stmt);
  gimple def;

  if (!vuse)
    return false;

  def = SSA_NAME_DEF_STMT (vuse);
  return (def && gimple_bb (def) == bb);
}

/* Given a "diamond" control-flow pattern where BB0 tests a condition,
   BB1 and BB2 are "then" and "else" blocks dependent on this test,
   and BB3 rejoins control flow following BB1 and BB2, look for
   opportunities to hoist loads as follows.  If BB3 contains a PHI of
   two loads, one each occurring in BB1 and BB2, and the loads are
   provably of adjacent fields in the same structure, then move both
   loads into BB0.  Of course this can only be done if there are no
   dependencies preventing such motion.

   One of the hoisted loads will always be speculative, so the
   transformation is currently conservative:

    - The fields must be strictly adjacent.
    - The two fields must occupy a single memory block that is
      guaranteed not to cross a page boundary.

   The second condition is difficult to prove, as such memory blocks
   should be aligned on the minimum of the stack alignment boundary
   and the alignment guaranteed by heap allocation interfaces.  Thus
   we rely on a parameter for the alignment value.

   Provided a good value is used for that parameter, the first
   restriction could possibly be relaxed.  */
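
/* A hypothetical layout satisfying these constraints (illustrative
   only):

     struct node
     {
       struct node *left;     bit offset 0
       struct node *right;    bit offset 64 on LP64 targets
     };

   With a 64-byte (512-bit) L1 line and a suitably aligned node, both
   pointers fall into one cache line, so "x = c ? n->left : n->right"
   can safely load both fields before the branch.  */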

static void
hoist_adjacent_loads (basic_block bb0, basic_block bb1,
                      basic_block bb2, basic_block bb3)
{
  int param_align = PARAM_VALUE (PARAM_L1_CACHE_LINE_SIZE);
  unsigned param_align_bits = (unsigned) (param_align * BITS_PER_UNIT);
  gphi_iterator gsi;

  /* Walk the phis in bb3 looking for an opportunity.  We are looking
     for phis of two SSA names, one each of which is defined in bb1 and
     bb2.  */
  for (gsi = gsi_start_phis (bb3); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gphi *phi_stmt = gsi.phi ();
      gimple def1, def2, defswap;
      tree arg1, arg2, ref1, ref2, field1, field2, fieldswap;
      tree tree_offset1, tree_offset2, tree_size2, next;
      int offset1, offset2, size2;
      unsigned align1;
      gimple_stmt_iterator gsi2;
      basic_block bb_for_def1, bb_for_def2;

      if (gimple_phi_num_args (phi_stmt) != 2
          || virtual_operand_p (gimple_phi_result (phi_stmt)))
        continue;

      arg1 = gimple_phi_arg_def (phi_stmt, 0);
      arg2 = gimple_phi_arg_def (phi_stmt, 1);

      if (TREE_CODE (arg1) != SSA_NAME
          || TREE_CODE (arg2) != SSA_NAME
          || SSA_NAME_IS_DEFAULT_DEF (arg1)
          || SSA_NAME_IS_DEFAULT_DEF (arg2))
        continue;

      def1 = SSA_NAME_DEF_STMT (arg1);
      def2 = SSA_NAME_DEF_STMT (arg2);

      if ((gimple_bb (def1) != bb1 || gimple_bb (def2) != bb2)
          && (gimple_bb (def2) != bb1 || gimple_bb (def1) != bb2))
        continue;

      /* Check the mode of the arguments to be sure a conditional move
         can be generated for it.  */
      if (optab_handler (movcc_optab, TYPE_MODE (TREE_TYPE (arg1)))
          == CODE_FOR_nothing)
        continue;

      /* Both statements must be assignments whose RHS is a COMPONENT_REF.  */
      if (!gimple_assign_single_p (def1)
          || !gimple_assign_single_p (def2)
          || gimple_has_volatile_ops (def1)
          || gimple_has_volatile_ops (def2))
        continue;

      ref1 = gimple_assign_rhs1 (def1);
      ref2 = gimple_assign_rhs1 (def2);

      if (TREE_CODE (ref1) != COMPONENT_REF
          || TREE_CODE (ref2) != COMPONENT_REF)
        continue;

      /* The zeroth operand of the two component references must be
         identical.  It is not sufficient to compare get_base_address of
         the two references, because this could allow for different
         elements of the same array in the two trees.  It is not safe to
         assume that the existence of one array element implies the
         existence of a different one.  */
      if (!operand_equal_p (TREE_OPERAND (ref1, 0), TREE_OPERAND (ref2, 0), 0))
        continue;

      field1 = TREE_OPERAND (ref1, 1);
      field2 = TREE_OPERAND (ref2, 1);

      /* Check for field adjacency, and ensure field1 comes first.  */
      for (next = DECL_CHAIN (field1);
           next && TREE_CODE (next) != FIELD_DECL;
           next = DECL_CHAIN (next))
        ;

      if (next != field2)
        {
          for (next = DECL_CHAIN (field2);
               next && TREE_CODE (next) != FIELD_DECL;
               next = DECL_CHAIN (next))
            ;

          if (next != field1)
            continue;

          fieldswap = field1;
          field1 = field2;
          field2 = fieldswap;
          defswap = def1;
          def1 = def2;
          def2 = defswap;
        }

      bb_for_def1 = gimple_bb (def1);
      bb_for_def2 = gimple_bb (def2);

      /* Check for proper alignment of the first field.  */
      tree_offset1 = bit_position (field1);
      tree_offset2 = bit_position (field2);
      tree_size2 = DECL_SIZE (field2);

      if (!tree_fits_uhwi_p (tree_offset1)
          || !tree_fits_uhwi_p (tree_offset2)
          || !tree_fits_uhwi_p (tree_size2))
        continue;

      offset1 = tree_to_uhwi (tree_offset1);
      offset2 = tree_to_uhwi (tree_offset2);
      size2 = tree_to_uhwi (tree_size2);
      align1 = DECL_ALIGN (field1) % param_align_bits;

      if (offset1 % BITS_PER_UNIT != 0)
        continue;

      /* For profitability, the two field references should fit within
         a single cache line.  */
      if (align1 + offset2 - offset1 + size2 > param_align_bits)
        continue;
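
      /* Worked example (illustrative numbers): with a 64-byte line
         (param_align_bits == 512), field1 at bit offset 0 aligned to
         the line (align1 == 0), and field2 a 64-bit field at bit
         offset 64, we get 0 + 64 - 0 + 64 = 128 <= 512, so the pair
         is considered profitable.  */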

      /* The two expressions cannot be dependent upon vdefs defined
         in bb1/bb2.  */
      if (local_mem_dependence (def1, bb_for_def1)
          || local_mem_dependence (def2, bb_for_def2))
        continue;

      /* The conditions are satisfied; hoist the loads from bb1 and bb2 into
         bb0.  We hoist the first one first so that a cache miss is handled
         efficiently regardless of hardware cache-fill policy.  */
      gsi2 = gsi_for_stmt (def1);
      gsi_move_to_bb_end (&gsi2, bb0);
      gsi2 = gsi_for_stmt (def2);
      gsi_move_to_bb_end (&gsi2, bb0);

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file,
                   "\nHoisting adjacent loads from %d and %d into %d: \n",
                   bb_for_def1->index, bb_for_def2->index, bb0->index);
          print_gimple_stmt (dump_file, def1, 0, TDF_VOPS|TDF_MEMSYMS);
          print_gimple_stmt (dump_file, def2, 0, TDF_VOPS|TDF_MEMSYMS);
        }
    }
}

/* Determine whether we should attempt to hoist adjacent loads out of
   diamond patterns in pass_phiopt.  Always hoist loads if
   -fhoist-adjacent-loads is specified and the target machine has
   both a conditional move instruction and a defined cache line size.  */

static bool
gate_hoist_loads (void)
{
  return (flag_hoist_adjacent_loads == 1
          && PARAM_VALUE (PARAM_L1_CACHE_LINE_SIZE)
          && HAVE_conditional_move);
}

/* This pass tries to replace an if-then-else block with an
   assignment.  We have four kinds of transformations.  Some of these
   transformations are also performed by the ifcvt RTL optimizer.

   Conditional Replacement
   -----------------------

   This transformation, implemented in conditional_replacement,
   replaces

     bb0:
      if (cond) goto bb2; else goto bb1;
     bb1:
     bb2:
      x = PHI <0 (bb1), 1 (bb0), ...>;

   with

     bb0:
      x' = cond;
      goto bb2;
     bb2:
      x = PHI <x' (bb0), ...>;

   We remove bb1 as it becomes unreachable.  This occurs often due to
   gimplification of conditionals.
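
   For example (source-level sketch; names are illustrative):

     if (c)
       x = 1;
     else
       x = 0;

   becomes simply "x = c;", possibly with a conversion, or with a
   negation of the condition when the PHI arguments are swapped.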

   Value Replacement
   -----------------

   This transformation, implemented in value_replacement, replaces

     bb0:
      if (a != b) goto bb2; else goto bb1;
     bb1:
     bb2:
      x = PHI <a (bb1), b (bb0), ...>;

   with

     bb0:
     bb2:
      x = PHI <b (bb0), ...>;

   This opportunity can sometimes occur as a result of other
   optimizations.

   Another case caught by value replacement looks like this:

     bb0:
       t1 = a == CONST;
       t2 = b > c;
       t3 = t1 & t2;
       if (t3 != 0) goto bb1; else goto bb2;
     bb1:
     bb2:
       x = PHI (CONST, a)

   Gets replaced with:
     bb0:
     bb2:
       t1 = a == CONST;
       t2 = b > c;
       t3 = t1 & t2;
       x = a;
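
   In source terms (illustrative): "x = (a != b) ? b : a;" simplifies
   to "x = b;", because on the path where the PHI sees a, the guarding
   condition has already established a == b.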

   ABS Replacement
   ---------------

   This transformation, implemented in abs_replacement, replaces

     bb0:
      if (a >= 0) goto bb2; else goto bb1;
     bb1:
      x = -a;
     bb2:
      x = PHI <x (bb1), a (bb0), ...>;

   with

     bb0:
      x' = ABS_EXPR< a >;
     bb2:
      x = PHI <x' (bb0), ...>;
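
   In source terms (illustrative): "x = (a >= 0) ? a : -a;" becomes a
   single absolute-value operation, the equivalent of "x = abs (a);"
   for an int a.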

   MIN/MAX Replacement
   -------------------

   This transformation, implemented in minmax_replacement, replaces

     bb0:
      if (a <= b) goto bb2; else goto bb1;
     bb1:
     bb2:
      x = PHI <b (bb1), a (bb0), ...>;

   with

     bb0:
      x' = MIN_EXPR (a, b)
     bb2:
      x = PHI <x' (bb0), ...>;

   A similar transformation is done for MAX_EXPR.
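
   In source terms (illustrative): "x = (a <= b) ? a : b;" becomes
   "x = MIN (a, b);", which many targets can implement without a
   branch.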

   This pass also performs a fifth transformation of a slightly
   different flavor.

   Adjacent Load Hoisting
   ----------------------

   This transformation replaces

     bb0:
      if (...) goto bb2; else goto bb1;
     bb1:
      x1 = (<expr>).field1;
      goto bb3;
     bb2:
      x2 = (<expr>).field2;
     bb3:
      # x = PHI <x1, x2>;

   with

     bb0:
      x1 = (<expr>).field1;
      x2 = (<expr>).field2;
      if (...) goto bb2; else goto bb1;
     bb1:
      goto bb3;
     bb2:
     bb3:
      # x = PHI <x1, x2>;

   The purpose of this transformation is to enable generation of conditional
   move instructions such as Intel CMOVE or PowerPC ISEL.  Because one of
   the loads is speculative, the transformation is restricted to very
   specific cases to avoid introducing a page fault.  We are looking for
   the common idiom:

     if (...)
       x = y->left;
     else
       x = y->right;

   where left and right are typically adjacent pointers in a tree structure.  */

namespace {

const pass_data pass_data_phiopt =
{
  GIMPLE_PASS, /* type */
  "phiopt", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_PHIOPT, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_phiopt : public gimple_opt_pass
{
public:
  pass_phiopt (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_phiopt, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_phiopt (m_ctxt); }
  virtual bool gate (function *) { return flag_ssa_phiopt; }
  virtual unsigned int execute (function *)
  {
    return tree_ssa_phiopt_worker (false, gate_hoist_loads ());
  }

}; // class pass_phiopt

} // anon namespace

gimple_opt_pass *
make_pass_phiopt (gcc::context *ctxt)
{
  return new pass_phiopt (ctxt);
}

namespace {

const pass_data pass_data_cselim =
{
  GIMPLE_PASS, /* type */
  "cselim", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_PHIOPT, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_cselim : public gimple_opt_pass
{
public:
  pass_cselim (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_cselim, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return flag_tree_cselim; }
  virtual unsigned int execute (function *) { return tree_ssa_cs_elim (); }

}; // class pass_cselim

} // anon namespace

gimple_opt_pass *
make_pass_cselim (gcc::context *ctxt)
{
  return new pass_cselim (ctxt);
}