/* Optimization of PHI nodes by converting them into straightline code.
   Copyright (C) 2004-2014 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "hash-table.h"
24 #include "tm.h"
25 #include "tree.h"
26 #include "stor-layout.h"
27 #include "flags.h"
28 #include "tm_p.h"
29 #include "predict.h"
30 #include "vec.h"
31 #include "hashtab.h"
32 #include "hash-set.h"
33 #include "machmode.h"
34 #include "hard-reg-set.h"
35 #include "input.h"
36 #include "function.h"
37 #include "dominance.h"
38 #include "cfg.h"
39 #include "cfganal.h"
40 #include "basic-block.h"
41 #include "tree-ssa-alias.h"
42 #include "internal-fn.h"
43 #include "gimple-expr.h"
44 #include "is-a.h"
45 #include "gimple.h"
46 #include "gimplify.h"
47 #include "gimple-iterator.h"
48 #include "gimplify-me.h"
49 #include "gimple-ssa.h"
50 #include "tree-cfg.h"
51 #include "tree-phinodes.h"
52 #include "ssa-iterators.h"
53 #include "stringpool.h"
54 #include "tree-ssanames.h"
55 #include "expr.h"
56 #include "tree-dfa.h"
57 #include "tree-pass.h"
58 #include "langhooks.h"
59 #include "domwalk.h"
60 #include "cfgloop.h"
61 #include "tree-data-ref.h"
62 #include "gimple-pretty-print.h"
63 #include "insn-config.h"
64 #include "expr.h"
65 #include "insn-codes.h"
66 #include "optabs.h"
67 #include "tree-scalar-evolution.h"
68 #include "tree-inline.h"
#ifndef HAVE_conditional_move
#define HAVE_conditional_move (0)
#endif
static unsigned int tree_ssa_phiopt_worker (bool, bool);
static bool conditional_replacement (basic_block, basic_block,
                                     edge, edge, gphi *, tree, tree);
static int value_replacement (basic_block, basic_block,
                              edge, edge, gimple, tree, tree);
static bool minmax_replacement (basic_block, basic_block,
                                edge, edge, gimple, tree, tree);
static bool abs_replacement (basic_block, basic_block,
                             edge, edge, gimple, tree, tree);
static bool neg_replacement (basic_block, basic_block,
                             edge, edge, gimple, tree, tree);
static bool cond_store_replacement (basic_block, basic_block, edge, edge,
                                    hash_set<tree> *);
static bool cond_if_else_store_replacement (basic_block, basic_block, basic_block);
static hash_set<tree> * get_non_trapping ();
static void replace_phi_edge_with_variable (basic_block, edge, gimple, tree);
static void hoist_adjacent_loads (basic_block, basic_block,
                                  basic_block, basic_block);
static bool gate_hoist_loads (void);
/* This pass tries to transform conditional stores into unconditional
   ones, enabling further simplifications with the simpler then and else
   blocks.  In particular it replaces this:

     bb0:
       if (cond) goto bb2; else goto bb1;
     bb1:
       *p = RHS;
     bb2:

   with

     bb0:
       if (cond) goto bb1; else goto bb2;
     bb1:
       condtmp' = *p;
     bb2:
       condtmp = PHI <RHS, condtmp'>
       *p = condtmp;

   This transformation can only be done under several constraints,
   documented below.  It also replaces:

     bb0:
       if (cond) goto bb2; else goto bb1;
     bb1:
       *p = RHS1;
       goto bb3;
     bb2:
       *p = RHS2;
     bb3:

   with

     bb0:
       if (cond) goto bb3; else goto bb1;
     bb1:
     bb3:
       condtmp = PHI <RHS1, RHS2>
       *p = condtmp;  */
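/* As an illustrative sketch (not part of the pass itself): at the source
   level, the first pattern corresponds roughly to turning

     if (cond)
       *p = v;

   into

     tmp = cond ? v : *p;
     *p = tmp;

   which is only valid when the access *p is known not to trap, since the
   load and store now execute unconditionally.  */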
static unsigned int
tree_ssa_cs_elim (void)
{
  unsigned todo;
  /* ???  We are not interested in loop related info, but the following
     will create it, ICEing as we didn't init loops with pre-headers.
     An interfacing issue of find_data_references_in_bb.  */
  loop_optimizer_init (LOOPS_NORMAL);
  scev_initialize ();
  todo = tree_ssa_phiopt_worker (true, false);
  scev_finalize ();
  loop_optimizer_finalize ();
  return todo;
}
/* Return the singleton PHI in the SEQ of PHIs for edges E0 and E1.  */

static gphi *
single_non_singleton_phi_for_edges (gimple_seq seq, edge e0, edge e1)
{
  gimple_stmt_iterator i;
  gphi *phi = NULL;
  if (gimple_seq_singleton_p (seq))
    return as_a <gphi *> (gsi_stmt (gsi_start (seq)));
  for (i = gsi_start (seq); !gsi_end_p (i); gsi_next (&i))
    {
      gphi *p = as_a <gphi *> (gsi_stmt (i));
      /* If the PHI arguments are equal then we can skip this PHI.  */
      if (operand_equal_for_phi_arg_p (gimple_phi_arg_def (p, e0->dest_idx),
                                       gimple_phi_arg_def (p, e1->dest_idx)))
        continue;

      /* If we already have a PHI whose two edge arguments differ, then
         there is no singleton for these PHIs; return NULL.  */
      if (phi)
        return NULL;

      phi = p;
    }
  return phi;
}
/* The core routine of conditional store replacement and normal
   phi optimizations.  Both share much of the infrastructure in how
   to match applicable basic block patterns.  DO_STORE_ELIM is true
   when we want to do conditional store replacement, false otherwise.
   DO_HOIST_LOADS is true when we want to hoist adjacent loads out
   of diamond control flow patterns, false otherwise.  */
static unsigned int
tree_ssa_phiopt_worker (bool do_store_elim, bool do_hoist_loads)
{
  basic_block bb;
  basic_block *bb_order;
  unsigned n, i;
  bool cfgchanged = false;
  hash_set<tree> *nontrap = 0;

  if (do_store_elim)
    /* Calculate the set of non-trapping memory accesses.  */
    nontrap = get_non_trapping ();

  /* The replacement of conditional negation with a non-branching
     sequence is really only a win when optimizing for speed and we
     can avoid transformations by gimple if-conversion that result
     in poor RTL generation.

     Ideally either gimple if-conversion or the RTL expanders will
     be improved and the code to emit branchless conditional negation
     can be removed.  */
  bool replace_conditional_negation = false;
  if (!do_store_elim)
    replace_conditional_negation
      = ((!optimize_size && optimize >= 2)
         || (((flag_tree_loop_vectorize || cfun->has_force_vectorize_loops)
              && flag_tree_loop_if_convert != 0)
             || flag_tree_loop_if_convert == 1
             || flag_tree_loop_if_convert_stores == 1));

  /* Search every basic block for COND_EXPR we may be able to optimize.

     We walk the blocks in order that guarantees that a block with
     a single predecessor is processed before the predecessor.
     This ensures that we collapse inner ifs before visiting the
     outer ones, and also that we do not try to visit a removed
     block.  */
  bb_order = single_pred_before_succ_order ();
  n = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS;

  for (i = 0; i < n; i++)
    {
      gimple cond_stmt;
      gphi *phi;
      basic_block bb1, bb2;
      edge e1, e2;
      tree arg0, arg1;

      bb = bb_order[i];

      cond_stmt = last_stmt (bb);
      /* Check to see if the last statement is a GIMPLE_COND.  */
      if (!cond_stmt
          || gimple_code (cond_stmt) != GIMPLE_COND)
        continue;

      e1 = EDGE_SUCC (bb, 0);
      bb1 = e1->dest;
      e2 = EDGE_SUCC (bb, 1);
      bb2 = e2->dest;

      /* We cannot do the optimization on abnormal edges.  */
      if ((e1->flags & EDGE_ABNORMAL) != 0
          || (e2->flags & EDGE_ABNORMAL) != 0)
        continue;

      /* If bb1 has no successors, bb2 is missing, or bb2 has no
         successors, there is nothing to do.  */
      if (EDGE_COUNT (bb1->succs) == 0
          || bb2 == NULL
          || EDGE_COUNT (bb2->succs) == 0)
        continue;

      /* Find the bb which is the fall through to the other.  */
      if (EDGE_SUCC (bb1, 0)->dest == bb2)
        ;
      else if (EDGE_SUCC (bb2, 0)->dest == bb1)
        {
          basic_block bb_tmp = bb1;
          edge e_tmp = e1;
          bb1 = bb2;
          bb2 = bb_tmp;
          e1 = e2;
          e2 = e_tmp;
        }
      else if (do_store_elim
               && EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest)
        {
          basic_block bb3 = EDGE_SUCC (bb1, 0)->dest;

          if (!single_succ_p (bb1)
              || (EDGE_SUCC (bb1, 0)->flags & EDGE_FALLTHRU) == 0
              || !single_succ_p (bb2)
              || (EDGE_SUCC (bb2, 0)->flags & EDGE_FALLTHRU) == 0
              || EDGE_COUNT (bb3->preds) != 2)
            continue;
          if (cond_if_else_store_replacement (bb1, bb2, bb3))
            cfgchanged = true;
          continue;
        }
      else if (do_hoist_loads
               && EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest)
        {
          basic_block bb3 = EDGE_SUCC (bb1, 0)->dest;

          if (!FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (cond_stmt)))
              && single_succ_p (bb1)
              && single_succ_p (bb2)
              && single_pred_p (bb1)
              && single_pred_p (bb2)
              && EDGE_COUNT (bb->succs) == 2
              && EDGE_COUNT (bb3->preds) == 2
              /* If one edge or the other is dominant, a conditional move
                 is likely to perform worse than the well-predicted branch.  */
              && !predictable_edge_p (EDGE_SUCC (bb, 0))
              && !predictable_edge_p (EDGE_SUCC (bb, 1)))
            hoist_adjacent_loads (bb, bb1, bb2, bb3);
          continue;
        }
      else
        continue;

      e1 = EDGE_SUCC (bb1, 0);

      /* Make sure that bb1 is just a fall through.  */
      if (!single_succ_p (bb1)
          || (e1->flags & EDGE_FALLTHRU) == 0)
        continue;

      /* Also make sure that bb1 only has one predecessor and that it
         is bb.  */
      if (!single_pred_p (bb1)
          || single_pred (bb1) != bb)
        continue;

      if (do_store_elim)
        {
          /* bb1 is the middle block, bb2 the join block, bb the split block,
             e1 the fallthrough edge from bb1 to bb2.  We can't do the
             optimization if the join block has more than two predecessors.  */
          if (EDGE_COUNT (bb2->preds) > 2)
            continue;
          if (cond_store_replacement (bb1, bb2, e1, e2, nontrap))
            cfgchanged = true;
        }
      else
        {
          gimple_seq phis = phi_nodes (bb2);
          gimple_stmt_iterator gsi;
          bool candorest = true;

          /* Value replacement can work with more than one PHI
             so try that first.  */
          for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
            {
              phi = as_a <gphi *> (gsi_stmt (gsi));
              arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
              arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
              if (value_replacement (bb, bb1, e1, e2, phi, arg0, arg1) == 2)
                {
                  candorest = false;
                  cfgchanged = true;
                  break;
                }
            }

          if (!candorest)
            continue;

          phi = single_non_singleton_phi_for_edges (phis, e1, e2);
          if (!phi)
            continue;

          arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
          arg1 = gimple_phi_arg_def (phi, e2->dest_idx);

          /* Something is wrong if we cannot find the arguments in the PHI
             node.  */
          gcc_assert (arg0 != NULL && arg1 != NULL);

          /* Do the replacement of conditional if it can be done.  */
          if (conditional_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
            cfgchanged = true;
          else if (abs_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
            cfgchanged = true;
          else if (replace_conditional_negation
                   && neg_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
            cfgchanged = true;
          else if (minmax_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
            cfgchanged = true;
        }
    }

  free (bb_order);

  if (do_store_elim)
    delete nontrap;
  /* If the CFG has changed, we should cleanup the CFG.  */
  if (cfgchanged && do_store_elim)
    {
      /* In cond-store replacement we have added some loads on edges
         and new VOPS (as we moved the store, and created a load).  */
      gsi_commit_edge_inserts ();
      return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
    }
  else if (cfgchanged)
    return TODO_cleanup_cfg;
  return 0;
}
/* Replace PHI node element whose edge is E in block BB with variable NEW.
   Remove the edge from COND_BLOCK which does not lead to BB (COND_BLOCK
   is known to have two edges, one of which must reach BB).  */

static void
replace_phi_edge_with_variable (basic_block cond_block,
                                edge e, gimple phi, tree new_tree)
{
  basic_block bb = gimple_bb (phi);
  basic_block block_to_remove;
  gimple_stmt_iterator gsi;

  /* Change the PHI argument to new.  */
  SET_USE (PHI_ARG_DEF_PTR (phi, e->dest_idx), new_tree);

  /* Remove the empty basic block.  */
  if (EDGE_SUCC (cond_block, 0)->dest == bb)
    {
      EDGE_SUCC (cond_block, 0)->flags |= EDGE_FALLTHRU;
      EDGE_SUCC (cond_block, 0)->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
      EDGE_SUCC (cond_block, 0)->probability = REG_BR_PROB_BASE;
      EDGE_SUCC (cond_block, 0)->count += EDGE_SUCC (cond_block, 1)->count;

      block_to_remove = EDGE_SUCC (cond_block, 1)->dest;
    }
  else
    {
      EDGE_SUCC (cond_block, 1)->flags |= EDGE_FALLTHRU;
      EDGE_SUCC (cond_block, 1)->flags
        &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
      EDGE_SUCC (cond_block, 1)->probability = REG_BR_PROB_BASE;
      EDGE_SUCC (cond_block, 1)->count += EDGE_SUCC (cond_block, 0)->count;

      block_to_remove = EDGE_SUCC (cond_block, 0)->dest;
    }
  delete_basic_block (block_to_remove);

  /* Eliminate the COND_EXPR at the end of COND_BLOCK.  */
  gsi = gsi_last_bb (cond_block);
  gsi_remove (&gsi, true);

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file,
             "COND_EXPR in block %d and PHI in block %d converted to straightline code.\n",
             cond_block->index,
             bb->index);
}
/* The function conditional_replacement does the main work of doing the
   conditional replacement.  Return true if the replacement is done.
   Otherwise return false.
   BB is the basic block where the replacement is going to be done.
   ARG0 is argument 0 from PHI.  Likewise for ARG1.  */

static bool
conditional_replacement (basic_block cond_bb, basic_block middle_bb,
                         edge e0, edge e1, gphi *phi,
                         tree arg0, tree arg1)
{
  tree result;
  gimple stmt;
  gassign *new_stmt;
  tree cond;
  gimple_stmt_iterator gsi;
  edge true_edge, false_edge;
  tree new_var, new_var2;
  bool neg;

  /* FIXME: Gimplification of complex type is too hard for now.  */
  /* We aren't prepared to handle vectors either (and it is a question
     if it would be worthwhile anyway).  */
  if (!(INTEGRAL_TYPE_P (TREE_TYPE (arg0))
        || POINTER_TYPE_P (TREE_TYPE (arg0)))
      || !(INTEGRAL_TYPE_P (TREE_TYPE (arg1))
           || POINTER_TYPE_P (TREE_TYPE (arg1))))
    return false;

  /* If the PHI arguments have the constants 0 and 1, or 0 and -1, then
     convert it to the conditional.  */
  if ((integer_zerop (arg0) && integer_onep (arg1))
      || (integer_zerop (arg1) && integer_onep (arg0)))
    neg = false;
  else if ((integer_zerop (arg0) && integer_all_onesp (arg1))
           || (integer_zerop (arg1) && integer_all_onesp (arg0)))
    neg = true;
  else
    return false;

  if (!empty_block_p (middle_bb))
    return false;

  /* At this point we know we have a GIMPLE_COND with two successors.
     One successor is BB, the other successor is an empty block which
     falls through into BB.

     There is a single PHI node at the join point (BB) and its arguments
     are constants (0, 1) or (0, -1).

     So, given the condition COND, and the two PHI arguments, we can
     rewrite this PHI into non-branching code:

       dest = (COND) or dest = COND'

     We use the condition as-is if the argument associated with the
     true edge has the value one or the argument associated with the
     false edge has the value zero.  Note that those conditions are not
     the same since only one of the outgoing edges from the GIMPLE_COND
     will directly reach BB and thus be associated with an argument.  */

  stmt = last_stmt (cond_bb);
  result = PHI_RESULT (phi);

  /* To handle special cases like floating point comparison, it is easier and
     less error-prone to build a tree and gimplify it on the fly though it is
     less efficient.  */
  cond = fold_build2_loc (gimple_location (stmt),
                          gimple_cond_code (stmt), boolean_type_node,
                          gimple_cond_lhs (stmt), gimple_cond_rhs (stmt));

  /* We need to know which is the true edge and which is the false
     edge so that we know when to invert the condition below.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
  if ((e0 == true_edge && integer_zerop (arg0))
      || (e0 == false_edge && !integer_zerop (arg0))
      || (e1 == true_edge && integer_zerop (arg1))
      || (e1 == false_edge && !integer_zerop (arg1)))
    cond = fold_build1_loc (gimple_location (stmt),
                            TRUTH_NOT_EXPR, TREE_TYPE (cond), cond);

  if (neg)
    {
      cond = fold_convert_loc (gimple_location (stmt),
                               TREE_TYPE (result), cond);
      cond = fold_build1_loc (gimple_location (stmt),
                              NEGATE_EXPR, TREE_TYPE (cond), cond);
    }

  /* Insert our new statements at the end of conditional block before the
     COND_STMT.  */
  gsi = gsi_for_stmt (stmt);
  new_var = force_gimple_operand_gsi (&gsi, cond, true, NULL, true,
                                      GSI_SAME_STMT);

  if (!useless_type_conversion_p (TREE_TYPE (result), TREE_TYPE (new_var)))
    {
      source_location locus_0, locus_1;

      new_var2 = make_ssa_name (TREE_TYPE (result));
      new_stmt = gimple_build_assign (new_var2, CONVERT_EXPR, new_var);
      gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
      new_var = new_var2;

      /* Set the locus to the first argument, unless it doesn't have one.  */
      locus_0 = gimple_phi_arg_location (phi, 0);
      locus_1 = gimple_phi_arg_location (phi, 1);
      if (locus_0 == UNKNOWN_LOCATION)
        locus_0 = locus_1;
      gimple_set_location (new_stmt, locus_0);
    }

  replace_phi_edge_with_variable (cond_bb, e1, phi, new_var);

  /* Note that we optimized this PHI.  */
  return true;
}
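/* Hedged source-level sketches of what conditional_replacement achieves
   (illustrations only, not taken from the sources):

     x = cond ? 1 : 0;    becomes    x = cond;
     x = cond ? 0 : 1;    becomes    x = !cond;
     x = cond ? -1 : 0;   becomes    x = -(type) cond;  */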
/* Update *ARG which is defined in STMT so that it contains the
   computed value if that seems profitable.  Return true if the
   statement is made dead by that rewriting.  */

static bool
jump_function_from_stmt (tree *arg, gimple stmt)
{
  enum tree_code code = gimple_assign_rhs_code (stmt);
  if (code == ADDR_EXPR)
    {
      /* For arg = &p->i transform it to p, if possible.  */
      tree rhs1 = gimple_assign_rhs1 (stmt);
      HOST_WIDE_INT offset;
      tree tem = get_addr_base_and_unit_offset (TREE_OPERAND (rhs1, 0),
                                                &offset);
      if (tem
          && TREE_CODE (tem) == MEM_REF
          && (mem_ref_offset (tem) + offset) == 0)
        {
          *arg = TREE_OPERAND (tem, 0);
          return true;
        }
    }
  /* TODO: Much like IPA-CP jump-functions we want to handle constant
     additions symbolically here, and we'd need to update the comparison
     code that compares the arg + cst tuples in our caller.  For now the
     code above exactly handles the VEC_BASE pattern from vec.h.  */
  return false;
}
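/* For illustration (a hypothetical example, not from the sources): given
   the middle-block statement

     arg_1 = &p_2->i;

   where field i lies at offset 0, the routine rewrites *ARG from arg_1 to
   p_2, so the caller can compare the PHI argument directly against the
   pointer tested in the condition.  This matches the VEC_BASE pattern
   mentioned above.  */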
/* RHS is a source argument in a BIT_AND_EXPR which feeds a conditional
   of the form SSA_NAME NE 0.

   If RHS is fed by a simple EQ_EXPR comparison of two values, see if
   the two input values of the EQ_EXPR match arg0 and arg1.

   If so update *code and return TRUE.  Otherwise return FALSE.  */

static bool
rhs_is_fed_for_value_replacement (const_tree arg0, const_tree arg1,
                                  enum tree_code *code, const_tree rhs)
{
  /* Obviously if RHS is not an SSA_NAME, we can't look at the defining
     statement.  */
  if (TREE_CODE (rhs) == SSA_NAME)
    {
      gimple def1 = SSA_NAME_DEF_STMT (rhs);

      /* Verify the defining statement has an EQ_EXPR on the RHS.  */
      if (is_gimple_assign (def1) && gimple_assign_rhs_code (def1) == EQ_EXPR)
        {
          /* Finally verify the source operands of the EQ_EXPR are equal
             to arg0 and arg1.  */
          tree op0 = gimple_assign_rhs1 (def1);
          tree op1 = gimple_assign_rhs2 (def1);
          if ((operand_equal_for_phi_arg_p (arg0, op0)
               && operand_equal_for_phi_arg_p (arg1, op1))
              || (operand_equal_for_phi_arg_p (arg0, op1)
                  && operand_equal_for_phi_arg_p (arg1, op0)))
            {
              /* We will perform the optimization.  */
              *code = gimple_assign_rhs_code (def1);
              return true;
            }
        }
    }
  return false;
}
/* Return TRUE if arg0/arg1 are equal to the rhs/lhs or lhs/rhs of COND.

   Also return TRUE if arg0/arg1 are equal to the source arguments of
   an EQ comparison feeding a BIT_AND_EXPR which feeds COND.

   Return FALSE otherwise.  */

static bool
operand_equal_for_value_replacement (const_tree arg0, const_tree arg1,
                                     enum tree_code *code, gimple cond)
{
  gimple def;
  tree lhs = gimple_cond_lhs (cond);
  tree rhs = gimple_cond_rhs (cond);

  if ((operand_equal_for_phi_arg_p (arg0, lhs)
       && operand_equal_for_phi_arg_p (arg1, rhs))
      || (operand_equal_for_phi_arg_p (arg1, lhs)
          && operand_equal_for_phi_arg_p (arg0, rhs)))
    return true;

  /* Now handle more complex case where we have an EQ comparison
     which feeds a BIT_AND_EXPR which feeds COND.

     First verify that COND is of the form SSA_NAME NE 0.  */
  if (*code != NE_EXPR || !integer_zerop (rhs)
      || TREE_CODE (lhs) != SSA_NAME)
    return false;

  /* Now ensure that SSA_NAME is set by a BIT_AND_EXPR.  */
  def = SSA_NAME_DEF_STMT (lhs);
  if (!is_gimple_assign (def) || gimple_assign_rhs_code (def) != BIT_AND_EXPR)
    return false;

  /* Now verify arg0/arg1 correspond to the source arguments of an
     EQ comparison feeding the BIT_AND_EXPR.  */

  tree tmp = gimple_assign_rhs1 (def);
  if (rhs_is_fed_for_value_replacement (arg0, arg1, code, tmp))
    return true;

  tmp = gimple_assign_rhs2 (def);
  if (rhs_is_fed_for_value_replacement (arg0, arg1, code, tmp))
    return true;

  return false;
}
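/* A sketch of the BIT_AND_EXPR pattern this matches (hypothetical GIMPLE,
   for illustration only):

     t_1 = a_2 == b_3;
     t_4 = t_1 & c_5;
     if (t_4 != 0) goto ...;

   Here arg0/arg1 matching a_2/b_3 lets value replacement treat the guard
   as an equality test on the PHI arguments, updating *code to EQ_EXPR.  */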
/* Returns true if ARG is a neutral element for operation CODE
   on the RIGHT side.  */

static bool
neutral_element_p (tree_code code, tree arg, bool right)
{
  switch (code)
    {
    case PLUS_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
      return integer_zerop (arg);

    case LROTATE_EXPR:
    case RROTATE_EXPR:
    case LSHIFT_EXPR:
    case RSHIFT_EXPR:
    case MINUS_EXPR:
    case POINTER_PLUS_EXPR:
      return right && integer_zerop (arg);

    case MULT_EXPR:
      return integer_onep (arg);

    case TRUNC_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case EXACT_DIV_EXPR:
      return right && integer_onep (arg);

    case BIT_AND_EXPR:
      return integer_all_onesp (arg);

    default:
      return false;
    }
}
/* Returns true if ARG is an absorbing element for operation CODE.  */

static bool
absorbing_element_p (tree_code code, tree arg)
{
  switch (code)
    {
    case BIT_IOR_EXPR:
      return integer_all_onesp (arg);

    case MULT_EXPR:
    case BIT_AND_EXPR:
      return integer_zerop (arg);

    default:
      return false;
    }
}
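/* For illustration: neutral elements satisfy x OP e == x (x + 0, x * 1,
   x & -1, and, on the right only, x - 0 and x >> 0), while absorbing
   elements satisfy x OP a == a (x * 0, x & 0, x | -1).  These identities
   are what lets value_replacement below fold PHIs such as
   (x != 0) ? x + y : y down to the unconditional x + y.  */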
/* The function value_replacement does the main work of doing the value
   replacement.  Return non-zero if the replacement is done.  Otherwise return
   0.  If we remove the middle basic block, return 2.
   BB is the basic block where the replacement is going to be done.
   ARG0 is argument 0 from the PHI.  Likewise for ARG1.  */

static int
value_replacement (basic_block cond_bb, basic_block middle_bb,
                   edge e0, edge e1, gimple phi,
                   tree arg0, tree arg1)
{
  gimple_stmt_iterator gsi;
  gimple cond;
  edge true_edge, false_edge;
  enum tree_code code;
  bool empty_or_with_defined_p = true;

  /* If the type says honor signed zeros we cannot do this
     optimization.  */
  if (HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (arg1))))
    return 0;

  /* If there is a statement in MIDDLE_BB that defines one of the PHI
     arguments, then adjust arg0 or arg1.  */
  gsi = gsi_start_nondebug_after_labels_bb (middle_bb);
  while (!gsi_end_p (gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      tree lhs;
      gsi_next_nondebug (&gsi);
      if (!is_gimple_assign (stmt))
        {
          empty_or_with_defined_p = false;
          continue;
        }
      /* Now try to adjust arg0 or arg1 according to the computation
         in the statement.  */
      lhs = gimple_assign_lhs (stmt);
      if (!(lhs == arg0
            && jump_function_from_stmt (&arg0, stmt))
          || (lhs == arg1
              && jump_function_from_stmt (&arg1, stmt)))
        empty_or_with_defined_p = false;
    }

  cond = last_stmt (cond_bb);
  code = gimple_cond_code (cond);

  /* This transformation is only valid for equality comparisons.  */
  if (code != NE_EXPR && code != EQ_EXPR)
    return 0;

  /* We need to know which is the true edge and which is the false
     edge so that we know which PHI argument goes with which edge.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);

  /* At this point we know we have a COND_EXPR with two successors.
     One successor is BB, the other successor is an empty block which
     falls through into BB.

     The condition for the COND_EXPR is known to be NE_EXPR or EQ_EXPR.

     There is a single PHI node at the join point (BB) with two arguments.

     We now need to verify that the two arguments in the PHI node match
     the two arguments to the equality comparison.  */

  if (operand_equal_for_value_replacement (arg0, arg1, &code, cond))
    {
      edge e;
      tree arg;

      /* For NE_EXPR, we want to build an assignment result = arg where
         arg is the PHI argument associated with the true edge.  For
         EQ_EXPR we want the PHI argument associated with the false edge.  */
      e = (code == NE_EXPR ? true_edge : false_edge);

      /* Unfortunately, E may not reach BB (it may instead have gone to
         OTHER_BLOCK).  If that is the case, then we want the single outgoing
         edge from OTHER_BLOCK which reaches BB and represents the desired
         path from COND_BLOCK.  */
      if (e->dest == middle_bb)
        e = single_succ_edge (e->dest);

      /* Now we know the incoming edge to BB that has the argument for the
         RHS of our new assignment statement.  */
      if (e0 == e)
        arg = arg0;
      else
        arg = arg1;

      /* If the middle basic block was empty or is defining the
         PHI arguments and this is a single phi where the args are different
         for the edges e0 and e1 then we can remove the middle basic block.  */
      if (empty_or_with_defined_p
          && single_non_singleton_phi_for_edges (phi_nodes (gimple_bb (phi)),
                                                 e0, e1) == phi)
        {
          replace_phi_edge_with_variable (cond_bb, e1, phi, arg);
          /* Note that we optimized this PHI.  */
          return 2;
        }
      else
        {
          /* Replace the PHI arguments with arg.  */
          SET_PHI_ARG_DEF (phi, e0->dest_idx, arg);
          SET_PHI_ARG_DEF (phi, e1->dest_idx, arg);
          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "PHI ");
              print_generic_expr (dump_file, gimple_phi_result (phi), 0);
              fprintf (dump_file, " reduced for COND_EXPR in block %d to ",
                       cond_bb->index);
              print_generic_expr (dump_file, arg, 0);
              fprintf (dump_file, ".\n");
            }
          return 1;
        }
    }

  /* Now optimize (x != 0) ? x + y : y to just x + y.
     The following condition is too restrictive, there can easily be another
     stmt in middle_bb, for instance a CONVERT_EXPR for the second argument.  */
  gimple assign = last_and_only_stmt (middle_bb);
  if (!assign || gimple_code (assign) != GIMPLE_ASSIGN
      || gimple_assign_rhs_class (assign) != GIMPLE_BINARY_RHS
      || (!INTEGRAL_TYPE_P (TREE_TYPE (arg0))
          && !POINTER_TYPE_P (TREE_TYPE (arg0))))
    return 0;

  /* Punt if there are (degenerate) PHIs in middle_bb, there should not be.  */
  if (!gimple_seq_empty_p (phi_nodes (middle_bb)))
    return 0;

  /* Only transform if it removes the condition.  */
  if (!single_non_singleton_phi_for_edges (phi_nodes (gimple_bb (phi)), e0, e1))
    return 0;

  /* Size-wise, this is always profitable.  */
  if (optimize_bb_for_speed_p (cond_bb)
      /* The special case is useless if it has a low probability.  */
      && profile_status_for_fn (cfun) != PROFILE_ABSENT
      && EDGE_PRED (middle_bb, 0)->probability < PROB_EVEN
      /* If assign is cheap, there is no point avoiding it.  */
      && estimate_num_insns (assign, &eni_time_weights)
         >= 3 * estimate_num_insns (cond, &eni_time_weights))
    return 0;

  tree lhs = gimple_assign_lhs (assign);
  tree rhs1 = gimple_assign_rhs1 (assign);
  tree rhs2 = gimple_assign_rhs2 (assign);
  enum tree_code code_def = gimple_assign_rhs_code (assign);
  tree cond_lhs = gimple_cond_lhs (cond);
  tree cond_rhs = gimple_cond_rhs (cond);

  if (((code == NE_EXPR && e1 == false_edge)
       || (code == EQ_EXPR && e1 == true_edge))
      && arg0 == lhs
      && ((arg1 == rhs1
           && operand_equal_for_phi_arg_p (rhs2, cond_lhs)
           && neutral_element_p (code_def, cond_rhs, true))
          || (arg1 == rhs2
              && operand_equal_for_phi_arg_p (rhs1, cond_lhs)
              && neutral_element_p (code_def, cond_rhs, false))
          || (operand_equal_for_phi_arg_p (arg1, cond_rhs)
              && (operand_equal_for_phi_arg_p (rhs2, cond_lhs)
                  || operand_equal_for_phi_arg_p (rhs1, cond_lhs))
              && absorbing_element_p (code_def, cond_rhs))))
    {
      gsi = gsi_for_stmt (cond);
      gimple_stmt_iterator gsi_from = gsi_for_stmt (assign);
      gsi_move_before (&gsi_from, &gsi);
      replace_phi_edge_with_variable (cond_bb, e1, phi, lhs);
      return 2;
    }

  return 0;
}
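/* Hedged source-level sketches of the two value-replacement shapes
   (illustrations only):

     r = (x == y) ? x : y;        becomes    r = y;
     r = (x != 0) ? x + y : y;    becomes    r = x + y;

   The second form moves the addition before the branch and is valid
   because 0 is a neutral element for PLUS_EXPR.  */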
/* The function minmax_replacement does the main work of doing the minmax
   replacement.  Return true if the replacement is done.  Otherwise return
   false.
   BB is the basic block where the replacement is going to be done.
   ARG0 is argument 0 from the PHI.  Likewise for ARG1.  */

static bool
minmax_replacement (basic_block cond_bb, basic_block middle_bb,
                    edge e0, edge e1, gimple phi,
                    tree arg0, tree arg1)
{
  tree result, type;
  gcond *cond;
  gassign *new_stmt;
  edge true_edge, false_edge;
  enum tree_code cmp, minmax, ass_code;
  tree smaller, larger, arg_true, arg_false;
  gimple_stmt_iterator gsi, gsi_from;

  type = TREE_TYPE (PHI_RESULT (phi));

  /* The optimization may be unsafe due to NaNs.  */
  if (HONOR_NANS (TYPE_MODE (type)))
    return false;

  cond = as_a <gcond *> (last_stmt (cond_bb));
  cmp = gimple_cond_code (cond);

  /* This transformation is only valid for order comparisons.  Record which
     operand is smaller/larger if the result of the comparison is true.  */
  if (cmp == LT_EXPR || cmp == LE_EXPR)
    {
      smaller = gimple_cond_lhs (cond);
      larger = gimple_cond_rhs (cond);
    }
  else if (cmp == GT_EXPR || cmp == GE_EXPR)
    {
      smaller = gimple_cond_rhs (cond);
      larger = gimple_cond_lhs (cond);
    }
  else
    return false;

  /* We need to know which is the true edge and which is the false
     edge so that we know which PHI argument corresponds to the
     smaller and which to the larger value.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);

  /* Forward the edges over the middle basic block.  */
  if (true_edge->dest == middle_bb)
    true_edge = EDGE_SUCC (true_edge->dest, 0);
  if (false_edge->dest == middle_bb)
    false_edge = EDGE_SUCC (false_edge->dest, 0);

  if (true_edge == e0)
    {
      gcc_assert (false_edge == e1);
      arg_true = arg0;
      arg_false = arg1;
    }
  else
    {
      gcc_assert (false_edge == e0);
      gcc_assert (true_edge == e1);
      arg_true = arg1;
      arg_false = arg0;
    }

  if (empty_block_p (middle_bb))
    {
      if (operand_equal_for_phi_arg_p (arg_true, smaller)
          && operand_equal_for_phi_arg_p (arg_false, larger))
        {
          /* Case

             if (smaller < larger)
               rslt = smaller;
             else
               rslt = larger;  */
          minmax = MIN_EXPR;
        }
      else if (operand_equal_for_phi_arg_p (arg_false, smaller)
               && operand_equal_for_phi_arg_p (arg_true, larger))
        minmax = MAX_EXPR;
      else
        return false;
    }
  else
    {
      /* Recognize the following case, assuming d <= u:

         if (a <= u)
           b = MAX (a, d);
         x = PHI <b, u>

         This is equivalent to

         b = MAX (a, d);
         x = MIN (b, u);  */

      gimple assign = last_and_only_stmt (middle_bb);
      tree lhs, op0, op1, bound;

      if (!assign
          || gimple_code (assign) != GIMPLE_ASSIGN)
        return false;

      lhs = gimple_assign_lhs (assign);
      ass_code = gimple_assign_rhs_code (assign);
      if (ass_code != MAX_EXPR && ass_code != MIN_EXPR)
        return false;
      op0 = gimple_assign_rhs1 (assign);
      op1 = gimple_assign_rhs2 (assign);

      if (true_edge->src == middle_bb)
        {
          /* We got here if the condition is true, i.e., SMALLER < LARGER.  */
          if (!operand_equal_for_phi_arg_p (lhs, arg_true))
            return false;

          if (operand_equal_for_phi_arg_p (arg_false, larger))
            {
              /* Case

                 if (smaller < larger)
                   {
                     r' = MAX_EXPR (smaller, bound)
                   }
                 r = PHI <r', larger>  --> to be turned to MIN_EXPR.  */
              if (ass_code != MAX_EXPR)
                return false;

              minmax = MIN_EXPR;
              if (operand_equal_for_phi_arg_p (op0, smaller))
                bound = op1;
              else if (operand_equal_for_phi_arg_p (op1, smaller))
                bound = op0;
              else
                return false;

              /* We need BOUND <= LARGER.  */
              if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
                                                  bound, larger)))
                return false;
            }
          else if (operand_equal_for_phi_arg_p (arg_false, smaller))
            {
              /* Case

                 if (smaller < larger)
                   {
                     r' = MIN_EXPR (larger, bound)
                   }
                 r = PHI <r', smaller>  --> to be turned to MAX_EXPR.  */
              if (ass_code != MIN_EXPR)
                return false;

              minmax = MAX_EXPR;
              if (operand_equal_for_phi_arg_p (op0, larger))
                bound = op1;
              else if (operand_equal_for_phi_arg_p (op1, larger))
                bound = op0;
              else
                return false;

              /* We need BOUND >= SMALLER.  */
              if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
                                                  bound, smaller)))
                return false;
            }
          else
            return false;
        }
      else
        {
          /* We got here if the condition is false, i.e., SMALLER > LARGER.  */
          if (!operand_equal_for_phi_arg_p (lhs, arg_false))
            return false;

          if (operand_equal_for_phi_arg_p (arg_true, larger))
            {
              /* Case

                 if (smaller > larger)
                   {
                     r' = MIN_EXPR (smaller, bound)
                   }
                 r = PHI <r', larger>  --> to be turned to MAX_EXPR.  */
              if (ass_code != MIN_EXPR)
                return false;

              minmax = MAX_EXPR;
              if (operand_equal_for_phi_arg_p (op0, smaller))
                bound = op1;
              else if (operand_equal_for_phi_arg_p (op1, smaller))
                bound = op0;
              else
                return false;

              /* We need BOUND >= LARGER.  */
              if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
                                                  bound, larger)))
                return false;
            }
          else if (operand_equal_for_phi_arg_p (arg_true, smaller))
            {
              /* Case

                 if (smaller > larger)
                   {
                     r' = MAX_EXPR (larger, bound)
                   }
                 r = PHI <r', smaller>  --> to be turned to MIN_EXPR.  */
              if (ass_code != MAX_EXPR)
                return false;

              minmax = MIN_EXPR;
              if (operand_equal_for_phi_arg_p (op0, larger))
                bound = op1;
              else if (operand_equal_for_phi_arg_p (op1, larger))
                bound = op0;
              else
                return false;

              /* We need BOUND <= SMALLER.  */
              if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
                                                  bound, smaller)))
                return false;
            }
          else
            return false;
        }

      /* Move the statement from the middle block.  */
      gsi = gsi_last_bb (cond_bb);
      gsi_from = gsi_last_nondebug_bb (middle_bb);
      gsi_move_before (&gsi_from, &gsi);
    }

  /* Emit the statement to compute min/max.  */
  result = duplicate_ssa_name (PHI_RESULT (phi), NULL);
  new_stmt = gimple_build_assign (result, minmax, arg0, arg1);
  gsi = gsi_last_bb (cond_bb);
  gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);

  replace_phi_edge_with_variable (cond_bb, e1, phi, result);
  return true;
}
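/* A hedged source-level sketch (illustration only):

     r = (a < b) ? a : b;    becomes    r = MIN (a, b);

   The non-empty-middle-block case above recognizes a clamp: with d <= u,
   MAX (a, d) guarded by a <= u is the same as MIN (MAX (a, d), u).  */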
/* The function absolute_replacement does the main work of doing the absolute
   replacement.  Return true if the replacement is done.  Otherwise return
   false.
   bb is the basic block where the replacement is going to be done.
   arg0 is argument 0 from the phi.  Likewise for arg1.  */

static bool
abs_replacement (basic_block cond_bb, basic_block middle_bb,
                 edge e0 ATTRIBUTE_UNUSED, edge e1,
                 gimple phi, tree arg0, tree arg1)
{
  tree result;
  gassign *new_stmt;
  gimple cond;
  gimple_stmt_iterator gsi;
  edge true_edge, false_edge;
  gimple assign;
  edge e;
  tree rhs, lhs;
  bool negate;
  enum tree_code cond_code;

  /* If the type says honor signed zeros we cannot do this
     optimization.  */
  if (HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (arg1))))
    return false;

  /* OTHER_BLOCK must have only one executable statement which must have the
     form arg0 = -arg1 or arg1 = -arg0.  */

  assign = last_and_only_stmt (middle_bb);
  /* If we did not find the proper negation assignment, then we cannot
     optimize.  */
  if (assign == NULL)
    return false;

  /* If we got here, then we have found the only executable statement
     in OTHER_BLOCK.  If it is anything other than arg0 = -arg1 or
     arg1 = -arg0, then we cannot optimize.  */
  if (gimple_code (assign) != GIMPLE_ASSIGN)
    return false;

  lhs = gimple_assign_lhs (assign);

  if (gimple_assign_rhs_code (assign) != NEGATE_EXPR)
    return false;

  rhs = gimple_assign_rhs1 (assign);

  /* The assignment has to be arg0 = -arg1 or arg1 = -arg0.  */
  if (!(lhs == arg0 && rhs == arg1)
      && !(lhs == arg1 && rhs == arg0))
    return false;

  cond = last_stmt (cond_bb);
  result = PHI_RESULT (phi);

  /* Only relationals comparing arg[01] against zero are interesting.  */
  cond_code = gimple_cond_code (cond);
  if (cond_code != GT_EXPR && cond_code != GE_EXPR
      && cond_code != LT_EXPR && cond_code != LE_EXPR)
    return false;

  /* Make sure the conditional is arg[01] OP y.  */
  if (gimple_cond_lhs (cond) != rhs)
    return false;

  if (FLOAT_TYPE_P (TREE_TYPE (gimple_cond_rhs (cond)))
      ? real_zerop (gimple_cond_rhs (cond))
      : integer_zerop (gimple_cond_rhs (cond)))
    ;
  else
    return false;

  /* We need to know which is the true edge and which is the false
     edge so that we know if we have abs or negative abs.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);

  /* For GT_EXPR/GE_EXPR, if the true edge goes to OTHER_BLOCK, then we
     will need to negate the result.  Similarly for LT_EXPR/LE_EXPR if
     the false edge goes to OTHER_BLOCK.  */
  if (cond_code == GT_EXPR || cond_code == GE_EXPR)
    e = true_edge;
  else
    e = false_edge;

  if (e->dest == middle_bb)
    negate = true;
  else
    negate = false;

  result = duplicate_ssa_name (result, NULL);

  if (negate)
    lhs = make_ssa_name (TREE_TYPE (result));
  else
    lhs = result;

  /* Build the modify expression with abs expression.  */
  new_stmt = gimple_build_assign (lhs, ABS_EXPR, rhs);

  gsi = gsi_last_bb (cond_bb);
  gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);

  if (negate)
    {
      /* Get the right GSI.  We want to insert after the recently
         added ABS_EXPR statement (which we know is the first statement
         in the block).  */
      new_stmt = gimple_build_assign (result, NEGATE_EXPR, lhs);

      gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
    }

  replace_phi_edge_with_variable (cond_bb, e1, phi, result);

  /* Note that we optimized this PHI.  */
  return true;
}
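/* Hedged source-level sketches (illustrations only):

     x = (a >= 0) ? a : -a;    becomes    x = ABS (a);
     x = (a >= 0) ? -a : a;    becomes    x = -ABS (a);

   The second form is the "negative abs" case, where the extra NEGATE_EXPR
   is emitted after the ABS_EXPR.  */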
/* The function neg_replacement replaces conditional negation with
   equivalent straight line code.  Returns TRUE if replacement is done,
   otherwise returns FALSE.

   COND_BB branches around negation occurring in MIDDLE_BB.

   E0 and E1 are edges out of COND_BB.  E0 reaches MIDDLE_BB and
   E1 reaches the other successor which should contain PHI with
   arguments ARG0 and ARG1.

   Assuming negation is to occur when the condition is true,
   then the non-branching sequence is:

     result = (rhs ^ -cond) + cond

   Inverting the condition or its result gives us negation
   when the original condition is false.  */

static bool
neg_replacement (basic_block cond_bb, basic_block middle_bb,
                 edge e0 ATTRIBUTE_UNUSED, edge e1,
                 gimple phi, tree arg0, tree arg1)
{
  gimple new_stmt, cond;
  gimple_stmt_iterator gsi;
  gimple assign;
  edge true_edge, false_edge;
  tree rhs, lhs;
  enum tree_code cond_code;
  bool invert = false;

  /* This transformation performs logical operations on the
     incoming arguments.  So force them to be integral types.  */
  if (!INTEGRAL_TYPE_P (TREE_TYPE (arg0)))
    return false;

  /* OTHER_BLOCK must have only one executable statement which must have the
     form arg0 = -arg1 or arg1 = -arg0.  */

  assign = last_and_only_stmt (middle_bb);
  /* If we did not find the proper negation assignment, then we cannot
     optimize.  */
  if (assign == NULL)
    return false;

  /* If we got here, then we have found the only executable statement
     in OTHER_BLOCK.  If it is anything other than arg0 = -arg1 or
     arg1 = -arg0, then we cannot optimize.  */
  if (gimple_code (assign) != GIMPLE_ASSIGN)
    return false;

  lhs = gimple_assign_lhs (assign);

  if (gimple_assign_rhs_code (assign) != NEGATE_EXPR)
    return false;

  rhs = gimple_assign_rhs1 (assign);

  /* The assignment has to be arg0 = -arg1 or arg1 = -arg0.  */
  if (!(lhs == arg0 && rhs == arg1)
      && !(lhs == arg1 && rhs == arg0))
    return false;

  /* The basic sequence assumes we negate when the condition is true.
     If we need the opposite, then we will either need to invert the
     condition or its result.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
  invert = false_edge->dest == middle_bb;

  /* Unlike abs_replacement, we can handle arbitrary conditionals here.  */
  cond = last_stmt (cond_bb);
  cond_code = gimple_cond_code (cond);

  /* If inversion is needed, first try to invert the test since
     that's cheapest.  */
  if (invert)
    {
      bool honor_nans
        = HONOR_NANS (TYPE_MODE (TREE_TYPE (gimple_cond_lhs (cond))));
      enum tree_code new_code = invert_tree_comparison (cond_code, honor_nans);

      /* If invert_tree_comparison was successful, then use its return
         value as the new code and note that inversion is no longer
         needed.  */
      if (new_code != ERROR_MARK)
        {
          cond_code = new_code;
          invert = false;
        }
    }

  tree cond_val = make_ssa_name (boolean_type_node);
  new_stmt = gimple_build_assign (cond_val, cond_code,
                                  gimple_cond_lhs (cond),
                                  gimple_cond_rhs (cond));
  gsi = gsi_last_bb (cond_bb);
  gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);

  /* If we still need inversion, then invert the result of the
     condition.  */
  if (invert)
    {
      tree tmp = make_ssa_name (boolean_type_node);
      new_stmt = gimple_build_assign (tmp, BIT_XOR_EXPR, cond_val,
                                      boolean_true_node);
      gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
      cond_val = tmp;
    }

  /* Get the condition in the right type so that we can perform
     logical and arithmetic operations on it.  */
  tree cond_val_converted = make_ssa_name (TREE_TYPE (rhs));
  new_stmt = gimple_build_assign (cond_val_converted, NOP_EXPR, cond_val);
  gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);

  tree neg_cond_val_converted = make_ssa_name (TREE_TYPE (rhs));
  new_stmt = gimple_build_assign (neg_cond_val_converted, NEGATE_EXPR,
                                  cond_val_converted);
  gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);

  tree tmp = make_ssa_name (TREE_TYPE (rhs));
  new_stmt = gimple_build_assign (tmp, BIT_XOR_EXPR, rhs,
                                  neg_cond_val_converted);
  gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);

  tree new_lhs = make_ssa_name (TREE_TYPE (rhs));
  new_stmt = gimple_build_assign (new_lhs, PLUS_EXPR, tmp, cond_val_converted);
  gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);

  replace_phi_edge_with_variable (cond_bb, e1, phi, new_lhs);

  /* Note that we optimized this PHI.  */
  return true;
}
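/* Why (rhs ^ -cond) + cond negates conditionally (illustration only):
   for cond in {0, 1}, -cond is either all-zeros or all-ones, so

     cond == 0:  (rhs ^ 0) + 0   == rhs
     cond == 1:  (rhs ^ -1) + 1  == ~rhs + 1 == -rhs   (two's complement)

   which is exactly cond ? -rhs : rhs without a branch.  */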
/* Auxiliary functions to determine the set of memory accesses which
   can't trap because they are preceded by accesses to the same memory
   portion.  We do that for MEM_REFs, so we only need to track
   the SSA_NAME of the pointer indirectly referenced.  The algorithm
   simply is a walk over all instructions in dominator order.  When
   we see a MEM_REF we determine if we've already seen a same
   ref anywhere up to the root of the dominator tree.  If we do, the
   current access can't trap.  If we don't see any dominating access
   the current access might trap, but might also make later accesses
   non-trapping, so we remember it.  We need to be careful with loads
   or stores, for instance a load might not trap, while a store would,
   so if we see a dominating read access this doesn't mean that a later
   write access would not trap.  Hence we also need to differentiate the
   type of access(es) seen.

   ??? We currently are very conservative and assume that a load might
   trap even if a store doesn't (write-only memory).  This probably is
   overly conservative.  */
/* A hash-table of SSA_NAMEs, and in which basic block a MEM_REF
   through it was seen, which would constitute a no-trap region for
   same accesses.  */
struct name_to_bb
{
  unsigned int ssa_name_ver;
  unsigned int phase;
  bool store;
  HOST_WIDE_INT offset, size;
  basic_block bb;
};
/* Hashtable helpers.  */

struct ssa_names_hasher : typed_free_remove <name_to_bb>
{
  typedef name_to_bb value_type;
  typedef name_to_bb compare_type;
  static inline hashval_t hash (const value_type *);
  static inline bool equal (const value_type *, const compare_type *);
};
/* Used for quick clearing of the hash-table when we see calls.
   Hash entries with phase < nt_call_phase are invalid.  */
static unsigned int nt_call_phase;

/* The hash function.  */

inline hashval_t
ssa_names_hasher::hash (const value_type *n)
{
  return n->ssa_name_ver ^ (((hashval_t) n->store) << 31)
         ^ (n->offset << 6) ^ (n->size << 3);
}
/* The equality function of *P1 and *P2.  */

inline bool
ssa_names_hasher::equal (const value_type *n1, const compare_type *n2)
{
  return n1->ssa_name_ver == n2->ssa_name_ver
         && n1->store == n2->store
         && n1->offset == n2->offset
         && n1->size == n2->size;
}
class nontrapping_dom_walker : public dom_walker
{
public:
  nontrapping_dom_walker (cdi_direction direction, hash_set<tree> *ps)
    : dom_walker (direction), m_nontrapping (ps), m_seen_ssa_names (128) {}

  virtual void before_dom_children (basic_block);
  virtual void after_dom_children (basic_block);

private:

  /* We see the expression EXP in basic block BB.  If it's an interesting
     expression (a MEM_REF through an SSA_NAME) possibly insert the
     expression into the set NONTRAP or the hash table of seen expressions.
     STORE is true if this expression is on the LHS, otherwise it's on
     the RHS.  */
  void add_or_mark_expr (basic_block, tree, bool);

  hash_set<tree> *m_nontrapping;

  /* The hash table for remembering what we've seen.  */
  hash_table<ssa_names_hasher> m_seen_ssa_names;
};
/* Called by walk_dominator_tree, when entering the block BB.  */
void
nontrapping_dom_walker::before_dom_children (basic_block bb)
{
  edge e;
  edge_iterator ei;
  gimple_stmt_iterator gsi;

  /* If we haven't seen all our predecessors, clear the hash-table.  */
  FOR_EACH_EDGE (e, ei, bb->preds)
    if ((((size_t)e->src->aux) & 2) == 0)
      {
        nt_call_phase++;
        break;
      }

  /* Mark this BB as being on the path to dominator root and as visited.  */
  bb->aux = (void*)(1 | 2);

  /* And walk the statements in order.  */
  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);

      if (is_gimple_call (stmt) && !nonfreeing_call_p (stmt))
        nt_call_phase++;
      else if (gimple_assign_single_p (stmt) && !gimple_has_volatile_ops (stmt))
        {
          add_or_mark_expr (bb, gimple_assign_lhs (stmt), true);
          add_or_mark_expr (bb, gimple_assign_rhs1 (stmt), false);
        }
    }
}
/* Called by walk_dominator_tree, when basic block BB is exited.  */
void
nontrapping_dom_walker::after_dom_children (basic_block bb)
{
  /* This BB isn't on the path to dominator root anymore.  */
  bb->aux = (void*)2;
}
/* We see the expression EXP in basic block BB.  If it's an interesting
   expression (a MEM_REF through an SSA_NAME) possibly insert the
   expression into the set NONTRAP or the hash table of seen expressions.
   STORE is true if this expression is on the LHS, otherwise it's on
   the RHS.  */
void
nontrapping_dom_walker::add_or_mark_expr (basic_block bb, tree exp, bool store)
{
  HOST_WIDE_INT size;

  if (TREE_CODE (exp) == MEM_REF
      && TREE_CODE (TREE_OPERAND (exp, 0)) == SSA_NAME
      && tree_fits_shwi_p (TREE_OPERAND (exp, 1))
      && (size = int_size_in_bytes (TREE_TYPE (exp))) > 0)
    {
      tree name = TREE_OPERAND (exp, 0);
      struct name_to_bb map;
      name_to_bb **slot;
      struct name_to_bb *n2bb;
      basic_block found_bb = 0;

      /* Try to find the last seen MEM_REF through the same
         SSA_NAME, which can trap.  */
      map.ssa_name_ver = SSA_NAME_VERSION (name);
      map.phase = 0;
      map.bb = 0;
      map.store = store;
      map.offset = tree_to_shwi (TREE_OPERAND (exp, 1));
      map.size = size;

      slot = m_seen_ssa_names.find_slot (&map, INSERT);
      n2bb = *slot;
      if (n2bb && n2bb->phase >= nt_call_phase)
        found_bb = n2bb->bb;

      /* If we've found a trapping MEM_REF, _and_ it dominates EXP
         (it's in a basic block on the path from us to the dominator root)
         then we can't trap.  */
      if (found_bb && (((size_t)found_bb->aux) & 1) == 1)
        {
          m_nontrapping->add (exp);
        }
      else
        {
          /* EXP might trap, so insert it into the hash table.  */
          if (n2bb)
            {
              n2bb->phase = nt_call_phase;
              n2bb->bb = bb;
            }
          else
            {
              n2bb = XNEW (struct name_to_bb);
              n2bb->ssa_name_ver = SSA_NAME_VERSION (name);
              n2bb->phase = nt_call_phase;
              n2bb->bb = bb;
              n2bb->store = store;
              n2bb->offset = map.offset;
              n2bb->size = size;
              *slot = n2bb;
            }
        }
    }
}
/* This is the entry point of gathering non-trapping memory accesses.
   It will do a dominator walk over the whole function, and it will
   make use of the bb->aux pointers.  It returns a set of trees
   (the MEM_REFs themselves) which can't trap.  */
static hash_set<tree> *
get_non_trapping (void)
{
  nt_call_phase = 0;
  hash_set<tree> *nontrap = new hash_set<tree>;
  /* We're going to do a dominator walk, so ensure that we have
     dominance information.  */
  calculate_dominance_info (CDI_DOMINATORS);

  nontrapping_dom_walker (CDI_DOMINATORS, nontrap)
    .walk (cfun->cfg->x_entry_block_ptr);

  clear_aux_for_blocks ();
  return nontrap;
}
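/* A hedged sketch of the idea (illustration only): in

     *p = 0;          <-- might trap, but is remembered
     if (cond)
       *p = x;        <-- same MEM_REF dominated by the first store,
                          so it is recorded as non-trapping

   the conditional store is provably safe to sink, because any trap
   would already have happened at the dominating access.  */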
/* Do the main work of conditional store replacement.  We already know
   that the recognized pattern looks like so:

   split:
     if (cond) goto MIDDLE_BB; else goto JOIN_BB (edge E1)
   MIDDLE_BB:
     something
     fallthrough (edge E0)
   JOIN_BB:
     some more

   We check that MIDDLE_BB contains only one store, that that store
   doesn't trap (not via NOTRAP, but via checking if an access to the same
   memory location dominates us) and that the store has a "simple" RHS.  */

static bool
cond_store_replacement (basic_block middle_bb, basic_block join_bb,
                        edge e0, edge e1, hash_set<tree> *nontrap)
{
  gimple assign = last_and_only_stmt (middle_bb);
  tree lhs, rhs, name, name2;
  gphi *newphi;
  gassign *new_stmt;
  gimple_stmt_iterator gsi;
  source_location locus;

  /* Check if middle_bb contains only one store.  */
  if (!assign
      || !gimple_assign_single_p (assign)
      || gimple_has_volatile_ops (assign))
    return false;

  locus = gimple_location (assign);
  lhs = gimple_assign_lhs (assign);
  rhs = gimple_assign_rhs1 (assign);
  if (TREE_CODE (lhs) != MEM_REF
      || TREE_CODE (TREE_OPERAND (lhs, 0)) != SSA_NAME
      || !is_gimple_reg_type (TREE_TYPE (lhs)))
    return false;

  /* Prove that we can move the store down.  We could also check
     TREE_THIS_NOTRAP here, but in that case we also could move stores,
     whose value is not available readily, which we want to avoid.  */
  if (!nontrap->contains (lhs))
    return false;

  /* Now we've checked the constraints, so do the transformation:
     1) Remove the single store.  */
  gsi = gsi_for_stmt (assign);
  unlink_stmt_vdef (assign);
  gsi_remove (&gsi, true);
  release_defs (assign);

  /* 2) Insert a load from the memory of the store to the temporary
        on the edge which did not contain the store.  */
  lhs = unshare_expr (lhs);
  name = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
  new_stmt = gimple_build_assign (name, lhs);
  gimple_set_location (new_stmt, locus);
  gsi_insert_on_edge (e1, new_stmt);

  /* 3) Create a PHI node at the join block, with one argument
        holding the old RHS, and the other holding the temporary
        where we stored the old memory contents.  */
  name2 = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
  newphi = create_phi_node (name2, join_bb);
  add_phi_arg (newphi, rhs, e0, locus);
  add_phi_arg (newphi, name, e1, locus);

  lhs = unshare_expr (lhs);
  new_stmt = gimple_build_assign (lhs, PHI_RESULT (newphi));

  /* 4) Insert the store of the PHI result at the join block.  */
  gsi = gsi_after_labels (join_bb);
  if (gsi_end_p (gsi))
    {
      gsi = gsi_last_bb (join_bb);
      gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
    }
  else
    gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);

  return true;
}
/* Do the main work of conditional store replacement.  */

static bool
cond_if_else_store_replacement_1 (basic_block then_bb, basic_block else_bb,
                                  basic_block join_bb, gimple then_assign,
                                  gimple else_assign)
{
  tree lhs_base, lhs, then_rhs, else_rhs, name;
  source_location then_locus, else_locus;
  gimple_stmt_iterator gsi;
  gphi *newphi;
  gassign *new_stmt;

  if (then_assign == NULL
      || !gimple_assign_single_p (then_assign)
      || gimple_clobber_p (then_assign)
      || gimple_has_volatile_ops (then_assign)
      || else_assign == NULL
      || !gimple_assign_single_p (else_assign)
      || gimple_clobber_p (else_assign)
      || gimple_has_volatile_ops (else_assign))
    return false;

  lhs = gimple_assign_lhs (then_assign);
  if (!is_gimple_reg_type (TREE_TYPE (lhs))
      || !operand_equal_p (lhs, gimple_assign_lhs (else_assign), 0))
    return false;

  lhs_base = get_base_address (lhs);
  if (lhs_base == NULL_TREE
      || (!DECL_P (lhs_base) && TREE_CODE (lhs_base) != MEM_REF))
    return false;

  then_rhs = gimple_assign_rhs1 (then_assign);
  else_rhs = gimple_assign_rhs1 (else_assign);
  then_locus = gimple_location (then_assign);
  else_locus = gimple_location (else_assign);

  /* Now we've checked the constraints, so do the transformation:
     1) Remove the stores.  */
  gsi = gsi_for_stmt (then_assign);
  unlink_stmt_vdef (then_assign);
  gsi_remove (&gsi, true);
  release_defs (then_assign);

  gsi = gsi_for_stmt (else_assign);
  unlink_stmt_vdef (else_assign);
  gsi_remove (&gsi, true);
  release_defs (else_assign);

  /* 2) Create a PHI node at the join block, with one argument
        holding the then-branch RHS and the other holding the
        else-branch RHS.  */
  name = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
  newphi = create_phi_node (name, join_bb);
  add_phi_arg (newphi, then_rhs, EDGE_SUCC (then_bb, 0), then_locus);
  add_phi_arg (newphi, else_rhs, EDGE_SUCC (else_bb, 0), else_locus);

  new_stmt = gimple_build_assign (lhs, PHI_RESULT (newphi));

  /* 3) Insert the store of the PHI result at the join block.  */
  gsi = gsi_after_labels (join_bb);
  if (gsi_end_p (gsi))
    {
      gsi = gsi_last_bb (join_bb);
      gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
    }
  else
    gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);

  return true;
}
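/* A hedged source-level sketch of the diamond case (illustration only):

     if (cond)
       *p = a;
     else
       *p = b;

   becomes

     tmp = cond ? a : b;   // the PHI at the join block
     *p = tmp;             // a single unconditional store

   Unlike the half-diamond case, no extra load is needed: a store to *p
   happens on both paths, so sinking it cannot introduce a trap.  */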
/* Conditional store replacement.  We already know
   that the recognized pattern looks like so:

   split:
     if (cond) goto THEN_BB; else goto ELSE_BB (edge E1)
   THEN_BB:
     ...
     X = Y;
     ...
     goto JOIN_BB;
   ELSE_BB:
     ...
     X = Z;
     ...
     fallthrough (edge E0)
   JOIN_BB:
     some more

   We check that it is safe to sink the store to JOIN_BB by verifying that
   there are no read-after-write or write-after-write dependencies in
   THEN_BB and ELSE_BB.  */

static bool
cond_if_else_store_replacement (basic_block then_bb, basic_block else_bb,
                                basic_block join_bb)
{
  gimple then_assign = last_and_only_stmt (then_bb);
  gimple else_assign = last_and_only_stmt (else_bb);
  vec<data_reference_p> then_datarefs, else_datarefs;
  vec<ddr_p> then_ddrs, else_ddrs;
  gimple then_store, else_store;
  bool found, ok = false, res;
  struct data_dependence_relation *ddr;
  data_reference_p then_dr, else_dr;
  int i, j;
  tree then_lhs, else_lhs;
  basic_block blocks[3];

  if (MAX_STORES_TO_SINK == 0)
    return false;

  /* Handle the case with single statement in THEN_BB and ELSE_BB.  */
  if (then_assign && else_assign)
    return cond_if_else_store_replacement_1 (then_bb, else_bb, join_bb,
                                             then_assign, else_assign);

  /* Find data references.  */
  then_datarefs.create (1);
  else_datarefs.create (1);
  if ((find_data_references_in_bb (NULL, then_bb, &then_datarefs)
       == chrec_dont_know)
      || !then_datarefs.length ()
      || (find_data_references_in_bb (NULL, else_bb, &else_datarefs)
          == chrec_dont_know)
      || !else_datarefs.length ())
    {
      free_data_refs (then_datarefs);
      free_data_refs (else_datarefs);
      return false;
    }

  /* Find pairs of stores with equal LHS.  */
  auto_vec<gimple, 1> then_stores, else_stores;
  FOR_EACH_VEC_ELT (then_datarefs, i, then_dr)
    {
      if (DR_IS_READ (then_dr))
        continue;

      then_store = DR_STMT (then_dr);
      then_lhs = gimple_get_lhs (then_store);
      if (then_lhs == NULL_TREE)
        continue;
      found = false;

      FOR_EACH_VEC_ELT (else_datarefs, j, else_dr)
        {
          if (DR_IS_READ (else_dr))
            continue;

          else_store = DR_STMT (else_dr);
          else_lhs = gimple_get_lhs (else_store);
          if (else_lhs == NULL_TREE)
            continue;

          if (operand_equal_p (then_lhs, else_lhs, 0))
            {
              found = true;
              break;
            }
        }

      if (!found)
        continue;

      then_stores.safe_push (then_store);
      else_stores.safe_push (else_store);
    }

  /* No pairs of stores found.  */
  if (!then_stores.length ()
      || then_stores.length () > (unsigned) MAX_STORES_TO_SINK)
    {
      free_data_refs (then_datarefs);
      free_data_refs (else_datarefs);
      return false;
    }

  /* Compute and check data dependencies in both basic blocks.  */
  then_ddrs.create (1);
  else_ddrs.create (1);
  if (!compute_all_dependences (then_datarefs, &then_ddrs,
                                vNULL, false)
      || !compute_all_dependences (else_datarefs, &else_ddrs,
                                   vNULL, false))
    {
      free_dependence_relations (then_ddrs);
      free_dependence_relations (else_ddrs);
      free_data_refs (then_datarefs);
      free_data_refs (else_datarefs);
      return false;
    }
  blocks[0] = then_bb;
  blocks[1] = else_bb;
  blocks[2] = join_bb;
  renumber_gimple_stmt_uids_in_blocks (blocks, 3);

  /* Check that there are no read-after-write or write-after-write
     dependencies in THEN_BB.  */
  FOR_EACH_VEC_ELT (then_ddrs, i, ddr)
    {
      struct data_reference *dra = DDR_A (ddr);
      struct data_reference *drb = DDR_B (ddr);

      if (DDR_ARE_DEPENDENT (ddr) != chrec_known
          && ((DR_IS_READ (dra) && DR_IS_WRITE (drb)
               && gimple_uid (DR_STMT (dra)) > gimple_uid (DR_STMT (drb)))
              || (DR_IS_READ (drb) && DR_IS_WRITE (dra)
                  && gimple_uid (DR_STMT (drb)) > gimple_uid (DR_STMT (dra)))
              || (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))))
        {
          free_dependence_relations (then_ddrs);
          free_dependence_relations (else_ddrs);
          free_data_refs (then_datarefs);
          free_data_refs (else_datarefs);
          return false;
        }
    }

  /* Check that there are no read-after-write or write-after-write
     dependencies in ELSE_BB.  */
  FOR_EACH_VEC_ELT (else_ddrs, i, ddr)
    {
      struct data_reference *dra = DDR_A (ddr);
      struct data_reference *drb = DDR_B (ddr);

      if (DDR_ARE_DEPENDENT (ddr) != chrec_known
          && ((DR_IS_READ (dra) && DR_IS_WRITE (drb)
               && gimple_uid (DR_STMT (dra)) > gimple_uid (DR_STMT (drb)))
              || (DR_IS_READ (drb) && DR_IS_WRITE (dra)
                  && gimple_uid (DR_STMT (drb)) > gimple_uid (DR_STMT (dra)))
              || (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))))
        {
          free_dependence_relations (then_ddrs);
          free_dependence_relations (else_ddrs);
          free_data_refs (then_datarefs);
          free_data_refs (else_datarefs);
          return false;
        }
    }

  /* Sink stores with same LHS.  */
  FOR_EACH_VEC_ELT (then_stores, i, then_store)
    {
      else_store = else_stores[i];
      res = cond_if_else_store_replacement_1 (then_bb, else_bb, join_bb,
                                              then_store, else_store);
      ok = ok || res;
    }

  free_dependence_relations (then_ddrs);
  free_dependence_relations (else_ddrs);
  free_data_refs (then_datarefs);
  free_data_refs (else_datarefs);

  return ok;
}
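
/* A sketch of a case the dependence checks above reject (hypothetical
   example code):

     if (cond)
       {
         a[i] = 1;
         t = a[i];    -- read-after-write inside THEN_BB
       }
     else
       a[i] = 2;

   Sinking the store "a[i] = 1" past the load into JOIN_BB would let T
   observe the old contents of a[i], so no store pair is sunk here.  */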

/* Return TRUE if STMT has a VUSE whose corresponding VDEF is in BB.  */

static bool
local_mem_dependence (gimple stmt, basic_block bb)
{
  tree vuse = gimple_vuse (stmt);
  gimple def;

  if (!vuse)
    return false;

  def = SSA_NAME_DEF_STMT (vuse);
  return (def && gimple_bb (def) == bb);
}
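
/* For illustration (hypothetical GIMPLE as printed with virtual
   operands), given

     bb1:
       # .MEM_2 = VDEF <.MEM_1>
       s.f = 0;
       # VUSE <.MEM_2>
       x_3 = s.g;

   the load of s.g has VUSE .MEM_2, whose defining statement is the store
   in the same block, so local_mem_dependence returns true for it.  */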

/* Given a "diamond" control-flow pattern where BB0 tests a condition,
   BB1 and BB2 are "then" and "else" blocks dependent on this test,
   and BB3 rejoins control flow following BB1 and BB2, look for
   opportunities to hoist loads as follows.  If BB3 contains a PHI of
   two loads, one each occurring in BB1 and BB2, and the loads are
   provably of adjacent fields in the same structure, then move both
   loads into BB0.  Of course this can only be done if there are no
   dependencies preventing such motion.

   One of the hoisted loads will always be speculative, so the
   transformation is currently conservative:

    - The fields must be strictly adjacent.
    - The two fields must occupy a single memory block that is
      guaranteed to not cross a page boundary.

   The last is difficult to prove, as such memory blocks should be
   aligned on the minimum of the stack alignment boundary and the
   alignment guaranteed by heap allocation interfaces.  Thus we rely
   on a parameter for the alignment value.

   Provided a good value is used for the last case, the first
   restriction could possibly be relaxed.  */

static void
hoist_adjacent_loads (basic_block bb0, basic_block bb1,
                      basic_block bb2, basic_block bb3)
{
  int param_align = PARAM_VALUE (PARAM_L1_CACHE_LINE_SIZE);
  unsigned param_align_bits = (unsigned) (param_align * BITS_PER_UNIT);
  gphi_iterator gsi;

  /* Walk the phis in bb3 looking for an opportunity.  We are looking
     for phis of two SSA names, one each of which is defined in bb1 and
     bb2.  */
  for (gsi = gsi_start_phis (bb3); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gphi *phi_stmt = gsi.phi ();
      gimple def1, def2, defswap;
      tree arg1, arg2, ref1, ref2, field1, field2, fieldswap;
      tree tree_offset1, tree_offset2, tree_size2, next;
      int offset1, offset2, size2;
      unsigned align1;
      gimple_stmt_iterator gsi2;
      basic_block bb_for_def1, bb_for_def2;

      if (gimple_phi_num_args (phi_stmt) != 2
          || virtual_operand_p (gimple_phi_result (phi_stmt)))
        continue;

      arg1 = gimple_phi_arg_def (phi_stmt, 0);
      arg2 = gimple_phi_arg_def (phi_stmt, 1);

      if (TREE_CODE (arg1) != SSA_NAME
          || TREE_CODE (arg2) != SSA_NAME
          || SSA_NAME_IS_DEFAULT_DEF (arg1)
          || SSA_NAME_IS_DEFAULT_DEF (arg2))
        continue;

      def1 = SSA_NAME_DEF_STMT (arg1);
      def2 = SSA_NAME_DEF_STMT (arg2);

      if ((gimple_bb (def1) != bb1 || gimple_bb (def2) != bb2)
          && (gimple_bb (def2) != bb1 || gimple_bb (def1) != bb2))
        continue;

      /* Check the mode of the arguments to be sure a conditional move
         can be generated for it.  */
      if (optab_handler (movcc_optab, TYPE_MODE (TREE_TYPE (arg1)))
          == CODE_FOR_nothing)
        continue;

      /* Both statements must be assignments whose RHS is a COMPONENT_REF.  */
      if (!gimple_assign_single_p (def1)
          || !gimple_assign_single_p (def2)
          || gimple_has_volatile_ops (def1)
          || gimple_has_volatile_ops (def2))
        continue;

      ref1 = gimple_assign_rhs1 (def1);
      ref2 = gimple_assign_rhs1 (def2);

      if (TREE_CODE (ref1) != COMPONENT_REF
          || TREE_CODE (ref2) != COMPONENT_REF)
        continue;

      /* The zeroth operand of the two component references must be
         identical.  It is not sufficient to compare get_base_address of
         the two references, because this could allow for different
         elements of the same array in the two trees.  It is not safe to
         assume that the existence of one array element implies the
         existence of a different one.  */
      if (!operand_equal_p (TREE_OPERAND (ref1, 0), TREE_OPERAND (ref2, 0), 0))
        continue;

      field1 = TREE_OPERAND (ref1, 1);
      field2 = TREE_OPERAND (ref2, 1);

      /* Check for field adjacency, and ensure field1 comes first.  */
      for (next = DECL_CHAIN (field1);
           next && TREE_CODE (next) != FIELD_DECL;
           next = DECL_CHAIN (next))
        ;

      if (next != field2)
        {
          for (next = DECL_CHAIN (field2);
               next && TREE_CODE (next) != FIELD_DECL;
               next = DECL_CHAIN (next))
            ;

          if (next != field1)
            continue;

          fieldswap = field1;
          field1 = field2;
          field2 = fieldswap;
          defswap = def1;
          def1 = def2;
          def2 = defswap;
        }

      bb_for_def1 = gimple_bb (def1);
      bb_for_def2 = gimple_bb (def2);

      /* Check for proper alignment of the first field.  */
      tree_offset1 = bit_position (field1);
      tree_offset2 = bit_position (field2);
      tree_size2 = DECL_SIZE (field2);

      if (!tree_fits_uhwi_p (tree_offset1)
          || !tree_fits_uhwi_p (tree_offset2)
          || !tree_fits_uhwi_p (tree_size2))
        continue;

      offset1 = tree_to_uhwi (tree_offset1);
      offset2 = tree_to_uhwi (tree_offset2);
      size2 = tree_to_uhwi (tree_size2);
      align1 = DECL_ALIGN (field1) % param_align_bits;

      if (offset1 % BITS_PER_UNIT != 0)
        continue;

      /* For profitability, the two field references should fit within
         a single cache line.  */
      if (align1 + offset2 - offset1 + size2 > param_align_bits)
        continue;

      /* The two expressions cannot be dependent upon vdefs defined
         in bb1/bb2.  */
      if (local_mem_dependence (def1, bb_for_def1)
          || local_mem_dependence (def2, bb_for_def2))
        continue;

      /* The conditions are satisfied; hoist the loads from bb1 and bb2 into
         bb0.  We hoist the first one first so that a cache miss is handled
         efficiently regardless of hardware cache-fill policy.  */
      gsi2 = gsi_for_stmt (def1);
      gsi_move_to_bb_end (&gsi2, bb0);
      gsi2 = gsi_for_stmt (def2);
      gsi_move_to_bb_end (&gsi2, bb0);

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file,
                   "\nHoisting adjacent loads from %d and %d into %d: \n",
                   bb_for_def1->index, bb_for_def2->index, bb0->index);
          print_gimple_stmt (dump_file, def1, 0, TDF_VOPS|TDF_MEMSYMS);
          print_gimple_stmt (dump_file, def2, 0, TDF_VOPS|TDF_MEMSYMS);
        }
    }
}
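
/* A minimal source-level sketch of the hoisting performed above
   (hypothetical example code):

     struct node { struct node *left; struct node *right; };

     struct node *
     pick (struct node *y, int cond)
     {
       return cond ? y->left : y->right;
     }

   Both loads are moved to the end of bb0, ahead of the branch, because
   LEFT and RIGHT are strictly adjacent fields of the same object and the
   pair fits within one cache line; the PHI in bb3 can then be implemented
   as a conditional move.  */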

/* Determine whether we should attempt to hoist adjacent loads out of
   diamond patterns in pass_phiopt.  Always hoist loads if
   -fhoist-adjacent-loads is specified and the target machine has
   both a conditional move instruction and a defined cache line size.  */

static bool
gate_hoist_loads (void)
{
  return (flag_hoist_adjacent_loads == 1
          && PARAM_VALUE (PARAM_L1_CACHE_LINE_SIZE)
          && HAVE_conditional_move);
}
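
/* For instance, hoisting would be considered under an invocation such as

     gcc -O2 -fhoist-adjacent-loads --param l1-cache-line-size=64 ...

   (an illustrative command line) on a target whose movcc patterns make
   HAVE_conditional_move nonzero.  */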

/* This pass tries to replace an if-then-else block with an
   assignment.  We have four kinds of transformations.  Some of these
   transformations are also performed by the ifcvt RTL optimizer.

   Conditional Replacement
   -----------------------

   This transformation, implemented in conditional_replacement,
   replaces

     bb0:
      if (cond) goto bb2; else goto bb1;
     bb1:
     bb2:
      x = PHI <0 (bb1), 1 (bb0), ...>;

   with

     bb0:
      x' = cond;
      goto bb2;
     bb2:
      x = PHI <x' (bb0), ...>;

   We remove bb1 as it becomes unreachable.  This occurs often due to
   gimplification of conditionals.

   Value Replacement
   -----------------

   This transformation, implemented in value_replacement, replaces

     bb0:
      if (a != b) goto bb2; else goto bb1;
     bb1:
     bb2:
      x = PHI <a (bb1), b (bb0), ...>;

   with

     bb0:
     bb2:
      x = PHI <b (bb0), ...>;

   This opportunity can sometimes occur as a result of other
   optimizations.

   Another case caught by value replacement looks like this:

     bb0:
      t1 = a == CONST;
      t2 = b > c;
      t3 = t1 & t2;
      if (t3 != 0) goto bb1; else goto bb2;
     bb1:
     bb2:
      x = PHI (CONST, a)

   Gets replaced with:

     bb0:
     bb2:
      t1 = a == CONST;
      t2 = b > c;
      t3 = t1 & t2;
      x = a;

   ABS Replacement
   ---------------

   This transformation, implemented in abs_replacement, replaces

     bb0:
      if (a >= 0) goto bb2; else goto bb1;
     bb1:
      x = -a;
     bb2:
      x = PHI <x (bb1), a (bb0), ...>;

   with

     bb0:
      x' = ABS_EXPR< a >;
     bb2:
      x = PHI <x' (bb0), ...>;

   MIN/MAX Replacement
   -------------------

   This transformation, implemented in minmax_replacement, replaces

     bb0:
      if (a <= b) goto bb2; else goto bb1;
     bb1:
     bb2:
      x = PHI <b (bb1), a (bb0), ...>;

   with

     bb0:
      x' = MIN_EXPR (a, b)
     bb2:
      x = PHI <x' (bb0), ...>;

   A similar transformation is done for MAX_EXPR.

   This pass also performs a fifth transformation of a slightly different
   flavor.

   Adjacent Load Hoisting
   ----------------------

   This transformation replaces

     bb0:
      if (...) goto bb2; else goto bb1;
     bb1:
      x1 = (<expr>).field1;
      goto bb3;
     bb2:
      x2 = (<expr>).field2;
     bb3:
      # x = PHI <x1, x2>;

   with

     bb0:
      x1 = (<expr>).field1;
      x2 = (<expr>).field2;
      if (...) goto bb2; else goto bb1;
     bb1:
      goto bb3;
     bb2:
     bb3:
      # x = PHI <x1, x2>;

   The purpose of this transformation is to enable generation of conditional
   move instructions such as Intel CMOVE or PowerPC ISEL.  Because one of
   the loads is speculative, the transformation is restricted to very
   specific cases to avoid introducing a page fault.  We are looking for
   the common idiom:

     if (...)
       x = y->left;
     else
       x = y->right;

   where left and right are typically adjacent pointers in a tree structure.  */
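
/* Compact source-level shapes of the four PHI replacements above
   (hypothetical examples; the right-hand comments show the simplified
   results):

     x = cond ? 1 : 0;        -- conditional_replacement: x = cond;
     x = (a != b) ? a : b;    -- value_replacement:       x = a;
     x = (a >= 0) ? a : -a;   -- abs_replacement:         x = ABS_EXPR <a>;
     x = (a <= b) ? a : b;    -- minmax_replacement:      x = MIN_EXPR <a, b>;  */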

namespace {

const pass_data pass_data_phiopt =
{
  GIMPLE_PASS, /* type */
  "phiopt", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_PHIOPT, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_phiopt : public gimple_opt_pass
{
public:
  pass_phiopt (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_phiopt, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_phiopt (m_ctxt); }
  virtual bool gate (function *) { return flag_ssa_phiopt; }
  virtual unsigned int execute (function *)
    {
      return tree_ssa_phiopt_worker (false, gate_hoist_loads ());
    }

}; // class pass_phiopt

} // anon namespace

gimple_opt_pass *
make_pass_phiopt (gcc::context *ctxt)
{
  return new pass_phiopt (ctxt);
}
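
/* The phiopt pass is scheduled via passes.def; its effect can be
   inspected with a dump request such as

     gcc -O2 -fdump-tree-phiopt-details ...

   (an illustrative invocation; the dump file name follows the pass
   name "phiopt").  */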

namespace {

const pass_data pass_data_cselim =
{
  GIMPLE_PASS, /* type */
  "cselim", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_PHIOPT, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_cselim : public gimple_opt_pass
{
public:
  pass_cselim (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_cselim, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return flag_tree_cselim; }
  virtual unsigned int execute (function *) { return tree_ssa_cs_elim (); }

}; // class pass_cselim

} // anon namespace

gimple_opt_pass *
make_pass_cselim (gcc::context *ctxt)
{
  return new pass_cselim (ctxt);
}
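
/* Likewise, the conditional store elimination done by pass_cselim is
   gated by -ftree-cselim, and its dump can be requested with an
   invocation such as

     gcc -O2 -fdump-tree-cselim-details ...

   (an illustrative command line).  */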