1 /* Optimization of PHI nodes by converting them into straightline code.
2 Copyright (C) 2004-2014 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 3, or (at your option) any
9 later version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "hash-table.h"
24 #include "tm.h"
25 #include "tree.h"
26 #include "stor-layout.h"
27 #include "flags.h"
28 #include "tm_p.h"
29 #include "predict.h"
30 #include "vec.h"
31 #include "hashtab.h"
32 #include "hash-set.h"
33 #include "machmode.h"
34 #include "hard-reg-set.h"
35 #include "input.h"
36 #include "function.h"
37 #include "dominance.h"
38 #include "cfg.h"
39 #include "cfganal.h"
40 #include "basic-block.h"
41 #include "tree-ssa-alias.h"
42 #include "internal-fn.h"
43 #include "gimple-expr.h"
44 #include "is-a.h"
45 #include "gimple.h"
46 #include "gimplify.h"
47 #include "gimple-iterator.h"
48 #include "gimplify-me.h"
49 #include "gimple-ssa.h"
50 #include "tree-cfg.h"
51 #include "tree-phinodes.h"
52 #include "ssa-iterators.h"
53 #include "stringpool.h"
54 #include "tree-ssanames.h"
55 #include "expr.h"
56 #include "tree-dfa.h"
57 #include "tree-pass.h"
58 #include "langhooks.h"
59 #include "domwalk.h"
60 #include "cfgloop.h"
61 #include "tree-data-ref.h"
62 #include "gimple-pretty-print.h"
63 #include "insn-config.h"
64 #include "expr.h"
65 #include "insn-codes.h"
66 #include "optabs.h"
67 #include "tree-scalar-evolution.h"
68 #include "tree-inline.h"
70 #ifndef HAVE_conditional_move
71 #define HAVE_conditional_move (0)
72 #endif
74 static unsigned int tree_ssa_phiopt_worker (bool, bool);
75 static bool conditional_replacement (basic_block, basic_block,
76 edge, edge, gphi *, tree, tree);
77 static int value_replacement (basic_block, basic_block,
78 edge, edge, gimple, tree, tree);
79 static bool minmax_replacement (basic_block, basic_block,
80 edge, edge, gimple, tree, tree);
81 static bool abs_replacement (basic_block, basic_block,
82 edge, edge, gimple, tree, tree);
83 static bool neg_replacement (basic_block, basic_block,
84 edge, edge, gimple, tree, tree);
85 static bool cond_store_replacement (basic_block, basic_block, edge, edge,
86 hash_set<tree> *);
87 static bool cond_if_else_store_replacement (basic_block, basic_block, basic_block);
88 static hash_set<tree> * get_non_trapping ();
89 static void replace_phi_edge_with_variable (basic_block, edge, gimple, tree);
90 static void hoist_adjacent_loads (basic_block, basic_block,
91 basic_block, basic_block);
92 static bool gate_hoist_loads (void);
94 /* This pass tries to transform conditional stores into unconditional
95 ones, enabling further simplifications with the simpler then and else
96 blocks. In particular it replaces this:
98 bb0:
99 if (cond) goto bb2; else goto bb1;
100 bb1:
101 *p = RHS;
102 bb2:
104 with
106 bb0:
107 if (cond) goto bb1; else goto bb2;
108 bb1:
109 condtmp' = *p;
110 bb2:
111 condtmp = PHI <RHS, condtmp'>
112 *p = condtmp;
114 This transformation can only be done under several constraints,
115 documented below. It also replaces:
117 bb0:
118 if (cond) goto bb2; else goto bb1;
119 bb1:
120 *p = RHS1;
121 goto bb3;
122 bb2:
123 *p = RHS2;
124 bb3:
126 with
128 bb0:
129 if (cond) goto bb3; else goto bb1;
130 bb1:
131 bb3:
132 condtmp = PHI <RHS1, RHS2>
133 *p = condtmp; */
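/* As a concrete source-level illustration of the first pattern above
   (the names are invented for this sketch, they do not come from any
   testcase):

     void
     f (int cond, int *p, int rhs)
     {
       if (cond)
         *p = rhs;
     }

   becomes, provided a dominating access proves that *p cannot trap,

     void
     f (int cond, int *p, int rhs)
     {
       int tmp = *p;
       *p = cond ? rhs : tmp;
     }

   i.e. the store is performed unconditionally and the condition only
   selects the value that is stored.  */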
135 static unsigned int
136 tree_ssa_cs_elim (void)
138 unsigned todo;
139 /* ??? We are not interested in loop related info, but the following
140 will create it, ICEing as we didn't init loops with pre-headers.
141 An interfacing issue of find_data_references_in_bb. */
142 loop_optimizer_init (LOOPS_NORMAL);
143 scev_initialize ();
144 todo = tree_ssa_phiopt_worker (true, false);
145 scev_finalize ();
146 loop_optimizer_finalize ();
147 return todo;
150 /* Return the sole PHI in SEQ if it contains just one PHI; otherwise return the single PHI whose arguments for edges E0 and E1 differ, or NULL if there is more than one such PHI. */
152 static gphi *
153 single_non_singleton_phi_for_edges (gimple_seq seq, edge e0, edge e1)
155 gimple_stmt_iterator i;
156 gphi *phi = NULL;
157 if (gimple_seq_singleton_p (seq))
158 return as_a <gphi *> (gsi_stmt (gsi_start (seq)));
159 for (i = gsi_start (seq); !gsi_end_p (i); gsi_next (&i))
161 gphi *p = as_a <gphi *> (gsi_stmt (i));
162 /* If the PHI arguments are equal then we can skip this PHI. */
163 if (operand_equal_for_phi_arg_p (gimple_phi_arg_def (p, e0->dest_idx),
164 gimple_phi_arg_def (p, e1->dest_idx)))
165 continue;
167 /* If we already have a PHI whose arguments for these two edges
168 differ, then there is no single such PHI; return NULL. */
169 if (phi)
170 return NULL;
172 phi = p;
174 return phi;
177 /* The core routine of conditional store replacement and normal
178 phi optimizations. Both share much of the infrastructure in how
179 to match applicable basic block patterns. DO_STORE_ELIM is true
180 when we want to do conditional store replacement, false otherwise.
181 DO_HOIST_LOADS is true when we want to hoist adjacent loads out
182 of diamond control flow patterns, false otherwise. */
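/* For orientation, a rough sketch of the two control-flow shapes this
   worker matches; the block names follow the code below and the drawing
   is only illustrative:

     Triangle (PHI optimizations,        Diamond (if-else store
     conditional store elimination):     replacement, load hoisting):

           bb                                  bb
          /  \                                /  \
        bb1   |                            bb1    bb2
          \  /                                \  /
           bb2                                 bb3

   In the triangle, bb1 is the single-predecessor middle block and bb2
   the join block; in the diamond, bb3 is the join block with exactly
   two predecessors.  */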
183 static unsigned int
184 tree_ssa_phiopt_worker (bool do_store_elim, bool do_hoist_loads)
186 basic_block bb;
187 basic_block *bb_order;
188 unsigned n, i;
189 bool cfgchanged = false;
190 hash_set<tree> *nontrap = 0;
192 if (do_store_elim)
193 /* Calculate the set of non-trapping memory accesses. */
194 nontrap = get_non_trapping ();
196 /* The replacement of conditional negation with a non-branching
197 sequence is really only a win when optimizing for speed and we
198 can avoid transformations by gimple if-conversion that result
199 in poor RTL generation.
201 Ideally either gimple if-conversion or the RTL expanders will
202 be improved and the code to emit branchless conditional negation
203 can be removed. */
204 bool replace_conditional_negation = false;
205 if (!do_store_elim)
206 replace_conditional_negation
207 = ((!optimize_size && optimize >= 2)
208 || (((flag_tree_loop_vectorize || cfun->has_force_vectorize_loops)
209 && flag_tree_loop_if_convert != 0)
210 || flag_tree_loop_if_convert == 1
211 || flag_tree_loop_if_convert_stores == 1));
213 /* Search every basic block for COND_EXPR we may be able to optimize.
215 We walk the blocks in order that guarantees that a block with
216 a single predecessor is processed before the predecessor.
217 This ensures that we collapse inner ifs before visiting the
218 outer ones, and also that we do not try to visit a removed
219 block. */
220 bb_order = single_pred_before_succ_order ();
221 n = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS;
223 for (i = 0; i < n; i++)
225 gimple cond_stmt;
226 gphi *phi;
227 basic_block bb1, bb2;
228 edge e1, e2;
229 tree arg0, arg1;
231 bb = bb_order[i];
233 cond_stmt = last_stmt (bb);
234 /* Check to see if the last statement is a GIMPLE_COND. */
235 if (!cond_stmt
236 || gimple_code (cond_stmt) != GIMPLE_COND)
237 continue;
239 e1 = EDGE_SUCC (bb, 0);
240 bb1 = e1->dest;
241 e2 = EDGE_SUCC (bb, 1);
242 bb2 = e2->dest;
244 /* We cannot do the optimization on abnormal edges. */
245 if ((e1->flags & EDGE_ABNORMAL) != 0
246 || (e2->flags & EDGE_ABNORMAL) != 0)
247 continue;
249 /* Skip if bb1 has no successors, bb2 is NULL, or bb2 has no successors. */
250 if (EDGE_COUNT (bb1->succs) == 0
251 || bb2 == NULL
252 || EDGE_COUNT (bb2->succs) == 0)
253 continue;
255 /* Find the bb which is the fall through to the other. */
256 if (EDGE_SUCC (bb1, 0)->dest == bb2)
258 else if (EDGE_SUCC (bb2, 0)->dest == bb1)
260 basic_block bb_tmp = bb1;
261 edge e_tmp = e1;
262 bb1 = bb2;
263 bb2 = bb_tmp;
264 e1 = e2;
265 e2 = e_tmp;
267 else if (do_store_elim
268 && EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest)
270 basic_block bb3 = EDGE_SUCC (bb1, 0)->dest;
272 if (!single_succ_p (bb1)
273 || (EDGE_SUCC (bb1, 0)->flags & EDGE_FALLTHRU) == 0
274 || !single_succ_p (bb2)
275 || (EDGE_SUCC (bb2, 0)->flags & EDGE_FALLTHRU) == 0
276 || EDGE_COUNT (bb3->preds) != 2)
277 continue;
278 if (cond_if_else_store_replacement (bb1, bb2, bb3))
279 cfgchanged = true;
280 continue;
282 else if (do_hoist_loads
283 && EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest)
285 basic_block bb3 = EDGE_SUCC (bb1, 0)->dest;
287 if (!FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (cond_stmt)))
288 && single_succ_p (bb1)
289 && single_succ_p (bb2)
290 && single_pred_p (bb1)
291 && single_pred_p (bb2)
292 && EDGE_COUNT (bb->succs) == 2
293 && EDGE_COUNT (bb3->preds) == 2
294 /* If one edge or the other is dominant, a conditional move
295 is likely to perform worse than the well-predicted branch. */
296 && !predictable_edge_p (EDGE_SUCC (bb, 0))
297 && !predictable_edge_p (EDGE_SUCC (bb, 1)))
298 hoist_adjacent_loads (bb, bb1, bb2, bb3);
299 continue;
301 else
302 continue;
304 e1 = EDGE_SUCC (bb1, 0);
306 /* Make sure that bb1 is just a fall through. */
307 if (!single_succ_p (bb1)
308 || (e1->flags & EDGE_FALLTHRU) == 0)
309 continue;
311 /* Also make sure that bb1 has only one predecessor and that it
312 is bb. */
313 if (!single_pred_p (bb1)
314 || single_pred (bb1) != bb)
315 continue;
317 if (do_store_elim)
319 /* bb1 is the middle block, bb2 the join block, bb the split block,
320 e1 the fallthrough edge from bb1 to bb2. We can't do the
321 optimization if the join block has more than two predecessors. */
322 if (EDGE_COUNT (bb2->preds) > 2)
323 continue;
324 if (cond_store_replacement (bb1, bb2, e1, e2, nontrap))
325 cfgchanged = true;
327 else
329 gimple_seq phis = phi_nodes (bb2);
330 gimple_stmt_iterator gsi;
331 bool candorest = true;
333 /* Value replacement can work with more than one PHI
334 so try that first. */
335 for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
337 phi = as_a <gphi *> (gsi_stmt (gsi));
338 arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
339 arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
340 if (value_replacement (bb, bb1, e1, e2, phi, arg0, arg1) == 2)
342 candorest = false;
343 cfgchanged = true;
344 break;
348 if (!candorest)
349 continue;
351 phi = single_non_singleton_phi_for_edges (phis, e1, e2);
352 if (!phi)
353 continue;
355 arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
356 arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
358 /* Something is wrong if we cannot find the arguments in the PHI
359 node. */
360 gcc_assert (arg0 != NULL && arg1 != NULL);
362 /* Do the replacement of conditional if it can be done. */
363 if (conditional_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
364 cfgchanged = true;
365 else if (abs_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
366 cfgchanged = true;
367 else if (replace_conditional_negation
368 && neg_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
369 cfgchanged = true;
370 else if (minmax_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
371 cfgchanged = true;
375 free (bb_order);
377 if (do_store_elim)
378 delete nontrap;
379 /* If the CFG has changed, we should cleanup the CFG. */
380 if (cfgchanged && do_store_elim)
382 /* In cond-store replacement we have added some loads on edges
383 and new VOPS (as we moved the store, and created a load). */
384 gsi_commit_edge_inserts ();
385 return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
387 else if (cfgchanged)
388 return TODO_cleanup_cfg;
389 return 0;
392 /* Replace PHI node element whose edge is E in block BB with variable NEW.
393 Remove the edge from COND_BLOCK which does not lead to BB (COND_BLOCK
394 is known to have two edges, one of which must reach BB). */
396 static void
397 replace_phi_edge_with_variable (basic_block cond_block,
398 edge e, gimple phi, tree new_tree)
400 basic_block bb = gimple_bb (phi);
401 basic_block block_to_remove;
402 gimple_stmt_iterator gsi;
404 /* Change the PHI argument to new. */
405 SET_USE (PHI_ARG_DEF_PTR (phi, e->dest_idx), new_tree);
407 /* Remove the empty basic block. */
408 if (EDGE_SUCC (cond_block, 0)->dest == bb)
410 EDGE_SUCC (cond_block, 0)->flags |= EDGE_FALLTHRU;
411 EDGE_SUCC (cond_block, 0)->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
412 EDGE_SUCC (cond_block, 0)->probability = REG_BR_PROB_BASE;
413 EDGE_SUCC (cond_block, 0)->count += EDGE_SUCC (cond_block, 1)->count;
415 block_to_remove = EDGE_SUCC (cond_block, 1)->dest;
417 else
419 EDGE_SUCC (cond_block, 1)->flags |= EDGE_FALLTHRU;
420 EDGE_SUCC (cond_block, 1)->flags
421 &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
422 EDGE_SUCC (cond_block, 1)->probability = REG_BR_PROB_BASE;
423 EDGE_SUCC (cond_block, 1)->count += EDGE_SUCC (cond_block, 0)->count;
425 block_to_remove = EDGE_SUCC (cond_block, 0)->dest;
427 delete_basic_block (block_to_remove);
429 /* Eliminate the COND_EXPR at the end of COND_BLOCK. */
430 gsi = gsi_last_bb (cond_block);
431 gsi_remove (&gsi, true);
433 if (dump_file && (dump_flags & TDF_DETAILS))
434 fprintf (dump_file,
435 "COND_EXPR in block %d and PHI in block %d converted to straightline code.\n",
436 cond_block->index,
437 bb->index);
440 /* The function conditional_replacement does the main work of doing the
441 conditional replacement. Return true if the replacement is done.
442 Otherwise return false.
443 BB is the basic block where the replacement is going to be done. ARG0
444 is argument 0 from PHI. Likewise for ARG1. */
446 static bool
447 conditional_replacement (basic_block cond_bb, basic_block middle_bb,
448 edge e0, edge e1, gphi *phi,
449 tree arg0, tree arg1)
451 tree result;
452 gimple stmt;
453 gassign *new_stmt;
454 tree cond;
455 gimple_stmt_iterator gsi;
456 edge true_edge, false_edge;
457 tree new_var, new_var2;
458 bool neg;
460 /* FIXME: Gimplification of complex type is too hard for now. */
461 /* We aren't prepared to handle vectors either (and it is a question
462 if it would be worthwhile anyway). */
463 if (!(INTEGRAL_TYPE_P (TREE_TYPE (arg0))
464 || POINTER_TYPE_P (TREE_TYPE (arg0)))
465 || !(INTEGRAL_TYPE_P (TREE_TYPE (arg1))
466 || POINTER_TYPE_P (TREE_TYPE (arg1))))
467 return false;
469 /* If the PHI arguments are the constants 0 and 1, or 0 and -1, then
470 we can convert the PHI into the (possibly negated) condition. */
471 if ((integer_zerop (arg0) && integer_onep (arg1))
472 || (integer_zerop (arg1) && integer_onep (arg0)))
473 neg = false;
474 else if ((integer_zerop (arg0) && integer_all_onesp (arg1))
475 || (integer_zerop (arg1) && integer_all_onesp (arg0)))
476 neg = true;
477 else
478 return false;
480 if (!empty_block_p (middle_bb))
481 return false;
483 /* At this point we know we have a GIMPLE_COND with two successors.
484 One successor is BB, the other successor is an empty block which
485 falls through into BB.
487 There is a single PHI node at the join point (BB) and its arguments
488 are constants (0, 1) or (0, -1).
490 So, given the condition COND, and the two PHI arguments, we can
491 rewrite this PHI into non-branching code:
493 dest = (COND) or dest = COND'
495 We use the condition as-is if the argument associated with the
496 true edge has the value one or the argument associated with the
497 false edge has the value zero. Note that those conditions are not
498 the same since only one of the outgoing edges from the GIMPLE_COND
499 will directly reach BB and thus be associated with an argument. */
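/* As a source-level sketch of what the rewrite below produces (the
   variable names are invented for illustration):

     x = (a < b) ? 1 : 0;    -->   x = a < b;
     x = (a < b) ? 0 : 1;    -->   x = !(a < b);
     x = (a < b) ? -1 : 0;   -->   x = -(int) (a < b);

   The last form is the NEG case handled further down, used when the
   PHI arguments are 0 and -1.  */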
501 stmt = last_stmt (cond_bb);
502 result = PHI_RESULT (phi);
504 /* To handle special cases like floating point comparison, it is easier and
505 less error-prone to build a tree and gimplify it on the fly though it is
506 less efficient. */
507 cond = fold_build2_loc (gimple_location (stmt),
508 gimple_cond_code (stmt), boolean_type_node,
509 gimple_cond_lhs (stmt), gimple_cond_rhs (stmt));
511 /* We need to know which is the true edge and which is the false
512 edge so that we know when to invert the condition below. */
513 extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
514 if ((e0 == true_edge && integer_zerop (arg0))
515 || (e0 == false_edge && !integer_zerop (arg0))
516 || (e1 == true_edge && integer_zerop (arg1))
517 || (e1 == false_edge && !integer_zerop (arg1)))
518 cond = fold_build1_loc (gimple_location (stmt),
519 TRUTH_NOT_EXPR, TREE_TYPE (cond), cond);
521 if (neg)
523 cond = fold_convert_loc (gimple_location (stmt),
524 TREE_TYPE (result), cond);
525 cond = fold_build1_loc (gimple_location (stmt),
526 NEGATE_EXPR, TREE_TYPE (cond), cond);
529 /* Insert our new statements at the end of conditional block before the
530 COND_STMT. */
531 gsi = gsi_for_stmt (stmt);
532 new_var = force_gimple_operand_gsi (&gsi, cond, true, NULL, true,
533 GSI_SAME_STMT);
535 if (!useless_type_conversion_p (TREE_TYPE (result), TREE_TYPE (new_var)))
537 source_location locus_0, locus_1;
539 new_var2 = make_ssa_name (TREE_TYPE (result));
540 new_stmt = gimple_build_assign (new_var2, CONVERT_EXPR, new_var);
541 gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
542 new_var = new_var2;
544 /* Set the locus to the first argument, unless it doesn't have one. */
545 locus_0 = gimple_phi_arg_location (phi, 0);
546 locus_1 = gimple_phi_arg_location (phi, 1);
547 if (locus_0 == UNKNOWN_LOCATION)
548 locus_0 = locus_1;
549 gimple_set_location (new_stmt, locus_0);
552 replace_phi_edge_with_variable (cond_bb, e1, phi, new_var);
554 /* Note that we optimized this PHI. */
555 return true;
558 /* Update *ARG which is defined in STMT so that it contains the
559 computed value if that seems profitable. Return true if the
560 statement is made dead by that rewriting. */
562 static bool
563 jump_function_from_stmt (tree *arg, gimple stmt)
565 enum tree_code code = gimple_assign_rhs_code (stmt);
566 if (code == ADDR_EXPR)
568 /* For arg = &p->i transform it to p, if possible. */
569 tree rhs1 = gimple_assign_rhs1 (stmt);
570 HOST_WIDE_INT offset;
571 tree tem = get_addr_base_and_unit_offset (TREE_OPERAND (rhs1, 0),
572 &offset);
573 if (tem
574 && TREE_CODE (tem) == MEM_REF
575 && (mem_ref_offset (tem) + offset) == 0)
577 *arg = TREE_OPERAND (tem, 0);
578 return true;
581 /* TODO: Much like IPA-CP jump-functions we want to handle constant
582 additions symbolically here, and we'd need to update the comparison
583 code that compares the arg + cst tuples in our caller. For now the
584 code above exactly handles the VEC_BASE pattern from vec.h. */
585 return false;
588 /* RHS is a source argument in a BIT_AND_EXPR which feeds a conditional
589 of the form SSA_NAME NE 0.
591 If RHS is fed by a simple EQ_EXPR comparison of two values, see if
592 the two input values of the EQ_EXPR match arg0 and arg1.
594 If so update *code and return TRUE. Otherwise return FALSE. */
596 static bool
597 rhs_is_fed_for_value_replacement (const_tree arg0, const_tree arg1,
598 enum tree_code *code, const_tree rhs)
600 /* Obviously if RHS is not an SSA_NAME, we can't look at the defining
601 statement. */
602 if (TREE_CODE (rhs) == SSA_NAME)
604 gimple def1 = SSA_NAME_DEF_STMT (rhs);
606 /* Verify the defining statement has an EQ_EXPR on the RHS. */
607 if (is_gimple_assign (def1) && gimple_assign_rhs_code (def1) == EQ_EXPR)
609 /* Finally verify the source operands of the EQ_EXPR are equal
610 to arg0 and arg1. */
611 tree op0 = gimple_assign_rhs1 (def1);
612 tree op1 = gimple_assign_rhs2 (def1);
613 if ((operand_equal_for_phi_arg_p (arg0, op0)
614 && operand_equal_for_phi_arg_p (arg1, op1))
615 || (operand_equal_for_phi_arg_p (arg0, op1)
616 && operand_equal_for_phi_arg_p (arg1, op0)))
618 /* We will perform the optimization. */
619 *code = gimple_assign_rhs_code (def1);
620 return true;
624 return false;
627 /* Return TRUE if arg0/arg1 are equal to the rhs/lhs or lhs/rhs of COND.
629 Also return TRUE if arg0/arg1 are equal to the source arguments of
630 an EQ comparison feeding a BIT_AND_EXPR which feeds COND.
632 Return FALSE otherwise. */
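/* A minimal GIMPLE-level sketch of the second (BIT_AND_EXPR) form, with
   invented SSA names:

     _1 = a == b;
     _2 = c != 0;
     _3 = _1 & _2;
     if (_3 != 0) goto middle; else goto join;
     ...
     x = PHI <a, b>

   ARG0/ARG1 are a and b; they match the operands of the EQ_EXPR feeding
   the BIT_AND_EXPR, so *CODE is updated to EQ_EXPR and the caller may
   treat the guard as if it were simply "a == b".  */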
634 static bool
635 operand_equal_for_value_replacement (const_tree arg0, const_tree arg1,
636 enum tree_code *code, gimple cond)
638 gimple def;
639 tree lhs = gimple_cond_lhs (cond);
640 tree rhs = gimple_cond_rhs (cond);
642 if ((operand_equal_for_phi_arg_p (arg0, lhs)
643 && operand_equal_for_phi_arg_p (arg1, rhs))
644 || (operand_equal_for_phi_arg_p (arg1, lhs)
645 && operand_equal_for_phi_arg_p (arg0, rhs)))
646 return true;
648 /* Now handle more complex case where we have an EQ comparison
649 which feeds a BIT_AND_EXPR which feeds COND.
651 First verify that COND is of the form SSA_NAME NE 0. */
652 if (*code != NE_EXPR || !integer_zerop (rhs)
653 || TREE_CODE (lhs) != SSA_NAME)
654 return false;
656 /* Now ensure that SSA_NAME is set by a BIT_AND_EXPR. */
657 def = SSA_NAME_DEF_STMT (lhs);
658 if (!is_gimple_assign (def) || gimple_assign_rhs_code (def) != BIT_AND_EXPR)
659 return false;
661 /* Now verify arg0/arg1 correspond to the source arguments of an
662 EQ comparison feeding the BIT_AND_EXPR. */
664 tree tmp = gimple_assign_rhs1 (def);
665 if (rhs_is_fed_for_value_replacement (arg0, arg1, code, tmp))
666 return true;
668 tmp = gimple_assign_rhs2 (def);
669 if (rhs_is_fed_for_value_replacement (arg0, arg1, code, tmp))
670 return true;
672 return false;
675 /* Returns true if ARG is a neutral element for operation CODE
676 on the RIGHT side. */
678 static bool
679 neutral_element_p (tree_code code, tree arg, bool right)
681 switch (code)
683 case PLUS_EXPR:
684 case BIT_IOR_EXPR:
685 case BIT_XOR_EXPR:
686 return integer_zerop (arg);
688 case LROTATE_EXPR:
689 case RROTATE_EXPR:
690 case LSHIFT_EXPR:
691 case RSHIFT_EXPR:
692 case MINUS_EXPR:
693 case POINTER_PLUS_EXPR:
694 return right && integer_zerop (arg);
696 case MULT_EXPR:
697 return integer_onep (arg);
699 case TRUNC_DIV_EXPR:
700 case CEIL_DIV_EXPR:
701 case FLOOR_DIV_EXPR:
702 case ROUND_DIV_EXPR:
703 case EXACT_DIV_EXPR:
704 return right && integer_onep (arg);
706 case BIT_AND_EXPR:
707 return integer_all_onesp (arg);
709 default:
710 return false;
714 /* Returns true if ARG is an absorbing element for operation CODE. */
716 static bool
717 absorbing_element_p (tree_code code, tree arg)
719 switch (code)
721 case BIT_IOR_EXPR:
722 return integer_all_onesp (arg);
724 case MULT_EXPR:
725 case BIT_AND_EXPR:
726 return integer_zerop (arg);
728 default:
729 return false;
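/* These two predicates drive the "(x != CST) ? x OP y : y" simplification
   in value_replacement below.  A few illustrative cases (hypothetical
   source code):

     r = (x != 0) ? x + y : y;    -->   r = x + y;   (0 is neutral for +)
     r = (x != 1) ? x * y : y;    -->   r = x * y;   (1 is neutral for *)
     r = (x != 0) ? x * y : 0;    -->   r = x * y;   (0 is absorbing for *)

   In each case the value of the "else" arm equals the value of the
   operation when x equals the compared constant, so the operation can be
   performed unconditionally.  */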
733 /* The function value_replacement does the main work of doing the value
734 replacement. Return non-zero if the replacement is done. Otherwise return
735 0. If we remove the middle basic block, return 2.
736 BB is the basic block where the replacement is going to be done. ARG0
737 is argument 0 from the PHI. Likewise for ARG1. */
739 static int
740 value_replacement (basic_block cond_bb, basic_block middle_bb,
741 edge e0, edge e1, gimple phi,
742 tree arg0, tree arg1)
744 gimple_stmt_iterator gsi;
745 gimple cond;
746 edge true_edge, false_edge;
747 enum tree_code code;
748 bool empty_or_with_defined_p = true;
750 /* If the type says honor signed zeros we cannot do this
751 optimization. */
752 if (HONOR_SIGNED_ZEROS (arg1))
753 return 0;
755 /* If there is a statement in MIDDLE_BB that defines one of the PHI
756 arguments, then adjust arg0 or arg1. */
757 gsi = gsi_start_nondebug_after_labels_bb (middle_bb);
758 while (!gsi_end_p (gsi))
760 gimple stmt = gsi_stmt (gsi);
761 tree lhs;
762 gsi_next_nondebug (&gsi);
763 if (!is_gimple_assign (stmt))
765 empty_or_with_defined_p = false;
766 continue;
768 /* Now try to adjust arg0 or arg1 according to the computation
769 in the statement. */
770 lhs = gimple_assign_lhs (stmt);
771 if (!(lhs == arg0
772 && jump_function_from_stmt (&arg0, stmt))
773 || (lhs == arg1
774 && jump_function_from_stmt (&arg1, stmt)))
775 empty_or_with_defined_p = false;
778 cond = last_stmt (cond_bb);
779 code = gimple_cond_code (cond);
781 /* This transformation is only valid for equality comparisons. */
782 if (code != NE_EXPR && code != EQ_EXPR)
783 return 0;
785 /* We need to know which is the true edge and which is the false
786 edge so that we know which PHI argument corresponds to each outcome. */
787 extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
789 /* At this point we know we have a COND_EXPR with two successors.
790 One successor is BB, the other successor is an empty block which
791 falls through into BB.
793 The condition for the COND_EXPR is known to be NE_EXPR or EQ_EXPR.
795 There is a single PHI node at the join point (BB) with two arguments.
797 We now need to verify that the two arguments in the PHI node match
798 the two arguments to the equality comparison. */
800 if (operand_equal_for_value_replacement (arg0, arg1, &code, cond))
802 edge e;
803 tree arg;
805 /* For NE_EXPR, we want to build an assignment result = arg where
806 arg is the PHI argument associated with the true edge. For
807 EQ_EXPR we want the PHI argument associated with the false edge. */
808 e = (code == NE_EXPR ? true_edge : false_edge);
810 /* Unfortunately, E may not reach BB (it may instead have gone to
811 OTHER_BLOCK). If that is the case, then we want the single outgoing
812 edge from OTHER_BLOCK which reaches BB and represents the desired
813 path from COND_BLOCK. */
814 if (e->dest == middle_bb)
815 e = single_succ_edge (e->dest);
817 /* Now we know the incoming edge to BB that has the argument for the
818 RHS of our new assignment statement. */
819 if (e0 == e)
820 arg = arg0;
821 else
822 arg = arg1;
824 /* If the middle basic block was empty or is defining the
825 PHI arguments and this is a single phi where the args are different
826 for the edges e0 and e1 then we can remove the middle basic block. */
827 if (empty_or_with_defined_p
828 && single_non_singleton_phi_for_edges (phi_nodes (gimple_bb (phi)),
829 e0, e1) == phi)
831 replace_phi_edge_with_variable (cond_bb, e1, phi, arg);
832 /* Note that we optimized this PHI. */
833 return 2;
835 else
837 /* Replace the PHI arguments with arg. */
838 SET_PHI_ARG_DEF (phi, e0->dest_idx, arg);
839 SET_PHI_ARG_DEF (phi, e1->dest_idx, arg);
840 if (dump_file && (dump_flags & TDF_DETAILS))
842 fprintf (dump_file, "PHI ");
843 print_generic_expr (dump_file, gimple_phi_result (phi), 0);
844 fprintf (dump_file, " reduced for COND_EXPR in block %d to ",
845 cond_bb->index);
846 print_generic_expr (dump_file, arg, 0);
847 fprintf (dump_file, ".\n");
849 return 1;
854 /* Now optimize (x != 0) ? x + y : y to just x + y.
855 The following condition is too restrictive, there can easily be another
856 stmt in middle_bb, for instance a CONVERT_EXPR for the second argument. */
857 gimple assign = last_and_only_stmt (middle_bb);
858 if (!assign || gimple_code (assign) != GIMPLE_ASSIGN
859 || gimple_assign_rhs_class (assign) != GIMPLE_BINARY_RHS
860 || (!INTEGRAL_TYPE_P (TREE_TYPE (arg0))
861 && !POINTER_TYPE_P (TREE_TYPE (arg0))))
862 return 0;
864 /* Punt if there are (degenerate) PHIs in middle_bb, there should not be. */
865 if (!gimple_seq_empty_p (phi_nodes (middle_bb)))
866 return 0;
868 /* Only transform if it removes the condition. */
869 if (!single_non_singleton_phi_for_edges (phi_nodes (gimple_bb (phi)), e0, e1))
870 return 0;
872 /* Size-wise, this is always profitable. */
873 if (optimize_bb_for_speed_p (cond_bb)
874 /* The special case is useless if it has a low probability. */
875 && profile_status_for_fn (cfun) != PROFILE_ABSENT
876 && EDGE_PRED (middle_bb, 0)->probability < PROB_EVEN
877 /* If assign is cheap, there is no point avoiding it. */
878 && estimate_num_insns (assign, &eni_time_weights)
879 >= 3 * estimate_num_insns (cond, &eni_time_weights))
880 return 0;
882 tree lhs = gimple_assign_lhs (assign);
883 tree rhs1 = gimple_assign_rhs1 (assign);
884 tree rhs2 = gimple_assign_rhs2 (assign);
885 enum tree_code code_def = gimple_assign_rhs_code (assign);
886 tree cond_lhs = gimple_cond_lhs (cond);
887 tree cond_rhs = gimple_cond_rhs (cond);
889 if (((code == NE_EXPR && e1 == false_edge)
890 || (code == EQ_EXPR && e1 == true_edge))
891 && arg0 == lhs
892 && ((arg1 == rhs1
893 && operand_equal_for_phi_arg_p (rhs2, cond_lhs)
894 && neutral_element_p (code_def, cond_rhs, true))
895 || (arg1 == rhs2
896 && operand_equal_for_phi_arg_p (rhs1, cond_lhs)
897 && neutral_element_p (code_def, cond_rhs, false))
898 || (operand_equal_for_phi_arg_p (arg1, cond_rhs)
899 && (operand_equal_for_phi_arg_p (rhs2, cond_lhs)
900 || operand_equal_for_phi_arg_p (rhs1, cond_lhs))
901 && absorbing_element_p (code_def, cond_rhs))))
903 gsi = gsi_for_stmt (cond);
904 gimple_stmt_iterator gsi_from = gsi_for_stmt (assign);
905 gsi_move_before (&gsi_from, &gsi);
906 replace_phi_edge_with_variable (cond_bb, e1, phi, lhs);
907 return 2;
910 return 0;
913 /* The function minmax_replacement does the main work of doing the minmax
914 replacement. Return true if the replacement is done. Otherwise return
915 false.
916 BB is the basic block where the replacement is going to be done. ARG0
917 is argument 0 from the PHI. Likewise for ARG1. */
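/* Source-level sketch of the simplest cases handled here (made-up names):

     r = (a < b) ? a : b;    -->   r = MIN_EXPR <a, b>;
     r = (a < b) ? b : a;    -->   r = MAX_EXPR <a, b>;

   This is only valid when NaNs need not be honored, as checked below.  */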
919 static bool
920 minmax_replacement (basic_block cond_bb, basic_block middle_bb,
921 edge e0, edge e1, gimple phi,
922 tree arg0, tree arg1)
924 tree result, type;
925 gcond *cond;
926 gassign *new_stmt;
927 edge true_edge, false_edge;
928 enum tree_code cmp, minmax, ass_code;
929 tree smaller, larger, arg_true, arg_false;
930 gimple_stmt_iterator gsi, gsi_from;
932 type = TREE_TYPE (PHI_RESULT (phi));
934 /* The optimization may be unsafe due to NaNs. */
935 if (HONOR_NANS (type))
936 return false;
938 cond = as_a <gcond *> (last_stmt (cond_bb));
939 cmp = gimple_cond_code (cond);
941 /* This transformation is only valid for order comparisons. Record which
942 operand is smaller/larger if the result of the comparison is true. */
943 if (cmp == LT_EXPR || cmp == LE_EXPR)
945 smaller = gimple_cond_lhs (cond);
946 larger = gimple_cond_rhs (cond);
948 else if (cmp == GT_EXPR || cmp == GE_EXPR)
950 smaller = gimple_cond_rhs (cond);
951 larger = gimple_cond_lhs (cond);
953 else
954 return false;
956 /* We need to know which is the true edge and which is the false
957 edge so that we know which PHI argument is selected on each edge. */
958 extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
960 /* Forward the edges over the middle basic block. */
961 if (true_edge->dest == middle_bb)
962 true_edge = EDGE_SUCC (true_edge->dest, 0);
963 if (false_edge->dest == middle_bb)
964 false_edge = EDGE_SUCC (false_edge->dest, 0);
966 if (true_edge == e0)
968 gcc_assert (false_edge == e1);
969 arg_true = arg0;
970 arg_false = arg1;
972 else
974 gcc_assert (false_edge == e0);
975 gcc_assert (true_edge == e1);
976 arg_true = arg1;
977 arg_false = arg0;
980 if (empty_block_p (middle_bb))
982 if (operand_equal_for_phi_arg_p (arg_true, smaller)
983 && operand_equal_for_phi_arg_p (arg_false, larger))
985 /* Case
987 if (smaller < larger)
988 rslt = smaller;
989 else
990 rslt = larger; */
991 minmax = MIN_EXPR;
993 else if (operand_equal_for_phi_arg_p (arg_false, smaller)
994 && operand_equal_for_phi_arg_p (arg_true, larger))
995 minmax = MAX_EXPR;
996 else
997 return false;
999 else
1001 /* Recognize the following case, assuming d <= u:
1003 if (a <= u)
1004 b = MAX (a, d);
1005 x = PHI <b, u>
1007 This is equivalent to
1009 b = MAX (a, d);
1010 x = MIN (b, u); */
1012 gimple assign = last_and_only_stmt (middle_bb);
1013 tree lhs, op0, op1, bound;
1015 if (!assign
1016 || gimple_code (assign) != GIMPLE_ASSIGN)
1017 return false;
1019 lhs = gimple_assign_lhs (assign);
1020 ass_code = gimple_assign_rhs_code (assign);
1021 if (ass_code != MAX_EXPR && ass_code != MIN_EXPR)
1022 return false;
1023 op0 = gimple_assign_rhs1 (assign);
1024 op1 = gimple_assign_rhs2 (assign);
1026 if (true_edge->src == middle_bb)
1028 /* We got here if the condition is true, i.e., SMALLER < LARGER. */
1029 if (!operand_equal_for_phi_arg_p (lhs, arg_true))
1030 return false;
1032 if (operand_equal_for_phi_arg_p (arg_false, larger))
1034 /* Case
1036 if (smaller < larger)
1038 r' = MAX_EXPR (smaller, bound)
1040 r = PHI <r', larger> --> to be turned to MIN_EXPR. */
1041 if (ass_code != MAX_EXPR)
1042 return false;
1044 minmax = MIN_EXPR;
1045 if (operand_equal_for_phi_arg_p (op0, smaller))
1046 bound = op1;
1047 else if (operand_equal_for_phi_arg_p (op1, smaller))
1048 bound = op0;
1049 else
1050 return false;
1052 /* We need BOUND <= LARGER. */
1053 if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
1054 bound, larger)))
1055 return false;
1057 else if (operand_equal_for_phi_arg_p (arg_false, smaller))
1059 /* Case
1061 if (smaller < larger)
1063 r' = MIN_EXPR (larger, bound)
1065 r = PHI <r', smaller> --> to be turned to MAX_EXPR. */
1066 if (ass_code != MIN_EXPR)
1067 return false;
1069 minmax = MAX_EXPR;
1070 if (operand_equal_for_phi_arg_p (op0, larger))
1071 bound = op1;
1072 else if (operand_equal_for_phi_arg_p (op1, larger))
1073 bound = op0;
1074 else
1075 return false;
1077 /* We need BOUND >= SMALLER. */
1078 if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
1079 bound, smaller)))
1080 return false;
1082 else
1083 return false;
1085 else
1087 /* We got here if the condition is false, i.e., SMALLER > LARGER. */
1088 if (!operand_equal_for_phi_arg_p (lhs, arg_false))
1089 return false;
1091 if (operand_equal_for_phi_arg_p (arg_true, larger))
1093 /* Case
1095 if (smaller > larger)
1097 r' = MIN_EXPR (smaller, bound)
1099 r = PHI <r', larger> --> to be turned to MAX_EXPR. */
1100 if (ass_code != MIN_EXPR)
1101 return false;
1103 minmax = MAX_EXPR;
1104 if (operand_equal_for_phi_arg_p (op0, smaller))
1105 bound = op1;
1106 else if (operand_equal_for_phi_arg_p (op1, smaller))
1107 bound = op0;
1108 else
1109 return false;
1111 /* We need BOUND >= LARGER. */
1112 if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
1113 bound, larger)))
1114 return false;
1116 else if (operand_equal_for_phi_arg_p (arg_true, smaller))
1118 /* Case
1120 if (smaller > larger)
1122 r' = MAX_EXPR (larger, bound)
1124 r = PHI <r', smaller> --> to be turned to MIN_EXPR. */
1125 if (ass_code != MAX_EXPR)
1126 return false;
1128 minmax = MIN_EXPR;
1129 if (operand_equal_for_phi_arg_p (op0, larger))
1130 bound = op1;
1131 else if (operand_equal_for_phi_arg_p (op1, larger))
1132 bound = op0;
1133 else
1134 return false;
1136 /* We need BOUND <= SMALLER. */
1137 if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
1138 bound, smaller)))
1139 return false;
1141 else
1142 return false;
1145 /* Move the statement from the middle block. */
1146 gsi = gsi_last_bb (cond_bb);
1147 gsi_from = gsi_last_nondebug_bb (middle_bb);
1148 gsi_move_before (&gsi_from, &gsi);
1151 /* Emit the statement to compute min/max. */
1152 result = duplicate_ssa_name (PHI_RESULT (phi), NULL);
1153 new_stmt = gimple_build_assign (result, minmax, arg0, arg1);
1154 gsi = gsi_last_bb (cond_bb);
1155 gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
1157 replace_phi_edge_with_variable (cond_bb, e1, phi, result);
1158 return true;
1161 /* The function abs_replacement does the main work of doing the absolute
1162 replacement. Return true if the replacement is done. Otherwise return
1163 false.
1164 bb is the basic block where the replacement is going to be done. arg0
1165 is argument 0 from the phi. Likewise for arg1. */
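/* Source-level sketch (invented names):

     x = (a < 0) ? -a : a;    -->   x = ABS_EXPR <a>;
     x = (a > 0) ? -a : a;    -->   x = -ABS_EXPR <a>;

   The second form is the "negate" case below, where a NEGATE_EXPR is
   applied on top of the ABS_EXPR result.  */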
1167 static bool
1168 abs_replacement (basic_block cond_bb, basic_block middle_bb,
1169 edge e0 ATTRIBUTE_UNUSED, edge e1,
1170 gimple phi, tree arg0, tree arg1)
1172 tree result;
1173 gassign *new_stmt;
1174 gimple cond;
1175 gimple_stmt_iterator gsi;
1176 edge true_edge, false_edge;
1177 gimple assign;
1178 edge e;
1179 tree rhs, lhs;
1180 bool negate;
1181 enum tree_code cond_code;
1183 /* If the type says honor signed zeros we cannot do this
1184 optimization. */
1185 if (HONOR_SIGNED_ZEROS (arg1))
1186 return false;
1188 /* OTHER_BLOCK must have only one executable statement which must have the
1189 form arg0 = -arg1 or arg1 = -arg0. */
1191 assign = last_and_only_stmt (middle_bb);
1192 /* If we did not find the proper negation assignment, then we can not
1193 optimize. */
1194 if (assign == NULL)
1195 return false;
1197 /* If we got here, then we have found the only executable statement
1198 in OTHER_BLOCK. If it is anything other than arg0 = -arg1 or
1199 arg1 = -arg0, then we can not optimize. */
1200 if (gimple_code (assign) != GIMPLE_ASSIGN)
1201 return false;
1203 lhs = gimple_assign_lhs (assign);
1205 if (gimple_assign_rhs_code (assign) != NEGATE_EXPR)
1206 return false;
1208 rhs = gimple_assign_rhs1 (assign);
1210 /* The assignment has to be arg0 = -arg1 or arg1 = -arg0. */
1211 if (!(lhs == arg0 && rhs == arg1)
1212 && !(lhs == arg1 && rhs == arg0))
1213 return false;
1215 cond = last_stmt (cond_bb);
1216 result = PHI_RESULT (phi);
1218 /* Only relationals comparing arg[01] against zero are interesting. */
1219 cond_code = gimple_cond_code (cond);
1220 if (cond_code != GT_EXPR && cond_code != GE_EXPR
1221 && cond_code != LT_EXPR && cond_code != LE_EXPR)
1222 return false;
1224 /* Make sure the conditional is arg[01] OP y. */
1225 if (gimple_cond_lhs (cond) != rhs)
1226 return false;
1228 if (FLOAT_TYPE_P (TREE_TYPE (gimple_cond_rhs (cond)))
1229 ? real_zerop (gimple_cond_rhs (cond))
1230 : integer_zerop (gimple_cond_rhs (cond)))
1232 else
1233 return false;
1235 /* We need to know which is the true edge and which is the false
1236 edge so that we know if we have abs or negative abs. */
1237 extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
1239 /* For GT_EXPR/GE_EXPR, if the true edge goes to OTHER_BLOCK, then we
1240 will need to negate the result. Similarly for LT_EXPR/LE_EXPR if
1241 the false edge goes to OTHER_BLOCK. */
1242 if (cond_code == GT_EXPR || cond_code == GE_EXPR)
1243 e = true_edge;
1244 else
1245 e = false_edge;
1247 if (e->dest == middle_bb)
1248 negate = true;
1249 else
1250 negate = false;
1252 result = duplicate_ssa_name (result, NULL);
1254 if (negate)
1255 lhs = make_ssa_name (TREE_TYPE (result));
1256 else
1257 lhs = result;
1259 /* Build the modify expression with abs expression. */
1260 new_stmt = gimple_build_assign (lhs, ABS_EXPR, rhs);
1262 gsi = gsi_last_bb (cond_bb);
1263 gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
1265 if (negate)
1267 /* Get the right GSI. We want to insert after the recently
1268 added ABS_EXPR statement (which we know is the first statement
1269 in the block). */
1270 new_stmt = gimple_build_assign (result, NEGATE_EXPR, lhs);
1272 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
1275 replace_phi_edge_with_variable (cond_bb, e1, phi, result);
1277 /* Note that we optimized this PHI. */
1278 return true;
1281 /* The function neg_replacement replaces conditional negation with
1282 equivalent straight line code. Returns TRUE if replacement is done,
1283 otherwise returns FALSE.
1285 COND_BB branches around negation occurring in MIDDLE_BB.
1287 E0 and E1 are edges out of COND_BB. E0 reaches MIDDLE_BB and
1288 E1 reaches the other successor which should contain a PHI with
1289 arguments ARG0 and ARG1.
1291 Assuming negation is to occur when the condition is true,
1292 then the non-branching sequence is:
1294 result = (rhs ^ -cond) + cond
1296 Inverting the condition or its result gives us negation
1297 when the original condition is false. */
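/* A quick check of the identity used, with COND being 0 or 1 in the type
   of RHS:

     cond == 1:  (rhs ^ -1) + 1  ==  ~rhs + 1  ==  -rhs
     cond == 0:  (rhs ^  0) + 0  ==   rhs

   so "(rhs ^ -cond) + cond" is -rhs exactly when the (possibly inverted)
   condition holds and rhs otherwise, relying on the two's-complement
   identity ~x + 1 == -x.  */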
1299 static bool
1300 neg_replacement (basic_block cond_bb, basic_block middle_bb,
1301 edge e0 ATTRIBUTE_UNUSED, edge e1,
1302 gimple phi, tree arg0, tree arg1)
1304 gimple new_stmt, cond;
1305 gimple_stmt_iterator gsi;
1306 gimple assign;
1307 edge true_edge, false_edge;
1308 tree rhs, lhs;
1309 enum tree_code cond_code;
1310 bool invert = false;
1312 /* This transformation performs logical operations on the
1313 incoming arguments. So force them to be integral types. */
1314 if (!INTEGRAL_TYPE_P (TREE_TYPE (arg0)))
1315 return false;
1317 /* OTHER_BLOCK must have only one executable statement which must have the
1318 form arg0 = -arg1 or arg1 = -arg0. */
1320 assign = last_and_only_stmt (middle_bb);
1321 /* If we did not find the proper negation assignment, then we can not
1322 optimize. */
1323 if (assign == NULL)
1324 return false;
1326 /* If we got here, then we have found the only executable statement
1327 in OTHER_BLOCK. If it is anything other than arg0 = -arg1 or
1328 arg1 = -arg0, then we can not optimize. */
1329 if (gimple_code (assign) != GIMPLE_ASSIGN)
1330 return false;
1332 lhs = gimple_assign_lhs (assign);
1334 if (gimple_assign_rhs_code (assign) != NEGATE_EXPR)
1335 return false;
1337 rhs = gimple_assign_rhs1 (assign);
1339 /* The assignment has to be arg0 = -arg1 or arg1 = -arg0. */
1340 if (!(lhs == arg0 && rhs == arg1)
1341 && !(lhs == arg1 && rhs == arg0))
1342 return false;
1344 /* The basic sequence assumes we negate when the condition is true.
1345 If we need the opposite, then we will either need to invert the
1346 condition or its result. */
1347 extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
1348 invert = false_edge->dest == middle_bb;
1350 /* Unlike abs_replacement, we can handle arbitrary conditionals here. */
1351 cond = last_stmt (cond_bb);
1352 cond_code = gimple_cond_code (cond);
1354 /* If inversion is needed, first try to invert the test since
1355 that's cheapest. */
1356 if (invert)
1358 bool honor_nans = HONOR_NANS (gimple_cond_lhs (cond));
1359 enum tree_code new_code = invert_tree_comparison (cond_code, honor_nans);
1361 /* If invert_tree_comparison was successful, then use its return
1362 value as the new code and note that inversion is no longer
1363 needed. */
1364 if (new_code != ERROR_MARK)
1366 cond_code = new_code;
1367 invert = false;
1371 tree cond_val = make_ssa_name (boolean_type_node);
1372 new_stmt = gimple_build_assign (cond_val, cond_code,
1373 gimple_cond_lhs (cond),
1374 gimple_cond_rhs (cond));
1375 gsi = gsi_last_bb (cond_bb);
1376 gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
1378 /* If we still need inversion, then invert the result of the
1379 condition. */
1380 if (invert)
1382 tree tmp = make_ssa_name (boolean_type_node);
1383 new_stmt = gimple_build_assign (tmp, BIT_XOR_EXPR, cond_val,
1384 boolean_true_node);
1385 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
1386 cond_val = tmp;
1389 /* Get the condition in the right type so that we can perform
1390 logical and arithmetic operations on it. */
1391 tree cond_val_converted = make_ssa_name (TREE_TYPE (rhs));
1392 new_stmt = gimple_build_assign (cond_val_converted, NOP_EXPR, cond_val);
1393 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
1395 tree neg_cond_val_converted = make_ssa_name (TREE_TYPE (rhs));
1396 new_stmt = gimple_build_assign (neg_cond_val_converted, NEGATE_EXPR,
1397 cond_val_converted);
1398 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
1400 tree tmp = make_ssa_name (TREE_TYPE (rhs));
1401 new_stmt = gimple_build_assign (tmp, BIT_XOR_EXPR, rhs,
1402 neg_cond_val_converted);
1403 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
1405 tree new_lhs = make_ssa_name (TREE_TYPE (rhs));
1406 new_stmt = gimple_build_assign (new_lhs, PLUS_EXPR, tmp, cond_val_converted);
1407 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
1409 replace_phi_edge_with_variable (cond_bb, e1, phi, new_lhs);
1411 /* Note that we optimized this PHI. */
1412 return true;
1415 /* Auxiliary functions to determine the set of memory accesses which
1416 can't trap because they are preceded by accesses to the same memory
1417 portion. We do that for MEM_REFs, so we only need to track
1418 the SSA_NAME of the pointer indirectly referenced. The algorithm
1419 simply is a walk over all instructions in dominator order. When
1420 we see an MEM_REF we determine if we've already seen a same
1421 ref anywhere up to the root of the dominator tree. If we do the
1422 current access can't trap. If we don't see any dominating access
1423 the current access might trap, but might also make later accesses
1424 non-trapping, so we remember it. We need to be careful with loads
1425 or stores, for instance a load might not trap, while a store would,
1426 so if we see a dominating read access this doesn't mean that a later
1427 write access would not trap. Hence we also need to differentiate the
1428 type of access(es) seen.
1430 ??? We currently are very conservative and assume that a load might
1431 trap even if a store doesn't (write-only memory). This probably is
1432 overly conservative. */
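/* A small source-level example of the idea (hypothetical code):

     *p = 0;          <- may trap; remember it in the hash table
     if (cond)
       *p = x;        <- dominated by the store above, so it cannot trap

   whereas a dominating *load* from *p would not allow the conclusion
   that a later *store* through p is safe, hence the distinction between
   read and write accesses mentioned above.  */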
1434 /* A hash-table of SSA_NAMEs, and in which basic block an MEM_REF
1435 through it was seen, which would constitute a no-trap region for
1436 same accesses. */
1437 struct name_to_bb
1439 unsigned int ssa_name_ver;
1440 unsigned int phase;
1441 bool store;
1442 HOST_WIDE_INT offset, size;
1443 basic_block bb;
1446 /* Hashtable helpers. */
1448 struct ssa_names_hasher : typed_free_remove <name_to_bb>
1450 typedef name_to_bb value_type;
1451 typedef name_to_bb compare_type;
1452 static inline hashval_t hash (const value_type *);
1453 static inline bool equal (const value_type *, const compare_type *);
1456 /* Used for quick clearing of the hash-table when we see calls.
1457 Hash entries with phase < nt_call_phase are invalid. */
1458 static unsigned int nt_call_phase;
1460 /* The hash function. */
1462 inline hashval_t
1463 ssa_names_hasher::hash (const value_type *n)
1465 return n->ssa_name_ver ^ (((hashval_t) n->store) << 31)
1466 ^ (n->offset << 6) ^ (n->size << 3);
1469 /* The equality function of *P1 and *P2. */
1471 inline bool
1472 ssa_names_hasher::equal (const value_type *n1, const compare_type *n2)
1474 return n1->ssa_name_ver == n2->ssa_name_ver
1475 && n1->store == n2->store
1476 && n1->offset == n2->offset
1477 && n1->size == n2->size;
1480 class nontrapping_dom_walker : public dom_walker
1482 public:
1483 nontrapping_dom_walker (cdi_direction direction, hash_set<tree> *ps)
1484 : dom_walker (direction), m_nontrapping (ps), m_seen_ssa_names (128) {}
1486 virtual void before_dom_children (basic_block);
1487 virtual void after_dom_children (basic_block);
1489 private:
1491 /* We see the expression EXP in basic block BB. If it's an interesting
1492 expression (an MEM_REF through an SSA_NAME) possibly insert the
1493 expression into the set NONTRAP or the hash table of seen expressions.
1494 STORE is true if this expression is on the LHS, otherwise it's on
1495 the RHS. */
1496 void add_or_mark_expr (basic_block, tree, bool);
1498 hash_set<tree> *m_nontrapping;
1500 /* The hash table for remembering what we've seen. */
1501 hash_table<ssa_names_hasher> m_seen_ssa_names;
1504 /* Called by walk_dominator_tree, when entering the block BB. */
1505 void
1506 nontrapping_dom_walker::before_dom_children (basic_block bb)
1508 edge e;
1509 edge_iterator ei;
1510 gimple_stmt_iterator gsi;
1512 /* If we haven't seen all our predecessors, clear the hash-table. */
1513 FOR_EACH_EDGE (e, ei, bb->preds)
1514 if ((((size_t)e->src->aux) & 2) == 0)
1516 nt_call_phase++;
1517 break;
1520 /* Mark this BB as being on the path to dominator root and as visited. */
1521 bb->aux = (void*)(1 | 2);
1523 /* And walk the statements in order. */
1524 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1526 gimple stmt = gsi_stmt (gsi);
1528 if (is_gimple_call (stmt) && !nonfreeing_call_p (stmt))
1529 nt_call_phase++;
1530 else if (gimple_assign_single_p (stmt) && !gimple_has_volatile_ops (stmt))
1532 add_or_mark_expr (bb, gimple_assign_lhs (stmt), true);
1533 add_or_mark_expr (bb, gimple_assign_rhs1 (stmt), false);
1538 /* Called by walk_dominator_tree, when basic block BB is exited. */
1539 void
1540 nontrapping_dom_walker::after_dom_children (basic_block bb)
1542 /* This BB isn't on the path to dominator root anymore. */
1543 bb->aux = (void*)2;
1546 /* We see the expression EXP in basic block BB. If it's an interesting
1547 expression (an MEM_REF through an SSA_NAME) possibly insert the
1548 expression into the set NONTRAP or the hash table of seen expressions.
1549 STORE is true if this expression is on the LHS, otherwise it's on
1550 the RHS. */
1551 void
1552 nontrapping_dom_walker::add_or_mark_expr (basic_block bb, tree exp, bool store)
1554 HOST_WIDE_INT size;
1556 if (TREE_CODE (exp) == MEM_REF
1557 && TREE_CODE (TREE_OPERAND (exp, 0)) == SSA_NAME
1558 && tree_fits_shwi_p (TREE_OPERAND (exp, 1))
1559 && (size = int_size_in_bytes (TREE_TYPE (exp))) > 0)
1561 tree name = TREE_OPERAND (exp, 0);
1562 struct name_to_bb map;
1563 name_to_bb **slot;
1564 struct name_to_bb *n2bb;
1565 basic_block found_bb = 0;
1567 /* Try to find the last seen MEM_REF through the same
1568 SSA_NAME, which can trap. */
1569 map.ssa_name_ver = SSA_NAME_VERSION (name);
1570 map.phase = 0;
1571 map.bb = 0;
1572 map.store = store;
1573 map.offset = tree_to_shwi (TREE_OPERAND (exp, 1));
1574 map.size = size;
1576 slot = m_seen_ssa_names.find_slot (&map, INSERT);
1577 n2bb = *slot;
1578 if (n2bb && n2bb->phase >= nt_call_phase)
1579 found_bb = n2bb->bb;
1581 /* If we've found a prior access to the same memory _and_ it dominates EXP
1582 (it's in a basic block on the path from us to the dominator root),
1583 then EXP can't trap. */
1584 if (found_bb && (((size_t)found_bb->aux) & 1) == 1)
1586 m_nontrapping->add (exp);
1588 else
1590 /* EXP might trap, so insert it into the hash table. */
1591 if (n2bb)
1593 n2bb->phase = nt_call_phase;
1594 n2bb->bb = bb;
1596 else
1598 n2bb = XNEW (struct name_to_bb);
1599 n2bb->ssa_name_ver = SSA_NAME_VERSION (name);
1600 n2bb->phase = nt_call_phase;
1601 n2bb->bb = bb;
1602 n2bb->store = store;
1603 n2bb->offset = map.offset;
1604 n2bb->size = size;
1605 *slot = n2bb;
1611 /* This is the entry point of gathering non-trapping memory accesses.
1612 It will do a dominator walk over the whole function, and it will
1613 make use of the bb->aux pointers. It returns a set of trees
1614 (the MEM_REFs themselves) which can't trap. */
1615 static hash_set<tree> *
1616 get_non_trapping (void)
1618 nt_call_phase = 0;
1619 hash_set<tree> *nontrap = new hash_set<tree>;
1620 /* We're going to do a dominator walk, so ensure that we have
1621 dominance information. */
1622 calculate_dominance_info (CDI_DOMINATORS);
1624 nontrapping_dom_walker (CDI_DOMINATORS, nontrap)
1625 .walk (cfun->cfg->x_entry_block_ptr);
1627 clear_aux_for_blocks ();
1628 return nontrap;
1631 /* Do the main work of conditional store replacement. We already know
1632 that the recognized pattern looks like so:
1634 split:
1635 if (cond) goto MIDDLE_BB; else goto JOIN_BB (edge E1)
1636 MIDDLE_BB:
1637 something
1638 fallthrough (edge E0)
1639 JOIN_BB:
1640 some more
1642 We check that MIDDLE_BB contains only one store, that that store
1643 doesn't trap (not via NOTRAP, but via checking if an access to the same
1644 memory location dominates us) and that the store has a "simple" RHS. */
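/* Sketch of the rewrite performed below, with invented SSA names:

     if (cond) goto MIDDLE_BB; else goto JOIN_BB (edge E1)
     MIDDLE_BB:
       *p_1 = val_2;
       fallthrough (edge E0)
     JOIN_BB:

   becomes

     if (cond) goto MIDDLE_BB; else goto JOIN_BB (edge E1)
       [ cstore_3 = *p_1;   inserted on edge E1 ]
     MIDDLE_BB:
       fallthrough (edge E0)
     JOIN_BB:
       cstore_4 = PHI <val_2 (E0), cstore_3 (E1)>
       *p_1 = cstore_4;

   and is only done when the LHS is in the NONTRAP set computed by
   get_non_trapping, i.e. a dominating access proves *p_1 cannot trap.  */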
1646 static bool
1647 cond_store_replacement (basic_block middle_bb, basic_block join_bb,
1648 edge e0, edge e1, hash_set<tree> *nontrap)
1650 gimple assign = last_and_only_stmt (middle_bb);
1651 tree lhs, rhs, name, name2;
1652 gphi *newphi;
1653 gassign *new_stmt;
1654 gimple_stmt_iterator gsi;
1655 source_location locus;
1657 /* Check if middle_bb contains only one store. */
1658 if (!assign
1659 || !gimple_assign_single_p (assign)
1660 || gimple_has_volatile_ops (assign))
1661 return false;
1663 locus = gimple_location (assign);
1664 lhs = gimple_assign_lhs (assign);
1665 rhs = gimple_assign_rhs1 (assign);
1666 if (TREE_CODE (lhs) != MEM_REF
1667 || TREE_CODE (TREE_OPERAND (lhs, 0)) != SSA_NAME
1668 || !is_gimple_reg_type (TREE_TYPE (lhs)))
1669 return false;
1671 /* Prove that we can move the store down. We could also check
1672 TREE_THIS_NOTRAP here, but in that case we could also move stores
1673 whose value is not readily available, which we want to avoid. */
1674 if (!nontrap->contains (lhs))
1675 return false;
1677 /* Now we've checked the constraints, so do the transformation:
1678 1) Remove the single store. */
1679 gsi = gsi_for_stmt (assign);
1680 unlink_stmt_vdef (assign);
1681 gsi_remove (&gsi, true);
1682 release_defs (assign);
1684 /* 2) Insert a load from the memory of the store to the temporary
1685 on the edge which did not contain the store. */
1686 lhs = unshare_expr (lhs);
1687 name = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
1688 new_stmt = gimple_build_assign (name, lhs);
1689 gimple_set_location (new_stmt, locus);
1690 gsi_insert_on_edge (e1, new_stmt);
1692 /* 3) Create a PHI node at the join block, with one argument
1693 holding the old RHS, and the other holding the temporary
1694 where we stored the old memory contents. */
1695 name2 = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
1696 newphi = create_phi_node (name2, join_bb);
1697 add_phi_arg (newphi, rhs, e0, locus);
1698 add_phi_arg (newphi, name, e1, locus);
1700 lhs = unshare_expr (lhs);
1701 new_stmt = gimple_build_assign (lhs, PHI_RESULT (newphi));
1703 /* 4) Insert that PHI node. */
1704 gsi = gsi_after_labels (join_bb);
1705 if (gsi_end_p (gsi))
1707 gsi = gsi_last_bb (join_bb);
1708 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
1710 else
1711 gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
1713 return true;
1716 /* Do the main work of conditional store replacement. */
1718 static bool
1719 cond_if_else_store_replacement_1 (basic_block then_bb, basic_block else_bb,
1720 basic_block join_bb, gimple then_assign,
1721 gimple else_assign)
1723 tree lhs_base, lhs, then_rhs, else_rhs, name;
1724 source_location then_locus, else_locus;
1725 gimple_stmt_iterator gsi;
1726 gphi *newphi;
1727 gassign *new_stmt;
1729 if (then_assign == NULL
1730 || !gimple_assign_single_p (then_assign)
1731 || gimple_clobber_p (then_assign)
1732 || gimple_has_volatile_ops (then_assign)
1733 || else_assign == NULL
1734 || !gimple_assign_single_p (else_assign)
1735 || gimple_clobber_p (else_assign)
1736 || gimple_has_volatile_ops (else_assign))
1737 return false;
1739 lhs = gimple_assign_lhs (then_assign);
1740 if (!is_gimple_reg_type (TREE_TYPE (lhs))
1741 || !operand_equal_p (lhs, gimple_assign_lhs (else_assign), 0))
1742 return false;
1744 lhs_base = get_base_address (lhs);
1745 if (lhs_base == NULL_TREE
1746 || (!DECL_P (lhs_base) && TREE_CODE (lhs_base) != MEM_REF))
1747 return false;
1749 then_rhs = gimple_assign_rhs1 (then_assign);
1750 else_rhs = gimple_assign_rhs1 (else_assign);
1751 then_locus = gimple_location (then_assign);
1752 else_locus = gimple_location (else_assign);
1754 /* Now we've checked the constraints, so do the transformation:
1755 1) Remove the stores. */
1756 gsi = gsi_for_stmt (then_assign);
1757 unlink_stmt_vdef (then_assign);
1758 gsi_remove (&gsi, true);
1759 release_defs (then_assign);
1761 gsi = gsi_for_stmt (else_assign);
1762 unlink_stmt_vdef (else_assign);
1763 gsi_remove (&gsi, true);
1764 release_defs (else_assign);
1766 /* 2) Create a PHI node at the join block, with one argument
1767 holding the RHS of the store from THEN_BB and the other holding
1768 the RHS of the store from ELSE_BB. */
1769 name = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
1770 newphi = create_phi_node (name, join_bb);
1771 add_phi_arg (newphi, then_rhs, EDGE_SUCC (then_bb, 0), then_locus);
1772 add_phi_arg (newphi, else_rhs, EDGE_SUCC (else_bb, 0), else_locus);
1774 new_stmt = gimple_build_assign (lhs, PHI_RESULT (newphi));
1776 /* 3) Insert that PHI node. */
1777 gsi = gsi_after_labels (join_bb);
1778 if (gsi_end_p (gsi))
1780 gsi = gsi_last_bb (join_bb);
1781 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
1783 else
1784 gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
1786 return true;
1789 /* Conditional store replacement. We already know
1790 that the recognized pattern looks like so:
1792 split:
1793 if (cond) goto THEN_BB; else goto ELSE_BB (edge E1)
1794 THEN_BB:
1796 X = Y;
1798 goto JOIN_BB;
1799 ELSE_BB:
1801 X = Z;
1803 fallthrough (edge E0)
1804 JOIN_BB:
1805 some more
1807 We check that it is safe to sink the store to JOIN_BB by verifying that
1808 there are no read-after-write or write-after-write dependencies in
1809 THEN_BB and ELSE_BB. */
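/* Source-level sketch of this diamond case (made-up names):

     if (cond)                      if (cond)
       *p = a;           -->          tmp = a;
     else                           else
       *p = b;                        tmp = b;
                                    *p = tmp;

   Both paths already store through the same location, so sinking the
   store to JOIN_BB needs no non-trapping analysis, only the dependence
   checks described above.  */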
1811 static bool
1812 cond_if_else_store_replacement (basic_block then_bb, basic_block else_bb,
1813 basic_block join_bb)
1815 gimple then_assign = last_and_only_stmt (then_bb);
1816 gimple else_assign = last_and_only_stmt (else_bb);
1817 vec<data_reference_p> then_datarefs, else_datarefs;
1818 vec<ddr_p> then_ddrs, else_ddrs;
1819 gimple then_store, else_store;
1820 bool found, ok = false, res;
1821 struct data_dependence_relation *ddr;
1822 data_reference_p then_dr, else_dr;
1823 int i, j;
1824 tree then_lhs, else_lhs;
1825 basic_block blocks[3];
1827 if (MAX_STORES_TO_SINK == 0)
1828 return false;
1830 /* Handle the case with a single statement in THEN_BB and ELSE_BB. */
1831 if (then_assign && else_assign)
1832 return cond_if_else_store_replacement_1 (then_bb, else_bb, join_bb,
1833 then_assign, else_assign);
1835 /* Find data references. */
1836 then_datarefs.create (1);
1837 else_datarefs.create (1);
1838 if ((find_data_references_in_bb (NULL, then_bb, &then_datarefs)
1839 == chrec_dont_know)
1840 || !then_datarefs.length ()
1841 || (find_data_references_in_bb (NULL, else_bb, &else_datarefs)
1842 == chrec_dont_know)
1843 || !else_datarefs.length ())
1845 free_data_refs (then_datarefs);
1846 free_data_refs (else_datarefs);
1847 return false;
1850 /* Find pairs of stores with equal LHS. */
1851 auto_vec<gimple, 1> then_stores, else_stores;
1852 FOR_EACH_VEC_ELT (then_datarefs, i, then_dr)
1854 if (DR_IS_READ (then_dr))
1855 continue;
1857 then_store = DR_STMT (then_dr);
1858 then_lhs = gimple_get_lhs (then_store);
1859 if (then_lhs == NULL_TREE)
1860 continue;
1861 found = false;
1863 FOR_EACH_VEC_ELT (else_datarefs, j, else_dr)
1865 if (DR_IS_READ (else_dr))
1866 continue;
1868 else_store = DR_STMT (else_dr);
1869 else_lhs = gimple_get_lhs (else_store);
1870 if (else_lhs == NULL_TREE)
1871 continue;
1873 if (operand_equal_p (then_lhs, else_lhs, 0))
1875 found = true;
1876 break;
1880 if (!found)
1881 continue;
1883 then_stores.safe_push (then_store);
1884 else_stores.safe_push (else_store);
1887 /* No pairs of stores found, or more of them than MAX_STORES_TO_SINK. */
1888 if (!then_stores.length ()
1889 || then_stores.length () > (unsigned) MAX_STORES_TO_SINK)
1891 free_data_refs (then_datarefs);
1892 free_data_refs (else_datarefs);
1893 return false;
1896 /* Compute and check data dependencies in both basic blocks. */
1897 then_ddrs.create (1);
1898 else_ddrs.create (1);
1899 if (!compute_all_dependences (then_datarefs, &then_ddrs,
1900 vNULL, false)
1901 || !compute_all_dependences (else_datarefs, &else_ddrs,
1902 vNULL, false))
1904 free_dependence_relations (then_ddrs);
1905 free_dependence_relations (else_ddrs);
1906 free_data_refs (then_datarefs);
1907 free_data_refs (else_datarefs);
1908 return false;
1910 blocks[0] = then_bb;
1911 blocks[1] = else_bb;
1912 blocks[2] = join_bb;
1913 renumber_gimple_stmt_uids_in_blocks (blocks, 3);
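/* The renumbering above assigns statement UIDs in execution order, so in
   the checks below a data reference with the larger UID occurs later in
   its block.  For instance, a THEN_BB of the (hypothetical) form

     *p = 1;
     tem = *q;

   cannot be handled when p and q may alias: sinking the store to *p down
   to JOIN_BB would move it past the load from *q, breaking the
   read-after-write ordering.  */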
1915 /* Check that there are no read-after-write or write-after-write dependencies
1916 in THEN_BB. */
1917 FOR_EACH_VEC_ELT (then_ddrs, i, ddr)
1919 struct data_reference *dra = DDR_A (ddr);
1920 struct data_reference *drb = DDR_B (ddr);
1922 if (DDR_ARE_DEPENDENT (ddr) != chrec_known
1923 && ((DR_IS_READ (dra) && DR_IS_WRITE (drb)
1924 && gimple_uid (DR_STMT (dra)) > gimple_uid (DR_STMT (drb)))
1925 || (DR_IS_READ (drb) && DR_IS_WRITE (dra)
1926 && gimple_uid (DR_STMT (drb)) > gimple_uid (DR_STMT (dra)))
1927 || (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))))
1929 free_dependence_relations (then_ddrs);
1930 free_dependence_relations (else_ddrs);
1931 free_data_refs (then_datarefs);
1932 free_data_refs (else_datarefs);
1933 return false;
1937 /* Check that there are no read-after-write or write-after-write dependencies
1938 in ELSE_BB. */
1939 FOR_EACH_VEC_ELT (else_ddrs, i, ddr)
1941 struct data_reference *dra = DDR_A (ddr);
1942 struct data_reference *drb = DDR_B (ddr);
1944 if (DDR_ARE_DEPENDENT (ddr) != chrec_known
1945 && ((DR_IS_READ (dra) && DR_IS_WRITE (drb)
1946 && gimple_uid (DR_STMT (dra)) > gimple_uid (DR_STMT (drb)))
1947 || (DR_IS_READ (drb) && DR_IS_WRITE (dra)
1948 && gimple_uid (DR_STMT (drb)) > gimple_uid (DR_STMT (dra)))
1949 || (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))))
1951 free_dependence_relations (then_ddrs);
1952 free_dependence_relations (else_ddrs);
1953 free_data_refs (then_datarefs);
1954 free_data_refs (else_datarefs);
1955 return false;
1959 /* Sink the paired stores with the same LHS. */
1960 FOR_EACH_VEC_ELT (then_stores, i, then_store)
1962 else_store = else_stores[i];
1963 res = cond_if_else_store_replacement_1 (then_bb, else_bb, join_bb,
1964 then_store, else_store);
1965 ok = ok || res;
1968 free_dependence_relations (then_ddrs);
1969 free_dependence_relations (else_ddrs);
1970 free_data_refs (then_datarefs);
1971 free_data_refs (else_datarefs);
1973 return ok;
1976 /* Return TRUE if STMT has a VUSE whose corresponding VDEF is in BB. */
1978 static bool
1979 local_mem_dependence (gimple stmt, basic_block bb)
1981 tree vuse = gimple_vuse (stmt);
1982 gimple def;
1984 if (!vuse)
1985 return false;
1987 def = SSA_NAME_DEF_STMT (vuse);
1988 return (def && gimple_bb (def) == bb);
1991 /* Given a "diamond" control-flow pattern where BB0 tests a condition,
1992 BB1 and BB2 are "then" and "else" blocks dependent on this test,
1993 and BB3 rejoins control flow following BB1 and BB2, look for
1994 opportunities to hoist loads as follows. If BB3 contains a PHI of
1995 two loads, one each occurring in BB1 and BB2, and the loads are
1996 provably of adjacent fields in the same structure, then move both
1997 loads into BB0. Of course this can only be done if there are no
1998 dependencies preventing such motion.
2000 One of the hoisted loads will always be speculative, so the
2001 transformation is currently conservative:
2003 - The fields must be strictly adjacent.
2004 - The two fields must occupy a single memory block that is
2005 guaranteed to not cross a page boundary.
2007 The latter is difficult to prove, as such memory blocks should be
2008 aligned on the minimum of the stack alignment boundary and the
2009 alignment guaranteed by heap allocation interfaces. Thus we rely
2010 on a parameter for the alignment value.
2012 Provided a good value is used for the latter case, the former
2013 restriction could possibly be relaxed. */
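/* Illustrative example (hypothetical types, not part of the pass):

     struct node { struct node *left; struct node *right; };

     x = c ? n->left : n->right;

   'left' and 'right' are adjacent fields, so when both fit within one
   suitably aligned block the two loads can be hoisted above the branch
   and the result selected with a conditional move.  */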
2015 static void
2016 hoist_adjacent_loads (basic_block bb0, basic_block bb1,
2017 basic_block bb2, basic_block bb3)
2019 int param_align = PARAM_VALUE (PARAM_L1_CACHE_LINE_SIZE);
2020 unsigned param_align_bits = (unsigned) (param_align * BITS_PER_UNIT);
2021 gphi_iterator gsi;
2023 /* Walk the phis in bb3 looking for an opportunity. We are looking
2024 for phis of two SSA names, one each of which is defined in bb1 and
2025 bb2. */
2026 for (gsi = gsi_start_phis (bb3); !gsi_end_p (gsi); gsi_next (&gsi))
2028 gphi *phi_stmt = gsi.phi ();
2029 gimple def1, def2, defswap;
2030 tree arg1, arg2, ref1, ref2, field1, field2, fieldswap;
2031 tree tree_offset1, tree_offset2, tree_size2, next;
2032 int offset1, offset2, size2;
2033 unsigned align1;
2034 gimple_stmt_iterator gsi2;
2035 basic_block bb_for_def1, bb_for_def2;
2037 if (gimple_phi_num_args (phi_stmt) != 2
2038 || virtual_operand_p (gimple_phi_result (phi_stmt)))
2039 continue;
2041 arg1 = gimple_phi_arg_def (phi_stmt, 0);
2042 arg2 = gimple_phi_arg_def (phi_stmt, 1);
2044 if (TREE_CODE (arg1) != SSA_NAME
2045 || TREE_CODE (arg2) != SSA_NAME
2046 || SSA_NAME_IS_DEFAULT_DEF (arg1)
2047 || SSA_NAME_IS_DEFAULT_DEF (arg2))
2048 continue;
2050 def1 = SSA_NAME_DEF_STMT (arg1);
2051 def2 = SSA_NAME_DEF_STMT (arg2);
2053 if ((gimple_bb (def1) != bb1 || gimple_bb (def2) != bb2)
2054 && (gimple_bb (def2) != bb1 || gimple_bb (def1) != bb2))
2055 continue;
2057 /* Check the mode of the arguments to be sure a conditional move
2058 can be generated for it. */
2059 if (optab_handler (movcc_optab, TYPE_MODE (TREE_TYPE (arg1)))
2060 == CODE_FOR_nothing)
2061 continue;
2063 /* Both statements must be assignments whose RHS is a COMPONENT_REF. */
2064 if (!gimple_assign_single_p (def1)
2065 || !gimple_assign_single_p (def2)
2066 || gimple_has_volatile_ops (def1)
2067 || gimple_has_volatile_ops (def2))
2068 continue;
2070 ref1 = gimple_assign_rhs1 (def1);
2071 ref2 = gimple_assign_rhs1 (def2);
2073 if (TREE_CODE (ref1) != COMPONENT_REF
2074 || TREE_CODE (ref2) != COMPONENT_REF)
2075 continue;
2077 /* The zeroth operand of the two component references must be
2078 identical. It is not sufficient to compare get_base_address of
2079 the two references, because this could allow for different
2080 elements of the same array in the two trees. It is not safe to
2081 assume that the existence of one array element implies the
2082 existence of a different one. */
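/* E.g. a[i].f1 in one arm and a[j].f2 in the other (hypothetical code)
   would be rejected here even though both references have base 'a': the
   element a[j] may not exist when a[i] does, so loading from it
   speculatively is not safe.  */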
2083 if (!operand_equal_p (TREE_OPERAND (ref1, 0), TREE_OPERAND (ref2, 0), 0))
2084 continue;
2086 field1 = TREE_OPERAND (ref1, 1);
2087 field2 = TREE_OPERAND (ref2, 1);
2089 /* Check for field adjacency, and ensure field1 comes first. */
2090 for (next = DECL_CHAIN (field1);
2091 next && TREE_CODE (next) != FIELD_DECL;
2092 next = DECL_CHAIN (next))
2095 if (next != field2)
2097 for (next = DECL_CHAIN (field2);
2098 next && TREE_CODE (next) != FIELD_DECL;
2099 next = DECL_CHAIN (next))
2102 if (next != field1)
2103 continue;
2105 fieldswap = field1;
2106 field1 = field2;
2107 field2 = fieldswap;
2108 defswap = def1;
2109 def1 = def2;
2110 def2 = defswap;
2113 bb_for_def1 = gimple_bb (def1);
2114 bb_for_def2 = gimple_bb (def2);
2116 /* Check for proper alignment of the first field. */
2117 tree_offset1 = bit_position (field1);
2118 tree_offset2 = bit_position (field2);
2119 tree_size2 = DECL_SIZE (field2);
2121 if (!tree_fits_uhwi_p (tree_offset1)
2122 || !tree_fits_uhwi_p (tree_offset2)
2123 || !tree_fits_uhwi_p (tree_size2))
2124 continue;
2126 offset1 = tree_to_uhwi (tree_offset1);
2127 offset2 = tree_to_uhwi (tree_offset2);
2128 size2 = tree_to_uhwi (tree_size2);
2129 align1 = DECL_ALIGN (field1) % param_align_bits;
2131 if (offset1 % BITS_PER_UNIT != 0)
2132 continue;
2134 /* For profitability, the two field references should fit within
2135 a single cache line. */
2136 if (align1 + offset2 - offset1 + size2 > param_align_bits)
2137 continue;
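/* Worked example for the check above (hypothetical values): with a
   512-bit param_align_bits (a 64-byte line), two adjacent 64-bit fields
   at bit offsets 0 and 64 and DECL_ALIGN (field1) == 64 give
   64 + 64 - 0 + 64 = 192 <= 512, so the pair qualifies.  */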
2139 /* The two expressions cannot be dependent upon vdefs defined
2140 in bb1/bb2. */
2141 if (local_mem_dependence (def1, bb_for_def1)
2142 || local_mem_dependence (def2, bb_for_def2))
2143 continue;
2145 /* The conditions are satisfied; hoist the loads from bb1 and bb2 into
2146 bb0. We hoist the first one first so that a cache miss is handled
2147 efficiently regardless of hardware cache-fill policy. */
2148 gsi2 = gsi_for_stmt (def1);
2149 gsi_move_to_bb_end (&gsi2, bb0);
2150 gsi2 = gsi_for_stmt (def2);
2151 gsi_move_to_bb_end (&gsi2, bb0);
2153 if (dump_file && (dump_flags & TDF_DETAILS))
2155 fprintf (dump_file,
2156 "\nHoisting adjacent loads from %d and %d into %d: \n",
2157 bb_for_def1->index, bb_for_def2->index, bb0->index);
2158 print_gimple_stmt (dump_file, def1, 0, TDF_VOPS|TDF_MEMSYMS);
2159 print_gimple_stmt (dump_file, def2, 0, TDF_VOPS|TDF_MEMSYMS);
2164 /* Determine whether we should attempt to hoist adjacent loads out of
2165 diamond patterns in pass_phiopt. Always hoist loads if
2166 -fhoist-adjacent-loads is specified and the target machine has
2167 both a conditional move instruction and a defined cache line size. */
2169 static bool
2170 gate_hoist_loads (void)
2172 return (flag_hoist_adjacent_loads == 1
2173 && PARAM_VALUE (PARAM_L1_CACHE_LINE_SIZE)
2174 && HAVE_conditional_move);
2177 /* This pass tries to replace an if-then-else block with an
2178 assignment. We have four kinds of transformations. Some of these
2179 transformations are also performed by the ifcvt RTL optimizer.
2181 Conditional Replacement
2182 -----------------------
2184 This transformation, implemented in conditional_replacement,
2185 replaces
2187 bb0:
2188 if (cond) goto bb2; else goto bb1;
2189 bb1:
2190 bb2:
2191 x = PHI <0 (bb1), 1 (bb0), ...>;
2193 with
2195 bb0:
2196 x' = cond;
2197 goto bb2;
2198 bb2:
2199 x = PHI <x' (bb0), ...>;
2201 We remove bb1 as it becomes unreachable. This occurs often due to
2202 gimplification of conditionals.
2204 Value Replacement
2205 -----------------
2207 This transformation, implemented in value_replacement, replaces
2209 bb0:
2210 if (a != b) goto bb2; else goto bb1;
2211 bb1:
2212 bb2:
2213 x = PHI <a (bb1), b (bb0), ...>;
2215 with
2217 bb0:
2218 bb2:
2219 x = PHI <b (bb0), ...>;
2221 This opportunity can sometimes occur as a result of other
2222 optimizations.
2225 Another case caught by value replacement looks like this:
2227 bb0:
2228 t1 = a == CONST;
2229 t2 = b > c;
2230 t3 = t1 & t2;
2231 if (t3 != 0) goto bb1; else goto bb2;
2232 bb1:
2233 bb2:
2234 x = PHI (CONST, a)
2236 Gets replaced with:
2237 bb0:
2238 bb2:
2239 t1 = a == CONST;
2240 t2 = b > c;
2241 t3 = t1 & t2;
2242 x = a;
2244 ABS Replacement
2245 ---------------
2247 This transformation, implemented in abs_replacement, replaces
2249 bb0:
2250 if (a >= 0) goto bb2; else goto bb1;
2251 bb1:
2252 x = -a;
2253 bb2:
2254 x = PHI <x (bb1), a (bb0), ...>;
2256 with
2258 bb0:
2259 x' = ABS_EXPR< a >;
2260 bb2:
2261 x = PHI <x' (bb0), ...>;
2263 MIN/MAX Replacement
2264 -------------------
2266 This transformation, implemented in minmax_replacement, replaces
2268 bb0:
2269 if (a <= b) goto bb2; else goto bb1;
2270 bb1:
2271 bb2:
2272 x = PHI <b (bb1), a (bb0), ...>;
2274 with
2276 bb0:
2277 x' = MIN_EXPR (a, b)
2278 bb2:
2279 x = PHI <x' (bb0), ...>;
2281 A similar transformation is done for MAX_EXPR.
2284 This pass also performs a fifth transformation of a slightly different
2285 flavor.
2287 Adjacent Load Hoisting
2288 ----------------------
2290 This transformation replaces
2292 bb0:
2293 if (...) goto bb2; else goto bb1;
2294 bb1:
2295 x1 = (<expr>).field1;
2296 goto bb3;
2297 bb2:
2298 x2 = (<expr>).field2;
2299 bb3:
2300 # x = PHI <x1, x2>;
2302 with
2304 bb0:
2305 x1 = (<expr>).field1;
2306 x2 = (<expr>).field2;
2307 if (...) goto bb2; else goto bb1;
2308 bb1:
2309 goto bb3;
2310 bb2:
2311 bb3:
2312 # x = PHI <x1, x2>;
2314 The purpose of this transformation is to enable generation of conditional
2315 move instructions such as Intel CMOVE or PowerPC ISEL. Because one of
2316 the loads is speculative, the transformation is restricted to very
2317 specific cases to avoid introducing a page fault. We are looking for
2318 the common idiom:
2320 if (...)
2321 x = y->left;
2322 else
2323 x = y->right;
2325 where left and right are typically adjacent pointers in a tree structure. */
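/* Source-level illustrations of the transformations above (hypothetical
   user code, shown only to make the GIMPLE patterns concrete):

     x = cond ? 1 : 0;                conditional replacement: x = cond
     x = (a != b) ? b : a;            value replacement: x is always b
     x = (a >= 0) ? a : -a;           ABS replacement: x = ABS_EXPR <a>
     x = (a <= b) ? a : b;            MIN/MAX replacement: x = MIN_EXPR <a, b>
     x = cond ? y->left : y->right;   adjacent load hoisting  */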
2327 namespace {
2329 const pass_data pass_data_phiopt =
2331 GIMPLE_PASS, /* type */
2332 "phiopt", /* name */
2333 OPTGROUP_NONE, /* optinfo_flags */
2334 TV_TREE_PHIOPT, /* tv_id */
2335 ( PROP_cfg | PROP_ssa ), /* properties_required */
2336 0, /* properties_provided */
2337 0, /* properties_destroyed */
2338 0, /* todo_flags_start */
2339 0, /* todo_flags_finish */
2342 class pass_phiopt : public gimple_opt_pass
2344 public:
2345 pass_phiopt (gcc::context *ctxt)
2346 : gimple_opt_pass (pass_data_phiopt, ctxt)
2349 /* opt_pass methods: */
2350 opt_pass * clone () { return new pass_phiopt (m_ctxt); }
2351 virtual bool gate (function *) { return flag_ssa_phiopt; }
2352 virtual unsigned int execute (function *)
2354 return tree_ssa_phiopt_worker (false, gate_hoist_loads ());
2357 }; // class pass_phiopt
2359 } // anon namespace
2361 gimple_opt_pass *
2362 make_pass_phiopt (gcc::context *ctxt)
2364 return new pass_phiopt (ctxt);
2367 namespace {
2369 const pass_data pass_data_cselim =
2371 GIMPLE_PASS, /* type */
2372 "cselim", /* name */
2373 OPTGROUP_NONE, /* optinfo_flags */
2374 TV_TREE_PHIOPT, /* tv_id */
2375 ( PROP_cfg | PROP_ssa ), /* properties_required */
2376 0, /* properties_provided */
2377 0, /* properties_destroyed */
2378 0, /* todo_flags_start */
2379 0, /* todo_flags_finish */
2382 class pass_cselim : public gimple_opt_pass
2384 public:
2385 pass_cselim (gcc::context *ctxt)
2386 : gimple_opt_pass (pass_data_cselim, ctxt)
2389 /* opt_pass methods: */
2390 virtual bool gate (function *) { return flag_tree_cselim; }
2391 virtual unsigned int execute (function *) { return tree_ssa_cs_elim (); }
2393 }; // class pass_cselim
2395 } // anon namespace
2397 gimple_opt_pass *
2398 make_pass_cselim (gcc::context *ctxt)
2400 return new pass_cselim (ctxt);