/* Optimization of PHI nodes by converting them into straightline code.
   Copyright (C) 2004-2017 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "insn-codes.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "tree-pass.h"
#include "ssa.h"
#include "optabs-tree.h"
#include "insn-config.h"
#include "gimple-pretty-print.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "cfganal.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "tree-cfg.h"
#include "tree-dfa.h"
#include "domwalk.h"
#include "cfgloop.h"
#include "tree-data-ref.h"
#include "tree-scalar-evolution.h"
#include "tree-inline.h"
#include "params.h"
static unsigned int tree_ssa_phiopt_worker (bool, bool);
static bool conditional_replacement (basic_block, basic_block,
                                     edge, edge, gphi *, tree, tree);
static gphi *factor_out_conditional_conversion (edge, edge, gphi *, tree, tree,
                                                gimple *);
static int value_replacement (basic_block, basic_block,
                              edge, edge, gimple *, tree, tree);
static bool minmax_replacement (basic_block, basic_block,
                                edge, edge, gimple *, tree, tree);
static bool abs_replacement (basic_block, basic_block,
                             edge, edge, gimple *, tree, tree);
static bool cond_store_replacement (basic_block, basic_block, edge, edge,
                                    hash_set<tree> *);
static bool cond_if_else_store_replacement (basic_block, basic_block,
                                            basic_block);
static hash_set<tree> *get_non_trapping ();
static void replace_phi_edge_with_variable (basic_block, edge, gimple *, tree);
static void hoist_adjacent_loads (basic_block, basic_block,
                                  basic_block, basic_block);
static bool gate_hoist_loads (void);
/* This pass tries to transform conditional stores into unconditional
   ones, enabling further simplifications with the simpler then and else
   blocks.  In particular it replaces this:

     bb0:
       if (cond) goto bb2; else goto bb1;
     bb1:
       *p = RHS;
     bb2:

   with

     bb0:
       if (cond) goto bb1; else goto bb2;
     bb1:
       condtmp' = *p;
     bb2:
       condtmp = PHI <RHS, condtmp'>
       *p = condtmp;

   This transformation can only be done under several constraints,
   documented below.  It also replaces:

     bb0:
       if (cond) goto bb2; else goto bb1;
     bb1:
       *p = RHS1;
       goto bb3;
     bb2:
       *p = RHS2;
     bb3:

   with

     bb0:
       if (cond) goto bb3; else goto bb1;
     bb1:
     bb3:
       condtmp = PHI <RHS1, RHS2>
       *p = condtmp;  */
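
/* Illustrative sketch, not part of the original sources: at the C level
   the first shape above corresponds to turning

     if (cond)
       *p = x;

   into the unconditional

     tmp = cond ? x : *p;
     *p = tmp;

   where p, x and tmp are hypothetical names.  The store to *p now
   happens on every path, which is why the pass must first prove the
   access cannot trap (see get_non_trapping below).  */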
static unsigned int
tree_ssa_cs_elim (void)
{
  unsigned todo;

  /* ???  We are not interested in loop related info, but the following
     will create it, ICEing as we didn't init loops with pre-headers.
     An interfacing issue of find_data_references_in_bb.  */
  loop_optimizer_init (LOOPS_NORMAL);
  scev_initialize ();
  todo = tree_ssa_phiopt_worker (true, false);
  scev_finalize ();
  loop_optimizer_finalize ();
  return todo;
}
/* Return the singleton PHI in the SEQ of PHIs for edges E0 and E1.  */

static gphi *
single_non_singleton_phi_for_edges (gimple_seq seq, edge e0, edge e1)
{
  gimple_stmt_iterator i;
  gphi *phi = NULL;
  if (gimple_seq_singleton_p (seq))
    return as_a <gphi *> (gsi_stmt (gsi_start (seq)));
  for (i = gsi_start (seq); !gsi_end_p (i); gsi_next (&i))
    {
      gphi *p = as_a <gphi *> (gsi_stmt (i));
      /* If the PHI arguments are equal then we can skip this PHI.  */
      if (operand_equal_for_phi_arg_p (gimple_phi_arg_def (p, e0->dest_idx),
                                       gimple_phi_arg_def (p, e1->dest_idx)))
        continue;

      /* If we already found a PHI whose two edge arguments differ,
         then there is no singleton for these edges; return NULL.  */
      if (phi)
        return NULL;

      phi = p;
    }

  return phi;
}
/* The core routine of conditional store replacement and normal
   phi optimizations.  Both share much of the infrastructure in how
   to match applicable basic block patterns.  DO_STORE_ELIM is true
   when we want to do conditional store replacement, false otherwise.
   DO_HOIST_LOADS is true when we want to hoist adjacent loads out
   of diamond control flow patterns, false otherwise.  */

static unsigned int
tree_ssa_phiopt_worker (bool do_store_elim, bool do_hoist_loads)
{
  basic_block bb;
  basic_block *bb_order;
  unsigned n, i;
  bool cfgchanged = false;
  hash_set<tree> *nontrap = 0;

  if (do_store_elim)
    /* Calculate the set of non-trapping memory accesses.  */
    nontrap = get_non_trapping ();

  /* Search every basic block for COND_EXPR we may be able to optimize.

     We walk the blocks in order that guarantees that a block with
     a single predecessor is processed before the predecessor.
     This ensures that we collapse inner ifs before visiting the
     outer ones, and also that we do not try to visit a removed
     block.  */
  bb_order = single_pred_before_succ_order ();
  n = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS;

  for (i = 0; i < n; i++)
    {
      gimple *cond_stmt;
      gphi *phi;
      basic_block bb1, bb2;
      edge e1, e2;
      tree arg0, arg1;

      bb = bb_order[i];

      cond_stmt = last_stmt (bb);
      /* Check to see if the last statement is a GIMPLE_COND.  */
      if (!cond_stmt
          || gimple_code (cond_stmt) != GIMPLE_COND)
        continue;

      e1 = EDGE_SUCC (bb, 0);
      bb1 = e1->dest;
      e2 = EDGE_SUCC (bb, 1);
      bb2 = e2->dest;

      /* We cannot do the optimization on abnormal edges.  */
      if ((e1->flags & EDGE_ABNORMAL) != 0
          || (e2->flags & EDGE_ABNORMAL) != 0)
        continue;

      /* Punt if bb1 has no successors, bb2 is NULL, or bb2 has no
         successors.  */
      if (EDGE_COUNT (bb1->succs) == 0
          || bb2 == NULL
          || EDGE_COUNT (bb2->succs) == 0)
        continue;

      /* Find the bb which is the fall through to the other.  */
      if (EDGE_SUCC (bb1, 0)->dest == bb2)
        ;
      else if (EDGE_SUCC (bb2, 0)->dest == bb1)
        {
          std::swap (bb1, bb2);
          std::swap (e1, e2);
        }
      else if (do_store_elim
               && EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest)
        {
          basic_block bb3 = EDGE_SUCC (bb1, 0)->dest;

          if (!single_succ_p (bb1)
              || (EDGE_SUCC (bb1, 0)->flags & EDGE_FALLTHRU) == 0
              || !single_succ_p (bb2)
              || (EDGE_SUCC (bb2, 0)->flags & EDGE_FALLTHRU) == 0
              || EDGE_COUNT (bb3->preds) != 2)
            continue;
          if (cond_if_else_store_replacement (bb1, bb2, bb3))
            cfgchanged = true;
          continue;
        }
      else if (do_hoist_loads
               && EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest)
        {
          basic_block bb3 = EDGE_SUCC (bb1, 0)->dest;

          if (!FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (cond_stmt)))
              && single_succ_p (bb1)
              && single_succ_p (bb2)
              && single_pred_p (bb1)
              && single_pred_p (bb2)
              && EDGE_COUNT (bb->succs) == 2
              && EDGE_COUNT (bb3->preds) == 2
              /* If one edge or the other is dominant, a conditional move
                 is likely to perform worse than the well-predicted branch.  */
              && !predictable_edge_p (EDGE_SUCC (bb, 0))
              && !predictable_edge_p (EDGE_SUCC (bb, 1)))
            hoist_adjacent_loads (bb, bb1, bb2, bb3);
          continue;
        }
      else
        continue;

      e1 = EDGE_SUCC (bb1, 0);

      /* Make sure that bb1 is just a fall through.  */
      if (!single_succ_p (bb1)
          || (e1->flags & EDGE_FALLTHRU) == 0)
        continue;

      /* Also make sure that bb1 only has one predecessor and that it
         is bb.  */
      if (!single_pred_p (bb1)
          || single_pred (bb1) != bb)
        continue;

      if (do_store_elim)
        {
          /* bb1 is the middle block, bb2 the join block, bb the split block,
             e1 the fallthrough edge from bb1 to bb2.  We can't do the
             optimization if the join block has more than two predecessors.  */
          if (EDGE_COUNT (bb2->preds) > 2)
            continue;
          if (cond_store_replacement (bb1, bb2, e1, e2, nontrap))
            cfgchanged = true;
        }
      else
        {
          gimple_seq phis = phi_nodes (bb2);
          gimple_stmt_iterator gsi;
          bool candorest = true;

          /* Value replacement can work with more than one PHI
             so try that first.  */
          for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
            {
              phi = as_a <gphi *> (gsi_stmt (gsi));
              arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
              arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
              if (value_replacement (bb, bb1, e1, e2, phi, arg0, arg1) == 2)
                {
                  candorest = false;
                  cfgchanged = true;
                  break;
                }
            }

          if (!candorest)
            continue;

          phi = single_non_singleton_phi_for_edges (phis, e1, e2);
          if (!phi)
            continue;

          arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
          arg1 = gimple_phi_arg_def (phi, e2->dest_idx);

          /* Something is wrong if we cannot find the arguments in the PHI
             node.  */
          gcc_assert (arg0 != NULL_TREE && arg1 != NULL_TREE);

          gphi *newphi = factor_out_conditional_conversion (e1, e2, phi,
                                                            arg0, arg1,
                                                            cond_stmt);
          if (newphi != NULL)
            {
              phi = newphi;
              /* factor_out_conditional_conversion may create a new PHI in
                 BB2 and eliminate an existing PHI in BB2.  Recompute values
                 that may be affected by that change.  */
              arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
              arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
              gcc_assert (arg0 != NULL_TREE && arg1 != NULL_TREE);
            }

          /* Do the replacement of conditional if it can be done.  */
          if (conditional_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
            cfgchanged = true;
          else if (abs_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
            cfgchanged = true;
          else if (minmax_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
            cfgchanged = true;
        }
    }

  free (bb_order);

  if (do_store_elim)
    delete nontrap;
  /* If the CFG has changed, we should cleanup the CFG.  */
  if (cfgchanged && do_store_elim)
    {
      /* In cond-store replacement we have added some loads on edges
         and new VOPS (as we moved the store, and created a load).  */
      gsi_commit_edge_inserts ();
      return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
    }
  else if (cfgchanged)
    return TODO_cleanup_cfg;
  return 0;
}
/* Replace PHI node element whose edge is E in block BB with variable NEW.
   Remove the edge from COND_BLOCK which does not lead to BB (COND_BLOCK
   is known to have two edges, one of which must reach BB).  */

static void
replace_phi_edge_with_variable (basic_block cond_block,
                                edge e, gimple *phi, tree new_tree)
{
  basic_block bb = gimple_bb (phi);
  basic_block block_to_remove;
  gimple_stmt_iterator gsi;

  /* Change the PHI argument to new.  */
  SET_USE (PHI_ARG_DEF_PTR (phi, e->dest_idx), new_tree);

  /* Remove the empty basic block.  */
  if (EDGE_SUCC (cond_block, 0)->dest == bb)
    {
      EDGE_SUCC (cond_block, 0)->flags |= EDGE_FALLTHRU;
      EDGE_SUCC (cond_block, 0)->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
      EDGE_SUCC (cond_block, 0)->probability = profile_probability::always ();

      block_to_remove = EDGE_SUCC (cond_block, 1)->dest;
    }
  else
    {
      EDGE_SUCC (cond_block, 1)->flags |= EDGE_FALLTHRU;
      EDGE_SUCC (cond_block, 1)->flags
        &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
      EDGE_SUCC (cond_block, 1)->probability = profile_probability::always ();

      block_to_remove = EDGE_SUCC (cond_block, 0)->dest;
    }
  delete_basic_block (block_to_remove);

  /* Eliminate the COND_EXPR at the end of COND_BLOCK.  */
  gsi = gsi_last_bb (cond_block);
  gsi_remove (&gsi, true);

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file,
             "COND_EXPR in block %d and PHI in block %d converted to straightline code.\n",
             cond_block->index,
             bb->index);
}
/* PR66726: Factor conversion out of COND_EXPR.  If the arguments of the PHI
   stmt are CONVERT_STMT, factor out the conversion and perform the conversion
   to the result of PHI stmt.  COND_STMT is the controlling predicate.
   Return the newly-created PHI, if any.  */

static gphi *
factor_out_conditional_conversion (edge e0, edge e1, gphi *phi,
                                   tree arg0, tree arg1, gimple *cond_stmt)
{
  gimple *arg0_def_stmt = NULL, *arg1_def_stmt = NULL, *new_stmt;
  tree new_arg0 = NULL_TREE, new_arg1 = NULL_TREE;
  tree temp, result;
  gphi *newphi;
  gimple_stmt_iterator gsi, gsi_for_def;
  source_location locus = gimple_location (phi);
  enum tree_code convert_code;

  /* Handle only PHI statements with two arguments.  TODO: If all
     other arguments to PHI are INTEGER_CST or if their defining
     statement have the same unary operation, we can handle more
     than two arguments too.  */
  if (gimple_phi_num_args (phi) != 2)
    return NULL;

  /* First canonicalize to simplify tests.  */
  if (TREE_CODE (arg0) != SSA_NAME)
    {
      std::swap (arg0, arg1);
      std::swap (e0, e1);
    }

  if (TREE_CODE (arg0) != SSA_NAME
      || (TREE_CODE (arg1) != SSA_NAME
          && TREE_CODE (arg1) != INTEGER_CST))
    return NULL;

  /* Check if arg0 is an SSA_NAME and the stmt which defines arg0 is
     a conversion.  */
  arg0_def_stmt = SSA_NAME_DEF_STMT (arg0);
  if (!gimple_assign_cast_p (arg0_def_stmt))
    return NULL;

  /* Use the RHS as new_arg0.  */
  convert_code = gimple_assign_rhs_code (arg0_def_stmt);
  new_arg0 = gimple_assign_rhs1 (arg0_def_stmt);
  if (convert_code == VIEW_CONVERT_EXPR)
    {
      new_arg0 = TREE_OPERAND (new_arg0, 0);
      if (!is_gimple_reg_type (TREE_TYPE (new_arg0)))
        return NULL;
    }

  if (TREE_CODE (arg1) == SSA_NAME)
    {
      /* Check if arg1 is an SSA_NAME and the stmt which defines arg1
         is a conversion.  */
      arg1_def_stmt = SSA_NAME_DEF_STMT (arg1);
      if (!is_gimple_assign (arg1_def_stmt)
          || gimple_assign_rhs_code (arg1_def_stmt) != convert_code)
        return NULL;

      /* Use the RHS as new_arg1.  */
      new_arg1 = gimple_assign_rhs1 (arg1_def_stmt);
      if (convert_code == VIEW_CONVERT_EXPR)
        new_arg1 = TREE_OPERAND (new_arg1, 0);
    }
  else
    {
      /* If arg1 is an INTEGER_CST, fold it to new type.  */
      if (INTEGRAL_TYPE_P (TREE_TYPE (new_arg0))
          && int_fits_type_p (arg1, TREE_TYPE (new_arg0)))
        {
          if (gimple_assign_cast_p (arg0_def_stmt))
            {
              /* For the INTEGER_CST case, we are just moving the
                 conversion from one place to another, which can often
                 hurt as the conversion moves further away from the
                 statement that computes the value.  So, perform this
                 only if new_arg0 is an operand of COND_STMT, or
                 if arg0_def_stmt is the only non-debug stmt in
                 its basic block, because then it is possible this
                 could enable further optimizations (minmax replacement
                 etc.).  See PR71016.  */
              if (new_arg0 != gimple_cond_lhs (cond_stmt)
                  && new_arg0 != gimple_cond_rhs (cond_stmt)
                  && gimple_bb (arg0_def_stmt) == e0->src)
                {
                  gsi = gsi_for_stmt (arg0_def_stmt);
                  gsi_prev_nondebug (&gsi);
                  if (!gsi_end_p (gsi))
                    return NULL;
                  gsi = gsi_for_stmt (arg0_def_stmt);
                  gsi_next_nondebug (&gsi);
                  if (!gsi_end_p (gsi))
                    return NULL;
                }
              new_arg1 = fold_convert (TREE_TYPE (new_arg0), arg1);
            }
          else
            return NULL;
        }
      else
        return NULL;
    }

  /* If arg0/arg1 have > 1 use, then this transformation actually increases
     the number of expressions evaluated at runtime.  */
  if (!has_single_use (arg0)
      || (arg1_def_stmt && !has_single_use (arg1)))
    return NULL;

  /* If the types of new_arg0 and new_arg1 are different, bail out.  */
  if (!types_compatible_p (TREE_TYPE (new_arg0), TREE_TYPE (new_arg1)))
    return NULL;

  /* Create a new PHI stmt.  */
  result = PHI_RESULT (phi);
  temp = make_ssa_name (TREE_TYPE (new_arg0), NULL);
  newphi = create_phi_node (temp, gimple_bb (phi));

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "PHI ");
      print_generic_expr (dump_file, gimple_phi_result (phi));
      fprintf (dump_file,
               " changed to factor conversion out from COND_EXPR.\n");
      fprintf (dump_file, "New stmt with CAST that defines ");
      print_generic_expr (dump_file, result);
      fprintf (dump_file, ".\n");
    }

  /* Remove the old cast(s) that have a single use.  */
  gsi_for_def = gsi_for_stmt (arg0_def_stmt);
  gsi_remove (&gsi_for_def, true);
  release_defs (arg0_def_stmt);

  if (arg1_def_stmt)
    {
      gsi_for_def = gsi_for_stmt (arg1_def_stmt);
      gsi_remove (&gsi_for_def, true);
      release_defs (arg1_def_stmt);
    }

  add_phi_arg (newphi, new_arg0, e0, locus);
  add_phi_arg (newphi, new_arg1, e1, locus);

  /* Create the conversion stmt and insert it.  */
  if (convert_code == VIEW_CONVERT_EXPR)
    temp = fold_build1 (VIEW_CONVERT_EXPR, TREE_TYPE (result), temp);
  new_stmt = gimple_build_assign (result, convert_code, temp);
  gsi = gsi_after_labels (gimple_bb (phi));
  gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);

  /* Remove the original PHI stmt.  */
  gsi = gsi_for_stmt (phi);
  gsi_remove (&gsi, true);
  return newphi;
}
/* The function conditional_replacement does the main work of doing the
   conditional replacement.  Return true if the replacement is done.
   Otherwise return false.
   BB is the basic block where the replacement is going to be done on.  ARG0
   is argument 0 from PHI.  Likewise for ARG1.  */

static bool
conditional_replacement (basic_block cond_bb, basic_block middle_bb,
                         edge e0, edge e1, gphi *phi,
                         tree arg0, tree arg1)
{
  tree result;
  gimple *stmt;
  gassign *new_stmt;
  tree cond;
  gimple_stmt_iterator gsi;
  edge true_edge, false_edge;
  tree new_var, new_var2;
  bool neg;

  /* FIXME: Gimplification of complex type is too hard for now.  */
  /* We aren't prepared to handle vectors either (and it is a question
     if it would be worthwhile anyway).  */
  if (!(INTEGRAL_TYPE_P (TREE_TYPE (arg0))
        || POINTER_TYPE_P (TREE_TYPE (arg0)))
      || !(INTEGRAL_TYPE_P (TREE_TYPE (arg1))
           || POINTER_TYPE_P (TREE_TYPE (arg1))))
    return false;

  /* If the PHI arguments are the constants 0 and 1, or 0 and -1, then
     convert it to the conditional.  */
  if ((integer_zerop (arg0) && integer_onep (arg1))
      || (integer_zerop (arg1) && integer_onep (arg0)))
    neg = false;
  else if ((integer_zerop (arg0) && integer_all_onesp (arg1))
           || (integer_zerop (arg1) && integer_all_onesp (arg0)))
    neg = true;
  else
    return false;

  if (!empty_block_p (middle_bb))
    return false;

  /* At this point we know we have a GIMPLE_COND with two successors.
     One successor is BB, the other successor is an empty block which
     falls through into BB.

     There is a single PHI node at the join point (BB) and its arguments
     are constants (0, 1) or (0, -1).

     So, given the condition COND, and the two PHI arguments, we can
     rewrite this PHI into non-branching code:

       dest = (COND) or dest = COND'

     We use the condition as-is if the argument associated with the
     true edge has the value one or the argument associated with the
     false edge has the value zero.  Note that those conditions are not
     the same since only one of the outgoing edges from the GIMPLE_COND
     will directly reach BB and thus be associated with an argument.  */

  stmt = last_stmt (cond_bb);
  result = PHI_RESULT (phi);

  /* To handle special cases like floating point comparison, it is easier and
     less error-prone to build a tree and gimplify it on the fly though it is
     less efficient.  */
  cond = fold_build2_loc (gimple_location (stmt),
                          gimple_cond_code (stmt), boolean_type_node,
                          gimple_cond_lhs (stmt), gimple_cond_rhs (stmt));

  /* We need to know which is the true edge and which is the false
     edge so that we know when to invert the condition below.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
  if ((e0 == true_edge && integer_zerop (arg0))
      || (e0 == false_edge && !integer_zerop (arg0))
      || (e1 == true_edge && integer_zerop (arg1))
      || (e1 == false_edge && !integer_zerop (arg1)))
    cond = fold_build1_loc (gimple_location (stmt),
                            TRUTH_NOT_EXPR, TREE_TYPE (cond), cond);

  if (neg)
    {
      cond = fold_convert_loc (gimple_location (stmt),
                               TREE_TYPE (result), cond);
      cond = fold_build1_loc (gimple_location (stmt),
                              NEGATE_EXPR, TREE_TYPE (cond), cond);
    }

  /* Insert our new statements at the end of conditional block before the
     COND_STMT.  */
  gsi = gsi_for_stmt (stmt);
  new_var = force_gimple_operand_gsi (&gsi, cond, true, NULL, true,
                                      GSI_SAME_STMT);

  if (!useless_type_conversion_p (TREE_TYPE (result), TREE_TYPE (new_var)))
    {
      source_location locus_0, locus_1;

      new_var2 = make_ssa_name (TREE_TYPE (result));
      new_stmt = gimple_build_assign (new_var2, CONVERT_EXPR, new_var);
      gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
      new_var = new_var2;

      /* Set the locus to the first argument, unless it doesn't have one.  */
      locus_0 = gimple_phi_arg_location (phi, 0);
      locus_1 = gimple_phi_arg_location (phi, 1);
      if (locus_0 == UNKNOWN_LOCATION)
        locus_0 = locus_1;
      gimple_set_location (new_stmt, locus_0);
    }

  replace_phi_edge_with_variable (cond_bb, e1, phi, new_var);

  /* Note that we optimized this PHI.  */
  return true;
}
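
/* Illustrative sketch, not part of the original sources: at the C level
   conditional_replacement turns

     int r;
     if (a < b)
       r = 1;
     else
       r = 0;

   into the branch-free

     int r = (a < b);

   and the (0, -1) variant into r = -(a < b); a, b and r are
   hypothetical names.  */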
/* Update *ARG which is defined in STMT so that it contains the
   computed value if that seems profitable.  Return true if the
   statement is made dead by that rewriting.  */

static bool
jump_function_from_stmt (tree *arg, gimple *stmt)
{
  enum tree_code code = gimple_assign_rhs_code (stmt);
  if (code == ADDR_EXPR)
    {
      /* For arg = &p->i transform it to p, if possible.  */
      tree rhs1 = gimple_assign_rhs1 (stmt);
      HOST_WIDE_INT offset;
      tree tem = get_addr_base_and_unit_offset (TREE_OPERAND (rhs1, 0),
                                                &offset);
      if (tem
          && TREE_CODE (tem) == MEM_REF
          && (mem_ref_offset (tem) + offset) == 0)
        {
          *arg = TREE_OPERAND (tem, 0);
          return true;
        }
    }
  /* TODO: Much like IPA-CP jump-functions we want to handle constant
     additions symbolically here, and we'd need to update the comparison
     code that compares the arg + cst tuples in our caller.  For now the
     code above exactly handles the VEC_BASE pattern from vec.h.  */
  return false;
}
/* RHS is a source argument in a BIT_AND_EXPR which feeds a conditional
   of the form SSA_NAME NE 0.

   If RHS is fed by a simple EQ_EXPR comparison of two values, see if
   the two input values of the EQ_EXPR match arg0 and arg1.

   If so update *code and return TRUE.  Otherwise return FALSE.  */

static bool
rhs_is_fed_for_value_replacement (const_tree arg0, const_tree arg1,
                                  enum tree_code *code, const_tree rhs)
{
  /* Obviously if RHS is not an SSA_NAME, we can't look at the defining
     statement.  */
  if (TREE_CODE (rhs) == SSA_NAME)
    {
      gimple *def1 = SSA_NAME_DEF_STMT (rhs);

      /* Verify the defining statement has an EQ_EXPR on the RHS.  */
      if (is_gimple_assign (def1) && gimple_assign_rhs_code (def1) == EQ_EXPR)
        {
          /* Finally verify the source operands of the EQ_EXPR are equal
             to arg0 and arg1.  */
          tree op0 = gimple_assign_rhs1 (def1);
          tree op1 = gimple_assign_rhs2 (def1);
          if ((operand_equal_for_phi_arg_p (arg0, op0)
               && operand_equal_for_phi_arg_p (arg1, op1))
              || (operand_equal_for_phi_arg_p (arg0, op1)
                  && operand_equal_for_phi_arg_p (arg1, op0)))
            {
              /* We will perform the optimization.  */
              *code = gimple_assign_rhs_code (def1);
              return true;
            }
        }
    }
  return false;
}
/* Return TRUE if arg0/arg1 are equal to the rhs/lhs or lhs/rhs of COND.

   Also return TRUE if arg0/arg1 are equal to the source arguments of
   an EQ comparison feeding a BIT_AND_EXPR which feeds COND.

   Return FALSE otherwise.  */

static bool
operand_equal_for_value_replacement (const_tree arg0, const_tree arg1,
                                     enum tree_code *code, gimple *cond)
{
  gimple *def;
  tree lhs = gimple_cond_lhs (cond);
  tree rhs = gimple_cond_rhs (cond);

  if ((operand_equal_for_phi_arg_p (arg0, lhs)
       && operand_equal_for_phi_arg_p (arg1, rhs))
      || (operand_equal_for_phi_arg_p (arg1, lhs)
          && operand_equal_for_phi_arg_p (arg0, rhs)))
    return true;

  /* Now handle more complex case where we have an EQ comparison
     which feeds a BIT_AND_EXPR which feeds COND.

     First verify that COND is of the form SSA_NAME NE 0.  */
  if (*code != NE_EXPR || !integer_zerop (rhs)
      || TREE_CODE (lhs) != SSA_NAME)
    return false;

  /* Now ensure that SSA_NAME is set by a BIT_AND_EXPR.  */
  def = SSA_NAME_DEF_STMT (lhs);
  if (!is_gimple_assign (def) || gimple_assign_rhs_code (def) != BIT_AND_EXPR)
    return false;

  /* Now verify arg0/arg1 correspond to the source arguments of an
     EQ comparison feeding the BIT_AND_EXPR.  */

  tree tmp = gimple_assign_rhs1 (def);
  if (rhs_is_fed_for_value_replacement (arg0, arg1, code, tmp))
    return true;

  tmp = gimple_assign_rhs2 (def);
  if (rhs_is_fed_for_value_replacement (arg0, arg1, code, tmp))
    return true;

  return false;
}
/* Returns true if ARG is a neutral element for operation CODE
   on the RIGHT side.  */

static bool
neutral_element_p (tree_code code, tree arg, bool right)
{
  switch (code)
    {
    case PLUS_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
      return integer_zerop (arg);

    case LROTATE_EXPR:
    case RROTATE_EXPR:
    case LSHIFT_EXPR:
    case RSHIFT_EXPR:
    case MINUS_EXPR:
    case POINTER_PLUS_EXPR:
      return right && integer_zerop (arg);

    case MULT_EXPR:
      return integer_onep (arg);

    case TRUNC_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case EXACT_DIV_EXPR:
      return right && integer_onep (arg);

    case BIT_AND_EXPR:
      return integer_all_onesp (arg);

    default:
      return false;
    }
}

/* Returns true if ARG is an absorbing element for operation CODE.  */

static bool
absorbing_element_p (tree_code code, tree arg, bool right, tree rval)
{
  switch (code)
    {
    case BIT_IOR_EXPR:
      return integer_all_onesp (arg);

    case MULT_EXPR:
    case BIT_AND_EXPR:
      return integer_zerop (arg);

    case LSHIFT_EXPR:
    case RSHIFT_EXPR:
    case LROTATE_EXPR:
    case RROTATE_EXPR:
      return !right && integer_zerop (arg);

    case TRUNC_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case EXACT_DIV_EXPR:
    case TRUNC_MOD_EXPR:
    case CEIL_MOD_EXPR:
    case FLOOR_MOD_EXPR:
    case ROUND_MOD_EXPR:
      return (!right
              && integer_zerop (arg)
              && tree_single_nonzero_warnv_p (rval, NULL));

    default:
      return false;
    }
}
/* The function value_replacement does the main work of doing the value
   replacement.  Return non-zero if the replacement is done.  Otherwise return
   0.  If we remove the middle basic block, return 2.
   BB is the basic block where the replacement is going to be done on.  ARG0
   is argument 0 from the PHI.  Likewise for ARG1.  */

static int
value_replacement (basic_block cond_bb, basic_block middle_bb,
                   edge e0, edge e1, gimple *phi,
                   tree arg0, tree arg1)
{
  gimple_stmt_iterator gsi;
  gimple *cond;
  edge true_edge, false_edge;
  enum tree_code code;
  bool empty_or_with_defined_p = true;

  /* If the type says honor signed zeros we cannot do this
     optimization.  */
  if (HONOR_SIGNED_ZEROS (arg1))
    return 0;

  /* If there is a statement in MIDDLE_BB that defines one of the PHI
     arguments, then adjust arg0 or arg1.  */
  gsi = gsi_start_nondebug_after_labels_bb (middle_bb);
  while (!gsi_end_p (gsi))
    {
      gimple *stmt = gsi_stmt (gsi);
      tree lhs;
      gsi_next_nondebug (&gsi);
      if (!is_gimple_assign (stmt))
        {
          empty_or_with_defined_p = false;
          continue;
        }
      /* Now try to adjust arg0 or arg1 according to the computation
         in the statement.  */
      lhs = gimple_assign_lhs (stmt);
      if (!(lhs == arg0
            && jump_function_from_stmt (&arg0, stmt))
          || (lhs == arg1
              && jump_function_from_stmt (&arg1, stmt)))
        empty_or_with_defined_p = false;
    }

  cond = last_stmt (cond_bb);
  code = gimple_cond_code (cond);

  /* This transformation is only valid for equality comparisons.  */
  if (code != NE_EXPR && code != EQ_EXPR)
    return 0;

  /* We need to know which is the true edge and which is the false
     edge so that we know which argument goes with which path.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);

  /* At this point we know we have a COND_EXPR with two successors.
     One successor is BB, the other successor is an empty block which
     falls through into BB.

     The condition for the COND_EXPR is known to be NE_EXPR or EQ_EXPR.

     There is a single PHI node at the join point (BB) with two arguments.

     We now need to verify that the two arguments in the PHI node match
     the two arguments to the equality comparison.  */

  if (operand_equal_for_value_replacement (arg0, arg1, &code, cond))
    {
      edge e;
      tree arg;

      /* For NE_EXPR, we want to build an assignment result = arg where
         arg is the PHI argument associated with the true edge.  For
         EQ_EXPR we want the PHI argument associated with the false edge.  */
      e = (code == NE_EXPR ? true_edge : false_edge);

      /* Unfortunately, E may not reach BB (it may instead have gone to
         OTHER_BLOCK).  If that is the case, then we want the single outgoing
         edge from OTHER_BLOCK which reaches BB and represents the desired
         path from COND_BLOCK.  */
      if (e->dest == middle_bb)
        e = single_succ_edge (e->dest);

      /* Now we know the incoming edge to BB that has the argument for the
         RHS of our new assignment statement.  */
      if (e0 == e)
        arg = arg0;
      else
        arg = arg1;

      /* If the middle basic block was empty or is defining the
         PHI arguments and this is a single phi where the args are different
         for the edges e0 and e1 then we can remove the middle basic block.  */
      if (empty_or_with_defined_p
          && single_non_singleton_phi_for_edges (phi_nodes (gimple_bb (phi)),
                                                 e0, e1) == phi)
        {
          replace_phi_edge_with_variable (cond_bb, e1, phi, arg);
          /* Note that we optimized this PHI.  */
          return 2;
        }
      else
        {
          /* Replace the PHI arguments with arg.  */
          SET_PHI_ARG_DEF (phi, e0->dest_idx, arg);
          SET_PHI_ARG_DEF (phi, e1->dest_idx, arg);
          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "PHI ");
              print_generic_expr (dump_file, gimple_phi_result (phi));
              fprintf (dump_file, " reduced for COND_EXPR in block %d to ",
                       cond_bb->index);
              print_generic_expr (dump_file, arg);
              fprintf (dump_file, ".\n");
            }
          return 1;
        }
    }

  /* Now optimize (x != 0) ? x + y : y to just x + y.  */
  gsi = gsi_last_nondebug_bb (middle_bb);
  if (gsi_end_p (gsi))
    return 0;

  gimple *assign = gsi_stmt (gsi);
  if (!is_gimple_assign (assign)
      || gimple_assign_rhs_class (assign) != GIMPLE_BINARY_RHS
      || (!INTEGRAL_TYPE_P (TREE_TYPE (arg0))
          && !POINTER_TYPE_P (TREE_TYPE (arg0))))
    return 0;

  /* Punt if there are (degenerate) PHIs in middle_bb, there should not be.  */
  if (!gimple_seq_empty_p (phi_nodes (middle_bb)))
    return 0;

  /* Allow up to 2 cheap preparation statements that prepare argument
     for assign, e.g.:
      if (y_4 != 0)
        goto <bb 3>;
      else
        goto <bb 4>;
     <bb 3>:
      _1 = (int) y_4;
      iftmp.0_6 = x_5(D) r<< _1;
     <bb 4>:
      # iftmp.0_2 = PHI <iftmp.0_6(3), x_5(D)(2)>
     or:
      if (y_3(D) == 0)
        goto <bb 4>;
      else
        goto <bb 3>;
     <bb 3>:
      y_4 = y_3(D) & 31;
      _1 = (int) y_4;
      _6 = x_5(D) r<< _1;
     <bb 4>:
      # _2 = PHI <x_5(D)(2), _6(3)>  */
  gimple *prep_stmt[2] = { NULL, NULL };
  int prep_cnt;
  for (prep_cnt = 0; ; prep_cnt++)
    {
      gsi_prev_nondebug (&gsi);
      if (gsi_end_p (gsi))
        break;

      gimple *g = gsi_stmt (gsi);
      if (gimple_code (g) == GIMPLE_LABEL)
        break;

      if (prep_cnt == 2 || !is_gimple_assign (g))
        return 0;

      tree lhs = gimple_assign_lhs (g);
      tree rhs1 = gimple_assign_rhs1 (g);
      use_operand_p use_p;
      gimple *use_stmt;
      if (TREE_CODE (lhs) != SSA_NAME
          || TREE_CODE (rhs1) != SSA_NAME
          || !INTEGRAL_TYPE_P (TREE_TYPE (lhs))
          || !INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
          || !single_imm_use (lhs, &use_p, &use_stmt)
          || use_stmt != (prep_cnt ? prep_stmt[prep_cnt - 1] : assign))
        return 0;
      switch (gimple_assign_rhs_code (g))
        {
        CASE_CONVERT:
          break;
        case PLUS_EXPR:
        case BIT_AND_EXPR:
        case BIT_XOR_EXPR:
          if (TREE_CODE (gimple_assign_rhs2 (g)) != INTEGER_CST)
            return 0;
          break;
        default:
          return 0;
        }
      prep_stmt[prep_cnt] = g;
    }

  /* Only transform if it removes the condition.  */
  if (!single_non_singleton_phi_for_edges (phi_nodes (gimple_bb (phi)), e0, e1))
    return 0;

  /* Size-wise, this is always profitable.  */
  if (optimize_bb_for_speed_p (cond_bb)
      /* The special case is useless if it has a low probability.  */
      && profile_status_for_fn (cfun) != PROFILE_ABSENT
      && EDGE_PRED (middle_bb, 0)->probability < profile_probability::even ()
      /* If assign is cheap, there is no point avoiding it.  */
      && estimate_num_insns (bb_seq (middle_bb), &eni_time_weights)
         >= 3 * estimate_num_insns (cond, &eni_time_weights))
    return 0;

  tree lhs = gimple_assign_lhs (assign);
  tree rhs1 = gimple_assign_rhs1 (assign);
  tree rhs2 = gimple_assign_rhs2 (assign);
  enum tree_code code_def = gimple_assign_rhs_code (assign);
  tree cond_lhs = gimple_cond_lhs (cond);
  tree cond_rhs = gimple_cond_rhs (cond);

  /* Propagate the cond_rhs constant through preparation stmts,
     make sure UB isn't invoked while doing that.  */
  for (int i = prep_cnt - 1; i >= 0; --i)
    {
      gimple *g = prep_stmt[i];
      tree grhs1 = gimple_assign_rhs1 (g);
      if (!operand_equal_for_phi_arg_p (cond_lhs, grhs1))
        return 0;
      cond_lhs = gimple_assign_lhs (g);
      cond_rhs = fold_convert (TREE_TYPE (grhs1), cond_rhs);
      if (TREE_CODE (cond_rhs) != INTEGER_CST
          || TREE_OVERFLOW (cond_rhs))
        return 0;
      if (gimple_assign_rhs_class (g) == GIMPLE_BINARY_RHS)
        {
          cond_rhs = int_const_binop (gimple_assign_rhs_code (g), cond_rhs,
                                      gimple_assign_rhs2 (g));
          if (TREE_OVERFLOW (cond_rhs))
            return 0;
        }
      cond_rhs = fold_convert (TREE_TYPE (cond_lhs), cond_rhs);
      if (TREE_CODE (cond_rhs) != INTEGER_CST
          || TREE_OVERFLOW (cond_rhs))
        return 0;
    }

  if (((code == NE_EXPR && e1 == false_edge)
       || (code == EQ_EXPR && e1 == true_edge))
      && arg0 == lhs
      && ((arg1 == rhs1
           && operand_equal_for_phi_arg_p (rhs2, cond_lhs)
           && neutral_element_p (code_def, cond_rhs, true))
          || (arg1 == rhs2
              && operand_equal_for_phi_arg_p (rhs1, cond_lhs)
              && neutral_element_p (code_def, cond_rhs, false))
          || (operand_equal_for_phi_arg_p (arg1, cond_rhs)
              && ((operand_equal_for_phi_arg_p (rhs2, cond_lhs)
                   && absorbing_element_p (code_def, cond_rhs, true, rhs2))
                  || (operand_equal_for_phi_arg_p (rhs1, cond_lhs)
                      && absorbing_element_p (code_def,
                                              cond_rhs, false, rhs2))))))
    {
      gsi = gsi_for_stmt (cond);
      /* Moving ASSIGN might change VR of lhs, e.g. when moving u_6
         def-stmt in:
         if (n_5 != 0)
           goto <bb 3>;
         else
           goto <bb 4>;

         <bb 3>:
         # RANGE [0, 4294967294]
         u_6 = n_5 + 4294967295;

         <bb 4>:
         # u_3 = PHI <u_6(3), 4294967295(2)>  */
      reset_flow_sensitive_info (lhs);
      if (INTEGRAL_TYPE_P (TREE_TYPE (lhs)))
        {
          /* If available, we can use VR of phi result at least.  */
          tree phires = gimple_phi_result (phi);
          struct range_info_def *phires_range_info
            = SSA_NAME_RANGE_INFO (phires);
          if (phires_range_info)
            duplicate_ssa_name_range_info (lhs, SSA_NAME_RANGE_TYPE (phires),
                                           phires_range_info);
        }
      gimple_stmt_iterator gsi_from;
      for (int i = prep_cnt - 1; i >= 0; --i)
        {
          tree plhs = gimple_assign_lhs (prep_stmt[i]);
          reset_flow_sensitive_info (plhs);
          gsi_from = gsi_for_stmt (prep_stmt[i]);
          gsi_move_before (&gsi_from, &gsi);
        }
      gsi_from = gsi_for_stmt (assign);
      gsi_move_before (&gsi_from, &gsi);
      replace_phi_edge_with_variable (cond_bb, e1, phi, lhs);
      return 2;
    }

  return 0;
}
/* The function minmax_replacement does the main work of doing the minmax
   replacement.  Return true if the replacement is done.  Otherwise return
   false.
   BB is the basic block where the replacement is going to be done on.  ARG0
   is argument 0 from the PHI.  Likewise for ARG1.  */

static bool
minmax_replacement (basic_block cond_bb, basic_block middle_bb,
                    edge e0, edge e1, gimple *phi,
                    tree arg0, tree arg1)
{
  tree result, type;
  gcond *cond;
  gassign *new_stmt;
  edge true_edge, false_edge;
  enum tree_code cmp, minmax, ass_code;
  tree smaller, alt_smaller, larger, alt_larger, arg_true, arg_false;
  gimple_stmt_iterator gsi, gsi_from;

  type = TREE_TYPE (PHI_RESULT (phi));

  /* The optimization may be unsafe due to NaNs.  */
  if (HONOR_NANS (type) || HONOR_SIGNED_ZEROS (type))
    return false;

  cond = as_a <gcond *> (last_stmt (cond_bb));
  cmp = gimple_cond_code (cond);

  /* This transformation is only valid for order comparisons.  Record which
     operand is smaller/larger if the result of the comparison is true.  */
  alt_smaller = NULL_TREE;
  alt_larger = NULL_TREE;
  if (cmp == LT_EXPR || cmp == LE_EXPR)
    {
      smaller = gimple_cond_lhs (cond);
      larger = gimple_cond_rhs (cond);
      /* If we have smaller < CST it is equivalent to smaller <= CST-1.
         Likewise smaller <= CST is equivalent to smaller < CST+1.  */
      if (TREE_CODE (larger) == INTEGER_CST)
        {
          if (cmp == LT_EXPR)
            {
              bool overflow;
              wide_int alt = wi::sub (wi::to_wide (larger), 1,
                                      TYPE_SIGN (TREE_TYPE (larger)),
                                      &overflow);
              if (! overflow)
                alt_larger = wide_int_to_tree (TREE_TYPE (larger), alt);
            }
          else
            {
              bool overflow;
              wide_int alt = wi::add (wi::to_wide (larger), 1,
                                      TYPE_SIGN (TREE_TYPE (larger)),
                                      &overflow);
              if (! overflow)
                alt_larger = wide_int_to_tree (TREE_TYPE (larger), alt);
            }
        }
    }
  else if (cmp == GT_EXPR || cmp == GE_EXPR)
    {
      smaller = gimple_cond_rhs (cond);
      larger = gimple_cond_lhs (cond);
      /* If we have larger > CST it is equivalent to larger >= CST+1.
         Likewise larger >= CST is equivalent to larger > CST-1.  */
      if (TREE_CODE (smaller) == INTEGER_CST)
        {
          if (cmp == GT_EXPR)
            {
              bool overflow;
              wide_int alt = wi::add (wi::to_wide (smaller), 1,
                                      TYPE_SIGN (TREE_TYPE (smaller)),
                                      &overflow);
              if (! overflow)
                alt_smaller = wide_int_to_tree (TREE_TYPE (smaller), alt);
            }
          else
            {
              bool overflow;
              wide_int alt = wi::sub (wi::to_wide (smaller), 1,
                                      TYPE_SIGN (TREE_TYPE (smaller)),
                                      &overflow);
              if (! overflow)
                alt_smaller = wide_int_to_tree (TREE_TYPE (smaller), alt);
            }
        }
    }
  else
    return false;

  /* We need to know which is the true edge and which is the false
     edge so that we know how the arguments correspond to the paths.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);

  /* Forward the edges over the middle basic block.  */
  if (true_edge->dest == middle_bb)
    true_edge = EDGE_SUCC (true_edge->dest, 0);
  if (false_edge->dest == middle_bb)
    false_edge = EDGE_SUCC (false_edge->dest, 0);

  if (true_edge == e0)
    {
      gcc_assert (false_edge == e1);
      arg_true = arg0;
      arg_false = arg1;
    }
  else
    {
      gcc_assert (false_edge == e0);
      gcc_assert (true_edge == e1);
      arg_true = arg1;
      arg_false = arg0;
    }

  if (empty_block_p (middle_bb))
    {
      if ((operand_equal_for_phi_arg_p (arg_true, smaller)
           || (alt_smaller
               && operand_equal_for_phi_arg_p (arg_true, alt_smaller)))
          && (operand_equal_for_phi_arg_p (arg_false, larger)
              || (alt_larger
                  && operand_equal_for_phi_arg_p (arg_true, alt_larger))))
        {
          /* Case

             if (smaller < larger)
             rslt = smaller;
             else
             rslt = larger;  */
          minmax = MIN_EXPR;
        }
      else if ((operand_equal_for_phi_arg_p (arg_false, smaller)
                || (alt_smaller
                    && operand_equal_for_phi_arg_p (arg_false, alt_smaller)))
               && (operand_equal_for_phi_arg_p (arg_true, larger)
                   || (alt_larger
                       && operand_equal_for_phi_arg_p (arg_true, alt_larger))))
        minmax = MAX_EXPR;
      else
        return false;
    }
  else
    {
      /* Recognize the following case, assuming d <= u:

         if (a <= u)
           b = MAX (a, d);
         x = PHI <b, u>

         This is equivalent to

         b = MAX (a, d);
         x = MIN (b, u);  */

      gimple *assign = last_and_only_stmt (middle_bb);
      tree lhs, op0, op1, bound;

      if (!assign
          || gimple_code (assign) != GIMPLE_ASSIGN)
        return false;

      lhs = gimple_assign_lhs (assign);
      ass_code = gimple_assign_rhs_code (assign);
      if (ass_code != MAX_EXPR && ass_code != MIN_EXPR)
        return false;
      op0 = gimple_assign_rhs1 (assign);
      op1 = gimple_assign_rhs2 (assign);

      if (true_edge->src == middle_bb)
        {
          /* We got here if the condition is true, i.e., SMALLER < LARGER.  */
          if (!operand_equal_for_phi_arg_p (lhs, arg_true))
            return false;

          if (operand_equal_for_phi_arg_p (arg_false, larger)
              || (alt_larger
                  && operand_equal_for_phi_arg_p (arg_false, alt_larger)))
            {
              /* Case

                 if (smaller < larger)
                   {
                     r' = MAX_EXPR (smaller, bound)
                   }
                 r = PHI <r', larger>  --> to be turned to MIN_EXPR.  */
              if (ass_code != MAX_EXPR)
                return false;

              minmax = MIN_EXPR;
              if (operand_equal_for_phi_arg_p (op0, smaller)
                  || (alt_smaller
                      && operand_equal_for_phi_arg_p (op0, alt_smaller)))
                bound = op1;
              else if (operand_equal_for_phi_arg_p (op1, smaller)
                       || (alt_smaller
                           && operand_equal_for_phi_arg_p (op1, alt_smaller)))
                bound = op0;
              else
                return false;

              /* We need BOUND <= LARGER.  */
              if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
                                                  bound, larger)))
                return false;
            }
          else if (operand_equal_for_phi_arg_p (arg_false, smaller)
                   || (alt_smaller
                       && operand_equal_for_phi_arg_p (arg_false, alt_smaller)))
            {
              /* Case

                 if (smaller < larger)
                   {
                     r' = MIN_EXPR (larger, bound)
                   }
                 r = PHI <r', smaller>  --> to be turned to MAX_EXPR.  */
              if (ass_code != MIN_EXPR)
                return false;

              minmax = MAX_EXPR;
              if (operand_equal_for_phi_arg_p (op0, larger)
                  || (alt_larger
                      && operand_equal_for_phi_arg_p (op0, alt_larger)))
                bound = op1;
              else if (operand_equal_for_phi_arg_p (op1, larger)
                       || (alt_larger
                           && operand_equal_for_phi_arg_p (op1, alt_larger)))
                bound = op0;
              else
                return false;

              /* We need BOUND >= SMALLER.  */
              if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
                                                  bound, smaller)))
                return false;
            }
          else
            return false;
        }
      else
        {
          /* We got here if the condition is false, i.e., SMALLER > LARGER.  */
          if (!operand_equal_for_phi_arg_p (lhs, arg_false))
            return false;

          if (operand_equal_for_phi_arg_p (arg_true, larger)
              || (alt_larger
                  && operand_equal_for_phi_arg_p (arg_true, alt_larger)))
            {
              /* Case

                 if (smaller > larger)
                   {
                     r' = MIN_EXPR (smaller, bound)
                   }
                 r = PHI <r', larger>  --> to be turned to MAX_EXPR.  */
              if (ass_code != MIN_EXPR)
                return false;

              minmax = MAX_EXPR;
              if (operand_equal_for_phi_arg_p (op0, smaller)
                  || (alt_smaller
                      && operand_equal_for_phi_arg_p (op0, alt_smaller)))
                bound = op1;
              else if (operand_equal_for_phi_arg_p (op1, smaller)
                       || (alt_smaller
                           && operand_equal_for_phi_arg_p (op1, alt_smaller)))
                bound = op0;
              else
                return false;

              /* We need BOUND >= LARGER.  */
              if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
                                                  bound, larger)))
                return false;
            }
          else if (operand_equal_for_phi_arg_p (arg_true, smaller)
                   || (alt_smaller
                       && operand_equal_for_phi_arg_p (arg_true, alt_smaller)))
            {
              /* Case

                 if (smaller > larger)
                   {
                     r' = MAX_EXPR (larger, bound)
                   }
                 r = PHI <r', smaller>  --> to be turned to MIN_EXPR.  */
              if (ass_code != MAX_EXPR)
                return false;

              minmax = MIN_EXPR;
              if (operand_equal_for_phi_arg_p (op0, larger))
                bound = op1;
              else if (operand_equal_for_phi_arg_p (op1, larger))
                bound = op0;
              else
                return false;

              /* We need BOUND <= SMALLER.  */
              if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
                                                  bound, smaller)))
                return false;
            }
          else
            return false;
        }

      /* Move the statement from the middle block.  */
      gsi = gsi_last_bb (cond_bb);
      gsi_from = gsi_last_nondebug_bb (middle_bb);
      reset_flow_sensitive_info (SINGLE_SSA_TREE_OPERAND (gsi_stmt (gsi_from),
                                                          SSA_OP_DEF));
      gsi_move_before (&gsi_from, &gsi);
    }

  /* Create an SSA var to hold the min/max result.  If we're the only
     things setting the target PHI, then we can clone the PHI
     variable.  Otherwise we must create a new one.  */
  result = PHI_RESULT (phi);
  if (EDGE_COUNT (gimple_bb (phi)->preds) == 2)
    result = duplicate_ssa_name (result, NULL);
  else
    result = make_ssa_name (TREE_TYPE (result));

  /* Emit the statement to compute min/max.  */
  new_stmt = gimple_build_assign (result, minmax, arg0, arg1);
  gsi = gsi_last_bb (cond_bb);
  gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);

  replace_phi_edge_with_variable (cond_bb, e1, phi, result);

  return true;
}
/* The function abs_replacement does the main work of doing the absolute
   replacement.  Return true if the replacement is done.  Otherwise return
   false.
   bb is the basic block where the replacement is going to be done on.  arg0
   is argument 0 from the phi.  Likewise for arg1.  */

static bool
abs_replacement (basic_block cond_bb, basic_block middle_bb,
                 edge e0 ATTRIBUTE_UNUSED, edge e1,
                 gimple *phi, tree arg0, tree arg1)
{
  tree result;
  gassign *new_stmt;
  gimple *cond;
  gimple_stmt_iterator gsi;
  edge true_edge, false_edge;
  gimple *assign;
  edge e;
  tree rhs, lhs;
  bool negate;
  enum tree_code cond_code;

  /* If the type says honor signed zeros we cannot do this
     optimization.  */
  if (HONOR_SIGNED_ZEROS (arg1))
    return false;

  /* OTHER_BLOCK must have only one executable statement which must have the
     form arg0 = -arg1 or arg1 = -arg0.  */

  assign = last_and_only_stmt (middle_bb);
  /* If we did not find the proper negation assignment, then we cannot
     optimize.  */
  if (assign == NULL)
    return false;

  /* If we got here, then we have found the only executable statement
     in OTHER_BLOCK.  If it is anything other than arg0 = -arg1 or
     arg1 = -arg0, then we cannot optimize.  */
  if (gimple_code (assign) != GIMPLE_ASSIGN)
    return false;

  lhs = gimple_assign_lhs (assign);

  if (gimple_assign_rhs_code (assign) != NEGATE_EXPR)
    return false;

  rhs = gimple_assign_rhs1 (assign);

  /* The assignment has to be arg0 = -arg1 or arg1 = -arg0.  */
  if (!(lhs == arg0 && rhs == arg1)
      && !(lhs == arg1 && rhs == arg0))
    return false;

  cond = last_stmt (cond_bb);
  result = PHI_RESULT (phi);

  /* Only relationals comparing arg[01] against zero are interesting.  */
  cond_code = gimple_cond_code (cond);
  if (cond_code != GT_EXPR && cond_code != GE_EXPR
      && cond_code != LT_EXPR && cond_code != LE_EXPR)
    return false;

  /* Make sure the conditional is arg[01] OP y.  */
  if (gimple_cond_lhs (cond) != rhs)
    return false;

  if (FLOAT_TYPE_P (TREE_TYPE (gimple_cond_rhs (cond)))
      ? real_zerop (gimple_cond_rhs (cond))
      : integer_zerop (gimple_cond_rhs (cond)))
    ;
  else
    return false;

  /* We need to know which is the true edge and which is the false
     edge so that we know if we have abs or negative abs.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);

  /* For GT_EXPR/GE_EXPR, if the true edge goes to OTHER_BLOCK, then we
     will need to negate the result.  Similarly for LT_EXPR/LE_EXPR if
     the false edge goes to OTHER_BLOCK.  */
  if (cond_code == GT_EXPR || cond_code == GE_EXPR)
    e = true_edge;
  else
    e = false_edge;

  if (e->dest == middle_bb)
    negate = true;
  else
    negate = false;

  /* If the code negates only iff positive then make sure to not
     introduce undefined behavior when negating or computing the absolute.
     ???  We could use range info if present to check for arg1 == INT_MIN.  */
  if (negate
      && (ANY_INTEGRAL_TYPE_P (TREE_TYPE (arg1))
          && ! TYPE_OVERFLOW_WRAPS (TREE_TYPE (arg1))))
    return false;

  result = duplicate_ssa_name (result, NULL);

  if (negate)
    lhs = make_ssa_name (TREE_TYPE (result));
  else
    lhs = result;

  /* Build the modify expression with abs expression.  */
  new_stmt = gimple_build_assign (lhs, ABS_EXPR, rhs);

  gsi = gsi_last_bb (cond_bb);
  gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);

  if (negate)
    {
      /* Get the right GSI.  We want to insert after the recently
         added ABS_EXPR statement (which we know is the first statement
         in the block).  */
      new_stmt = gimple_build_assign (result, NEGATE_EXPR, lhs);

      gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
    }

  replace_phi_edge_with_variable (cond_bb, e1, phi, result);

  /* Note that we optimized this PHI.  */
  return true;
}
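
/* Illustrative sketch, not part of the original sources:

     if (x < 0)
       x = -x;        // middle block: arg = -arg
     use (x);         // PHI <x, -x> at the join point

   becomes x = ABS_EXPR <x>, and the inverted condition yields the
   negated form -ABS_EXPR <x>; x is a hypothetical name.  */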
/* Auxiliary functions to determine the set of memory accesses which
   can't trap because they are preceded by accesses to the same memory
   portion.  We do that for MEM_REFs, so we only need to track
   the SSA_NAME of the pointer indirectly referenced.  The algorithm
   simply is a walk over all instructions in dominator order.  When
   we see a MEM_REF we determine if we've already seen the same
   ref anywhere up to the root of the dominator tree.  If we have,
   the current access can't trap.  If we don't see any dominating
   access, the current access might trap, but might also make later
   accesses non-trapping, so we remember it.  We need to be careful
   with loads and stores: for instance a load might not trap, while
   a store would, so if we see a dominating read access this doesn't
   mean that a later write access would not trap.  Hence we also need
   to differentiate the type of access(es) seen.

   ??? We currently are very conservative and assume that a load might
   trap even if a store doesn't (write-only memory).  This probably is
   overly conservative.  */
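
/* Illustrative sketch, not part of the original sources:

     *p_1 = 0;        // may trap; remembered as a seen access
     if (cond)
       *p_1 = 1;      // same MEM_REF dominated by the access above,
                      // so it is recorded as non-trapping

   which is exactly the property cond_store_replacement needs before
   it makes the second store unconditional; p_1 is a hypothetical
   SSA name.  */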
/* A hash-table of SSA_NAMEs, and in which basic block a MEM_REF
   through it was seen, which would constitute a no-trap region for
   the same accesses.  */
struct name_to_bb
{
  unsigned int ssa_name_ver;
  unsigned int phase;
  bool store;
  HOST_WIDE_INT offset, size;
  basic_block bb;
};

/* Hashtable helpers.  */

struct ssa_names_hasher : free_ptr_hash <name_to_bb>
{
  static inline hashval_t hash (const name_to_bb *);
  static inline bool equal (const name_to_bb *, const name_to_bb *);
};

/* Used for quick clearing of the hash-table when we see calls.
   Hash entries with phase < nt_call_phase are invalid.  */
static unsigned int nt_call_phase;

/* The hash function.  */

inline hashval_t
ssa_names_hasher::hash (const name_to_bb *n)
{
  return n->ssa_name_ver ^ (((hashval_t) n->store) << 31)
         ^ (n->offset << 6) ^ (n->size << 3);
}

/* The equality function of *P1 and *P2.  */

inline bool
ssa_names_hasher::equal (const name_to_bb *n1, const name_to_bb *n2)
{
  return n1->ssa_name_ver == n2->ssa_name_ver
         && n1->store == n2->store
         && n1->offset == n2->offset
         && n1->size == n2->size;
}

class nontrapping_dom_walker : public dom_walker
{
public:
  nontrapping_dom_walker (cdi_direction direction, hash_set<tree> *ps)
    : dom_walker (direction), m_nontrapping (ps), m_seen_ssa_names (128) {}

  virtual edge before_dom_children (basic_block);
  virtual void after_dom_children (basic_block);

private:

  /* We see the expression EXP in basic block BB.  If it's an interesting
     expression (a MEM_REF through an SSA_NAME) possibly insert the
     expression into the set NONTRAP or the hash table of seen expressions.
     STORE is true if this expression is on the LHS, otherwise it's on
     the RHS.  */
  void add_or_mark_expr (basic_block, tree, bool);

  hash_set<tree> *m_nontrapping;

  /* The hash table for remembering what we've seen.  */
  hash_table<ssa_names_hasher> m_seen_ssa_names;
};

/* Called by walk_dominator_tree, when entering the block BB.  */
edge
nontrapping_dom_walker::before_dom_children (basic_block bb)
{
  edge e;
  edge_iterator ei;
  gimple_stmt_iterator gsi;

  /* If we haven't seen all our predecessors, clear the hash-table.  */
  FOR_EACH_EDGE (e, ei, bb->preds)
    if ((((size_t)e->src->aux) & 2) == 0)
      {
        nt_call_phase++;
        break;
      }

  /* Mark this BB as being on the path to dominator root and as visited.  */
  bb->aux = (void*)(1 | 2);

  /* And walk the statements in order.  */
  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple *stmt = gsi_stmt (gsi);

      if ((gimple_code (stmt) == GIMPLE_ASM && gimple_vdef (stmt))
          || (is_gimple_call (stmt)
              && (!nonfreeing_call_p (stmt) || !nonbarrier_call_p (stmt))))
        nt_call_phase++;
      else if (gimple_assign_single_p (stmt) && !gimple_has_volatile_ops (stmt))
        {
          add_or_mark_expr (bb, gimple_assign_lhs (stmt), true);
          add_or_mark_expr (bb, gimple_assign_rhs1 (stmt), false);
        }
    }
  return NULL;
}

/* Called by walk_dominator_tree, when basic block BB is exited.  */
void
nontrapping_dom_walker::after_dom_children (basic_block bb)
{
  /* This BB isn't on the path to dominator root anymore.  */
  bb->aux = (void*)2;
}

/* We see the expression EXP in basic block BB.  If it's an interesting
   expression (a MEM_REF through an SSA_NAME) possibly insert the
   expression into the set NONTRAP or the hash table of seen expressions.
   STORE is true if this expression is on the LHS, otherwise it's on
   the RHS.  */
void
nontrapping_dom_walker::add_or_mark_expr (basic_block bb, tree exp, bool store)
{
  HOST_WIDE_INT size;

  if (TREE_CODE (exp) == MEM_REF
      && TREE_CODE (TREE_OPERAND (exp, 0)) == SSA_NAME
      && tree_fits_shwi_p (TREE_OPERAND (exp, 1))
      && (size = int_size_in_bytes (TREE_TYPE (exp))) > 0)
    {
      tree name = TREE_OPERAND (exp, 0);
      struct name_to_bb map;
      name_to_bb **slot;
      struct name_to_bb *n2bb;
      basic_block found_bb = 0;

      /* Try to find the last seen MEM_REF through the same
         SSA_NAME, which can trap.  */
      map.ssa_name_ver = SSA_NAME_VERSION (name);
      map.phase = 0;
      map.bb = 0;
      map.store = store;
      map.offset = tree_to_shwi (TREE_OPERAND (exp, 1));
      map.size = size;

      slot = m_seen_ssa_names.find_slot (&map, INSERT);
      n2bb = *slot;
      if (n2bb && n2bb->phase >= nt_call_phase)
        found_bb = n2bb->bb;

      /* If we've found a trapping MEM_REF, _and_ it dominates EXP
         (it's in a basic block on the path from us to the dominator root)
         then we can't trap.  */
      if (found_bb && (((size_t)found_bb->aux) & 1) == 1)
        {
          m_nontrapping->add (exp);
        }
      else
        {
          /* EXP might trap, so insert it into the hash table.  */
          if (n2bb)
            {
              n2bb->phase = nt_call_phase;
              n2bb->bb = bb;
            }
          else
            {
              n2bb = XNEW (struct name_to_bb);
              n2bb->ssa_name_ver = SSA_NAME_VERSION (name);
              n2bb->phase = nt_call_phase;
              n2bb->bb = bb;
              n2bb->store = store;
              n2bb->offset = map.offset;
              n2bb->size = size;
              *slot = n2bb;
            }
        }
    }
}

/* This is the entry point of gathering non trapping memory accesses.
   It will do a dominator walk over the whole function, and it will
   make use of the bb->aux pointers.  It returns a set of trees
   (the MEM_REFs itself) which can't trap.  */
static hash_set<tree> *
get_non_trapping (void)
{
  nt_call_phase = 0;
  hash_set<tree> *nontrap = new hash_set<tree>;
  /* We're going to do a dominator walk, so ensure that we have
     dominance information.  */
  calculate_dominance_info (CDI_DOMINATORS);

  nontrapping_dom_walker (CDI_DOMINATORS, nontrap)
    .walk (cfun->cfg->x_entry_block_ptr);

  clear_aux_for_blocks ();
  return nontrap;
}
/* Do the main work of conditional store replacement.  We already know
   that the recognized pattern looks like so:

   split:
     if (cond) goto MIDDLE_BB; else goto JOIN_BB (edge E1)
   MIDDLE_BB:
     something
     fallthrough (edge E0)
   JOIN_BB:
     some more

   We check that MIDDLE_BB contains only one store, that that store
   doesn't trap (not via NOTRAP, but via checking if an access to the same
   memory location dominates us) and that the store has a "simple" RHS.  */

static bool
cond_store_replacement (basic_block middle_bb, basic_block join_bb,
                        edge e0, edge e1, hash_set<tree> *nontrap)
{
  gimple *assign = last_and_only_stmt (middle_bb);
  tree lhs, rhs, name, name2;
  gphi *newphi;
  gassign *new_stmt;
  gimple_stmt_iterator gsi;
  source_location locus;

  /* Check if middle_bb contains only one store.  */
  if (!assign
      || !gimple_assign_single_p (assign)
      || gimple_has_volatile_ops (assign))
    return false;

  locus = gimple_location (assign);
  lhs = gimple_assign_lhs (assign);
  rhs = gimple_assign_rhs1 (assign);
  if (TREE_CODE (lhs) != MEM_REF
      || TREE_CODE (TREE_OPERAND (lhs, 0)) != SSA_NAME
      || !is_gimple_reg_type (TREE_TYPE (lhs)))
    return false;

  /* Prove that we can move the store down.  We could also check
     TREE_THIS_NOTRAP here, but in that case we also could move stores,
     whose value is not available readily, which we want to avoid.  */
  if (!nontrap->contains (lhs))
    return false;

  /* Now we've checked the constraints, so do the transformation:
     1) Remove the single store.  */
  gsi = gsi_for_stmt (assign);
  unlink_stmt_vdef (assign);
  gsi_remove (&gsi, true);
  release_defs (assign);

  /* Make both store and load use alias-set zero as we have to
     deal with the case of the store being a conditional change
     of the dynamic type.  */
  lhs = unshare_expr (lhs);
  tree *basep = &lhs;
  while (handled_component_p (*basep))
    basep = &TREE_OPERAND (*basep, 0);
  if (TREE_CODE (*basep) == MEM_REF
      || TREE_CODE (*basep) == TARGET_MEM_REF)
    TREE_OPERAND (*basep, 1)
      = fold_convert (ptr_type_node, TREE_OPERAND (*basep, 1));
  else
    *basep = build2 (MEM_REF, TREE_TYPE (*basep),
                     build_fold_addr_expr (*basep),
                     build_zero_cst (ptr_type_node));

  /* 2) Insert a load from the memory of the store to the temporary
        on the edge which did not contain the store.  */
  name = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
  new_stmt = gimple_build_assign (name, lhs);
  gimple_set_location (new_stmt, locus);
  gsi_insert_on_edge (e1, new_stmt);

  /* 3) Create a PHI node at the join block, with one argument
        holding the old RHS, and the other holding the temporary
        where we stored the old memory contents.  */
  name2 = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
  newphi = create_phi_node (name2, join_bb);
  add_phi_arg (newphi, rhs, e0, locus);
  add_phi_arg (newphi, name, e1, locus);

  lhs = unshare_expr (lhs);
  new_stmt = gimple_build_assign (lhs, PHI_RESULT (newphi));

  /* 4) Insert that PHI node.  */
  gsi = gsi_after_labels (join_bb);
  if (gsi_end_p (gsi))
    {
      gsi = gsi_last_bb (join_bb);
      gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
    }
  else
    gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);

  return true;
}
/* Do the main work of conditional store replacement.  */

static bool
cond_if_else_store_replacement_1 (basic_block then_bb, basic_block else_bb,
                                  basic_block join_bb, gimple *then_assign,
                                  gimple *else_assign)
{
  tree lhs_base, lhs, then_rhs, else_rhs, name;
  source_location then_locus, else_locus;
  gimple_stmt_iterator gsi;
  gphi *newphi;
  gassign *new_stmt;

  if (then_assign == NULL
      || !gimple_assign_single_p (then_assign)
      || gimple_clobber_p (then_assign)
      || gimple_has_volatile_ops (then_assign)
      || else_assign == NULL
      || !gimple_assign_single_p (else_assign)
      || gimple_clobber_p (else_assign)
      || gimple_has_volatile_ops (else_assign))
    return false;

  lhs = gimple_assign_lhs (then_assign);
  if (!is_gimple_reg_type (TREE_TYPE (lhs))
      || !operand_equal_p (lhs, gimple_assign_lhs (else_assign), 0))
    return false;

  lhs_base = get_base_address (lhs);
  if (lhs_base == NULL_TREE
      || (!DECL_P (lhs_base) && TREE_CODE (lhs_base) != MEM_REF))
    return false;

  then_rhs = gimple_assign_rhs1 (then_assign);
  else_rhs = gimple_assign_rhs1 (else_assign);
  then_locus = gimple_location (then_assign);
  else_locus = gimple_location (else_assign);

  /* Now we've checked the constraints, so do the transformation:
     1) Remove the stores.  */
  gsi = gsi_for_stmt (then_assign);
  unlink_stmt_vdef (then_assign);
  gsi_remove (&gsi, true);
  release_defs (then_assign);

  gsi = gsi_for_stmt (else_assign);
  unlink_stmt_vdef (else_assign);
  gsi_remove (&gsi, true);
  release_defs (else_assign);

  /* 2) Create a PHI node at the join block, with the two stored
        RHS values as arguments.  */
  name = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
  newphi = create_phi_node (name, join_bb);
  add_phi_arg (newphi, then_rhs, EDGE_SUCC (then_bb, 0), then_locus);
  add_phi_arg (newphi, else_rhs, EDGE_SUCC (else_bb, 0), else_locus);

  new_stmt = gimple_build_assign (lhs, PHI_RESULT (newphi));

  /* 3) Insert that PHI node.  */
  gsi = gsi_after_labels (join_bb);
  if (gsi_end_p (gsi))
    {
      gsi = gsi_last_bb (join_bb);
      gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
    }
  else
    gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);

  return true;
}
2034 /* Conditional store replacement. We already know
2035 that the recognized pattern looks like so:
2038 if (cond) goto THEN_BB; else goto ELSE_BB (edge E1)
2048 fallthrough (edge E0)
2052 We check that it is safe to sink the store to JOIN_BB by verifying that
2053 there are no read-after-write or write-after-write dependencies in
2054 THEN_BB and ELSE_BB. */
2057 cond_if_else_store_replacement (basic_block then_bb
, basic_block else_bb
,
2058 basic_block join_bb
)
2060 gimple
*then_assign
= last_and_only_stmt (then_bb
);
2061 gimple
*else_assign
= last_and_only_stmt (else_bb
);
2062 vec
<data_reference_p
> then_datarefs
, else_datarefs
;
2063 vec
<ddr_p
> then_ddrs
, else_ddrs
;
2064 gimple
*then_store
, *else_store
;
2065 bool found
, ok
= false, res
;
2066 struct data_dependence_relation
*ddr
;
2067 data_reference_p then_dr
, else_dr
;
2069 tree then_lhs
, else_lhs
;
2070 basic_block blocks
[3];
2072 if (MAX_STORES_TO_SINK
== 0)
2075 /* Handle the case with single statement in THEN_BB and ELSE_BB. */
2076 if (then_assign
&& else_assign
)
2077 return cond_if_else_store_replacement_1 (then_bb
, else_bb
, join_bb
,
2078 then_assign
, else_assign
);
2080 /* Find data references. */
2081 then_datarefs
.create (1);
2082 else_datarefs
.create (1);
2083 if ((find_data_references_in_bb (NULL
, then_bb
, &then_datarefs
)
2085 || !then_datarefs
.length ()
2086 || (find_data_references_in_bb (NULL
, else_bb
, &else_datarefs
)
2088 || !else_datarefs
.length ())
2090 free_data_refs (then_datarefs
);
2091 free_data_refs (else_datarefs
);
  /* Find pairs of stores with equal LHS.  */
  auto_vec<gimple *, 1> then_stores, else_stores;
  FOR_EACH_VEC_ELT (then_datarefs, i, then_dr)
    {
      if (DR_IS_READ (then_dr))
	continue;

      then_store = DR_STMT (then_dr);
      then_lhs = gimple_get_lhs (then_store);
      if (then_lhs == NULL_TREE)
	continue;
      found = false;

      FOR_EACH_VEC_ELT (else_datarefs, j, else_dr)
	{
	  if (DR_IS_READ (else_dr))
	    continue;

	  else_store = DR_STMT (else_dr);
	  else_lhs = gimple_get_lhs (else_store);
	  if (else_lhs == NULL_TREE)
	    continue;

	  if (operand_equal_p (then_lhs, else_lhs, 0))
	    {
	      found = true;
	      break;
	    }
	}

      if (!found)
	continue;

      then_stores.safe_push (then_store);
      else_stores.safe_push (else_store);
    }

  /* No pairs of stores found.  */
  if (!then_stores.length ()
      || then_stores.length () > (unsigned) MAX_STORES_TO_SINK)
    {
      free_data_refs (then_datarefs);
      free_data_refs (else_datarefs);
      return false;
    }

  /* Compute and check data dependencies in both basic blocks.  */
  then_ddrs.create (1);
  else_ddrs.create (1);
  if (!compute_all_dependences (then_datarefs, &then_ddrs,
				vNULL, false)
      || !compute_all_dependences (else_datarefs, &else_ddrs,
				   vNULL, false))
    {
      free_dependence_relations (then_ddrs);
      free_dependence_relations (else_ddrs);
      free_data_refs (then_datarefs);
      free_data_refs (else_datarefs);
      return false;
    }
  blocks[0] = then_bb;
  blocks[1] = else_bb;
  blocks[2] = join_bb;
  renumber_gimple_stmt_uids_in_blocks (blocks, 3);

  /* Check that there are no read-after-write or write-after-write
     dependencies in THEN_BB.  */
  FOR_EACH_VEC_ELT (then_ddrs, i, ddr)
    {
      struct data_reference *dra = DDR_A (ddr);
      struct data_reference *drb = DDR_B (ddr);

      if (DDR_ARE_DEPENDENT (ddr) != chrec_known
	  && ((DR_IS_READ (dra) && DR_IS_WRITE (drb)
	       && gimple_uid (DR_STMT (dra)) > gimple_uid (DR_STMT (drb)))
	      || (DR_IS_READ (drb) && DR_IS_WRITE (dra)
		  && gimple_uid (DR_STMT (drb)) > gimple_uid (DR_STMT (dra)))
	      || (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))))
	{
	  free_dependence_relations (then_ddrs);
	  free_dependence_relations (else_ddrs);
	  free_data_refs (then_datarefs);
	  free_data_refs (else_datarefs);
	  return false;
	}
    }

  /* Check that there are no read-after-write or write-after-write
     dependencies in ELSE_BB.  */
  FOR_EACH_VEC_ELT (else_ddrs, i, ddr)
    {
      struct data_reference *dra = DDR_A (ddr);
      struct data_reference *drb = DDR_B (ddr);

      if (DDR_ARE_DEPENDENT (ddr) != chrec_known
	  && ((DR_IS_READ (dra) && DR_IS_WRITE (drb)
	       && gimple_uid (DR_STMT (dra)) > gimple_uid (DR_STMT (drb)))
	      || (DR_IS_READ (drb) && DR_IS_WRITE (dra)
		  && gimple_uid (DR_STMT (drb)) > gimple_uid (DR_STMT (dra)))
	      || (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))))
	{
	  free_dependence_relations (then_ddrs);
	  free_dependence_relations (else_ddrs);
	  free_data_refs (then_datarefs);
	  free_data_refs (else_datarefs);
	  return false;
	}
    }

  /* Sink stores with same LHS.  */
  FOR_EACH_VEC_ELT (then_stores, i, then_store)
    {
      else_store = else_stores[i];
      res = cond_if_else_store_replacement_1 (then_bb, else_bb, join_bb,
					      then_store, else_store);
      ok = ok || res;
    }

  free_dependence_relations (then_ddrs);
  free_dependence_relations (else_ddrs);
  free_data_refs (then_datarefs);
  free_data_refs (else_datarefs);

  return ok;
}
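
/* Worked example for the dependence checks above (a C-level sketch
   added for illustration, not from the original sources): in

     if (cond)
       {
	 *p = a;
	 t = *p;	/* read-after-write inside THEN_BB */
       }
     else
       *p = b;

   the store to *P cannot be sunk past the later read of *P without
   changing the value of T, so the replacement is rejected.  */
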
/* Return TRUE if STMT has a VUSE whose corresponding VDEF is in BB.  */

static bool
local_mem_dependence (gimple *stmt, basic_block bb)
{
  tree vuse = gimple_vuse (stmt);
  gimple *def;

  if (!vuse)
    return false;

  def = SSA_NAME_DEF_STMT (vuse);
  return (def && gimple_bb (def) == bb);
}

/* Given a "diamond" control-flow pattern where BB0 tests a condition,
   BB1 and BB2 are "then" and "else" blocks dependent on this test,
   and BB3 rejoins control flow following BB1 and BB2, look for
   opportunities to hoist loads as follows.  If BB3 contains a PHI of
   two loads, one each occurring in BB1 and BB2, and the loads are
   provably of adjacent fields in the same structure, then move both
   loads into BB0.  Of course this can only be done if there are no
   dependencies preventing such motion.

   One of the hoisted loads will always be speculative, so the
   transformation is currently conservative:

    - The fields must be strictly adjacent.
    - The two fields must occupy a single memory block that is
      guaranteed to not cross a page boundary.

   The last is difficult to prove, as such memory blocks should be
   aligned on the minimum of the stack alignment boundary and the
   alignment guaranteed by heap allocation interfaces.  Thus we rely
   on a parameter for the alignment value.

   Provided a good value is used for the last case, the first
   restriction could possibly be relaxed.  */
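
/* Illustrative C-level shape of the target (an assumption of this
   commentary, not taken from the original sources):

     struct node { struct node *left; struct node *right; };
     ...
     x = cond ? n->left : n->right;

   LEFT and RIGHT are strictly adjacent fields, so both loads may be
   issued before the branch when they fit in one aligned block.  */
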
static void
hoist_adjacent_loads (basic_block bb0, basic_block bb1,
		      basic_block bb2, basic_block bb3)
{
  int param_align = PARAM_VALUE (PARAM_L1_CACHE_LINE_SIZE);
  unsigned param_align_bits = (unsigned) (param_align * BITS_PER_UNIT);
  gphi_iterator gsi;

  /* Walk the phis in bb3 looking for an opportunity.  We are looking
     for phis of two SSA names, one each of which is defined in bb1 and
     bb2.  */
  for (gsi = gsi_start_phis (bb3); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gphi *phi_stmt = gsi.phi ();
      gimple *def1, *def2;
      tree arg1, arg2, ref1, ref2, field1, field2;
      tree tree_offset1, tree_offset2, tree_size2, next;
      int offset1, offset2, size2;
      unsigned align1;
      gimple_stmt_iterator gsi2;
      basic_block bb_for_def1, bb_for_def2;

      if (gimple_phi_num_args (phi_stmt) != 2
	  || virtual_operand_p (gimple_phi_result (phi_stmt)))
	continue;

      arg1 = gimple_phi_arg_def (phi_stmt, 0);
      arg2 = gimple_phi_arg_def (phi_stmt, 1);

      if (TREE_CODE (arg1) != SSA_NAME
	  || TREE_CODE (arg2) != SSA_NAME
	  || SSA_NAME_IS_DEFAULT_DEF (arg1)
	  || SSA_NAME_IS_DEFAULT_DEF (arg2))
	continue;

      def1 = SSA_NAME_DEF_STMT (arg1);
      def2 = SSA_NAME_DEF_STMT (arg2);

      if ((gimple_bb (def1) != bb1 || gimple_bb (def2) != bb2)
	  && (gimple_bb (def2) != bb1 || gimple_bb (def1) != bb2))
	continue;

      /* Check the mode of the arguments to be sure a conditional move
	 can be generated for it.  */
      if (optab_handler (movcc_optab, TYPE_MODE (TREE_TYPE (arg1)))
	  == CODE_FOR_nothing)
	continue;

      /* Both statements must be assignments whose RHS is a COMPONENT_REF.  */
      if (!gimple_assign_single_p (def1)
	  || !gimple_assign_single_p (def2)
	  || gimple_has_volatile_ops (def1)
	  || gimple_has_volatile_ops (def2))
	continue;

      ref1 = gimple_assign_rhs1 (def1);
      ref2 = gimple_assign_rhs1 (def2);

      if (TREE_CODE (ref1) != COMPONENT_REF
	  || TREE_CODE (ref2) != COMPONENT_REF)
	continue;

      /* The zeroth operand of the two component references must be
	 identical.  It is not sufficient to compare get_base_address of
	 the two references, because this could allow for different
	 elements of the same array in the two trees.  It is not safe to
	 assume that the existence of one array element implies the
	 existence of a different one.  */
      if (!operand_equal_p (TREE_OPERAND (ref1, 0), TREE_OPERAND (ref2, 0), 0))
	continue;

      field1 = TREE_OPERAND (ref1, 1);
      field2 = TREE_OPERAND (ref2, 1);

      /* Check for field adjacency, and ensure field1 comes first.  */
      for (next = DECL_CHAIN (field1);
	   next && TREE_CODE (next) != FIELD_DECL;
	   next = DECL_CHAIN (next))
	;

      if (next != field2)
	{
	  for (next = DECL_CHAIN (field2);
	       next && TREE_CODE (next) != FIELD_DECL;
	       next = DECL_CHAIN (next))
	    ;

	  if (next != field1)
	    continue;

	  std::swap (field1, field2);
	  std::swap (def1, def2);
	}

      bb_for_def1 = gimple_bb (def1);
      bb_for_def2 = gimple_bb (def2);

      /* Check for proper alignment of the first field.  */
      tree_offset1 = bit_position (field1);
      tree_offset2 = bit_position (field2);
      tree_size2 = DECL_SIZE (field2);

      if (!tree_fits_uhwi_p (tree_offset1)
	  || !tree_fits_uhwi_p (tree_offset2)
	  || !tree_fits_uhwi_p (tree_size2))
	continue;

      offset1 = tree_to_uhwi (tree_offset1);
      offset2 = tree_to_uhwi (tree_offset2);
      size2 = tree_to_uhwi (tree_size2);
      align1 = DECL_ALIGN (field1) % param_align_bits;

      if (offset1 % BITS_PER_UNIT != 0)
	continue;

      /* For profitability, the two field references should fit within
	 a single cache line.  */
      if (align1 + offset2 - offset1 + size2 > param_align_bits)
	continue;

      /* The two expressions cannot be dependent upon vdefs defined
	 in bb1/bb2.  */
      if (local_mem_dependence (def1, bb_for_def1)
	  || local_mem_dependence (def2, bb_for_def2))
	continue;

      /* The conditions are satisfied; hoist the loads from bb1 and bb2 into
	 bb0.  We hoist the first one first so that a cache miss is handled
	 efficiently regardless of hardware cache-fill policy.  */
      gsi2 = gsi_for_stmt (def1);
      gsi_move_to_bb_end (&gsi2, bb0);
      gsi2 = gsi_for_stmt (def2);
      gsi_move_to_bb_end (&gsi2, bb0);

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file,
		   "\nHoisting adjacent loads from %d and %d into %d: \n",
		   bb_for_def1->index, bb_for_def2->index, bb0->index);
	  print_gimple_stmt (dump_file, def1, 0, TDF_VOPS|TDF_MEMSYMS);
	  print_gimple_stmt (dump_file, def2, 0, TDF_VOPS|TDF_MEMSYMS);
	}
    }
}
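
/* Worked example of the cache-line check above (numbers are made up
   for illustration): with a 64-byte L1 line, PARAM_ALIGN_BITS is 512.
   Two adjacent 64-bit pointer fields at bit offsets 0 and 64, with an
   ALIGN1 residue of 0, give 0 + 64 - 0 + 64 = 128 <= 512, so both
   loads are assumed to share a cache line and hoisting is considered
   profitable.  */
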
/* Determine whether we should attempt to hoist adjacent loads out of
   diamond patterns in pass_phiopt.  Always hoist loads if
   -fhoist-adjacent-loads is specified and the target machine has
   both a conditional move instruction and a defined cache line size.  */

static bool
gate_hoist_loads (void)
{
  return (flag_hoist_adjacent_loads == 1
	  && PARAM_VALUE (PARAM_L1_CACHE_LINE_SIZE)
	  && HAVE_conditional_move);
}

/* This pass tries to replace an if-then-else block with an
   assignment.  We have four kinds of transformations.  Some of these
   transformations are also performed by the ifcvt RTL optimizer.

   Conditional Replacement
   -----------------------

   This transformation, implemented in conditional_replacement,
   replaces

     bb0:
      if (cond) goto bb2; else goto bb1;
     bb1:
     bb2:
      x = PHI <0 (bb1), 1 (bb0), ...>;

   with

     bb0:
      x' = cond;
      goto bb2;
     bb2:
      x = PHI <x' (bb0), ...>;

   We remove bb1 as it becomes unreachable.  This occurs often due to
   gimplification of conditionals.
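
   For instance (a C-level illustration, not from the original
   description), source like

     x = (a < b) ? 1 : 0;

   gimplifies into the first form above and becomes the straightline
   assignment "x = a < b;".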

   Value Replacement
   -----------------

   This transformation, implemented in value_replacement, replaces

     bb0:
       if (a != b) goto bb2; else goto bb1;
     bb1:
     bb2:
       x = PHI <a (bb1), b (bb0), ...>;

   with

     bb0:
     bb2:
       x = PHI <b (bb0), ...>;

   This opportunity can sometimes occur as a result of other
   optimizations.

   Another case caught by value replacement looks like this:

     bb0:
       t1 = a == CONST;
       t2 = b > c;
       t3 = t1 & t2;
       if (t3 != 0) goto bb1; else goto bb2;
     bb1:
     bb2:
       x = PHI (CONST, a)

   Gets replaced with:
     bb0:
     bb2:
       t1 = a == CONST;
       t2 = b > c;
       t3 = t1 & t2;
       x = a;
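
   For instance (a C-level illustration, not from the original
   description), the first form corresponds to

     x = (a != b) ? b : a;

   which always yields b (when a == b the two are interchangeable),
   so it simplifies to plain "x = b;".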

   ABS Replacement
   ---------------

   This transformation, implemented in abs_replacement, replaces

     bb0:
       if (a >= 0) goto bb2; else goto bb1;
     bb1:
       x = -a;
     bb2:
       x = PHI <x (bb1), a (bb0), ...>;

   with

     bb0:
       x' = ABS_EXPR< a >;
     bb2:
       x = PHI <x' (bb0), ...>;
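
   In C terms (an added illustration), this turns

     x = (a >= 0) ? a : -a;

   into a single absolute-value computation, equivalent to abs (a).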

   MIN/MAX Replacement
   -------------------

   This transformation, implemented in minmax_replacement, replaces

     bb0:
       if (a <= b) goto bb2; else goto bb1;
     bb1:
     bb2:
       x = PHI <b (bb1), a (bb0), ...>;

   with

     bb0:
       x' = MIN_EXPR (a, b)
     bb2:
       x = PHI <x' (bb0), ...>;

   A similar transformation is done for MAX_EXPR.
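
   In C terms (an added illustration), this turns

     x = (a <= b) ? a : b;

   into a single minimum computation, equivalent to MIN (a, b).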

   This pass also performs a fifth transformation of a slightly
   different flavor.

   Factor conversion in COND_EXPR
   ------------------------------

   This transformation factors the conversion out of COND_EXPR with
   factor_out_conditional_conversion.

   For example:
     if (a <= CST) goto <bb 3>; else goto <bb 4>;
     <bb 3>:
     tmp = (int) a;
     <bb 4>:
     tmp = PHI <tmp, CST>

   Instead of the above, we also change to:
     if (a <= CST) goto <bb 3>; else goto <bb 4>;
     <bb 3>:
     <bb 4>:
     a' = PHI <a, CST>
     tmp = (int) a';

   so the conversion is performed once, on the PHI result, rather than
   in one of the arms.
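
   In C terms (types here are an illustrative assumption), with
   "short a;" the expression

     tmp = (a <= CST) ? (int) a : CST;

   can select in a's narrow type first and widen the chosen value
   once.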

   Adjacent Load Hoisting
   ----------------------

   This transformation replaces

     bb0:
       if (...) goto bb2; else goto bb1;
     bb1:
       x1 = (<expr>).field1;
       goto bb3;
     bb2:
       x2 = (<expr>).field2;
     bb3:
       # x = PHI <x1, x2>;

   with the following:

     bb0:
       x1 = (<expr>).field1;
       x2 = (<expr>).field2;
       if (...) goto bb2; else goto bb1;
     bb1:
       goto bb3;
     bb2:
     bb3:
       # x = PHI <x1, x2>;

   The purpose of this transformation is to enable generation of conditional
   move instructions such as Intel CMOVE or PowerPC ISEL.  Because one of
   the loads is speculative, the transformation is restricted to very
   specific cases to avoid introducing a page fault.  We are looking for
   the common idiom:

     if (...)
       x = y->left;
     else
       x = y->right;

   where left and right are typically adjacent pointers in a tree
   structure.  */

namespace {

const pass_data pass_data_phiopt =
{
  GIMPLE_PASS, /* type */
  "phiopt", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_PHIOPT, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_phiopt : public gimple_opt_pass
{
public:
  pass_phiopt (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_phiopt, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_phiopt (m_ctxt); }
  virtual bool gate (function *) { return flag_ssa_phiopt; }
  virtual unsigned int execute (function *)
    {
      return tree_ssa_phiopt_worker (false, gate_hoist_loads ());
    }

}; // class pass_phiopt

} // anon namespace

gimple_opt_pass *
make_pass_phiopt (gcc::context *ctxt)
{
  return new pass_phiopt (ctxt);
}

namespace {

const pass_data pass_data_cselim =
{
  GIMPLE_PASS, /* type */
  "cselim", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_PHIOPT, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_cselim : public gimple_opt_pass
{
public:
  pass_cselim (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_cselim, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return flag_tree_cselim; }
  virtual unsigned int execute (function *) { return tree_ssa_cs_elim (); }

}; // class pass_cselim

} // anon namespace

gimple_opt_pass *
make_pass_cselim (gcc::context *ctxt)
{
  return new pass_cselim (ctxt);
}