/* Optimization of PHI nodes by converting them into straightline code.
   Copyright (C) 2004-2017 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "insn-codes.h"
#include "tree-pass.h"
#include "optabs-tree.h"
#include "insn-config.h"
#include "gimple-pretty-print.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "tree-data-ref.h"
#include "tree-scalar-evolution.h"
#include "tree-inline.h"
static unsigned int tree_ssa_phiopt_worker (bool, bool);
static bool conditional_replacement (basic_block, basic_block,
                                     edge, edge, gphi *, tree, tree);
static gphi *factor_out_conditional_conversion (edge, edge, gphi *, tree, tree,
                                                gimple *);
static int value_replacement (basic_block, basic_block,
                              edge, edge, gimple *, tree, tree);
static bool minmax_replacement (basic_block, basic_block,
                                edge, edge, gimple *, tree, tree);
static bool abs_replacement (basic_block, basic_block,
                             edge, edge, gimple *, tree, tree);
static bool cond_store_replacement (basic_block, basic_block, edge, edge,
                                    hash_set<tree> *);
static bool cond_if_else_store_replacement (basic_block, basic_block, basic_block);
static hash_set<tree> * get_non_trapping ();
static void replace_phi_edge_with_variable (basic_block, edge, gimple *, tree);
static void hoist_adjacent_loads (basic_block, basic_block,
                                  basic_block, basic_block);
static bool gate_hoist_loads (void);
/* This pass tries to transform conditional stores into unconditional
   ones, enabling further simplifications with the simpler then and else
   blocks.  In particular it replaces this:

     if (cond) goto bb2; else goto bb1;

     if (cond) goto bb1; else goto bb2;

     condtmp = PHI <RHS, condtmp'>

   This transformation can only be done under several constraints,
   documented below.  It also replaces:

     if (cond) goto bb2; else goto bb1;

     if (cond) goto bb3; else goto bb1;

     condtmp = PHI <RHS1, RHS2>  */
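/* Illustration (added sketch, not part of the original sources): at the C
   source level the single-store case corresponds roughly to

       if (cond)                         tmp = cond ? RHS : *p;
         *p = RHS;             -->       *p = tmp;

   assuming *p is known not to trap: the store becomes unconditional and the
   selected value is expressed by a PHI (and later possibly a conditional
   move), which removes the branch around the store.  */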
tree_ssa_cs_elim (void)
{
  /* ??? We are not interested in loop related info, but the following
     will create it, ICEing as we didn't init loops with pre-headers.
     An interfacing issue of find_data_references_in_bb.  */
  loop_optimizer_init (LOOPS_NORMAL);

  todo = tree_ssa_phiopt_worker (true, false);

  loop_optimizer_finalize ();
/* Return the singleton PHI in the SEQ of PHIs for edges E0 and E1.  */

single_non_singleton_phi_for_edges (gimple_seq seq, edge e0, edge e1)
{
  gimple_stmt_iterator i;

  if (gimple_seq_singleton_p (seq))
    return as_a <gphi *> (gsi_stmt (gsi_start (seq)));
  for (i = gsi_start (seq); !gsi_end_p (i); gsi_next (&i))
    {
      gphi *p = as_a <gphi *> (gsi_stmt (i));
      /* If the PHI arguments are equal then we can skip this PHI.  */
      if (operand_equal_for_phi_arg_p (gimple_phi_arg_def (p, e0->dest_idx),
                                       gimple_phi_arg_def (p, e1->dest_idx)))

      /* If we already have a PHI whose two edge arguments differ, then it
         is not a singleton for these PHIs; give up.  */
/* The core routine of conditional store replacement and normal
   phi optimizations.  Both share much of the infrastructure in how
   to match applicable basic block patterns.  DO_STORE_ELIM is true
   when we want to do conditional store replacement, false otherwise.
   DO_HOIST_LOADS is true when we want to hoist adjacent loads out
   of diamond control flow patterns, false otherwise.  */

tree_ssa_phiopt_worker (bool do_store_elim, bool do_hoist_loads)
{
  basic_block *bb_order;

  bool cfgchanged = false;
  hash_set<tree> *nontrap = 0;

  /* Calculate the set of non-trapping memory accesses.  */
  nontrap = get_non_trapping ();

  /* Search every basic block for COND_EXPR we may be able to optimize.

     We walk the blocks in order that guarantees that a block with
     a single predecessor is processed before the predecessor.
     This ensures that we collapse inner ifs before visiting the
     outer ones, and also that we do not try to visit a removed
     block.  */
  bb_order = single_pred_before_succ_order ();
  n = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS;

  for (i = 0; i < n; i++)
    {
      basic_block bb1, bb2;

      cond_stmt = last_stmt (bb);

      /* Check to see if the last statement is a GIMPLE_COND.  */
      if (!cond_stmt
          || gimple_code (cond_stmt) != GIMPLE_COND)
      e1 = EDGE_SUCC (bb, 0);

      e2 = EDGE_SUCC (bb, 1);

      /* We cannot do the optimization on abnormal edges.  */
      if ((e1->flags & EDGE_ABNORMAL) != 0
          || (e2->flags & EDGE_ABNORMAL) != 0)

      /* Give up if either bb1 or bb2 has no successors.  */
      if (EDGE_COUNT (bb1->succs) == 0
          || EDGE_COUNT (bb2->succs) == 0)

      /* Find the bb which is the fall through to the other.  */
      if (EDGE_SUCC (bb1, 0)->dest == bb2)
        ;
      else if (EDGE_SUCC (bb2, 0)->dest == bb1)
        std::swap (bb1, bb2);
      else if (do_store_elim
               && EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest)
        {
          basic_block bb3 = EDGE_SUCC (bb1, 0)->dest;

          if (!single_succ_p (bb1)
              || (EDGE_SUCC (bb1, 0)->flags & EDGE_FALLTHRU) == 0
              || !single_succ_p (bb2)
              || (EDGE_SUCC (bb2, 0)->flags & EDGE_FALLTHRU) == 0
              || EDGE_COUNT (bb3->preds) != 2)

          if (cond_if_else_store_replacement (bb1, bb2, bb3))
      else if (do_hoist_loads
               && EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest)
        {
          basic_block bb3 = EDGE_SUCC (bb1, 0)->dest;

          if (!FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (cond_stmt)))
              && single_succ_p (bb1)
              && single_succ_p (bb2)
              && single_pred_p (bb1)
              && single_pred_p (bb2)
              && EDGE_COUNT (bb->succs) == 2
              && EDGE_COUNT (bb3->preds) == 2
              /* If one edge or the other is dominant, a conditional move
                 is likely to perform worse than the well-predicted branch.  */
              && !predictable_edge_p (EDGE_SUCC (bb, 0))
              && !predictable_edge_p (EDGE_SUCC (bb, 1)))
            hoist_adjacent_loads (bb, bb1, bb2, bb3);
      e1 = EDGE_SUCC (bb1, 0);

      /* Make sure that bb1 is just a fall through.  */
      if (!single_succ_p (bb1)
          || (e1->flags & EDGE_FALLTHRU) == 0)

      /* Also make sure that bb1 has only one predecessor and that it
         is bb.  */
      if (!single_pred_p (bb1)
          || single_pred (bb1) != bb)

      /* bb1 is the middle block, bb2 the join block, bb the split block,
         e1 the fallthrough edge from bb1 to bb2.  We can't do the
         optimization if the join block has more than two predecessors.  */
      if (EDGE_COUNT (bb2->preds) > 2)

      if (cond_store_replacement (bb1, bb2, e1, e2, nontrap))
          gimple_seq phis = phi_nodes (bb2);
          gimple_stmt_iterator gsi;
          bool candorest = true;

          /* Value replacement can work with more than one PHI
             so try that first.  */
          for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
            {
              phi = as_a <gphi *> (gsi_stmt (gsi));
              arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
              arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
              if (value_replacement (bb, bb1, e1, e2, phi, arg0, arg1) == 2)

          phi = single_non_singleton_phi_for_edges (phis, e1, e2);
          arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
          arg1 = gimple_phi_arg_def (phi, e2->dest_idx);

          /* Something is wrong if we cannot find the arguments in the PHI
             node.  */
          gcc_assert (arg0 != NULL_TREE && arg1 != NULL_TREE);

          gphi *newphi = factor_out_conditional_conversion (e1, e2, phi,
                                                            arg0, arg1,
                                                            cond_stmt);

          /* factor_out_conditional_conversion may create a new PHI in
             BB2 and eliminate an existing PHI in BB2.  Recompute values
             that may be affected by that change.  */
          arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
          arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
          gcc_assert (arg0 != NULL_TREE && arg1 != NULL_TREE);
          /* Do the replacement of conditional if it can be done.  */
          if (conditional_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
            cfgchanged = true;
          else if (abs_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
            cfgchanged = true;
          else if (minmax_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
            cfgchanged = true;

  /* If the CFG has changed, we should cleanup the CFG.  */
  if (cfgchanged && do_store_elim)
    {
      /* In cond-store replacement we have added some loads on edges
         and new VOPS (as we moved the store, and created a load).  */
      gsi_commit_edge_inserts ();
      return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
    }

  return TODO_cleanup_cfg;
/* Replace PHI node element whose edge is E in block BB with variable NEW.
   Remove the edge from COND_BLOCK which does not lead to BB (COND_BLOCK
   is known to have two edges, one of which must reach BB).  */

replace_phi_edge_with_variable (basic_block cond_block,
                                edge e, gimple *phi, tree new_tree)
{
  basic_block bb = gimple_bb (phi);
  basic_block block_to_remove;
  gimple_stmt_iterator gsi;

  /* Change the PHI argument to new.  */
  SET_USE (PHI_ARG_DEF_PTR (phi, e->dest_idx), new_tree);

  /* Remove the empty basic block.  */
  if (EDGE_SUCC (cond_block, 0)->dest == bb)
    {
      EDGE_SUCC (cond_block, 0)->flags |= EDGE_FALLTHRU;
      EDGE_SUCC (cond_block, 0)->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
      EDGE_SUCC (cond_block, 0)->probability = profile_probability::always ();
      EDGE_SUCC (cond_block, 0)->count += EDGE_SUCC (cond_block, 1)->count;

      block_to_remove = EDGE_SUCC (cond_block, 1)->dest;
    }
  else
    {
      EDGE_SUCC (cond_block, 1)->flags |= EDGE_FALLTHRU;
      EDGE_SUCC (cond_block, 1)->flags
        &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
      EDGE_SUCC (cond_block, 1)->probability = profile_probability::always ();
      EDGE_SUCC (cond_block, 1)->count += EDGE_SUCC (cond_block, 0)->count;

      block_to_remove = EDGE_SUCC (cond_block, 0)->dest;
    }
  delete_basic_block (block_to_remove);

  /* Eliminate the COND_EXPR at the end of COND_BLOCK.  */
  gsi = gsi_last_bb (cond_block);
  gsi_remove (&gsi, true);

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file,
             "COND_EXPR in block %d and PHI in block %d converted to straightline code.\n",
/* PR66726: Factor conversion out of COND_EXPR.  If the arguments of the PHI
   stmt are CONVERT_STMT, factor out the conversion and perform the conversion
   to the result of PHI stmt.  COND_STMT is the controlling predicate.
   Return the newly-created PHI, if any.  */
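/* Illustration (added sketch, not part of the original sources): the
   transformation factors a cast that is common to both PHI arguments out of
   the conditional, e.g.

       t1 = (int) a;                        t = cond ? a : b;
       t2 = (int) b;             -->        r = (int) t;
       r  = cond ? t1 : t2;

   so the conversion is applied once, to the PHI result, which in turn can
   expose min/max or conditional-move opportunities on the narrower value.  */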
factor_out_conditional_conversion (edge e0, edge e1, gphi *phi,
                                   tree arg0, tree arg1, gimple *cond_stmt)
{
  gimple *arg0_def_stmt = NULL, *arg1_def_stmt = NULL, *new_stmt;
  tree new_arg0 = NULL_TREE, new_arg1 = NULL_TREE;

  gimple_stmt_iterator gsi, gsi_for_def;
  source_location locus = gimple_location (phi);
  enum tree_code convert_code;

  /* Handle only PHI statements with two arguments.  TODO: If all
     other arguments to PHI are INTEGER_CST or if their defining
     statements have the same unary operation, we can handle more
     than two arguments too.  */
  if (gimple_phi_num_args (phi) != 2)
  /* First canonicalize to simplify tests.  */
  if (TREE_CODE (arg0) != SSA_NAME)
    std::swap (arg0, arg1);

  if (TREE_CODE (arg0) != SSA_NAME
      || (TREE_CODE (arg1) != SSA_NAME
          && TREE_CODE (arg1) != INTEGER_CST))

  /* Check if arg0 is an SSA_NAME and the stmt which defines arg0 is
     a conversion.  */
  arg0_def_stmt = SSA_NAME_DEF_STMT (arg0);
  if (!gimple_assign_cast_p (arg0_def_stmt))

  /* Use the RHS as new_arg0.  */
  convert_code = gimple_assign_rhs_code (arg0_def_stmt);
  new_arg0 = gimple_assign_rhs1 (arg0_def_stmt);
  if (convert_code == VIEW_CONVERT_EXPR)
    {
      new_arg0 = TREE_OPERAND (new_arg0, 0);
      if (!is_gimple_reg_type (TREE_TYPE (new_arg0)))
  if (TREE_CODE (arg1) == SSA_NAME)
    {
      /* Check if arg1 is an SSA_NAME and the stmt which defines arg1
         is a conversion.  */
      arg1_def_stmt = SSA_NAME_DEF_STMT (arg1);
      if (!is_gimple_assign (arg1_def_stmt)
          || gimple_assign_rhs_code (arg1_def_stmt) != convert_code)

      /* Use the RHS as new_arg1.  */
      new_arg1 = gimple_assign_rhs1 (arg1_def_stmt);
      if (convert_code == VIEW_CONVERT_EXPR)
        new_arg1 = TREE_OPERAND (new_arg1, 0);
    }
      /* If arg1 is an INTEGER_CST, fold it to the new type.  */
      if (INTEGRAL_TYPE_P (TREE_TYPE (new_arg0))
          && int_fits_type_p (arg1, TREE_TYPE (new_arg0)))
        {
          if (gimple_assign_cast_p (arg0_def_stmt))
            {
              /* For the INTEGER_CST case, we are just moving the
                 conversion from one place to another, which can often
                 hurt as the conversion moves further away from the
                 statement that computes the value.  So, perform this
                 only if new_arg0 is an operand of COND_STMT, or
                 if arg0_def_stmt is the only non-debug stmt in
                 its basic block, because then it is possible this
                 could enable further optimizations (minmax replacement
                 etc.).  See PR71016.  */
              if (new_arg0 != gimple_cond_lhs (cond_stmt)
                  && new_arg0 != gimple_cond_rhs (cond_stmt)
                  && gimple_bb (arg0_def_stmt) == e0->src)
                {
                  gsi = gsi_for_stmt (arg0_def_stmt);
                  gsi_prev_nondebug (&gsi);
                  if (!gsi_end_p (gsi))

                  gsi = gsi_for_stmt (arg0_def_stmt);
                  gsi_next_nondebug (&gsi);
                  if (!gsi_end_p (gsi))

              new_arg1 = fold_convert (TREE_TYPE (new_arg0), arg1);
  /* If arg0/arg1 have > 1 use, then this transformation actually increases
     the number of expressions evaluated at runtime.  */
  if (!has_single_use (arg0)
      || (arg1_def_stmt && !has_single_use (arg1)))

  /* If the types of new_arg0 and new_arg1 are different, bail out.  */
  if (!types_compatible_p (TREE_TYPE (new_arg0), TREE_TYPE (new_arg1)))

  /* Create a new PHI stmt.  */
  result = PHI_RESULT (phi);
  temp = make_ssa_name (TREE_TYPE (new_arg0), NULL);
  newphi = create_phi_node (temp, gimple_bb (phi));

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "PHI ");
      print_generic_expr (dump_file, gimple_phi_result (phi));
      fprintf (dump_file,
               " changed to factor conversion out from COND_EXPR.\n");
      fprintf (dump_file, "New stmt with CAST that defines ");
      print_generic_expr (dump_file, result);
      fprintf (dump_file, ".\n");
    }
  /* Remove the old cast(s) that have a single use.  */
  gsi_for_def = gsi_for_stmt (arg0_def_stmt);
  gsi_remove (&gsi_for_def, true);
  release_defs (arg0_def_stmt);

  if (arg1_def_stmt)
    {
      gsi_for_def = gsi_for_stmt (arg1_def_stmt);
      gsi_remove (&gsi_for_def, true);
      release_defs (arg1_def_stmt);
    }

  add_phi_arg (newphi, new_arg0, e0, locus);
  add_phi_arg (newphi, new_arg1, e1, locus);

  /* Create the conversion stmt and insert it.  */
  if (convert_code == VIEW_CONVERT_EXPR)
    temp = fold_build1 (VIEW_CONVERT_EXPR, TREE_TYPE (result), temp);
  new_stmt = gimple_build_assign (result, convert_code, temp);
  gsi = gsi_after_labels (gimple_bb (phi));
  gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);

  /* Remove the original PHI stmt.  */
  gsi = gsi_for_stmt (phi);
  gsi_remove (&gsi, true);
/* The function conditional_replacement does the main work of doing the
   conditional replacement.  Return true if the replacement is done.
   Otherwise return false.
   BB is the basic block where the replacement is going to be done on.  ARG0
   is argument 0 from PHI.  Likewise for ARG1.  */
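/* Illustration (added sketch, not part of the original sources): a PHI that
   selects between the constants 0 and 1,

       r = cond ? 1 : 0;

   becomes a direct use of the condition value,

       r = (int) cond;

   and with the constants 0 and -1 the negated form  r = -(int) cond;
   is generated instead.  */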
conditional_replacement (basic_block cond_bb, basic_block middle_bb,
                         edge e0, edge e1, gphi *phi,
                         tree arg0, tree arg1)
{
  gimple_stmt_iterator gsi;
  edge true_edge, false_edge;
  tree new_var, new_var2;

  /* FIXME: Gimplification of complex type is too hard for now.  */
  /* We aren't prepared to handle vectors either (and it is a question
     if it would be worthwhile anyway).  */
  if (!(INTEGRAL_TYPE_P (TREE_TYPE (arg0))
        || POINTER_TYPE_P (TREE_TYPE (arg0)))
      || !(INTEGRAL_TYPE_P (TREE_TYPE (arg1))
           || POINTER_TYPE_P (TREE_TYPE (arg1))))
    return false;

  /* If the PHI arguments have the constants 0 and 1, or 0 and -1, then
     convert it to the conditional.  */
  if ((integer_zerop (arg0) && integer_onep (arg1))
      || (integer_zerop (arg1) && integer_onep (arg0)))
    neg = false;
  else if ((integer_zerop (arg0) && integer_all_onesp (arg1))
           || (integer_zerop (arg1) && integer_all_onesp (arg0)))
    neg = true;

  if (!empty_block_p (middle_bb))
    return false;
  /* At this point we know we have a GIMPLE_COND with two successors.
     One successor is BB, the other successor is an empty block which
     falls through into BB.

     There is a single PHI node at the join point (BB) and its arguments
     are constants (0, 1) or (0, -1).

     So, given the condition COND, and the two PHI arguments, we can
     rewrite this PHI into non-branching code:

       dest = (COND) or dest = COND'

     We use the condition as-is if the argument associated with the
     true edge has the value one or the argument associated with the
     false edge has the value zero.  Note that those conditions are not
     the same since only one of the outgoing edges from the GIMPLE_COND
     will directly reach BB and thus be associated with an argument.  */

  stmt = last_stmt (cond_bb);
  result = PHI_RESULT (phi);

  /* To handle special cases like floating point comparison, it is easier and
     less error-prone to build a tree and gimplify it on the fly though it is
     less efficient.  */
  cond = fold_build2_loc (gimple_location (stmt),
                          gimple_cond_code (stmt), boolean_type_node,
                          gimple_cond_lhs (stmt), gimple_cond_rhs (stmt));
  /* We need to know which is the true edge and which is the false
     edge so that we know when to invert the condition below.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
  if ((e0 == true_edge && integer_zerop (arg0))
      || (e0 == false_edge && !integer_zerop (arg0))
      || (e1 == true_edge && integer_zerop (arg1))
      || (e1 == false_edge && !integer_zerop (arg1)))
    cond = fold_build1_loc (gimple_location (stmt),
                            TRUTH_NOT_EXPR, TREE_TYPE (cond), cond);

  if (neg)
    {
      cond = fold_convert_loc (gimple_location (stmt),
                               TREE_TYPE (result), cond);
      cond = fold_build1_loc (gimple_location (stmt),
                              NEGATE_EXPR, TREE_TYPE (cond), cond);
    }
  /* Insert our new statements at the end of conditional block before the
     COND_STMT.  */
  gsi = gsi_for_stmt (stmt);
  new_var = force_gimple_operand_gsi (&gsi, cond, true, NULL, true,
                                      GSI_SAME_STMT);

  if (!useless_type_conversion_p (TREE_TYPE (result), TREE_TYPE (new_var)))
    {
      source_location locus_0, locus_1;

      new_var2 = make_ssa_name (TREE_TYPE (result));
      new_stmt = gimple_build_assign (new_var2, CONVERT_EXPR, new_var);
      gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);

      /* Set the locus to the first argument, unless it doesn't have one.  */
      locus_0 = gimple_phi_arg_location (phi, 0);
      locus_1 = gimple_phi_arg_location (phi, 1);
      if (locus_0 == UNKNOWN_LOCATION)
        locus_0 = locus_1;
      gimple_set_location (new_stmt, locus_0);
    }

  replace_phi_edge_with_variable (cond_bb, e1, phi, new_var);
  reset_flow_sensitive_info_in_bb (cond_bb);

  /* Note that we optimized this PHI.  */
  return true;
}
/* Update *ARG which is defined in STMT so that it contains the
   computed value if that seems profitable.  Return true if the
   statement is made dead by that rewriting.  */

jump_function_from_stmt (tree *arg, gimple *stmt)
{
  enum tree_code code = gimple_assign_rhs_code (stmt);
  if (code == ADDR_EXPR)
    {
      /* For arg = &p->i transform it to p, if possible.  */
      tree rhs1 = gimple_assign_rhs1 (stmt);
      HOST_WIDE_INT offset;
      tree tem = get_addr_base_and_unit_offset (TREE_OPERAND (rhs1, 0),
                                                &offset);
      if (tem
          && TREE_CODE (tem) == MEM_REF
          && (mem_ref_offset (tem) + offset) == 0)
        {
          *arg = TREE_OPERAND (tem, 0);
          return true;
        }
    }
  /* TODO: Much like IPA-CP jump-functions we want to handle constant
     additions symbolically here, and we'd need to update the comparison
     code that compares the arg + cst tuples in our caller.  For now the
     code above exactly handles the VEC_BASE pattern from vec.h.  */
  return false;
}
/* RHS is a source argument in a BIT_AND_EXPR which feeds a conditional
   of the form SSA_NAME NE 0.

   If RHS is fed by a simple EQ_EXPR comparison of two values, see if
   the two input values of the EQ_EXPR match arg0 and arg1.

   If so update *code and return TRUE.  Otherwise return FALSE.  */

rhs_is_fed_for_value_replacement (const_tree arg0, const_tree arg1,
                                  enum tree_code *code, const_tree rhs)
{
  /* Obviously if RHS is not an SSA_NAME, we can't look at the defining
     statement.  */
  if (TREE_CODE (rhs) == SSA_NAME)
    {
      gimple *def1 = SSA_NAME_DEF_STMT (rhs);

      /* Verify the defining statement has an EQ_EXPR on the RHS.  */
      if (is_gimple_assign (def1) && gimple_assign_rhs_code (def1) == EQ_EXPR)
        {
          /* Finally verify the source operands of the EQ_EXPR are equal
             to arg0 and arg1.  */
          tree op0 = gimple_assign_rhs1 (def1);
          tree op1 = gimple_assign_rhs2 (def1);
          if ((operand_equal_for_phi_arg_p (arg0, op0)
               && operand_equal_for_phi_arg_p (arg1, op1))
              || (operand_equal_for_phi_arg_p (arg0, op1)
                  && operand_equal_for_phi_arg_p (arg1, op0)))
            {
              /* We will perform the optimization.  */
              *code = gimple_assign_rhs_code (def1);
              return true;
            }
        }
    }
  return false;
}
/* Return TRUE if arg0/arg1 are equal to the rhs/lhs or lhs/rhs of COND.

   Also return TRUE if arg0/arg1 are equal to the source arguments of
   an EQ comparison feeding a BIT_AND_EXPR which feeds COND.

   Return FALSE otherwise.  */
operand_equal_for_value_replacement (const_tree arg0, const_tree arg1,
                                     enum tree_code *code, gimple *cond)
{
  tree lhs = gimple_cond_lhs (cond);
  tree rhs = gimple_cond_rhs (cond);

  if ((operand_equal_for_phi_arg_p (arg0, lhs)
       && operand_equal_for_phi_arg_p (arg1, rhs))
      || (operand_equal_for_phi_arg_p (arg1, lhs)
          && operand_equal_for_phi_arg_p (arg0, rhs)))
    return true;

  /* Now handle more complex case where we have an EQ comparison
     which feeds a BIT_AND_EXPR which feeds COND.

     First verify that COND is of the form SSA_NAME NE 0.  */
  if (*code != NE_EXPR || !integer_zerop (rhs)
      || TREE_CODE (lhs) != SSA_NAME)
    return false;

  /* Now ensure that SSA_NAME is set by a BIT_AND_EXPR.  */
  def = SSA_NAME_DEF_STMT (lhs);
  if (!is_gimple_assign (def) || gimple_assign_rhs_code (def) != BIT_AND_EXPR)
    return false;

  /* Now verify arg0/arg1 correspond to the source arguments of an
     EQ comparison feeding the BIT_AND_EXPR.  */

  tree tmp = gimple_assign_rhs1 (def);
  if (rhs_is_fed_for_value_replacement (arg0, arg1, code, tmp))
    return true;

  tmp = gimple_assign_rhs2 (def);
  if (rhs_is_fed_for_value_replacement (arg0, arg1, code, tmp))
    return true;

  return false;
}
/* Returns true if ARG is a neutral element for operation CODE
   on the RIGHT side.  */
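/* For instance (added illustration, not from the original sources): 0 is
   neutral for x + 0 and x | 0 on either side, but only on the right for
   x << 0, while 1 is neutral for x * 1 on either side.  */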
neutral_element_p (tree_code code, tree arg, bool right)

      return integer_zerop (arg);

    case POINTER_PLUS_EXPR:
      return right && integer_zerop (arg);

      return integer_onep (arg);

      return right && integer_onep (arg);

      return integer_all_onesp (arg);
/* Returns true if ARG is an absorbing element for operation CODE.  */

absorbing_element_p (tree_code code, tree arg, bool right, tree rval)

      return integer_all_onesp (arg);

      return integer_zerop (arg);

      return !right && integer_zerop (arg);

              && integer_zerop (arg)
              && tree_single_nonzero_warnv_p (rval, NULL));
/* The function value_replacement does the main work of doing the value
   replacement.  Return non-zero if the replacement is done.  Otherwise return
   0.  If we remove the middle basic block, return 2.
   BB is the basic block where the replacement is going to be done on.  ARG0
   is argument 0 from the PHI.  Likewise for ARG1.  */
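/* Illustration (added sketch, not part of the original sources): under an
   equality guard the selected value is known on both paths, so

       r = (a == b) ? a : b;

   reduces to plain  r = b;  because a equals b whenever the first arm is
   taken.  The neutral/absorbing-element case handles e.g.
   (x != 0) ? x + y : y, which always equals x + y.  */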
value_replacement (basic_block cond_bb, basic_block middle_bb,
                   edge e0, edge e1, gimple *phi,
                   tree arg0, tree arg1)
{
  gimple_stmt_iterator gsi;

  edge true_edge, false_edge;

  bool empty_or_with_defined_p = true;

  /* If the type says honor signed zeros we cannot do this
     optimization.  */
  if (HONOR_SIGNED_ZEROS (arg1))
    return 0;

  /* If there is a statement in MIDDLE_BB that defines one of the PHI
     arguments, then adjust arg0 or arg1.  */
  gsi = gsi_start_nondebug_after_labels_bb (middle_bb);
  while (!gsi_end_p (gsi))
    {
      gimple *stmt = gsi_stmt (gsi);

      gsi_next_nondebug (&gsi);
      if (!is_gimple_assign (stmt))
        {
          empty_or_with_defined_p = false;
          continue;
        }
      /* Now try to adjust arg0 or arg1 according to the computation
         in the statement.  */
      lhs = gimple_assign_lhs (stmt);
      if (!(lhs == arg0
            && jump_function_from_stmt (&arg0, stmt))
          || (lhs == arg1
              && jump_function_from_stmt (&arg1, stmt)))
        empty_or_with_defined_p = false;
  cond = last_stmt (cond_bb);
  code = gimple_cond_code (cond);

  /* This transformation is only valid for equality comparisons.  */
  if (code != NE_EXPR && code != EQ_EXPR)
    return 0;

  /* We need to know which is the true edge and which is the false
     edge so that we know which PHI argument goes with which edge.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);

  /* At this point we know we have a COND_EXPR with two successors.
     One successor is BB, the other successor is an empty block which
     falls through into BB.

     The condition for the COND_EXPR is known to be NE_EXPR or EQ_EXPR.

     There is a single PHI node at the join point (BB) with two arguments.

     We now need to verify that the two arguments in the PHI node match
     the two arguments to the equality comparison.  */

  if (operand_equal_for_value_replacement (arg0, arg1, &code, cond))
    {
      /* For NE_EXPR, we want to build an assignment result = arg where
         arg is the PHI argument associated with the true edge.  For
         EQ_EXPR we want the PHI argument associated with the false edge.  */
      e = (code == NE_EXPR ? true_edge : false_edge);

      /* Unfortunately, E may not reach BB (it may instead have gone to
         OTHER_BLOCK).  If that is the case, then we want the single outgoing
         edge from OTHER_BLOCK which reaches BB and represents the desired
         path from COND_BLOCK.  */
      if (e->dest == middle_bb)
        e = single_succ_edge (e->dest);

      /* Now we know the incoming edge to BB that has the argument for the
         RHS of our new assignment statement.  */

      /* If the middle basic block was empty or is defining the
         PHI arguments and this is a single phi where the args are different
         for the edges e0 and e1 then we can remove the middle basic block.  */
      if (empty_or_with_defined_p
          && single_non_singleton_phi_for_edges (phi_nodes (gimple_bb (phi)),
                                                 e0, e1) == phi)
        {
          replace_phi_edge_with_variable (cond_bb, e1, phi, arg);
          /* Note that we optimized this PHI.  */
          return 2;
        }
      /* Replace the PHI arguments with arg.  */
      SET_PHI_ARG_DEF (phi, e0->dest_idx, arg);
      SET_PHI_ARG_DEF (phi, e1->dest_idx, arg);
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "PHI ");
          print_generic_expr (dump_file, gimple_phi_result (phi));
          fprintf (dump_file, " reduced for COND_EXPR in block %d to ",
                   cond_bb->index);
          print_generic_expr (dump_file, arg);
          fprintf (dump_file, ".\n");
        }
  /* Now optimize (x != 0) ? x + y : y to just x + y.
     The following condition is too restrictive, there can easily be another
     stmt in middle_bb, for instance a CONVERT_EXPR for the second argument.  */
  gimple *assign = last_and_only_stmt (middle_bb);
  if (!assign || gimple_code (assign) != GIMPLE_ASSIGN
      || gimple_assign_rhs_class (assign) != GIMPLE_BINARY_RHS
      || (!INTEGRAL_TYPE_P (TREE_TYPE (arg0))
          && !POINTER_TYPE_P (TREE_TYPE (arg0))))
    return 0;

  /* Punt if there are (degenerate) PHIs in middle_bb, there should not be.  */
  if (!gimple_seq_empty_p (phi_nodes (middle_bb)))
    return 0;

  /* Only transform if it removes the condition.  */
  if (!single_non_singleton_phi_for_edges (phi_nodes (gimple_bb (phi)), e0, e1))
    return 0;

  /* Size-wise, this is always profitable.  */
  if (optimize_bb_for_speed_p (cond_bb)
      /* The special case is useless if it has a low probability.  */
      && profile_status_for_fn (cfun) != PROFILE_ABSENT
      && EDGE_PRED (middle_bb, 0)->probability < profile_probability::even ()
      /* If assign is cheap, there is no point avoiding it.  */
      && estimate_num_insns (assign, &eni_time_weights)
         >= 3 * estimate_num_insns (cond, &eni_time_weights))
    return 0;
  tree lhs = gimple_assign_lhs (assign);
  tree rhs1 = gimple_assign_rhs1 (assign);
  tree rhs2 = gimple_assign_rhs2 (assign);
  enum tree_code code_def = gimple_assign_rhs_code (assign);
  tree cond_lhs = gimple_cond_lhs (cond);
  tree cond_rhs = gimple_cond_rhs (cond);

  if (((code == NE_EXPR && e1 == false_edge)
       || (code == EQ_EXPR && e1 == true_edge))
          && operand_equal_for_phi_arg_p (rhs2, cond_lhs)
          && neutral_element_p (code_def, cond_rhs, true))
          && operand_equal_for_phi_arg_p (rhs1, cond_lhs)
          && neutral_element_p (code_def, cond_rhs, false))
          || (operand_equal_for_phi_arg_p (arg1, cond_rhs)
              && ((operand_equal_for_phi_arg_p (rhs2, cond_lhs)
                   && absorbing_element_p (code_def, cond_rhs, true, rhs2))
                  || (operand_equal_for_phi_arg_p (rhs1, cond_lhs)
                      && absorbing_element_p (code_def,
                                              cond_rhs, false, rhs2))))))
    {
      gsi = gsi_for_stmt (cond);
      if (INTEGRAL_TYPE_P (TREE_TYPE (lhs)))
        {
          /* Moving ASSIGN might change VR of lhs, e.g. when moving u_6
             in the following example:

               # RANGE [0, 4294967294]
               u_6 = n_5 + 4294967295;

               # u_3 = PHI <u_6(3), 4294967295(2)>  */
          SSA_NAME_RANGE_INFO (lhs) = NULL;
          /* If available, we can use VR of phi result at least.  */
          tree phires = gimple_phi_result (phi);
          struct range_info_def *phires_range_info
            = SSA_NAME_RANGE_INFO (phires);
          if (phires_range_info)
            duplicate_ssa_name_range_info (lhs, SSA_NAME_RANGE_TYPE (phires),
                                           phires_range_info);
        }
      gimple_stmt_iterator gsi_from = gsi_for_stmt (assign);
      gsi_move_before (&gsi_from, &gsi);
      replace_phi_edge_with_variable (cond_bb, e1, phi, lhs);
      return 2;
    }

  return 0;
}
/* The function minmax_replacement does the main work of doing the minmax
   replacement.  Return true if the replacement is done.  Otherwise return
   false.
   BB is the basic block where the replacement is going to be done on.  ARG0
   is argument 0 from the PHI.  Likewise for ARG1.  */
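/* Illustration (added sketch, not part of the original sources):

       r = (a < b) ? a : b;     -->     r = MIN_EXPR <a, b>;

   and the mirrored selection becomes MAX_EXPR, so the branch disappears and
   a min/max instruction can be emitted.  The bound case below additionally
   folds a MIN/MAX already present in the middle block, as in clamping
   patterns.  */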
minmax_replacement (basic_block cond_bb, basic_block middle_bb,
                    edge e0, edge e1, gimple *phi,
                    tree arg0, tree arg1)
{
  edge true_edge, false_edge;
  enum tree_code cmp, minmax, ass_code;
  tree smaller, alt_smaller, larger, alt_larger, arg_true, arg_false;
  gimple_stmt_iterator gsi, gsi_from;

  type = TREE_TYPE (PHI_RESULT (phi));

  /* The optimization may be unsafe due to NaNs.  */
  if (HONOR_NANS (type) || HONOR_SIGNED_ZEROS (type))
    return false;

  cond = as_a <gcond *> (last_stmt (cond_bb));
  cmp = gimple_cond_code (cond);
  /* This transformation is only valid for order comparisons.  Record which
     operand is smaller/larger if the result of the comparison is true.  */
  alt_smaller = NULL_TREE;
  alt_larger = NULL_TREE;
  if (cmp == LT_EXPR || cmp == LE_EXPR)
    {
      smaller = gimple_cond_lhs (cond);
      larger = gimple_cond_rhs (cond);
      /* If we have smaller < CST it is equivalent to smaller <= CST-1.
         Likewise smaller <= CST is equivalent to smaller < CST+1.  */
      if (TREE_CODE (larger) == INTEGER_CST)
        {
          wide_int alt = wi::sub (larger, 1, TYPE_SIGN (TREE_TYPE (larger)),
                                  &overflow);
          alt_larger = wide_int_to_tree (TREE_TYPE (larger), alt);

          wide_int alt = wi::add (larger, 1, TYPE_SIGN (TREE_TYPE (larger)),
                                  &overflow);
          alt_larger = wide_int_to_tree (TREE_TYPE (larger), alt);
  else if (cmp == GT_EXPR || cmp == GE_EXPR)
    {
      smaller = gimple_cond_rhs (cond);
      larger = gimple_cond_lhs (cond);
      /* If we have larger > CST it is equivalent to larger >= CST+1.
         Likewise larger >= CST is equivalent to larger > CST-1.  */
      if (TREE_CODE (smaller) == INTEGER_CST)
        {
          wide_int alt = wi::add (smaller, 1, TYPE_SIGN (TREE_TYPE (smaller)),
                                  &overflow);
          alt_smaller = wide_int_to_tree (TREE_TYPE (smaller), alt);

          wide_int alt = wi::sub (smaller, 1, TYPE_SIGN (TREE_TYPE (smaller)),
                                  &overflow);
          alt_smaller = wide_int_to_tree (TREE_TYPE (smaller), alt);
  /* We need to know which is the true edge and which is the false
     edge so that we know which PHI argument corresponds to each edge.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);

  /* Forward the edges over the middle basic block.  */
  if (true_edge->dest == middle_bb)
    true_edge = EDGE_SUCC (true_edge->dest, 0);
  if (false_edge->dest == middle_bb)
    false_edge = EDGE_SUCC (false_edge->dest, 0);

  if (true_edge == e0)
    {
      gcc_assert (false_edge == e1);
    }
  else
    {
      gcc_assert (false_edge == e0);
      gcc_assert (true_edge == e1);
    }
  if (empty_block_p (middle_bb))
    {
      if ((operand_equal_for_phi_arg_p (arg_true, smaller)
               && operand_equal_for_phi_arg_p (arg_true, alt_smaller)))
          && (operand_equal_for_phi_arg_p (arg_false, larger)
              && operand_equal_for_phi_arg_p (arg_true, alt_larger))))
        /* Case:  if (smaller < larger) ...  */

      else if ((operand_equal_for_phi_arg_p (arg_false, smaller)
                   && operand_equal_for_phi_arg_p (arg_false, alt_smaller)))
               && (operand_equal_for_phi_arg_p (arg_true, larger)
                   && operand_equal_for_phi_arg_p (arg_true, alt_larger))))
  /* Recognize the following case, assuming d <= u:

       ...

     This is equivalent to

       ...  */

  gimple *assign = last_and_only_stmt (middle_bb);
  tree lhs, op0, op1, bound;

  if (!assign
      || gimple_code (assign) != GIMPLE_ASSIGN)
    return false;

  lhs = gimple_assign_lhs (assign);
  ass_code = gimple_assign_rhs_code (assign);
  if (ass_code != MAX_EXPR && ass_code != MIN_EXPR)
    return false;

  op0 = gimple_assign_rhs1 (assign);
  op1 = gimple_assign_rhs2 (assign);

  if (true_edge->src == middle_bb)
    {
      /* We got here if the condition is true, i.e., SMALLER < LARGER.  */
      if (!operand_equal_for_phi_arg_p (lhs, arg_true))
        return false;

      if (operand_equal_for_phi_arg_p (arg_false, larger)
              && operand_equal_for_phi_arg_p (arg_false, alt_larger)))
        {
          /* Case:

               if (smaller < larger)
                 r' = MAX_EXPR (smaller, bound)
               r = PHI <r', larger>  --> to be turned to MIN_EXPR.  */
          if (ass_code != MAX_EXPR)
            return false;

          if (operand_equal_for_phi_arg_p (op0, smaller)
                  && operand_equal_for_phi_arg_p (op0, alt_smaller)))
            bound = op1;
          else if (operand_equal_for_phi_arg_p (op1, smaller)
                       && operand_equal_for_phi_arg_p (op1, alt_smaller)))
            bound = op0;

          /* We need BOUND <= LARGER.  */
          if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
, smaller
)
1287 && operand_equal_for_phi_arg_p (arg_false
, alt_smaller
)))
1291 if (smaller < larger)
1293 r' = MIN_EXPR (larger, bound)
1295 r = PHI <r', smaller> --> to be turned to MAX_EXPR. */
1296 if (ass_code
!= MIN_EXPR
)
1300 if (operand_equal_for_phi_arg_p (op0
, larger
)
1302 && operand_equal_for_phi_arg_p (op0
, alt_larger
)))
1304 else if (operand_equal_for_phi_arg_p (op1
, larger
)
1306 && operand_equal_for_phi_arg_p (op1
, alt_larger
)))
1311 /* We need BOUND >= SMALLER. */
1312 if (!integer_nonzerop (fold_build2 (GE_EXPR
, boolean_type_node
,
      /* We got here if the condition is false, i.e., SMALLER > LARGER.  */
      if (!operand_equal_for_phi_arg_p (lhs, arg_false))
        return false;

      if (operand_equal_for_phi_arg_p (arg_true, larger)
              && operand_equal_for_phi_arg_p (arg_true, alt_larger)))
        {
          /* Case:

               if (smaller > larger)
                 r' = MIN_EXPR (smaller, bound)
               r = PHI <r', larger>  --> to be turned to MAX_EXPR.  */
          if (ass_code != MIN_EXPR)
            return false;

          if (operand_equal_for_phi_arg_p (op0, smaller)
                  && operand_equal_for_phi_arg_p (op0, alt_smaller)))
            bound = op1;
          else if (operand_equal_for_phi_arg_p (op1, smaller)
                       && operand_equal_for_phi_arg_p (op1, alt_smaller)))
            bound = op0;

          /* We need BOUND >= LARGER.  */
          if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
      else if (operand_equal_for_phi_arg_p (arg_true, smaller)
                   && operand_equal_for_phi_arg_p (arg_true, alt_smaller)))
        {
          /* Case:

               if (smaller > larger)
                 r' = MAX_EXPR (larger, bound)
               r = PHI <r', smaller>  --> to be turned to MIN_EXPR.  */
          if (ass_code != MAX_EXPR)
            return false;

          if (operand_equal_for_phi_arg_p (op0, larger))
            bound = op1;
          else if (operand_equal_for_phi_arg_p (op1, larger))
            bound = op0;

          /* We need BOUND <= SMALLER.  */
          if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
  /* Move the statement from the middle block.  */
  gsi = gsi_last_bb (cond_bb);
  gsi_from = gsi_last_nondebug_bb (middle_bb);
  gsi_move_before (&gsi_from, &gsi);

  /* Create an SSA var to hold the min/max result.  If we're the only
     things setting the target PHI, then we can clone the PHI
     variable.  Otherwise we must create a new one.  */
  result = PHI_RESULT (phi);
  if (EDGE_COUNT (gimple_bb (phi)->preds) == 2)
    result = duplicate_ssa_name (result, NULL);
  else
    result = make_ssa_name (TREE_TYPE (result));

  /* Emit the statement to compute min/max.  */
  new_stmt = gimple_build_assign (result, minmax, arg0, arg1);
  gsi = gsi_last_bb (cond_bb);
  gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);

  replace_phi_edge_with_variable (cond_bb, e1, phi, result);
  reset_flow_sensitive_info_in_bb (cond_bb);
/* The function absolute_replacement does the main work of doing the absolute
   replacement.  Return true if the replacement is done.  Otherwise return
   false.
   bb is the basic block where the replacement is going to be done on.  arg0
   is argument 0 from the phi.  Likewise for arg1.  */
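/* Illustration (added sketch, not part of the original sources):

       if (a < 0)  r = -a;  else  r = a;      -->     r = ABS_EXPR <a>;

   and when the negated value is selected on the positive side, the ABS_EXPR
   result is negated instead, giving r = -ABS_EXPR <a>.  */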
abs_replacement (basic_block cond_bb, basic_block middle_bb,
                 edge e0 ATTRIBUTE_UNUSED, edge e1,
                 gimple *phi, tree arg0, tree arg1)
{
  gimple_stmt_iterator gsi;
  edge true_edge, false_edge;

  enum tree_code cond_code;

  /* If the type says honor signed zeros we cannot do this
     optimization.  */
  if (HONOR_SIGNED_ZEROS (arg1))
    return false;
  /* OTHER_BLOCK must have only one executable statement which must have the
     form arg0 = -arg1 or arg1 = -arg0.  */

  assign = last_and_only_stmt (middle_bb);
  /* If we did not find the proper negation assignment, then we cannot
     optimize.  */

  /* If we got here, then we have found the only executable statement
     in OTHER_BLOCK.  If it is anything other than arg0 = -arg1 or
     arg1 = -arg0, then we cannot optimize.  */
  if (gimple_code (assign) != GIMPLE_ASSIGN)
    return false;

  lhs = gimple_assign_lhs (assign);

  if (gimple_assign_rhs_code (assign) != NEGATE_EXPR)
    return false;

  rhs = gimple_assign_rhs1 (assign);

  /* The assignment has to be arg0 = -arg1 or arg1 = -arg0.  */
  if (!(lhs == arg0 && rhs == arg1)
      && !(lhs == arg1 && rhs == arg0))
    return false;
  cond = last_stmt (cond_bb);
  result = PHI_RESULT (phi);

  /* Only relationals comparing arg[01] against zero are interesting.  */
  cond_code = gimple_cond_code (cond);
  if (cond_code != GT_EXPR && cond_code != GE_EXPR
      && cond_code != LT_EXPR && cond_code != LE_EXPR)
    return false;

  /* Make sure the conditional is arg[01] OP y.  */
  if (gimple_cond_lhs (cond) != rhs)
    return false;

  if (FLOAT_TYPE_P (TREE_TYPE (gimple_cond_rhs (cond)))
      ? real_zerop (gimple_cond_rhs (cond))
      : integer_zerop (gimple_cond_rhs (cond)))
    ;
  else
    return false;
  /* We need to know which is the true edge and which is the false
     edge so that we know if we have abs or negative abs.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);

  /* For GT_EXPR/GE_EXPR, if the true edge goes to OTHER_BLOCK, then we
     will need to negate the result.  Similarly for LT_EXPR/LE_EXPR if
     the false edge goes to OTHER_BLOCK.  */
  if (cond_code == GT_EXPR || cond_code == GE_EXPR)
    e = true_edge;
  else
    e = false_edge;

  if (e->dest == middle_bb)
    negate = true;
  else
    negate = false;

  /* If the code negates only iff positive then make sure to not
     introduce undefined behavior when negating or computing the absolute.
     ??? We could use range info if present to check for arg1 == INT_MIN.  */
  if (negate
      && (ANY_INTEGRAL_TYPE_P (TREE_TYPE (arg1))
          && ! TYPE_OVERFLOW_WRAPS (TREE_TYPE (arg1))))
    return false;

  result = duplicate_ssa_name (result, NULL);

  if (negate)
    lhs = make_ssa_name (TREE_TYPE (result));
  else
    lhs = result;
  /* Build the modify expression with abs expression.  */
  new_stmt = gimple_build_assign (lhs, ABS_EXPR, rhs);

  gsi = gsi_last_bb (cond_bb);
  gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);

  if (negate)
    {
      /* Get the right GSI.  We want to insert after the recently
         added ABS_EXPR statement (which we know is the first statement
         in the block).  */
      new_stmt = gimple_build_assign (result, NEGATE_EXPR, lhs);

      gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
    }

  replace_phi_edge_with_variable (cond_bb, e1, phi, result);
  reset_flow_sensitive_info_in_bb (cond_bb);

  /* Note that we optimized this PHI.  */
  return true;
}
/* Auxiliary functions to determine the set of memory accesses which
   can't trap because they are preceded by accesses to the same memory
   portion.  We do that for MEM_REFs, so we only need to track
   the SSA_NAME of the pointer indirectly referenced.  The algorithm
   simply is a walk over all instructions in dominator order.  When
   we see a MEM_REF we determine if we've already seen the same
   ref anywhere up to the root of the dominator tree.  If we have, the
   current access can't trap.  If we don't see any dominating access
   the current access might trap, but might also make later accesses
   non-trapping, so we remember it.  We need to be careful with loads
   or stores, for instance a load might not trap, while a store would,
   so if we see a dominating read access this doesn't mean that a later
   write access would not trap.  Hence we also need to differentiate the
   type of access(es) seen.

   ??? We currently are very conservative and assume that a load might
   trap even if a store doesn't (write-only memory).  This probably is
   overly conservative.  */
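/* Illustration (added sketch, not part of the original sources): in

       *p = x;            <-- first access through p, might trap
       if (cond)
         *p = y;          <-- same base, offset and size: dominated, can't trap

   the second store is recorded as non-trapping because an access to the same
   memory is already performed on every path that reaches it, which is what
   allows cond_store_replacement to sink it.  */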
/* A hash-table of SSA_NAMEs, and in which basic block a MEM_REF
   through it was seen, which would constitute a no-trap region for
   the same accesses.  */
struct name_to_bb
{
  unsigned int ssa_name_ver;
  HOST_WIDE_INT offset, size;
/* Hashtable helpers.  */

struct ssa_names_hasher : free_ptr_hash <name_to_bb>
{
  static inline hashval_t hash (const name_to_bb *);
  static inline bool equal (const name_to_bb *, const name_to_bb *);
};

/* Used for quick clearing of the hash-table when we see calls.
   Hash entries with phase < nt_call_phase are invalid.  */
static unsigned int nt_call_phase;

/* The hash function.  */
inline hashval_t
ssa_names_hasher::hash (const name_to_bb *n)
{
  return n->ssa_name_ver ^ (((hashval_t) n->store) << 31)
         ^ (n->offset << 6) ^ (n->size << 3);
}

/* The equality function of *P1 and *P2.  */
inline bool
ssa_names_hasher::equal (const name_to_bb *n1, const name_to_bb *n2)
{
  return n1->ssa_name_ver == n2->ssa_name_ver
         && n1->store == n2->store
         && n1->offset == n2->offset
         && n1->size == n2->size;
}
class nontrapping_dom_walker : public dom_walker
{
public:
  nontrapping_dom_walker (cdi_direction direction, hash_set<tree> *ps)
    : dom_walker (direction), m_nontrapping (ps), m_seen_ssa_names (128) {}

  virtual edge before_dom_children (basic_block);
  virtual void after_dom_children (basic_block);

private:

  /* We see the expression EXP in basic block BB.  If it's an interesting
     expression (a MEM_REF through an SSA_NAME) possibly insert the
     expression into the set NONTRAP or the hash table of seen expressions.
     STORE is true if this expression is on the LHS, otherwise it's on
     the RHS.  */
  void add_or_mark_expr (basic_block, tree, bool);

  hash_set<tree> *m_nontrapping;

  /* The hash table for remembering what we've seen.  */
  hash_table<ssa_names_hasher> m_seen_ssa_names;
};
/* Called by walk_dominator_tree, when entering the block BB.  */

nontrapping_dom_walker::before_dom_children (basic_block bb)
{
  edge e;
  edge_iterator ei;
  gimple_stmt_iterator gsi;

  /* If we haven't seen all our predecessors, clear the hash-table.  */
  FOR_EACH_EDGE (e, ei, bb->preds)
    if ((((size_t)e->src->aux) & 2) == 0)
      {
        nt_call_phase++;
        break;
      }

  /* Mark this BB as being on the path to dominator root and as visited.  */
  bb->aux = (void*)(1 | 2);

  /* And walk the statements in order.  */
  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple *stmt = gsi_stmt (gsi);

      if ((gimple_code (stmt) == GIMPLE_ASM && gimple_vdef (stmt))
          || (is_gimple_call (stmt)
              && (!nonfreeing_call_p (stmt) || !nonbarrier_call_p (stmt))))
        nt_call_phase++;
      else if (gimple_assign_single_p (stmt) && !gimple_has_volatile_ops (stmt))
        {
          add_or_mark_expr (bb, gimple_assign_lhs (stmt), true);
          add_or_mark_expr (bb, gimple_assign_rhs1 (stmt), false);
        }
    }
/* Called by walk_dominator_tree, when basic block BB is exited.  */

nontrapping_dom_walker::after_dom_children (basic_block bb)
{
  /* This BB isn't on the path to dominator root anymore.  */
/* We see the expression EXP in basic block BB.  If it's an interesting
   expression (a MEM_REF through an SSA_NAME) possibly insert the
   expression into the set NONTRAP or the hash table of seen expressions.
   STORE is true if this expression is on the LHS, otherwise it's on
   the RHS.  */
void
nontrapping_dom_walker::add_or_mark_expr (basic_block bb, tree exp, bool store)
{
  if (TREE_CODE (exp) == MEM_REF
      && TREE_CODE (TREE_OPERAND (exp, 0)) == SSA_NAME
      && tree_fits_shwi_p (TREE_OPERAND (exp, 1))
      && (size = int_size_in_bytes (TREE_TYPE (exp))) > 0)
    {
      tree name = TREE_OPERAND (exp, 0);
      struct name_to_bb map;

      struct name_to_bb *n2bb;
      basic_block found_bb = 0;

      /* Try to find the last seen MEM_REF through the same
         SSA_NAME, which can trap.  */
      map.ssa_name_ver = SSA_NAME_VERSION (name);

      map.offset = tree_to_shwi (TREE_OPERAND (exp, 1));

      slot = m_seen_ssa_names.find_slot (&map, INSERT);

      if (n2bb && n2bb->phase >= nt_call_phase)
        found_bb = n2bb->bb;

      /* If we've found a trapping MEM_REF, _and_ it dominates EXP
         (it's in a basic block on the path from us to the dominator root)
         then we can't trap.  */
      if (found_bb && (((size_t)found_bb->aux) & 1) == 1)
        m_nontrapping->add (exp);
      else
        {
          /* EXP might trap, so insert it into the hash table.  */
          if (n2bb)
            {
              n2bb->phase = nt_call_phase;
            }
          else
            {
              n2bb = XNEW (struct name_to_bb);
              n2bb->ssa_name_ver = SSA_NAME_VERSION (name);
              n2bb->phase = nt_call_phase;

              n2bb->store = store;
              n2bb->offset = map.offset;
            }
        }
    }
/* This is the entry point of gathering non-trapping memory accesses.
   It will do a dominator walk over the whole function, and it will
   make use of the bb->aux pointers.  It returns a set of trees
   (the MEM_REFs themselves) which can't trap.  */
static hash_set<tree> *
get_non_trapping (void)
{
  hash_set<tree> *nontrap = new hash_set<tree>;

  /* We're going to do a dominator walk, so ensure that we have
     dominance information.  */
  calculate_dominance_info (CDI_DOMINATORS);

  nontrapping_dom_walker (CDI_DOMINATORS, nontrap)
    .walk (cfun->cfg->x_entry_block_ptr);

  clear_aux_for_blocks ();
  return nontrap;
}
/* Do the main work of conditional store replacement.  We already know
   that the recognized pattern looks like so:

     if (cond) goto MIDDLE_BB; else goto JOIN_BB (edge E1)

     fallthrough (edge E0)

   We check that MIDDLE_BB contains only one store, that that store
   doesn't trap (not via NOTRAP, but via checking if an access to the same
   memory location dominates us) and that the store has a "simple" RHS.  */
cond_store_replacement (basic_block middle_bb, basic_block join_bb,
                        edge e0, edge e1, hash_set<tree> *nontrap)
{
  gimple *assign = last_and_only_stmt (middle_bb);
  tree lhs, rhs, name, name2;

  gimple_stmt_iterator gsi;
  source_location locus;

  /* Check if middle_bb contains only one store.  */
  if (!assign
      || !gimple_assign_single_p (assign)
      || gimple_has_volatile_ops (assign))
    return false;

  locus = gimple_location (assign);
  lhs = gimple_assign_lhs (assign);
  rhs = gimple_assign_rhs1 (assign);
  if (TREE_CODE (lhs) != MEM_REF
      || TREE_CODE (TREE_OPERAND (lhs, 0)) != SSA_NAME
      || !is_gimple_reg_type (TREE_TYPE (lhs)))
    return false;

  /* Prove that we can move the store down.  We could also check
     TREE_THIS_NOTRAP here, but in that case we also could move stores,
     whose value is not available readily, which we want to avoid.  */
  if (!nontrap->contains (lhs))
    return false;
  /* Now we've checked the constraints, so do the transformation:
     1) Remove the single store.  */
  gsi = gsi_for_stmt (assign);
  unlink_stmt_vdef (assign);
  gsi_remove (&gsi, true);
  release_defs (assign);

  /* 2) Insert a load from the memory of the store to the temporary
        on the edge which did not contain the store.  */
  lhs = unshare_expr (lhs);
  name = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
  new_stmt = gimple_build_assign (name, lhs);
  gimple_set_location (new_stmt, locus);
  gsi_insert_on_edge (e1, new_stmt);

  /* 3) Create a PHI node at the join block, with one argument
        holding the old RHS, and the other holding the temporary
        where we stored the old memory contents.  */
  name2 = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
  newphi = create_phi_node (name2, join_bb);
  add_phi_arg (newphi, rhs, e0, locus);
  add_phi_arg (newphi, name, e1, locus);

  lhs = unshare_expr (lhs);
  new_stmt = gimple_build_assign (lhs, PHI_RESULT (newphi));

  /* 4) Insert that PHI node.  */
  gsi = gsi_after_labels (join_bb);
  if (gsi_end_p (gsi))
    {
      gsi = gsi_last_bb (join_bb);
      gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
    }
  else
    gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);

  return true;
}
/* Do the main work of conditional store replacement.  */

cond_if_else_store_replacement_1 (basic_block then_bb, basic_block else_bb,
                                  basic_block join_bb, gimple *then_assign,
                                  gimple *else_assign)
{
  tree lhs_base, lhs, then_rhs, else_rhs, name;
  source_location then_locus, else_locus;
  gimple_stmt_iterator gsi;

  if (then_assign == NULL
      || !gimple_assign_single_p (then_assign)
      || gimple_clobber_p (then_assign)
      || gimple_has_volatile_ops (then_assign)
      || else_assign == NULL
      || !gimple_assign_single_p (else_assign)
      || gimple_clobber_p (else_assign)
      || gimple_has_volatile_ops (else_assign))
    return false;

  lhs = gimple_assign_lhs (then_assign);
  if (!is_gimple_reg_type (TREE_TYPE (lhs))
      || !operand_equal_p (lhs, gimple_assign_lhs (else_assign), 0))
    return false;

  lhs_base = get_base_address (lhs);
  if (lhs_base == NULL_TREE
      || (!DECL_P (lhs_base) && TREE_CODE (lhs_base) != MEM_REF))
    return false;

  then_rhs = gimple_assign_rhs1 (then_assign);
  else_rhs = gimple_assign_rhs1 (else_assign);
  then_locus = gimple_location (then_assign);
  else_locus = gimple_location (else_assign);

  /* Now we've checked the constraints, so do the transformation:
     1) Remove the stores.  */
  gsi = gsi_for_stmt (then_assign);
  unlink_stmt_vdef (then_assign);
  gsi_remove (&gsi, true);
  release_defs (then_assign);

  gsi = gsi_for_stmt (else_assign);
  unlink_stmt_vdef (else_assign);
  gsi_remove (&gsi, true);
  release_defs (else_assign);

  /* 2) Create a PHI node at the join block, with one argument holding the
        RHS of the then-branch store and the other holding the RHS of the
        else-branch store.  */
  name = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
  newphi = create_phi_node (name, join_bb);
  add_phi_arg (newphi, then_rhs, EDGE_SUCC (then_bb, 0), then_locus);
  add_phi_arg (newphi, else_rhs, EDGE_SUCC (else_bb, 0), else_locus);

  new_stmt = gimple_build_assign (lhs, PHI_RESULT (newphi));

  /* 3) Insert that PHI node.  */
  gsi = gsi_after_labels (join_bb);
  if (gsi_end_p (gsi))
    {
      gsi = gsi_last_bb (join_bb);
      gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
    }
  else
    gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);

  return true;
}
/* Conditional store replacement.  We already know
   that the recognized pattern looks like so:

     if (cond) goto THEN_BB; else goto ELSE_BB (edge E1)

     fallthrough (edge E0)

   We check that it is safe to sink the store to JOIN_BB by verifying that
   there are no read-after-write or write-after-write dependencies in
   THEN_BB and ELSE_BB.  */
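/* Illustration (added sketch, not part of the original sources):

       if (cond)  *p = a;  else  *p = b;

   becomes a single unconditional store fed by a PHI at the join point,

       tmp = PHI <a, b>;
       *p = tmp;

   which is only done when the data-dependence checks below prove that the
   sink is safe.  */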
cond_if_else_store_replacement (basic_block then_bb, basic_block else_bb,
                                basic_block join_bb)
{
  gimple *then_assign = last_and_only_stmt (then_bb);
  gimple *else_assign = last_and_only_stmt (else_bb);
  vec<data_reference_p> then_datarefs, else_datarefs;
  vec<ddr_p> then_ddrs, else_ddrs;
  gimple *then_store, *else_store;
  bool found, ok = false, res;
  struct data_dependence_relation *ddr;
  data_reference_p then_dr, else_dr;

  tree then_lhs, else_lhs;
  basic_block blocks[3];

  if (MAX_STORES_TO_SINK == 0)
    return false;
  /* Handle the case with single statement in THEN_BB and ELSE_BB.  */
  if (then_assign && else_assign)
    return cond_if_else_store_replacement_1 (then_bb, else_bb, join_bb,
                                             then_assign, else_assign);

  /* Find data references.  */
  then_datarefs.create (1);
  else_datarefs.create (1);
  if ((find_data_references_in_bb (NULL, then_bb, &then_datarefs)
       == chrec_dont_know)
      || !then_datarefs.length ()
      || (find_data_references_in_bb (NULL, else_bb, &else_datarefs)
          == chrec_dont_know)
      || !else_datarefs.length ())
    {
      free_data_refs (then_datarefs);
      free_data_refs (else_datarefs);
      return false;
    }
  /* Find pairs of stores with equal LHS.  */
  auto_vec<gimple *, 1> then_stores, else_stores;
  FOR_EACH_VEC_ELT (then_datarefs, i, then_dr)
    {
      if (DR_IS_READ (then_dr))
        continue;

      then_store = DR_STMT (then_dr);
      then_lhs = gimple_get_lhs (then_store);
      if (then_lhs == NULL_TREE)
        continue;

      FOR_EACH_VEC_ELT (else_datarefs, j, else_dr)
        {
          if (DR_IS_READ (else_dr))
            continue;

          else_store = DR_STMT (else_dr);
          else_lhs = gimple_get_lhs (else_store);
          if (else_lhs == NULL_TREE)
            continue;

          if (operand_equal_p (then_lhs, else_lhs, 0))

      then_stores.safe_push (then_store);
      else_stores.safe_push (else_store);
    }
2016 if (!then_stores
.length ()
2017 || then_stores
.length () > (unsigned) MAX_STORES_TO_SINK
)
2019 free_data_refs (then_datarefs
);
2020 free_data_refs (else_datarefs
);
2024 /* Compute and check data dependencies in both basic blocks. */
2025 then_ddrs
.create (1);
2026 else_ddrs
.create (1);
2027 if (!compute_all_dependences (then_datarefs
, &then_ddrs
,
2029 || !compute_all_dependences (else_datarefs
, &else_ddrs
,
2032 free_dependence_relations (then_ddrs
);
2033 free_dependence_relations (else_ddrs
);
2034 free_data_refs (then_datarefs
);
2035 free_data_refs (else_datarefs
);
2038 blocks
[0] = then_bb
;
2039 blocks
[1] = else_bb
;
2040 blocks
[2] = join_bb
;
2041 renumber_gimple_stmt_uids_in_blocks (blocks
, 3);
2043 /* Check that there are no read-after-write or write-after-write dependencies
2045 FOR_EACH_VEC_ELT (then_ddrs
, i
, ddr
)
2047 struct data_reference
*dra
= DDR_A (ddr
);
2048 struct data_reference
*drb
= DDR_B (ddr
);
2050 if (DDR_ARE_DEPENDENT (ddr
) != chrec_known
2051 && ((DR_IS_READ (dra
) && DR_IS_WRITE (drb
)
2052 && gimple_uid (DR_STMT (dra
)) > gimple_uid (DR_STMT (drb
)))
2053 || (DR_IS_READ (drb
) && DR_IS_WRITE (dra
)
2054 && gimple_uid (DR_STMT (drb
)) > gimple_uid (DR_STMT (dra
)))
2055 || (DR_IS_WRITE (dra
) && DR_IS_WRITE (drb
))))
2057 free_dependence_relations (then_ddrs
);
2058 free_dependence_relations (else_ddrs
);
2059 free_data_refs (then_datarefs
);
2060 free_data_refs (else_datarefs
);
  /* Check that there are no read-after-write or write-after-write
     dependencies in ELSE_BB.  */
  FOR_EACH_VEC_ELT (else_ddrs, i, ddr)
    {
      struct data_reference *dra = DDR_A (ddr);
      struct data_reference *drb = DDR_B (ddr);

      if (DDR_ARE_DEPENDENT (ddr) != chrec_known
          && ((DR_IS_READ (dra) && DR_IS_WRITE (drb)
               && gimple_uid (DR_STMT (dra)) > gimple_uid (DR_STMT (drb)))
              || (DR_IS_READ (drb) && DR_IS_WRITE (dra)
                  && gimple_uid (DR_STMT (drb)) > gimple_uid (DR_STMT (dra)))
              || (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))))
        {
          free_dependence_relations (then_ddrs);
          free_dependence_relations (else_ddrs);
          free_data_refs (then_datarefs);
          free_data_refs (else_datarefs);
          return false;
        }
    }
2087 /* Sink stores with same LHS. */
2088 FOR_EACH_VEC_ELT (then_stores
, i
, then_store
)
2090 else_store
= else_stores
[i
];
2091 res
= cond_if_else_store_replacement_1 (then_bb
, else_bb
, join_bb
,
2092 then_store
, else_store
);
2096 free_dependence_relations (then_ddrs
);
2097 free_dependence_relations (else_ddrs
);
2098 free_data_refs (then_datarefs
);
2099 free_data_refs (else_datarefs
);
/* Return TRUE if STMT has a VUSE whose corresponding VDEF is in BB.  */

static bool
local_mem_dependence (gimple *stmt, basic_block bb)
{
  tree vuse = gimple_vuse (stmt);
  gimple *def;

  if (!vuse)
    return false;

  def = SSA_NAME_DEF_STMT (vuse);
  return (def && gimple_bb (def) == bb);
}
/* Given a "diamond" control-flow pattern where BB0 tests a condition,
   BB1 and BB2 are "then" and "else" blocks dependent on this test,
   and BB3 rejoins control flow following BB1 and BB2, look for
   opportunities to hoist loads as follows.  If BB3 contains a PHI of
   two loads, one each occurring in BB1 and BB2, and the loads are
   provably of adjacent fields in the same structure, then move both
   loads into BB0.  Of course this can only be done if there are no
   dependencies preventing such motion.

   One of the hoisted loads will always be speculative, so the
   transformation is currently conservative:

    - The fields must be strictly adjacent.

    - The two fields must occupy a single memory block that is
      guaranteed to not cross a page boundary.

   The last is difficult to prove, as such memory blocks should be
   aligned on the minimum of the stack alignment boundary and the
   alignment guaranteed by heap allocation interfaces.  Thus we rely
   on a parameter for the alignment value.

   Provided a good value is used for the last case, the first
   restriction could possibly be relaxed.  */
static void
hoist_adjacent_loads (basic_block bb0, basic_block bb1,
		      basic_block bb2, basic_block bb3)
{
  int param_align = PARAM_VALUE (PARAM_L1_CACHE_LINE_SIZE);
  unsigned param_align_bits = (unsigned) (param_align * BITS_PER_UNIT);
  gphi_iterator gsi;

  /* Walk the phis in bb3 looking for an opportunity.  We are looking
     for phis of two SSA names, one each of which is defined in bb1 and
     bb2.  */
  for (gsi = gsi_start_phis (bb3); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gphi *phi_stmt = gsi.phi ();
      gimple *def1, *def2;
      tree arg1, arg2, ref1, ref2, field1, field2;
      tree tree_offset1, tree_offset2, tree_size2, next;
      int offset1, offset2, size2;
      unsigned align1;
      gimple_stmt_iterator gsi2;
      basic_block bb_for_def1, bb_for_def2;

      if (gimple_phi_num_args (phi_stmt) != 2
	  || virtual_operand_p (gimple_phi_result (phi_stmt)))
	continue;

      arg1 = gimple_phi_arg_def (phi_stmt, 0);
      arg2 = gimple_phi_arg_def (phi_stmt, 1);

      if (TREE_CODE (arg1) != SSA_NAME
	  || TREE_CODE (arg2) != SSA_NAME
	  || SSA_NAME_IS_DEFAULT_DEF (arg1)
	  || SSA_NAME_IS_DEFAULT_DEF (arg2))
	continue;

      def1 = SSA_NAME_DEF_STMT (arg1);
      def2 = SSA_NAME_DEF_STMT (arg2);

      if ((gimple_bb (def1) != bb1 || gimple_bb (def2) != bb2)
	  && (gimple_bb (def2) != bb1 || gimple_bb (def1) != bb2))
	continue;

      /* Check the mode of the arguments to be sure a conditional move
	 can be generated for it.  */
      if (optab_handler (movcc_optab, TYPE_MODE (TREE_TYPE (arg1)))
	  == CODE_FOR_nothing)
	continue;

      /* Both statements must be assignments whose RHS is a COMPONENT_REF.  */
      if (!gimple_assign_single_p (def1)
	  || !gimple_assign_single_p (def2)
	  || gimple_has_volatile_ops (def1)
	  || gimple_has_volatile_ops (def2))
	continue;

      ref1 = gimple_assign_rhs1 (def1);
      ref2 = gimple_assign_rhs1 (def2);

      if (TREE_CODE (ref1) != COMPONENT_REF
	  || TREE_CODE (ref2) != COMPONENT_REF)
	continue;

      /* The zeroth operand of the two component references must be
	 identical.  It is not sufficient to compare get_base_address of
	 the two references, because this could allow for different
	 elements of the same array in the two trees.  It is not safe to
	 assume that the existence of one array element implies the
	 existence of a different one.  */
      if (!operand_equal_p (TREE_OPERAND (ref1, 0), TREE_OPERAND (ref2, 0), 0))
	continue;

      field1 = TREE_OPERAND (ref1, 1);
      field2 = TREE_OPERAND (ref2, 1);

      /* Check for field adjacency, and ensure field1 comes first.  */
      for (next = DECL_CHAIN (field1);
	   next && TREE_CODE (next) != FIELD_DECL;
	   next = DECL_CHAIN (next))
	;

      if (next != field2)
	{
	  for (next = DECL_CHAIN (field2);
	       next && TREE_CODE (next) != FIELD_DECL;
	       next = DECL_CHAIN (next))
	    ;

	  if (next != field1)
	    continue;

	  std::swap (field1, field2);
	  std::swap (def1, def2);
	}

      bb_for_def1 = gimple_bb (def1);
      bb_for_def2 = gimple_bb (def2);

      /* Check for proper alignment of the first field.  */
      tree_offset1 = bit_position (field1);
      tree_offset2 = bit_position (field2);
      tree_size2 = DECL_SIZE (field2);

      if (!tree_fits_uhwi_p (tree_offset1)
	  || !tree_fits_uhwi_p (tree_offset2)
	  || !tree_fits_uhwi_p (tree_size2))
	continue;

      offset1 = tree_to_uhwi (tree_offset1);
      offset2 = tree_to_uhwi (tree_offset2);
      size2 = tree_to_uhwi (tree_size2);
      align1 = DECL_ALIGN (field1) % param_align_bits;

      if (offset1 % BITS_PER_UNIT != 0)
	continue;

      /* For profitability, the two field references should fit within
	 a single cache line.  */
      if (align1 + offset2 - offset1 + size2 > param_align_bits)
	continue;

      /* The two expressions cannot be dependent upon vdefs defined
	 in bb1/bb2.  */
      if (local_mem_dependence (def1, bb_for_def1)
	  || local_mem_dependence (def2, bb_for_def2))
	continue;

      /* The conditions are satisfied; hoist the loads from bb1 and bb2 into
	 bb0.  We hoist the first one first so that a cache miss is handled
	 efficiently regardless of hardware cache-fill policy.  */
      gsi2 = gsi_for_stmt (def1);
      gsi_move_to_bb_end (&gsi2, bb0);
      gsi2 = gsi_for_stmt (def2);
      gsi_move_to_bb_end (&gsi2, bb0);

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file,
		   "\nHoisting adjacent loads from %d and %d into %d: \n",
		   bb_for_def1->index, bb_for_def2->index, bb0->index);
	  print_gimple_stmt (dump_file, def1, 0, TDF_VOPS|TDF_MEMSYMS);
	  print_gimple_stmt (dump_file, def2, 0, TDF_VOPS|TDF_MEMSYMS);
	}
    }
}
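/* Editor's note: illustrative sketch only, not part of GCC.  It shows the
   source-level idiom hoist_adjacent_loads is after: a diamond that selects
   between two adjacent fields of the same object.  The struct and function
   names are invented for the example.  */

struct example_node
{
  struct example_node *left;	/* field1 */
  struct example_node *right;	/* field2, strictly adjacent to field1 */
};

/* Before hoisting: only one of the two loads executes.  */
static struct example_node *
example_pick_child (struct example_node *n, int go_left)
{
  struct example_node *x;
  if (go_left)
    x = n->left;
  else
    x = n->right;
  return x;
}

/* Conceptually after hoisting: both loads execute unconditionally in the
   block that tests the condition, so the selection can become a conditional
   move.  One of the loads is speculative, which is why the pass requires the
   fields to be adjacent and to fit within one cache line.  */
static struct example_node *
example_pick_child_hoisted (struct example_node *n, int go_left)
{
  struct example_node *x1 = n->left;
  struct example_node *x2 = n->right;
  return go_left ? x1 : x2;
}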
/* Determine whether we should attempt to hoist adjacent loads out of
   diamond patterns in pass_phiopt.  Always hoist loads if
   -fhoist-adjacent-loads is specified and the target machine has
   both a conditional move instruction and a defined cache line size.  */

static bool
gate_hoist_loads (void)
{
  return (flag_hoist_adjacent_loads == 1
	  && PARAM_VALUE (PARAM_L1_CACHE_LINE_SIZE)
	  && HAVE_conditional_move);
}
/* This pass tries to replace an if-then-else block with an
   assignment.  We have four kinds of transformations.  Some of these
   transformations are also performed by the ifcvt RTL optimizer.

   Conditional Replacement
   -----------------------

   This transformation, implemented in conditional_replacement,
   replaces

     bb0:
      if (cond) goto bb2; else goto bb1;
     bb1:
     bb2:
      x = PHI <0 (bb1), 1 (bb0), ...>;

   with

     bb0:
      x' = cond;
      goto bb2;
     bb2:
      x = PHI <x' (bb0), ...>;

   We remove bb1 as it becomes unreachable.  This occurs often due to
   gimplification of conditionals.

   Value Replacement
   -----------------

   This transformation, implemented in value_replacement, replaces

     bb0:
       if (a != b) goto bb2; else goto bb1;
     bb1:
     bb2:
       x = PHI <a (bb1), b (bb0), ...>;

   with

     bb0:
     bb2:
       x = PHI <b (bb0), ...>;

   This opportunity can sometimes occur as a result of other
   optimizations.

   Another case caught by value replacement looks like this:

     bb0:
       t1 = a == CONST;
       t2 = b > c;
       t3 = t1 & t2;
       if (t3 != 0) goto bb1; else goto bb2;
     bb1:
     bb2:
       x = PHI (CONST, a)

   Gets replaced with:
     bb0:
     bb2:
       t1 = a == CONST;
       t2 = b > c;
       t3 = t1 & t2;
       x = a;

   ABS Replacement
   ---------------

   This transformation, implemented in abs_replacement, replaces

     bb0:
       if (a >= 0) goto bb2; else goto bb1;
     bb1:
       x = -a;
     bb2:
       x = PHI <x (bb1), a (bb0), ...>;

   with

     bb0:
       x' = ABS_EXPR< a >;
     bb2:
       x = PHI <x' (bb0), ...>;

   MIN/MAX Replacement
   -------------------

   This transformation, implemented in minmax_replacement, replaces

     bb0:
       if (a <= b) goto bb2; else goto bb1;
     bb1:
     bb2:
       x = PHI <b (bb1), a (bb0), ...>;

   with

     bb0:
       x' = MIN_EXPR (a, b)
     bb2:
       x = PHI <x' (bb0), ...>;

   A similar transformation is done for MAX_EXPR.

   This pass also performs a fifth transformation of a slightly different
   flavor:

   Factor conversion in COND_EXPR
   ------------------------------

   This transformation factors the conversion out of COND_EXPR with
   factor_out_conditional_conversion.  For example:

     if (a <= CST) goto <bb 3>; else goto <bb 4>;
     <bb 3>:
       tmp = (int) a;
     <bb 4>:
       tmp = PHI <tmp, CST>

   becomes

     if (a <= CST) goto <bb 3>; else goto <bb 4>;
     <bb 3>:
     <bb 4>:
       a' = (int) a;
       tmp = PHI <a', CST>

   Adjacent Load Hoisting
   ----------------------

   This transformation replaces

     bb0:
       if (...) goto bb2; else goto bb1;
     bb1:
       x1 = (<expr>).field1;
       goto bb3;
     bb2:
       x2 = (<expr>).field2;
     bb3:
       # x = PHI <x1, x2>;

   with

     bb0:
       x1 = (<expr>).field1;
       x2 = (<expr>).field2;
       if (...) goto bb2; else goto bb1;
     bb1:
       goto bb3;
     bb2:
     bb3:
       # x = PHI <x1, x2>;

   The purpose of this transformation is to enable generation of conditional
   move instructions such as Intel CMOVE or PowerPC ISEL.  Because one of
   the loads is speculative, the transformation is restricted to very
   specific cases to avoid introducing a page fault.  We are looking for
   the common idiom

     if (...)
       x = y->left;
     else
       x = y->right;

   where left and right are typically adjacent pointers in a tree structure.  */
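/* Editor's note: illustrative sketch only, not part of GCC.  These two
   functions show source-level idioms that the ABS and MIN/MAX replacements
   described above turn into straight-line ABS_EXPR / MIN_EXPR code in place
   of a branch; the function names are invented for the example.  */

static int
example_abs (int a)
{
  int x;
  if (a >= 0)
    x = a;
  else
    x = -a;	/* the diamond becomes x = ABS_EXPR <a> */
  return x;
}

static int
example_min (int a, int b)
{
  int x;
  if (a <= b)
    x = a;
  else
    x = b;	/* the diamond becomes x = MIN_EXPR <a, b> */
  return x;
}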
namespace {

const pass_data pass_data_phiopt =
{
  GIMPLE_PASS, /* type */
  "phiopt", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_PHIOPT, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_phiopt : public gimple_opt_pass
{
public:
  pass_phiopt (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_phiopt, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_phiopt (m_ctxt); }
  virtual bool gate (function *) { return flag_ssa_phiopt; }
  virtual unsigned int execute (function *)
    {
      return tree_ssa_phiopt_worker (false, gate_hoist_loads ());
    }

}; // class pass_phiopt

} // anon namespace

gimple_opt_pass *
make_pass_phiopt (gcc::context *ctxt)
{
  return new pass_phiopt (ctxt);
}
namespace {

const pass_data pass_data_cselim =
{
  GIMPLE_PASS, /* type */
  "cselim", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_PHIOPT, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_cselim : public gimple_opt_pass
{
public:
  pass_cselim (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_cselim, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return flag_tree_cselim; }
  virtual unsigned int execute (function *) { return tree_ssa_cs_elim (); }

}; // class pass_cselim

} // anon namespace

gimple_opt_pass *
make_pass_cselim (gcc::context *ctxt)
{
  return new pass_cselim (ctxt);
}