/* Optimization of PHI nodes by converting them into straightline code.
   Copyright (C) 2004-2018 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "insn-codes.h"
#include "tree-pass.h"
#include "optabs-tree.h"
#include "insn-config.h"
#include "gimple-pretty-print.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "tree-data-ref.h"
#include "tree-scalar-evolution.h"
#include "tree-inline.h"
#include "case-cfn-macros.h"
static unsigned int tree_ssa_phiopt_worker (bool, bool);
static bool conditional_replacement (basic_block, basic_block,
				     edge, edge, gphi *, tree, tree);
static gphi *factor_out_conditional_conversion (edge, edge, gphi *, tree, tree,
						gimple *);
static int value_replacement (basic_block, basic_block,
			      edge, edge, gimple *, tree, tree);
static bool minmax_replacement (basic_block, basic_block,
				edge, edge, gimple *, tree, tree);
static bool abs_replacement (basic_block, basic_block,
			     edge, edge, gimple *, tree, tree);
static bool cond_removal_in_popcount_pattern (basic_block, basic_block,
					      edge, edge, gimple *, tree, tree);
static bool cond_store_replacement (basic_block, basic_block, edge, edge,
				    hash_set<tree> *);
static bool cond_if_else_store_replacement (basic_block, basic_block, basic_block);
static hash_set<tree> * get_non_trapping ();
static void replace_phi_edge_with_variable (basic_block, edge, gimple *, tree);
static void hoist_adjacent_loads (basic_block, basic_block,
				  basic_block, basic_block);
static bool gate_hoist_loads (void);
/* This pass tries to transform conditional stores into unconditional
   ones, enabling further simplifications with the simpler then and else
   blocks.  In particular it replaces this:

     bb0:
       if (cond) goto bb2; else goto bb1;
     bb1:
       *p = RHS;
     bb2:

   with

     bb0:
       if (cond) goto bb1; else goto bb2;
     bb1:
       condtmp' = *p;
     bb2:
       condtmp = PHI <RHS, condtmp'>
       *p = condtmp;

   This transformation can only be done under several constraints,
   documented below.  It also replaces:

     bb0:
       if (cond) goto bb2; else goto bb1;
     bb1:
       *p = RHS1;
       goto bb3;
     bb2:
       *p = RHS2;
     bb3:

   with

     bb0:
       if (cond) goto bb3; else goto bb1;
     bb1:
     bb3:
       condtmp = PHI <RHS1, RHS2>
       *p = condtmp;  */
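/* Illustrative sketch (added for exposition, not from the original sources):
   at the source level the first pattern corresponds to turning

     if (cond)
       *p = x;

   into the unconditional sequence

     tmp = cond ? x : *p;
     *p = tmp;

   which is only valid when *p is known not to trap, so the store can be
   moved past the condition.  */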
static unsigned int
tree_ssa_cs_elim (void)
{
  unsigned todo;
  /* ???  We are not interested in loop related info, but the following
     will create it, ICEing as we didn't init loops with pre-headers.
     An interfacing issue of find_data_references_in_bb.  */
  loop_optimizer_init (LOOPS_NORMAL);
  todo = tree_ssa_phiopt_worker (true, false);
  loop_optimizer_finalize ();
  return todo;
}
/* Return the singleton PHI in the SEQ of PHIs for edges E0 and E1.  */

static gphi *
single_non_singleton_phi_for_edges (gimple_seq seq, edge e0, edge e1)
{
  gimple_stmt_iterator i;
  if (gimple_seq_singleton_p (seq))
    return as_a <gphi *> (gsi_stmt (gsi_start (seq)));
  for (i = gsi_start (seq); !gsi_end_p (i); gsi_next (&i))
    {
      gphi *p = as_a <gphi *> (gsi_stmt (i));
      /* If the PHI arguments are equal then we can skip this PHI. */
      if (operand_equal_for_phi_arg_p (gimple_phi_arg_def (p, e0->dest_idx),
				       gimple_phi_arg_def (p, e1->dest_idx)))
	continue;
      /* If we have already found a PHI whose arguments for these two
	 edges differ, then there is more than one such PHI and hence
	 no singleton; give up.  */
/* The core routine of conditional store replacement and normal
   phi optimizations.  Both share much of the infrastructure in how
   to match applicable basic block patterns.  DO_STORE_ELIM is true
   when we want to do conditional store replacement, false otherwise.
   DO_HOIST_LOADS is true when we want to hoist adjacent loads out
   of diamond control flow patterns, false otherwise.  */

static unsigned int
tree_ssa_phiopt_worker (bool do_store_elim, bool do_hoist_loads)
{
  basic_block bb;
  basic_block *bb_order;
  unsigned n, i;
  bool cfgchanged = false;
  hash_set<tree> *nontrap = 0;

  /* Calculate the set of non-trapping memory accesses.  */
  nontrap = get_non_trapping ();
  /* Search every basic block for COND_EXPR we may be able to optimize.

     We walk the blocks in order that guarantees that a block with
     a single predecessor is processed before the predecessor.
     This ensures that we collapse inner ifs before visiting the
     outer ones, and also that we do not try to visit a removed
     block.  */
  bb_order = single_pred_before_succ_order ();
  n = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS;

  for (i = 0; i < n; i++)
    {
      basic_block bb1, bb2;

      bb = bb_order[i];
      cond_stmt = last_stmt (bb);

      /* Check to see if the last statement is a GIMPLE_COND.  */
      if (!cond_stmt
	  || gimple_code (cond_stmt) != GIMPLE_COND)
	continue;

      e1 = EDGE_SUCC (bb, 0);
      bb1 = e1->dest;
      e2 = EDGE_SUCC (bb, 1);
      bb2 = e2->dest;

      /* We cannot do the optimization on abnormal edges.  */
      if ((e1->flags & EDGE_ABNORMAL) != 0
	  || (e2->flags & EDGE_ABNORMAL) != 0)
	continue;

      /* Give up if either bb1 or bb2 has no successors.  */
      if (EDGE_COUNT (bb1->succs) == 0
	  || EDGE_COUNT (bb2->succs) == 0)
	continue;

      /* Find the bb which is the fall through to the other.  */
      if (EDGE_SUCC (bb1, 0)->dest == bb2)
	;
      else if (EDGE_SUCC (bb2, 0)->dest == bb1)
	{
	  std::swap (bb1, bb2);
	  std::swap (e1, e2);
	}
      else if (do_store_elim
	       && EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest)
	{
	  basic_block bb3 = EDGE_SUCC (bb1, 0)->dest;

	  if (!single_succ_p (bb1)
	      || (EDGE_SUCC (bb1, 0)->flags & EDGE_FALLTHRU) == 0
	      || !single_succ_p (bb2)
	      || (EDGE_SUCC (bb2, 0)->flags & EDGE_FALLTHRU) == 0
	      || EDGE_COUNT (bb3->preds) != 2)
	    continue;
	  if (cond_if_else_store_replacement (bb1, bb2, bb3))
	    cfgchanged = true;
	  continue;
	}
      else if (do_hoist_loads
	       && EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest)
	{
	  basic_block bb3 = EDGE_SUCC (bb1, 0)->dest;

	  if (!FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (cond_stmt)))
	      && single_succ_p (bb1)
	      && single_succ_p (bb2)
	      && single_pred_p (bb1)
	      && single_pred_p (bb2)
	      && EDGE_COUNT (bb->succs) == 2
	      && EDGE_COUNT (bb3->preds) == 2
	      /* If one edge or the other is dominant, a conditional move
		 is likely to perform worse than the well-predicted branch.  */
	      && !predictable_edge_p (EDGE_SUCC (bb, 0))
	      && !predictable_edge_p (EDGE_SUCC (bb, 1)))
	    hoist_adjacent_loads (bb, bb1, bb2, bb3);
	  continue;
	}
      e1 = EDGE_SUCC (bb1, 0);

      /* Make sure that bb1 is just a fall through.  */
      if (!single_succ_p (bb1)
	  || (e1->flags & EDGE_FALLTHRU) == 0)
	continue;

      /* Also make sure that bb1 only has one predecessor and that it
	 is bb.  */
      if (!single_pred_p (bb1)
	  || single_pred (bb1) != bb)
	continue;

      if (do_store_elim)
	{
	  /* bb1 is the middle block, bb2 the join block, bb the split block,
	     e1 the fallthrough edge from bb1 to bb2.  We can't do the
	     optimization if the join block has more than two predecessors.  */
	  if (EDGE_COUNT (bb2->preds) > 2)
	    continue;
	  if (cond_store_replacement (bb1, bb2, e1, e2, nontrap))
	    cfgchanged = true;
	}
      else
	{
	  gimple_seq phis = phi_nodes (bb2);
	  gimple_stmt_iterator gsi;
	  bool candorest = true;

	  /* Value replacement can work with more than one PHI
	     so try that first. */
	  for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
	    {
	      phi = as_a <gphi *> (gsi_stmt (gsi));
	      arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
	      arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
	      if (value_replacement (bb, bb1, e1, e2, phi, arg0, arg1) == 2)
		{
		  candorest = false;
		  cfgchanged = true;
		  break;
		}
	    }

	  if (!candorest)
	    continue;

	  phi = single_non_singleton_phi_for_edges (phis, e1, e2);
	  if (!phi)
	    continue;
	  arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
	  arg1 = gimple_phi_arg_def (phi, e2->dest_idx);

	  /* Something is wrong if we cannot find the arguments in the PHI
	     node.  */
	  gcc_assert (arg0 != NULL_TREE && arg1 != NULL_TREE);

	  gphi *newphi = factor_out_conditional_conversion (e1, e2, phi,
							    arg0, arg1,
							    cond_stmt);
	  if (newphi != NULL)
	    {
	      phi = newphi;
	      /* factor_out_conditional_conversion may create a new PHI in
		 BB2 and eliminate an existing PHI in BB2.  Recompute values
		 that may be affected by that change.  */
	      arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
	      arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
	      gcc_assert (arg0 != NULL_TREE && arg1 != NULL_TREE);
	    }
	  /* Do the replacement of conditional if it can be done.  */
	  if (conditional_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
	    cfgchanged = true;
	  else if (abs_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
	    cfgchanged = true;
	  else if (cond_removal_in_popcount_pattern (bb, bb1, e1, e2,
						     phi, arg0, arg1))
	    cfgchanged = true;
	  else if (minmax_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
	    cfgchanged = true;
	}
    }
  /* If the CFG has changed, we should cleanup the CFG.  */
  if (cfgchanged && do_store_elim)
    {
      /* In cond-store replacement we have added some loads on edges
	 and new VOPS (as we moved the store, and created a load).  */
      gsi_commit_edge_inserts ();
      return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
    }
  else if (cfgchanged)
    return TODO_cleanup_cfg;
  return 0;
}
/* Replace PHI node element whose edge is E in block BB with variable NEW.
   Remove the edge from COND_BLOCK which does not lead to BB (COND_BLOCK
   is known to have two edges, one of which must reach BB).  */

static void
replace_phi_edge_with_variable (basic_block cond_block,
				edge e, gimple *phi, tree new_tree)
{
  basic_block bb = gimple_bb (phi);
  basic_block block_to_remove;
  gimple_stmt_iterator gsi;

  /* Change the PHI argument to new.  */
  SET_USE (PHI_ARG_DEF_PTR (phi, e->dest_idx), new_tree);

  /* Remove the empty basic block.  */
  if (EDGE_SUCC (cond_block, 0)->dest == bb)
    {
      EDGE_SUCC (cond_block, 0)->flags |= EDGE_FALLTHRU;
      EDGE_SUCC (cond_block, 0)->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
      EDGE_SUCC (cond_block, 0)->probability = profile_probability::always ();

      block_to_remove = EDGE_SUCC (cond_block, 1)->dest;
    }
  else
    {
      EDGE_SUCC (cond_block, 1)->flags |= EDGE_FALLTHRU;
      EDGE_SUCC (cond_block, 1)->flags
	&= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
      EDGE_SUCC (cond_block, 1)->probability = profile_probability::always ();

      block_to_remove = EDGE_SUCC (cond_block, 0)->dest;
    }
  delete_basic_block (block_to_remove);

  /* Eliminate the COND_EXPR at the end of COND_BLOCK.  */
  gsi = gsi_last_bb (cond_block);
  gsi_remove (&gsi, true);

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file,
	     "COND_EXPR in block %d and PHI in block %d converted to straightline code.\n",
	     cond_block->index, bb->index);
}
/* PR66726: Factor conversion out of COND_EXPR.  If the arguments of the PHI
   stmt are CONVERT_STMT, factor out the conversion and perform the conversion
   to the result of PHI stmt.  COND_STMT is the controlling predicate.
   Return the newly-created PHI, if any.  */
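/* Illustrative sketch (added for exposition, not from the original sources):
   a PHI such as

     # t_1 = PHI <a'_2(bb1), b'_3(bb2)>

   where a'_2 and b'_3 are single-use casts of a_4 and b_5 from the same
   source type is rewritten so that a new PHI merges a_4 and b_5 and one cast
   of the PHI result replaces the two original casts:

     # t'_6 = PHI <a_4(bb1), b_5(bb2)>
     t_1 = (TYPE) t'_6;  */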
static gphi *
factor_out_conditional_conversion (edge e0, edge e1, gphi *phi,
				   tree arg0, tree arg1, gimple *cond_stmt)
{
  gimple *arg0_def_stmt = NULL, *arg1_def_stmt = NULL, *new_stmt;
  tree new_arg0 = NULL_TREE, new_arg1 = NULL_TREE;
  tree temp, result;
  gphi *newphi;
  gimple_stmt_iterator gsi, gsi_for_def;
  source_location locus = gimple_location (phi);
  enum tree_code convert_code;

  /* Handle only PHI statements with two arguments.  TODO: If all
     other arguments to PHI are INTEGER_CST or if their defining
     statement have the same unary operation, we can handle more
     than two arguments too.  */
  if (gimple_phi_num_args (phi) != 2)
    return NULL;

  /* First canonicalize to simplify tests.  */
  if (TREE_CODE (arg0) != SSA_NAME)
    {
      std::swap (arg0, arg1);
      std::swap (e0, e1);
    }

  if (TREE_CODE (arg0) != SSA_NAME
      || (TREE_CODE (arg1) != SSA_NAME
	  && TREE_CODE (arg1) != INTEGER_CST))
    return NULL;

  /* Check if arg0 is an SSA_NAME and the stmt which defines arg0 is
     a conversion.  */
  arg0_def_stmt = SSA_NAME_DEF_STMT (arg0);
  if (!gimple_assign_cast_p (arg0_def_stmt))
    return NULL;

  /* Use the RHS as new_arg0.  */
  convert_code = gimple_assign_rhs_code (arg0_def_stmt);
  new_arg0 = gimple_assign_rhs1 (arg0_def_stmt);
  if (convert_code == VIEW_CONVERT_EXPR)
    {
      new_arg0 = TREE_OPERAND (new_arg0, 0);
      if (!is_gimple_reg_type (TREE_TYPE (new_arg0)))
	return NULL;
    }

  if (TREE_CODE (arg1) == SSA_NAME)
    {
      /* Check if arg1 is an SSA_NAME and the stmt which defines arg1
	 is a conversion.  */
      arg1_def_stmt = SSA_NAME_DEF_STMT (arg1);
      if (!is_gimple_assign (arg1_def_stmt)
	  || gimple_assign_rhs_code (arg1_def_stmt) != convert_code)
	return NULL;

      /* Use the RHS as new_arg1.  */
      new_arg1 = gimple_assign_rhs1 (arg1_def_stmt);
      if (convert_code == VIEW_CONVERT_EXPR)
	new_arg1 = TREE_OPERAND (new_arg1, 0);
    }
  else
    {
      /* If arg1 is an INTEGER_CST, fold it to new type.  */
      if (INTEGRAL_TYPE_P (TREE_TYPE (new_arg0))
	  && int_fits_type_p (arg1, TREE_TYPE (new_arg0)))
	{
	  if (gimple_assign_cast_p (arg0_def_stmt))
	    {
	      /* For the INTEGER_CST case, we are just moving the
		 conversion from one place to another, which can often
		 hurt as the conversion moves further away from the
		 statement that computes the value.  So, perform this
		 only if new_arg0 is an operand of COND_STMT, or
		 if arg0_def_stmt is the only non-debug stmt in
		 its basic block, because then it is possible this
		 could enable further optimizations (minmax replacement
		 etc.).  See PR71016.  */
	      if (new_arg0 != gimple_cond_lhs (cond_stmt)
		  && new_arg0 != gimple_cond_rhs (cond_stmt)
		  && gimple_bb (arg0_def_stmt) == e0->src)
		{
		  gsi = gsi_for_stmt (arg0_def_stmt);
		  gsi_prev_nondebug (&gsi);
		  if (!gsi_end_p (gsi))
		    return NULL;
		  gsi = gsi_for_stmt (arg0_def_stmt);
		  gsi_next_nondebug (&gsi);
		  if (!gsi_end_p (gsi))
		    return NULL;
		}
	      new_arg1 = fold_convert (TREE_TYPE (new_arg0), arg1);
	    }
	  else
	    return NULL;
	}
      else
	return NULL;
    }

  /* If arg0/arg1 have > 1 use, then this transformation actually increases
     the number of expressions evaluated at runtime.  */
  if (!has_single_use (arg0)
      || (arg1_def_stmt && !has_single_use (arg1)))
    return NULL;

  /* If types of new_arg0 and new_arg1 are different bailout.  */
  if (!types_compatible_p (TREE_TYPE (new_arg0), TREE_TYPE (new_arg1)))
    return NULL;

  /* Create a new PHI stmt.  */
  result = PHI_RESULT (phi);
  temp = make_ssa_name (TREE_TYPE (new_arg0), NULL);
  newphi = create_phi_node (temp, gimple_bb (phi));

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "PHI ");
      print_generic_expr (dump_file, gimple_phi_result (phi));
      fprintf (dump_file,
	       " changed to factor conversion out from COND_EXPR.\n");
      fprintf (dump_file, "New stmt with CAST that defines ");
      print_generic_expr (dump_file, result);
      fprintf (dump_file, ".\n");
    }

  /* Remove the old cast(s) that has single use.  */
  gsi_for_def = gsi_for_stmt (arg0_def_stmt);
  gsi_remove (&gsi_for_def, true);
  release_defs (arg0_def_stmt);

  if (arg1_def_stmt)
    {
      gsi_for_def = gsi_for_stmt (arg1_def_stmt);
      gsi_remove (&gsi_for_def, true);
      release_defs (arg1_def_stmt);
    }

  add_phi_arg (newphi, new_arg0, e0, locus);
  add_phi_arg (newphi, new_arg1, e1, locus);

  /* Create the conversion stmt and insert it.  */
  if (convert_code == VIEW_CONVERT_EXPR)
    {
      temp = fold_build1 (VIEW_CONVERT_EXPR, TREE_TYPE (result), temp);
      new_stmt = gimple_build_assign (result, temp);
    }
  else
    new_stmt = gimple_build_assign (result, convert_code, temp);
  gsi = gsi_after_labels (gimple_bb (phi));
  gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);

  /* Remove the original PHI stmt.  */
  gsi = gsi_for_stmt (phi);
  gsi_remove (&gsi, true);

  return newphi;
}
/* The function conditional_replacement does the main work of doing the
   conditional replacement.  Return true if the replacement is done.
   Otherwise return false.
   BB is the basic block where the replacement is going to be done on.  ARG0
   is argument 0 from PHI.  Likewise for ARG1.  */
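/* Illustrative sketch (added for exposition, not from the original sources):

     if (a_2 > b_3) goto bb1; else goto bb2;
     ...
     # r_4 = PHI <1(bb1), 0(bb2)>

   is replaced by the gimplified condition itself, r_4 = a_2 > b_3,
   possibly inverted and/or converted (or negated for the 0/-1 case) to the
   PHI result type.  */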
static bool
conditional_replacement (basic_block cond_bb, basic_block middle_bb,
			 edge e0, edge e1, gphi *phi,
			 tree arg0, tree arg1)
{
  tree result;
  gimple *stmt;
  gassign *new_stmt;
  tree cond;
  gimple_stmt_iterator gsi;
  edge true_edge, false_edge;
  tree new_var, new_var2;
  bool neg;

  /* FIXME: Gimplification of complex type is too hard for now.  */
  /* We aren't prepared to handle vectors either (and it is a question
     if it would be worthwhile anyway).  */
  if (!(INTEGRAL_TYPE_P (TREE_TYPE (arg0))
	|| POINTER_TYPE_P (TREE_TYPE (arg0)))
      || !(INTEGRAL_TYPE_P (TREE_TYPE (arg1))
	   || POINTER_TYPE_P (TREE_TYPE (arg1))))
    return false;

  /* The PHI arguments have the constants 0 and 1, or 0 and -1, then
     convert it to the conditional.  */
  if ((integer_zerop (arg0) && integer_onep (arg1))
      || (integer_zerop (arg1) && integer_onep (arg0)))
    neg = false;
  else if ((integer_zerop (arg0) && integer_all_onesp (arg1))
	   || (integer_zerop (arg1) && integer_all_onesp (arg0)))
    neg = true;
  else
    return false;

  if (!empty_block_p (middle_bb))
    return false;

  /* At this point we know we have a GIMPLE_COND with two successors.
     One successor is BB, the other successor is an empty block which
     falls through into BB.

     There is a single PHI node at the join point (BB) and its arguments
     are constants (0, 1) or (0, -1).

     So, given the condition COND, and the two PHI arguments, we can
     rewrite this PHI into non-branching code:

       dest = (COND) or dest = COND'

     We use the condition as-is if the argument associated with the
     true edge has the value one or the argument associated with the
     false edge as the value zero.  Note that those conditions are not
     the same since only one of the outgoing edges from the GIMPLE_COND
     will directly reach BB and thus be associated with an argument.  */

  stmt = last_stmt (cond_bb);
  result = PHI_RESULT (phi);

  /* To handle special cases like floating point comparison, it is easier and
     less error-prone to build a tree and gimplify it on the fly though it is
     less efficient.  */
  cond = fold_build2_loc (gimple_location (stmt),
			  gimple_cond_code (stmt), boolean_type_node,
			  gimple_cond_lhs (stmt), gimple_cond_rhs (stmt));

  /* We need to know which is the true edge and which is the false
     edge so that we know when to invert the condition below.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
  if ((e0 == true_edge && integer_zerop (arg0))
      || (e0 == false_edge && !integer_zerop (arg0))
      || (e1 == true_edge && integer_zerop (arg1))
      || (e1 == false_edge && !integer_zerop (arg1)))
    cond = fold_build1_loc (gimple_location (stmt),
			    TRUTH_NOT_EXPR, TREE_TYPE (cond), cond);

  if (neg)
    {
      cond = fold_convert_loc (gimple_location (stmt),
			       TREE_TYPE (result), cond);
      cond = fold_build1_loc (gimple_location (stmt),
			      NEGATE_EXPR, TREE_TYPE (cond), cond);
    }

  /* Insert our new statements at the end of conditional block before the
     COND_STMT.  */
  gsi = gsi_for_stmt (stmt);
  new_var = force_gimple_operand_gsi (&gsi, cond, true, NULL, true,
				      GSI_SAME_STMT);

  if (!useless_type_conversion_p (TREE_TYPE (result), TREE_TYPE (new_var)))
    {
      source_location locus_0, locus_1;

      new_var2 = make_ssa_name (TREE_TYPE (result));
      new_stmt = gimple_build_assign (new_var2, CONVERT_EXPR, new_var);
      gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
      new_var = new_var2;

      /* Set the locus to the first argument, unless it doesn't have one.  */
      locus_0 = gimple_phi_arg_location (phi, 0);
      locus_1 = gimple_phi_arg_location (phi, 1);
      if (locus_0 == UNKNOWN_LOCATION)
	locus_0 = locus_1;
      gimple_set_location (new_stmt, locus_0);
    }

  replace_phi_edge_with_variable (cond_bb, e1, phi, new_var);

  /* Note that we optimized this PHI.  */
  return true;
}
/* Update *ARG which is defined in STMT so that it contains the
   computed value if that seems profitable.  Return true if the
   statement is made dead by that rewriting.  */

static bool
jump_function_from_stmt (tree *arg, gimple *stmt)
{
  enum tree_code code = gimple_assign_rhs_code (stmt);
  if (code == ADDR_EXPR)
    {
      /* For arg = &p->i transform it to p, if possible.  */
      tree rhs1 = gimple_assign_rhs1 (stmt);
      poly_int64 offset;
      tree tem = get_addr_base_and_unit_offset (TREE_OPERAND (rhs1, 0),
						&offset);
      if (tem
	  && TREE_CODE (tem) == MEM_REF
	  && known_eq (mem_ref_offset (tem) + offset, 0))
	{
	  *arg = TREE_OPERAND (tem, 0);
	  return true;
	}
    }
  /* TODO: Much like IPA-CP jump-functions we want to handle constant
     additions symbolically here, and we'd need to update the comparison
     code that compares the arg + cst tuples in our caller.  For now the
     code above exactly handles the VEC_BASE pattern from vec.h.  */
  return false;
}
/* RHS is a source argument in a BIT_AND_EXPR which feeds a conditional
   of the form SSA_NAME NE 0.

   If RHS is fed by a simple EQ_EXPR comparison of two values, see if
   the two input values of the EQ_EXPR match arg0 and arg1.

   If so update *code and return TRUE.  Otherwise return FALSE.  */

static bool
rhs_is_fed_for_value_replacement (const_tree arg0, const_tree arg1,
				  enum tree_code *code, const_tree rhs)
{
  /* Obviously if RHS is not an SSA_NAME, we can't look at the defining
     statement.  */
  if (TREE_CODE (rhs) == SSA_NAME)
    {
      gimple *def1 = SSA_NAME_DEF_STMT (rhs);

      /* Verify the defining statement has an EQ_EXPR on the RHS.  */
      if (is_gimple_assign (def1) && gimple_assign_rhs_code (def1) == EQ_EXPR)
	{
	  /* Finally verify the source operands of the EQ_EXPR are equal
	     to arg0 and arg1.  */
	  tree op0 = gimple_assign_rhs1 (def1);
	  tree op1 = gimple_assign_rhs2 (def1);
	  if ((operand_equal_for_phi_arg_p (arg0, op0)
	       && operand_equal_for_phi_arg_p (arg1, op1))
	      || (operand_equal_for_phi_arg_p (arg0, op1)
		  && operand_equal_for_phi_arg_p (arg1, op0)))
	    {
	      /* We will perform the optimization.  */
	      *code = gimple_assign_rhs_code (def1);
	      return true;
	    }
	}
    }
  return false;
}
/* Return TRUE if arg0/arg1 are equal to the rhs/lhs or lhs/rhs of COND.

   Also return TRUE if arg0/arg1 are equal to the source arguments of
   an EQ comparison feeding a BIT_AND_EXPR which feeds COND.

   Return FALSE otherwise.  */

static bool
operand_equal_for_value_replacement (const_tree arg0, const_tree arg1,
				     enum tree_code *code, gimple *cond)
{
  gimple *def;
  tree lhs = gimple_cond_lhs (cond);
  tree rhs = gimple_cond_rhs (cond);

  if ((operand_equal_for_phi_arg_p (arg0, lhs)
       && operand_equal_for_phi_arg_p (arg1, rhs))
      || (operand_equal_for_phi_arg_p (arg1, lhs)
	  && operand_equal_for_phi_arg_p (arg0, rhs)))
    return true;

  /* Now handle more complex case where we have an EQ comparison
     which feeds a BIT_AND_EXPR which feeds COND.

     First verify that COND is of the form SSA_NAME NE 0.  */
  if (*code != NE_EXPR || !integer_zerop (rhs)
      || TREE_CODE (lhs) != SSA_NAME)
    return false;

  /* Now ensure that SSA_NAME is set by a BIT_AND_EXPR.  */
  def = SSA_NAME_DEF_STMT (lhs);
  if (!is_gimple_assign (def) || gimple_assign_rhs_code (def) != BIT_AND_EXPR)
    return false;

  /* Now verify arg0/arg1 correspond to the source arguments of an
     EQ comparison feeding the BIT_AND_EXPR.  */

  tree tmp = gimple_assign_rhs1 (def);
  if (rhs_is_fed_for_value_replacement (arg0, arg1, code, tmp))
    return true;

  tmp = gimple_assign_rhs2 (def);
  if (rhs_is_fed_for_value_replacement (arg0, arg1, code, tmp))
    return true;

  return false;
}
/* Returns true if ARG is a neutral element for operation CODE
   on the RIGHT side.  */
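/* Illustrative sketch (added for exposition, not from the original sources):
   0 is neutral for PLUS_EXPR on either side (x + 0 == 0 + x == x), but for
   MINUS_EXPR only on the right (x - 0 == x, while 0 - x != x); likewise 1
   for MULT_EXPR on either side and for the division codes only on the
   right.  */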
static bool
neutral_element_p (tree_code code, tree arg, bool right)
{
  switch (code)
    {
    case PLUS_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
      return integer_zerop (arg);

    case LROTATE_EXPR:
    case RROTATE_EXPR:
    case LSHIFT_EXPR:
    case RSHIFT_EXPR:
    case MINUS_EXPR:
    case POINTER_PLUS_EXPR:
      return right && integer_zerop (arg);

    case MULT_EXPR:
      return integer_onep (arg);

    case TRUNC_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case EXACT_DIV_EXPR:
      return right && integer_onep (arg);

    case BIT_AND_EXPR:
      return integer_all_onesp (arg);

    default:
      return false;
    }
}
/* Returns true if ARG is an absorbing element for operation CODE.  */
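/* Illustrative sketch (added for exposition, not from the original sources):
   0 absorbs MULT_EXPR and BIT_AND_EXPR (x * 0 == 0, x & 0 == 0), while -1
   (all ones) absorbs BIT_IOR_EXPR (x | -1 == -1).  */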
static bool
absorbing_element_p (tree_code code, tree arg, bool right, tree rval)
{
  switch (code)
    {
    case BIT_IOR_EXPR:
      return integer_all_onesp (arg);

    case MULT_EXPR:
    case BIT_AND_EXPR:
      return integer_zerop (arg);

    case LSHIFT_EXPR:
    case RSHIFT_EXPR:
    case LROTATE_EXPR:
    case RROTATE_EXPR:
      return !right && integer_zerop (arg);

    case TRUNC_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case EXACT_DIV_EXPR:
    case TRUNC_MOD_EXPR:
    case CEIL_MOD_EXPR:
    case FLOOR_MOD_EXPR:
    case ROUND_MOD_EXPR:
      return (!right
	      && integer_zerop (arg)
	      && tree_single_nonzero_warnv_p (rval, NULL));

    default:
      return false;
    }
}
/* The function value_replacement does the main work of doing the value
   replacement.  Return non-zero if the replacement is done.  Otherwise return
   0.  If we remove the middle basic block, return 2.
   BB is the basic block where the replacement is going to be done on.  ARG0
   is argument 0 from the PHI.  Likewise for ARG1.  */
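/* Illustrative sketch (added for exposition, not from the original sources):

     if (a_2 == b_3) goto bb2; else goto bb1;
     ...
     # r_4 = PHI <a_2(bb1), b_3(bb2)>

   both PHI arguments match the operands of the equality test, so on the path
   where they differ r_4 is a_2 and on the other path a_2 == b_3; r_4 can
   therefore simply be replaced by a_2.  */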
static int
value_replacement (basic_block cond_bb, basic_block middle_bb,
		   edge e0, edge e1, gimple *phi,
		   tree arg0, tree arg1)
{
  gimple_stmt_iterator gsi;
  gimple *cond;
  edge true_edge, false_edge;
  enum tree_code code;
  bool empty_or_with_defined_p = true;

  /* If the type says honor signed zeros we cannot do this
     optimization.  */
  if (HONOR_SIGNED_ZEROS (arg1))
    return 0;

  /* If there is a statement in MIDDLE_BB that defines one of the PHI
     arguments, then adjust arg0 or arg1.  */
  gsi = gsi_start_nondebug_after_labels_bb (middle_bb);
  while (!gsi_end_p (gsi))
    {
      gimple *stmt = gsi_stmt (gsi);
      tree lhs;
      gsi_next_nondebug (&gsi);
      if (!is_gimple_assign (stmt))
	{
	  empty_or_with_defined_p = false;
	  continue;
	}
      /* Now try to adjust arg0 or arg1 according to the computation
	 in the statement.  */
      lhs = gimple_assign_lhs (stmt);
      if (!(lhs == arg0
	    && jump_function_from_stmt (&arg0, stmt))
	  || (lhs == arg1
	      && jump_function_from_stmt (&arg1, stmt)))
	empty_or_with_defined_p = false;
    }

  cond = last_stmt (cond_bb);
  code = gimple_cond_code (cond);

  /* This transformation is only valid for equality comparisons.  */
  if (code != NE_EXPR && code != EQ_EXPR)
    return 0;

  /* We need to know which is the true edge and which is the false
      edge so that we know if have abs or negative abs.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);

  /* At this point we know we have a COND_EXPR with two successors.
     One successor is BB, the other successor is an empty block which
     falls through into BB.

     The condition for the COND_EXPR is known to be NE_EXPR or EQ_EXPR.

     There is a single PHI node at the join point (BB) with two arguments.

     We now need to verify that the two arguments in the PHI node match
     the two arguments to the equality comparison.  */

  if (operand_equal_for_value_replacement (arg0, arg1, &code, cond))
    {
      edge e;
      tree arg;

      /* For NE_EXPR, we want to build an assignment result = arg where
	 arg is the PHI argument associated with the true edge.  For
	 EQ_EXPR we want the PHI argument associated with the false edge.  */
      e = (code == NE_EXPR ? true_edge : false_edge);

      /* Unfortunately, E may not reach BB (it may instead have gone to
	 OTHER_BLOCK).  If that is the case, then we want the single outgoing
	 edge from OTHER_BLOCK which reaches BB and represents the desired
	 path from COND_BLOCK.  */
      if (e->dest == middle_bb)
	e = single_succ_edge (e->dest);

      /* Now we know the incoming edge to BB that has the argument for the
	 RHS of our new assignment statement.  */
      if (e0 == e)
	arg = arg0;
      else
	arg = arg1;

      /* If the middle basic block was empty or is defining the
	 PHI arguments and this is a single phi where the args are different
	 for the edges e0 and e1 then we can remove the middle basic block. */
      if (empty_or_with_defined_p
	  && single_non_singleton_phi_for_edges (phi_nodes (gimple_bb (phi)),
						 e0, e1) == phi)
	{
	  replace_phi_edge_with_variable (cond_bb, e1, phi, arg);
	  /* Note that we optimized this PHI.  */
	  return 2;
	}
      else
	{
	  /* Replace the PHI arguments with arg.  */
	  SET_PHI_ARG_DEF (phi, e0->dest_idx, arg);
	  SET_PHI_ARG_DEF (phi, e1->dest_idx, arg);
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "PHI ");
	      print_generic_expr (dump_file, gimple_phi_result (phi));
	      fprintf (dump_file, " reduced for COND_EXPR in block %d to ",
		       cond_bb->index);
	      print_generic_expr (dump_file, arg);
	      fprintf (dump_file, ".\n");
	    }
	  return 1;
	}
    }

  /* Now optimize (x != 0) ? x + y : y to just x + y.  */
  gsi = gsi_last_nondebug_bb (middle_bb);
  if (gsi_end_p (gsi))
    return 0;

  gimple *assign = gsi_stmt (gsi);
  if (!is_gimple_assign (assign)
      || gimple_assign_rhs_class (assign) != GIMPLE_BINARY_RHS
      || (!INTEGRAL_TYPE_P (TREE_TYPE (arg0))
	  && !POINTER_TYPE_P (TREE_TYPE (arg0))))
    return 0;

  /* Punt if there are (degenerate) PHIs in middle_bb, there should not be.  */
  if (!gimple_seq_empty_p (phi_nodes (middle_bb)))
    return 0;

  /* Allow up to 2 cheap preparation statements that prepare argument
     for assign, e.g.:
      ...
       iftmp.0_6 = x_5(D) r<< _1;
      ...
       # iftmp.0_2 = PHI <iftmp.0_6(3), x_5(D)(2)>
     or:
      ...
       # _2 = PHI <x_5(D)(2), _6(3)>  */
  gimple *prep_stmt[2] = { NULL, NULL };
  int prep_cnt;
  for (prep_cnt = 0; ; prep_cnt++)
    {
      gsi_prev_nondebug (&gsi);
      if (gsi_end_p (gsi))
	break;

      gimple *g = gsi_stmt (gsi);
      if (gimple_code (g) == GIMPLE_LABEL)
	break;

      if (prep_cnt == 2 || !is_gimple_assign (g))
	return 0;

      tree lhs = gimple_assign_lhs (g);
      tree rhs1 = gimple_assign_rhs1 (g);
      use_operand_p use_p;
      gimple *use_stmt;
      if (TREE_CODE (lhs) != SSA_NAME
	  || TREE_CODE (rhs1) != SSA_NAME
	  || !INTEGRAL_TYPE_P (TREE_TYPE (lhs))
	  || !INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
	  || !single_imm_use (lhs, &use_p, &use_stmt)
	  || use_stmt != (prep_cnt ? prep_stmt[prep_cnt - 1] : assign))
	return 0;
      switch (gimple_assign_rhs_code (g))
	{
	CASE_CONVERT:
	  break;
	case PLUS_EXPR:
	case BIT_AND_EXPR:
	case BIT_XOR_EXPR:
	  if (TREE_CODE (gimple_assign_rhs2 (g)) != INTEGER_CST)
	    return 0;
	  break;
	default:
	  return 0;
	}
      prep_stmt[prep_cnt] = g;
    }

  /* Only transform if it removes the condition.  */
  if (!single_non_singleton_phi_for_edges (phi_nodes (gimple_bb (phi)), e0, e1))
    return 0;

  /* Size-wise, this is always profitable.  */
  if (optimize_bb_for_speed_p (cond_bb)
      /* The special case is useless if it has a low probability.  */
      && profile_status_for_fn (cfun) != PROFILE_ABSENT
      && EDGE_PRED (middle_bb, 0)->probability < profile_probability::even ()
      /* If assign is cheap, there is no point avoiding it.  */
      && estimate_num_insns (bb_seq (middle_bb), &eni_time_weights)
	 >= 3 * estimate_num_insns (cond, &eni_time_weights))
    return 0;

  tree lhs = gimple_assign_lhs (assign);
  tree rhs1 = gimple_assign_rhs1 (assign);
  tree rhs2 = gimple_assign_rhs2 (assign);
  enum tree_code code_def = gimple_assign_rhs_code (assign);
  tree cond_lhs = gimple_cond_lhs (cond);
  tree cond_rhs = gimple_cond_rhs (cond);

  /* Propagate the cond_rhs constant through preparation stmts,
     make sure UB isn't invoked while doing that.  */
  for (int i = prep_cnt - 1; i >= 0; --i)
    {
      gimple *g = prep_stmt[i];
      tree grhs1 = gimple_assign_rhs1 (g);
      if (!operand_equal_for_phi_arg_p (cond_lhs, grhs1))
	return 0;
      cond_lhs = gimple_assign_lhs (g);
      cond_rhs = fold_convert (TREE_TYPE (grhs1), cond_rhs);
      if (TREE_CODE (cond_rhs) != INTEGER_CST
	  || TREE_OVERFLOW (cond_rhs))
	return 0;
      if (gimple_assign_rhs_class (g) == GIMPLE_BINARY_RHS)
	{
	  cond_rhs = int_const_binop (gimple_assign_rhs_code (g), cond_rhs,
				      gimple_assign_rhs2 (g));
	  if (TREE_OVERFLOW (cond_rhs))
	    return 0;
	}
      cond_rhs = fold_convert (TREE_TYPE (cond_lhs), cond_rhs);
      if (TREE_CODE (cond_rhs) != INTEGER_CST
	  || TREE_OVERFLOW (cond_rhs))
	return 0;
    }

  if (((code == NE_EXPR && e1 == false_edge)
       || (code == EQ_EXPR && e1 == true_edge))
      && arg0 == lhs
      && ((arg1 == rhs1
	   && operand_equal_for_phi_arg_p (rhs2, cond_lhs)
	   && neutral_element_p (code_def, cond_rhs, true))
	  || (arg1 == rhs2
	      && operand_equal_for_phi_arg_p (rhs1, cond_lhs)
	      && neutral_element_p (code_def, cond_rhs, false))
	  || (operand_equal_for_phi_arg_p (arg1, cond_rhs)
	      && ((operand_equal_for_phi_arg_p (rhs2, cond_lhs)
		   && absorbing_element_p (code_def, cond_rhs, true, rhs2))
		  || (operand_equal_for_phi_arg_p (rhs1, cond_lhs)
		      && absorbing_element_p (code_def,
					      cond_rhs, false, rhs2))))))
    {
      gsi = gsi_for_stmt (cond);
      /* Moving ASSIGN might change VR of lhs, e.g. when moving u_6
	 def-stmt in:
	   ...
	   # RANGE [0, 4294967294]
	   u_6 = n_5 + 4294967295;
	   ...
	   # u_3 = PHI <u_6(3), 4294967295(2)>  */
      reset_flow_sensitive_info (lhs);
      if (INTEGRAL_TYPE_P (TREE_TYPE (lhs)))
	{
	  /* If available, we can use VR of phi result at least.  */
	  tree phires = gimple_phi_result (phi);
	  struct range_info_def *phires_range_info
	    = SSA_NAME_RANGE_INFO (phires);
	  if (phires_range_info)
	    duplicate_ssa_name_range_info (lhs, SSA_NAME_RANGE_TYPE (phires),
					   phires_range_info);
	}
      gimple_stmt_iterator gsi_from;
      for (int i = prep_cnt - 1; i >= 0; --i)
	{
	  tree plhs = gimple_assign_lhs (prep_stmt[i]);
	  reset_flow_sensitive_info (plhs);
	  gsi_from = gsi_for_stmt (prep_stmt[i]);
	  gsi_move_before (&gsi_from, &gsi);
	}
      gsi_from = gsi_for_stmt (assign);
      gsi_move_before (&gsi_from, &gsi);
      replace_phi_edge_with_variable (cond_bb, e1, phi, lhs);
      return 2;
    }

  return 0;
}
/* The function minmax_replacement does the main work of doing the minmax
   replacement.  Return true if the replacement is done.  Otherwise return
   false.
   BB is the basic block where the replacement is going to be done on.  ARG0
   is argument 0 from the PHI.  Likewise for ARG1.  */
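/* Illustrative sketch (added for exposition, not from the original sources):

     if (a_2 < b_3) goto bb1; else goto bb2;
     ...
     # r_4 = PHI <a_2(bb1), b_3(bb2)>

   always selects the smaller of the two values, so it can be rewritten as
   r_4 = MIN_EXPR <a_2, b_3>, and similarly for MAX_EXPR.  */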
static bool
minmax_replacement (basic_block cond_bb, basic_block middle_bb,
		    edge e0, edge e1, gimple *phi,
		    tree arg0, tree arg1)
{
  tree result, type;
  gcond *cond;
  gassign *new_stmt;
  edge true_edge, false_edge;
  enum tree_code cmp, minmax, ass_code;
  tree smaller, alt_smaller, larger, alt_larger, arg_true, arg_false;
  gimple_stmt_iterator gsi, gsi_from;

  type = TREE_TYPE (PHI_RESULT (phi));

  /* The optimization may be unsafe due to NaNs.  */
  if (HONOR_NANS (type) || HONOR_SIGNED_ZEROS (type))
    return false;

  cond = as_a <gcond *> (last_stmt (cond_bb));
  cmp = gimple_cond_code (cond);

  /* This transformation is only valid for order comparisons.  Record which
     operand is smaller/larger if the result of the comparison is true.  */
  alt_smaller = NULL_TREE;
  alt_larger = NULL_TREE;
  if (cmp == LT_EXPR || cmp == LE_EXPR)
    {
      smaller = gimple_cond_lhs (cond);
      larger = gimple_cond_rhs (cond);
      /* If we have smaller < CST it is equivalent to smaller <= CST-1.
	 Likewise smaller <= CST is equivalent to smaller < CST+1.  */
      if (TREE_CODE (larger) == INTEGER_CST)
	{
	  if (cmp == LT_EXPR)
	    {
	      wi::overflow_type overflow;
	      wide_int alt = wi::sub (wi::to_wide (larger), 1,
				      TYPE_SIGN (TREE_TYPE (larger)),
				      &overflow);
	      if (! overflow)
		alt_larger = wide_int_to_tree (TREE_TYPE (larger), alt);
	    }
	  else
	    {
	      wi::overflow_type overflow;
	      wide_int alt = wi::add (wi::to_wide (larger), 1,
				      TYPE_SIGN (TREE_TYPE (larger)),
				      &overflow);
	      if (! overflow)
		alt_larger = wide_int_to_tree (TREE_TYPE (larger), alt);
	    }
	}
    }
  else if (cmp == GT_EXPR || cmp == GE_EXPR)
    {
      smaller = gimple_cond_rhs (cond);
      larger = gimple_cond_lhs (cond);
      /* If we have larger > CST it is equivalent to larger >= CST+1.
	 Likewise larger >= CST is equivalent to larger > CST-1.  */
      if (TREE_CODE (smaller) == INTEGER_CST)
	{
	  wi::overflow_type overflow;
	  if (cmp == GT_EXPR)
	    {
	      wide_int alt = wi::add (wi::to_wide (smaller), 1,
				      TYPE_SIGN (TREE_TYPE (smaller)),
				      &overflow);
	      if (! overflow)
		alt_smaller = wide_int_to_tree (TREE_TYPE (smaller), alt);
	    }
	  else
	    {
	      wide_int alt = wi::sub (wi::to_wide (smaller), 1,
				      TYPE_SIGN (TREE_TYPE (smaller)),
				      &overflow);
	      if (! overflow)
		alt_smaller = wide_int_to_tree (TREE_TYPE (smaller), alt);
	    }
	}
    }
  else
    return false;

  /* We need to know which is the true edge and which is the false
      edge so that we know if have abs or negative abs.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);

  /* Forward the edges over the middle basic block.  */
  if (true_edge->dest == middle_bb)
    true_edge = EDGE_SUCC (true_edge->dest, 0);
  if (false_edge->dest == middle_bb)
    false_edge = EDGE_SUCC (false_edge->dest, 0);

  if (true_edge == e0)
    {
      gcc_assert (false_edge == e1);
      arg_true = arg0;
      arg_false = arg1;
    }
  else
    {
      gcc_assert (false_edge == e0);
      gcc_assert (true_edge == e1);
      arg_true = arg1;
      arg_false = arg0;
    }

  if (empty_block_p (middle_bb))
    {
      if ((operand_equal_for_phi_arg_p (arg_true, smaller)
	   || (alt_smaller
	       && operand_equal_for_phi_arg_p (arg_true, alt_smaller)))
	  && (operand_equal_for_phi_arg_p (arg_false, larger)
	      || (alt_larger
		  && operand_equal_for_phi_arg_p (arg_true, alt_larger))))
	{
	  /* Case

	     if (smaller < larger)
	     rslt = smaller;
	     else
	     rslt = larger;  */
	  minmax = MIN_EXPR;
	}
      else if ((operand_equal_for_phi_arg_p (arg_false, smaller)
		|| (alt_smaller
		    && operand_equal_for_phi_arg_p (arg_false, alt_smaller)))
	       && (operand_equal_for_phi_arg_p (arg_true, larger)
		   || (alt_larger
		       && operand_equal_for_phi_arg_p (arg_true, alt_larger))))
	minmax = MAX_EXPR;
      else
	return false;
    }
  else
    {
      /* Recognize the following case, assuming d <= u:

	 if (a <= u)
	   b = MAX (a, d);
	 x = PHI <b, u>

	 This is equivalent to

	 b = MAX (a, d);
	 x = MIN (b, u);  */

      gimple *assign = last_and_only_stmt (middle_bb);
      tree lhs, op0, op1, bound;

      if (!assign
	  || gimple_code (assign) != GIMPLE_ASSIGN)
	return false;

      lhs = gimple_assign_lhs (assign);
      ass_code = gimple_assign_rhs_code (assign);
      if (ass_code != MAX_EXPR && ass_code != MIN_EXPR)
	return false;
      op0 = gimple_assign_rhs1 (assign);
      op1 = gimple_assign_rhs2 (assign);

      if (true_edge->src == middle_bb)
	{
	  /* We got here if the condition is true, i.e., SMALLER < LARGER.  */
	  if (!operand_equal_for_phi_arg_p (lhs, arg_true))
	    return false;

	  if (operand_equal_for_phi_arg_p (arg_false, larger)
	      || (alt_larger
		  && operand_equal_for_phi_arg_p (arg_false, alt_larger)))
	    {
	      /* Case

		 if (smaller < larger)
		   {
		     r' = MAX_EXPR (smaller, bound)
		   }
		 r = PHI <r', larger>  --> to be turned to MIN_EXPR.  */
	      if (ass_code != MAX_EXPR)
		return false;

	      minmax = MIN_EXPR;
	      if (operand_equal_for_phi_arg_p (op0, smaller)
		  || (alt_smaller
		      && operand_equal_for_phi_arg_p (op0, alt_smaller)))
		bound = op1;
	      else if (operand_equal_for_phi_arg_p (op1, smaller)
		       || (alt_smaller
			   && operand_equal_for_phi_arg_p (op1, alt_smaller)))
		bound = op0;
	      else
		return false;

	      /* We need BOUND <= LARGER.  */
	      if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
						  bound, larger)))
		return false;
	    }
	  else if (operand_equal_for_phi_arg_p (arg_false, smaller)
		   || (alt_smaller
		       && operand_equal_for_phi_arg_p (arg_false, alt_smaller)))
	    {
	      /* Case

		 if (smaller < larger)
		   {
		     r' = MIN_EXPR (larger, bound)
		   }
		 r = PHI <r', smaller>  --> to be turned to MAX_EXPR.  */
	      if (ass_code != MIN_EXPR)
		return false;

	      minmax = MAX_EXPR;
	      if (operand_equal_for_phi_arg_p (op0, larger)
		  || (alt_larger
		      && operand_equal_for_phi_arg_p (op0, alt_larger)))
		bound = op1;
	      else if (operand_equal_for_phi_arg_p (op1, larger)
		       || (alt_larger
			   && operand_equal_for_phi_arg_p (op1, alt_larger)))
		bound = op0;
	      else
		return false;

	      /* We need BOUND >= SMALLER.  */
	      if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
						  bound, smaller)))
		return false;
	    }
	  else
	    return false;
	}
      else
	{
	  /* We got here if the condition is false, i.e., SMALLER > LARGER.  */
	  if (!operand_equal_for_phi_arg_p (lhs, arg_false))
	    return false;

	  if (operand_equal_for_phi_arg_p (arg_true, larger)
	      || (alt_larger
		  && operand_equal_for_phi_arg_p (arg_true, alt_larger)))
	    {
	      /* Case

		 if (smaller > larger)
		   {
		     r' = MIN_EXPR (smaller, bound)
		   }
		 r = PHI <r', larger>  --> to be turned to MAX_EXPR.  */
	      if (ass_code != MIN_EXPR)
		return false;

	      minmax = MAX_EXPR;
	      if (operand_equal_for_phi_arg_p (op0, smaller)
		  || (alt_smaller
		      && operand_equal_for_phi_arg_p (op0, alt_smaller)))
		bound = op1;
	      else if (operand_equal_for_phi_arg_p (op1, smaller)
		       || (alt_smaller
			   && operand_equal_for_phi_arg_p (op1, alt_smaller)))
		bound = op0;
	      else
		return false;

	      /* We need BOUND >= LARGER.  */
	      if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
						  bound, larger)))
		return false;
	    }
	  else if (operand_equal_for_phi_arg_p (arg_true, smaller)
		   || (alt_smaller
		       && operand_equal_for_phi_arg_p (arg_true, alt_smaller)))
	    {
	      /* Case

		 if (smaller > larger)
		   {
		     r' = MAX_EXPR (larger, bound)
		   }
		 r = PHI <r', smaller>  --> to be turned to MIN_EXPR.  */
	      if (ass_code != MAX_EXPR)
		return false;

	      minmax = MIN_EXPR;
	      if (operand_equal_for_phi_arg_p (op0, larger))
		bound = op1;
	      else if (operand_equal_for_phi_arg_p (op1, larger))
		bound = op0;
	      else
		return false;

	      /* We need BOUND <= SMALLER.  */
	      if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
						  bound, smaller)))
		return false;
	    }
	  else
	    return false;
	}

      /* Move the statement from the middle block.  */
      gsi = gsi_last_bb (cond_bb);
      gsi_from = gsi_last_nondebug_bb (middle_bb);
      reset_flow_sensitive_info (SINGLE_SSA_TREE_OPERAND (gsi_stmt (gsi_from),
							  SSA_OP_DEF));
      gsi_move_before (&gsi_from, &gsi);
    }

  /* Create an SSA var to hold the min/max result.  If we're the only
     things setting the target PHI, then we  can clone the PHI
     variable.  Otherwise we must create a new one.  */
  result = PHI_RESULT (phi);
  if (EDGE_COUNT (gimple_bb (phi)->preds) == 2)
    result = duplicate_ssa_name (result, NULL);
  else
    result = make_ssa_name (TREE_TYPE (result));

  /* Emit the statement to compute min/max.  */
  new_stmt = gimple_build_assign (result, minmax, arg0, arg1);
  gsi = gsi_last_bb (cond_bb);
  gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);

  replace_phi_edge_with_variable (cond_bb, e1, phi, result);

  return true;
}
/* Convert

   <bb 2>
   if (b_4(D) != 0)
     goto <bb 3>
   else
     goto <bb 4>

   <bb 3>
   _2 = (unsigned long) b_4(D);
   _9 = __builtin_popcountl (_2);
   OR
   _9 = __builtin_popcountl (b_4(D));

   <bb 4>
   c_12 = PHI <0(2), _9(3)>

   Into
   <bb 2>
   _2 = (unsigned long) b_4(D);
   _9 = __builtin_popcountl (_2);
   OR
   _9 = __builtin_popcountl (b_4(D));

   <bb 4>
   c_12 = PHI <_9(2)>  */
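/* Illustrative sketch (added for exposition, not from the original sources):
   at the source level this turns

     c = (b != 0) ? __builtin_popcountl (b) : 0;

   into a plain

     c = __builtin_popcountl (b);

   which is valid because the popcount of zero is zero, so the guard is
   redundant.  */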
static bool
cond_removal_in_popcount_pattern (basic_block cond_bb, basic_block middle_bb,
				  edge e1, edge e2,
				  gimple *phi, tree arg0, tree arg1)
{
  gimple *cond;
  gimple_stmt_iterator gsi, gsi_from;
  gimple *popcount;
  gimple *cast = NULL;
  tree lhs, arg;

  /* Check that

     _2 = (unsigned long) b_4(D);
     _9 = __builtin_popcountl (_2);
     OR
     _9 = __builtin_popcountl (b_4(D));

     are the only stmts in the middle_bb.  */

  gsi = gsi_start_nondebug_after_labels_bb (middle_bb);
  if (gsi_end_p (gsi))
    return false;
  cast = gsi_stmt (gsi);
  gsi_next_nondebug (&gsi);
  if (!gsi_end_p (gsi))
    {
      popcount = gsi_stmt (gsi);
      gsi_next_nondebug (&gsi);
      if (!gsi_end_p (gsi))
	return false;
    }
  else
    {
      popcount = cast;
      cast = NULL;
    }

  /* Check that we have a popcount builtin.  */
  if (!is_gimple_call (popcount))
    return false;
  combined_fn cfn = gimple_call_combined_fn (popcount);
  switch (cfn)
    {
    CASE_CFN_POPCOUNT:
      break;
    default:
      return false;
    }

  arg = gimple_call_arg (popcount, 0);
  lhs = gimple_get_lhs (popcount);

  if (cast)
    {
      /* We have a cast stmt feeding popcount builtin.  */
      /* Check that we have a cast prior to that.  */
      if (gimple_code (cast) != GIMPLE_ASSIGN
	  || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (cast)))
	return false;
      /* Result of the cast stmt is the argument to the builtin.  */
      if (arg != gimple_assign_lhs (cast))
	return false;
      arg = gimple_assign_rhs1 (cast);
    }

  if (e2->flags & EDGE_TRUE_VALUE)
    {
      std::swap (arg0, arg1);
      std::swap (e1, e2);
    }

  /* Check PHI arguments.  */
  if (lhs != arg0 || !integer_zerop (arg1))
    return false;

  cond = last_stmt (cond_bb);

  /* Cond_bb has a check for b_4 != 0 before calling the popcount
     builtin.  */
  if (gimple_code (cond) != GIMPLE_COND
      || gimple_cond_code (cond) != NE_EXPR
      || !integer_zerop (gimple_cond_rhs (cond))
      || arg != gimple_cond_lhs (cond))
    return false;

  /* And insert the popcount builtin and cast stmt before the cond_bb.  */
  gsi = gsi_last_bb (cond_bb);
  if (cast)
    {
      gsi_from = gsi_for_stmt (cast);
      gsi_move_before (&gsi_from, &gsi);
      reset_flow_sensitive_info (gimple_get_lhs (cast));
    }
  gsi_from = gsi_for_stmt (popcount);
  gsi_move_before (&gsi_from, &gsi);
  reset_flow_sensitive_info (gimple_get_lhs (popcount));

  /* Now update the PHI and remove unneeded bbs.  */
  replace_phi_edge_with_variable (cond_bb, e2, phi, lhs);
  return true;
}
/* The function absolute_replacement does the main work of doing the absolute
   replacement.  Return true if the replacement is done.  Otherwise return
   false.
   bb is the basic block where the replacement is going to be done on.  arg0
   is argument 0 from the phi.  Likewise for arg1.  */
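/* Illustrative sketch (added for exposition, not from the original sources):

     if (a_2 >= 0) goto bb2; else goto bb1;
     bb1: b_3 = -a_2;
     bb2: # r_4 = PHI <b_3(bb1), a_2(bb2)>

   computes the absolute value of a_2, so it is replaced by
   r_4 = ABS_EXPR <a_2> (with an extra negation when the PHI selects the
   negated value on the positive branch instead).  */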
static bool
abs_replacement (basic_block cond_bb, basic_block middle_bb,
		 edge e0 ATTRIBUTE_UNUSED, edge e1,
		 gimple *phi, tree arg0, tree arg1)
{
  tree result;
  gassign *new_stmt;
  gimple *cond;
  gimple_stmt_iterator gsi;
  edge true_edge, false_edge;
  gimple *assign;
  edge e;
  tree rhs, lhs;
  bool negate;
  enum tree_code cond_code;

  /* If the type says honor signed zeros we cannot do this
     optimization.  */
  if (HONOR_SIGNED_ZEROS (arg1))
    return false;

  /* OTHER_BLOCK must have only one executable statement which must have the
     form arg0 = -arg1 or arg1 = -arg0.  */

  assign = last_and_only_stmt (middle_bb);
  /* If we did not find the proper negation assignment, then we can not
     optimize.  */
  if (assign == NULL)
    return false;

  /* If we got here, then we have found the only executable statement
     in OTHER_BLOCK.  If it is anything other than arg = -arg1 or
     arg1 = -arg0, then we can not optimize.  */
  if (gimple_code (assign) != GIMPLE_ASSIGN)
    return false;

  lhs = gimple_assign_lhs (assign);

  if (gimple_assign_rhs_code (assign) != NEGATE_EXPR)
    return false;

  rhs = gimple_assign_rhs1 (assign);

  /* The assignment has to be arg0 = -arg1 or arg1 = -arg0.  */
  if (!(lhs == arg0 && rhs == arg1)
      && !(lhs == arg1 && rhs == arg0))
    return false;

  cond = last_stmt (cond_bb);
  result = PHI_RESULT (phi);

  /* Only relationals comparing arg[01] against zero are interesting.  */
  cond_code = gimple_cond_code (cond);
  if (cond_code != GT_EXPR && cond_code != GE_EXPR
      && cond_code != LT_EXPR && cond_code != LE_EXPR)
    return false;

  /* Make sure the conditional is arg[01] OP y.  */
  if (gimple_cond_lhs (cond) != rhs)
    return false;

  if (FLOAT_TYPE_P (TREE_TYPE (gimple_cond_rhs (cond)))
	       ? real_zerop (gimple_cond_rhs (cond))
	       : integer_zerop (gimple_cond_rhs (cond)))
    ;
  else
    return false;

  /* We need to know which is the true edge and which is the false
     edge so that we know if have abs or negative abs.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);

  /* For GT_EXPR/GE_EXPR, if the true edge goes to OTHER_BLOCK, then we
     will need to negate the result.  Similarly for LT_EXPR/LE_EXPR if
     the false edge goes to OTHER_BLOCK.  */
  if (cond_code == GT_EXPR || cond_code == GE_EXPR)
    e = true_edge;
  else
    e = false_edge;

  if (e->dest == middle_bb)
    negate = true;
  else
    negate = false;

  /* If the code negates only iff positive then make sure to not
     introduce undefined behavior when negating or computing the absolute.
     ??? We could use range info if present to check for arg1 == INT_MIN.  */
  if (negate
      && (ANY_INTEGRAL_TYPE_P (TREE_TYPE (arg1))
	  && ! TYPE_OVERFLOW_WRAPS (TREE_TYPE (arg1))))
    return false;

  result = duplicate_ssa_name (result, NULL);

  if (negate)
    lhs = make_ssa_name (TREE_TYPE (result));
  else
    lhs = result;

  /* Build the modify expression with abs expression.  */
  new_stmt = gimple_build_assign (lhs, ABS_EXPR, rhs);

  gsi = gsi_last_bb (cond_bb);
  gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);

  if (negate)
    {
      /* Get the right GSI.  We want to insert after the recently
	 added ABS_EXPR statement (which we know is the first statement
	 in the block).  */
      new_stmt = gimple_build_assign (result, NEGATE_EXPR, lhs);

      gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
    }

  replace_phi_edge_with_variable (cond_bb, e1, phi, result);

  /* Note that we optimized this PHI.  */
  return true;
}
/* Auxiliary functions to determine the set of memory accesses which
   can't trap because they are preceded by accesses to the same memory
   portion.  We do that for MEM_REFs, so we only need to track
   the SSA_NAME of the pointer indirectly referenced.  The algorithm
   simply is a walk over all instructions in dominator order.  When
   we see an MEM_REF we determine if we've already seen a same
   ref anywhere up to the root of the dominator tree.  If we do the
   current access can't trap.  If we don't see any dominating access
   the current access might trap, but might also make later accesses
   non-trapping, so we remember it.  We need to be careful with loads
   or stores, for instance a load might not trap, while a store would,
   so if we see a dominating read access this doesn't mean that a later
   write access would not trap.  Hence we also need to differentiate the
   type of access(es) seen.

   ??? We currently are very conservative and assume that a load might
   trap even if a store doesn't (write-only memory).  This probably is
   overly conservative.  */
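/* Illustrative sketch (added for exposition, not from the original sources):

     tmp_2 = *p_1;      <-- first load through p_1, might trap, remembered
     if (cond)
       y_3 = *p_1;      <-- dominated by the same load, cannot trap

   the second access is recorded in the non-trapping set, which is what allows
   the conditional store replacement above to move accesses to *p_1.  */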
/* A hash-table of SSA_NAMEs, and in which basic block an MEM_REF
   through it was seen, which would constitute a no-trap region for
   same accesses.  */
struct name_to_bb
{
  unsigned int ssa_name_ver;
  unsigned int phase;
  bool store;
  HOST_WIDE_INT offset, size;
  basic_block bb;
};

/* Hashtable helpers.  */

struct ssa_names_hasher : free_ptr_hash <name_to_bb>
{
  static inline hashval_t hash (const name_to_bb *);
  static inline bool equal (const name_to_bb *, const name_to_bb *);
};

/* Used for quick clearing of the hash-table when we see calls.
   Hash entries with phase < nt_call_phase are invalid.  */
static unsigned int nt_call_phase;

/* The hash function.  */

inline hashval_t
ssa_names_hasher::hash (const name_to_bb *n)
{
  return n->ssa_name_ver ^ (((hashval_t) n->store) << 31)
	 ^ (n->offset << 6) ^ (n->size << 3);
}

/* The equality function of *P1 and *P2.  */

inline bool
ssa_names_hasher::equal (const name_to_bb *n1, const name_to_bb *n2)
{
  return n1->ssa_name_ver == n2->ssa_name_ver
	 && n1->store == n2->store
	 && n1->offset == n2->offset
	 && n1->size == n2->size;
}

class nontrapping_dom_walker : public dom_walker
{
public:
  nontrapping_dom_walker (cdi_direction direction, hash_set<tree> *ps)
    : dom_walker (direction), m_nontrapping (ps), m_seen_ssa_names (128) {}

  virtual edge before_dom_children (basic_block);
  virtual void after_dom_children (basic_block);

private:

  /* We see the expression EXP in basic block BB.  If it's an interesting
     expression (an MEM_REF through an SSA_NAME) possibly insert the
     expression into the set NONTRAP or the hash table of seen expressions.
     STORE is true if this expression is on the LHS, otherwise it's on
     the RHS.  */
  void add_or_mark_expr (basic_block, tree, bool);

  hash_set<tree> *m_nontrapping;

  /* The hash table for remembering what we've seen.  */
  hash_table<ssa_names_hasher> m_seen_ssa_names;
};
/* Called by walk_dominator_tree, when entering the block BB.  */
edge
nontrapping_dom_walker::before_dom_children (basic_block bb)
{
  edge e;
  edge_iterator ei;
  gimple_stmt_iterator gsi;

  /* If we haven't seen all our predecessors, clear the hash-table.  */
  FOR_EACH_EDGE (e, ei, bb->preds)
    if ((((size_t)e->src->aux) & 2) == 0)
      {
	nt_call_phase++;
	break;
      }

  /* Mark this BB as being on the path to dominator root and as visited.  */
  bb->aux = (void*)(1 | 2);

  /* And walk the statements in order.  */
  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple *stmt = gsi_stmt (gsi);

      if ((gimple_code (stmt) == GIMPLE_ASM && gimple_vdef (stmt))
	  || (is_gimple_call (stmt)
	      && (!nonfreeing_call_p (stmt) || !nonbarrier_call_p (stmt))))
	nt_call_phase++;
      else if (gimple_assign_single_p (stmt) && !gimple_has_volatile_ops (stmt))
	{
	  add_or_mark_expr (bb, gimple_assign_lhs (stmt), true);
	  add_or_mark_expr (bb, gimple_assign_rhs1 (stmt), false);
	}
    }
  return NULL;
}

/* Called by walk_dominator_tree, when basic block BB is exited.  */
void
nontrapping_dom_walker::after_dom_children (basic_block bb)
{
  /* This BB isn't on the path to dominator root anymore.  */
  bb->aux = (void*)2;
}
/* We see the expression EXP in basic block BB.  If it's an interesting
   expression (an MEM_REF through an SSA_NAME) possibly insert the
   expression into the set NONTRAP or the hash table of seen expressions.
   STORE is true if this expression is on the LHS, otherwise it's on
   the RHS.  */
void
nontrapping_dom_walker::add_or_mark_expr (basic_block bb, tree exp, bool store)
{
  HOST_WIDE_INT size;

  if (TREE_CODE (exp) == MEM_REF
      && TREE_CODE (TREE_OPERAND (exp, 0)) == SSA_NAME
      && tree_fits_shwi_p (TREE_OPERAND (exp, 1))
      && (size = int_size_in_bytes (TREE_TYPE (exp))) > 0)
    {
      tree name = TREE_OPERAND (exp, 0);
      struct name_to_bb map;
      name_to_bb **slot;
      struct name_to_bb *n2bb;
      basic_block found_bb = 0;

      /* Try to find the last seen MEM_REF through the same
	 SSA_NAME, which can trap.  */
      map.ssa_name_ver = SSA_NAME_VERSION (name);
      map.store = store;
      map.offset = tree_to_shwi (TREE_OPERAND (exp, 1));
      map.size = size;

      slot = m_seen_ssa_names.find_slot (&map, INSERT);
      n2bb = *slot;
      if (n2bb && n2bb->phase >= nt_call_phase)
	found_bb = n2bb->bb;

      /* If we've found a trapping MEM_REF, _and_ it dominates EXP
	 (it's in a basic block on the path from us to the dominator root)
	 then we can't trap.  */
      if (found_bb && (((size_t)found_bb->aux) & 1) == 1)
	{
	  m_nontrapping->add (exp);
	}
      else
	{
	  /* EXP might trap, so insert it into the hash table.  */
	  if (n2bb)
	    {
	      n2bb->phase = nt_call_phase;
	      n2bb->bb = bb;
	    }
	  else
	    {
	      n2bb = XNEW (struct name_to_bb);
	      n2bb->ssa_name_ver = SSA_NAME_VERSION (name);
	      n2bb->phase = nt_call_phase;
	      n2bb->bb = bb;
	      n2bb->store = store;
	      n2bb->offset = map.offset;
	      n2bb->size = size;
	      *slot = n2bb;
	    }
	}
    }
}
/* This is the entry point of gathering non trapping memory accesses.
   It will do a dominator walk over the whole function, and it will
   make use of the bb->aux pointers.  It returns a set of trees
   (the MEM_REFs itself) which can't trap.  */
static hash_set<tree> *
get_non_trapping (void)
{
  nt_call_phase = 0;
  hash_set<tree> *nontrap = new hash_set<tree>;
  /* We're going to do a dominator walk, so ensure that we have
     dominance information.  */
  calculate_dominance_info (CDI_DOMINATORS);

  nontrapping_dom_walker (CDI_DOMINATORS, nontrap)
    .walk (cfun->cfg->x_entry_block_ptr);

  clear_aux_for_blocks ();
  return nontrap;
}
/* Do the main work of conditional store replacement.  We already know
   that the recognized pattern looks like so:

   split:
     if (cond) goto MIDDLE_BB; else goto JOIN_BB (edge E1)
   MIDDLE_BB:
     something
     fallthrough (edge E0)
   JOIN_BB:
     somewhere.

   We check that MIDDLE_BB contains only one store, that that store
   doesn't trap (not via NOTRAP, but via checking if an access to the same
   memory location dominates us) and that the store has a "simple" RHS.  */
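/* Illustrative sketch (added for exposition, not from the original sources)
   of the rewritten GIMPLE for the single-store case, using hypothetical
   SSA names:

     MIDDLE_BB:   (store removed)
     on edge E1:  cstore_1 = *p;
     JOIN_BB:     # cstore_2 = PHI <rhs(E0), cstore_1(E1)>
		  *p = cstore_2;  */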
static bool
cond_store_replacement (basic_block middle_bb, basic_block join_bb,
			edge e0, edge e1, hash_set<tree> *nontrap)
{
  gimple *assign = last_and_only_stmt (middle_bb);
  tree lhs, rhs, name, name2;
  gphi *newphi;
  gassign *new_stmt;
  gimple_stmt_iterator gsi;
  source_location locus;

  /* Check if middle_bb contains only one store.  */
  if (!assign
      || !gimple_assign_single_p (assign)
      || gimple_has_volatile_ops (assign))
    return false;

  locus = gimple_location (assign);
  lhs = gimple_assign_lhs (assign);
  rhs = gimple_assign_rhs1 (assign);
  if (TREE_CODE (lhs) != MEM_REF
      || TREE_CODE (TREE_OPERAND (lhs, 0)) != SSA_NAME
      || !is_gimple_reg_type (TREE_TYPE (lhs)))
    return false;

  /* Prove that we can move the store down.  We could also check
     TREE_THIS_NOTRAP here, but in that case we also could move stores,
     whose value is not available readily, which we want to avoid.  */
  if (!nontrap->contains (lhs))
    return false;

  /* Now we've checked the constraints, so do the transformation:
     1) Remove the single store.  */
  gsi = gsi_for_stmt (assign);
  unlink_stmt_vdef (assign);
  gsi_remove (&gsi, true);
  release_defs (assign);

  /* Make both store and load use alias-set zero as we have to
     deal with the case of the store being a conditional change
     of the dynamic type.  */
  lhs = unshare_expr (lhs);
  tree *basep = &lhs;
  while (handled_component_p (*basep))
    basep = &TREE_OPERAND (*basep, 0);
  if (TREE_CODE (*basep) == MEM_REF
      || TREE_CODE (*basep) == TARGET_MEM_REF)
    TREE_OPERAND (*basep, 1)
      = fold_convert (ptr_type_node, TREE_OPERAND (*basep, 1));
  else
    *basep = build2 (MEM_REF, TREE_TYPE (*basep),
		     build_fold_addr_expr (*basep),
		     build_zero_cst (ptr_type_node));

  /* 2) Insert a load from the memory of the store to the temporary
        on the edge which did not contain the store.  */
  name = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
  new_stmt = gimple_build_assign (name, lhs);
  gimple_set_location (new_stmt, locus);
  gsi_insert_on_edge (e1, new_stmt);

  /* 3) Create a PHI node at the join block, with one argument
        holding the old RHS, and the other holding the temporary
        where we stored the old memory contents.  */
  name2 = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
  newphi = create_phi_node (name2, join_bb);
  add_phi_arg (newphi, rhs, e0, locus);
  add_phi_arg (newphi, name, e1, locus);

  lhs = unshare_expr (lhs);
  new_stmt = gimple_build_assign (lhs, PHI_RESULT (newphi));

  /* 4) Insert that PHI node.  */
  gsi = gsi_after_labels (join_bb);
  if (gsi_end_p (gsi))
    {
      gsi = gsi_last_bb (join_bb);
      gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
    }
  else
    gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);

  return true;
}
2100 /* Do the main work of conditional store replacement. */
2103 cond_if_else_store_replacement_1 (basic_block then_bb
, basic_block else_bb
,
2104 basic_block join_bb
, gimple
*then_assign
,
2105 gimple
*else_assign
)
2107 tree lhs_base
, lhs
, then_rhs
, else_rhs
, name
;
2108 source_location then_locus
, else_locus
;
2109 gimple_stmt_iterator gsi
;
2113 if (then_assign
== NULL
2114 || !gimple_assign_single_p (then_assign
)
2115 || gimple_clobber_p (then_assign
)
2116 || gimple_has_volatile_ops (then_assign
)
2117 || else_assign
== NULL
2118 || !gimple_assign_single_p (else_assign
)
2119 || gimple_clobber_p (else_assign
)
2120 || gimple_has_volatile_ops (else_assign
))
2123 lhs
= gimple_assign_lhs (then_assign
);
2124 if (!is_gimple_reg_type (TREE_TYPE (lhs
))
2125 || !operand_equal_p (lhs
, gimple_assign_lhs (else_assign
), 0))
2128 lhs_base
= get_base_address (lhs
);
2129 if (lhs_base
== NULL_TREE
2130 || (!DECL_P (lhs_base
) && TREE_CODE (lhs_base
) != MEM_REF
))
2133 then_rhs
= gimple_assign_rhs1 (then_assign
);
2134 else_rhs
= gimple_assign_rhs1 (else_assign
);
2135 then_locus
= gimple_location (then_assign
);
2136 else_locus
= gimple_location (else_assign
);
  /* Now we've checked the constraints, so do the transformation:
     1) Remove the stores.  */
  gsi = gsi_for_stmt (then_assign);
  unlink_stmt_vdef (then_assign);
  gsi_remove (&gsi, true);
  release_defs (then_assign);

  gsi = gsi_for_stmt (else_assign);
  unlink_stmt_vdef (else_assign);
  gsi_remove (&gsi, true);
  release_defs (else_assign);
  /* 2) Create a PHI node at the join block, with one argument
        holding the THEN RHS and the other holding the ELSE RHS.  */
  name = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
  newphi = create_phi_node (name, join_bb);
  add_phi_arg (newphi, then_rhs, EDGE_SUCC (then_bb, 0), then_locus);
  add_phi_arg (newphi, else_rhs, EDGE_SUCC (else_bb, 0), else_locus);

  new_stmt = gimple_build_assign (lhs, PHI_RESULT (newphi));
  /* 3) Insert that PHI node.  */
  gsi = gsi_after_labels (join_bb);
  if (gsi_end_p (gsi))
    {
      gsi = gsi_last_bb (join_bb);
      gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
    }
  else
    gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);

  return true;
}
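
/* For illustration only (hypothetical names): the if/else store
   sinking performed above corresponds roughly to rewriting

     if (cond)
       x = a;
     else
       x = b;

   into

     tmp = cond ? a : b;
     x = tmp;

   where the PHI of the two right-hand sides is created in JOIN_BB and
   the single remaining store is always executed, so no dominating
   access is required.  */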
/* Return the single store in BB with VDEF or NULL if there are
   other stores in the BB or loads following the store.  */

static gimple *
single_trailing_store_in_bb (basic_block bb, tree vdef)
{
  if (SSA_NAME_IS_DEFAULT_DEF (vdef))
    return NULL;
  gimple *store = SSA_NAME_DEF_STMT (vdef);
  if (gimple_bb (store) != bb
      || gimple_code (store) == GIMPLE_PHI)
    return NULL;

  /* Verify there is no other store in this BB.  */
  if (!SSA_NAME_IS_DEFAULT_DEF (gimple_vuse (store))
      && gimple_bb (SSA_NAME_DEF_STMT (gimple_vuse (store))) == bb
      && gimple_code (SSA_NAME_DEF_STMT (gimple_vuse (store))) != GIMPLE_PHI)
    return NULL;

  /* Verify there is no load or store after the store.  */
  use_operand_p use_p;
  imm_use_iterator imm_iter;
  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, gimple_vdef (store))
    if (USE_STMT (use_p) != store
        && gimple_bb (USE_STMT (use_p)) == bb)
      return NULL;

  return store;
}
/* Conditional store replacement.  We already know
   that the recognized pattern looks like so:

   split:
     if (cond) goto THEN_BB; else goto ELSE_BB (edge E1)
   THEN_BB:
     ...
     X = Y;
     ...
     goto JOIN_BB;
   ELSE_BB:
     ...
     X = Z;
     ...
     fallthrough (edge E0)
   JOIN_BB:
     some more

   We check that it is safe to sink the store to JOIN_BB by verifying that
   there are no read-after-write or write-after-write dependencies in
   THEN_BB and ELSE_BB.  */
static bool
cond_if_else_store_replacement (basic_block then_bb, basic_block else_bb,
                                basic_block join_bb)
{
  vec<data_reference_p> then_datarefs, else_datarefs;
  vec<ddr_p> then_ddrs, else_ddrs;
  gimple *then_store, *else_store;
  bool found, ok = false, res;
  struct data_dependence_relation *ddr;
  data_reference_p then_dr, else_dr;
  int i, j;
  tree then_lhs, else_lhs;
  basic_block blocks[3];
  /* Handle the case with single store in THEN_BB and ELSE_BB.  That is
     cheap enough to always handle as it allows us to elide dependence
     checking.  */
  gphi *vphi = NULL;
  for (gphi_iterator si = gsi_start_phis (join_bb); !gsi_end_p (si);
       gsi_next (&si))
    if (virtual_operand_p (gimple_phi_result (si.phi ())))
      {
        vphi = si.phi ();
        break;
      }
  if (!vphi)
    return false;
  tree then_vdef = PHI_ARG_DEF_FROM_EDGE (vphi, single_succ_edge (then_bb));
  tree else_vdef = PHI_ARG_DEF_FROM_EDGE (vphi, single_succ_edge (else_bb));
  gimple *then_assign = single_trailing_store_in_bb (then_bb, then_vdef);
  if (then_assign)
    {
      gimple *else_assign = single_trailing_store_in_bb (else_bb, else_vdef);
      if (else_assign)
        return cond_if_else_store_replacement_1 (then_bb, else_bb, join_bb,
                                                 then_assign, else_assign);
    }
  if (MAX_STORES_TO_SINK == 0)
    return false;

  /* Find data references.  */
  then_datarefs.create (1);
  else_datarefs.create (1);
  if ((find_data_references_in_bb (NULL, then_bb, &then_datarefs)
       == chrec_dont_know)
      || !then_datarefs.length ()
      || (find_data_references_in_bb (NULL, else_bb, &else_datarefs)
          == chrec_dont_know)
      || !else_datarefs.length ())
    {
      free_data_refs (then_datarefs);
      free_data_refs (else_datarefs);
      return false;
    }
  /* Find pairs of stores with equal LHS.  */
  auto_vec<gimple *, 1> then_stores, else_stores;
  FOR_EACH_VEC_ELT (then_datarefs, i, then_dr)
    {
      if (DR_IS_READ (then_dr))
        continue;

      then_store = DR_STMT (then_dr);
      then_lhs = gimple_get_lhs (then_store);
      if (then_lhs == NULL_TREE)
        continue;
      found = false;

      FOR_EACH_VEC_ELT (else_datarefs, j, else_dr)
        {
          if (DR_IS_READ (else_dr))
            continue;

          else_store = DR_STMT (else_dr);
          else_lhs = gimple_get_lhs (else_store);
          if (else_lhs == NULL_TREE)
            continue;

          if (operand_equal_p (then_lhs, else_lhs, 0))
            {
              found = true;
              break;
            }
        }

      if (!found)
        continue;

      then_stores.safe_push (then_store);
      else_stores.safe_push (else_store);
    }
  /* No pairs of stores found.  */
  if (!then_stores.length ()
      || then_stores.length () > (unsigned) MAX_STORES_TO_SINK)
    {
      free_data_refs (then_datarefs);
      free_data_refs (else_datarefs);
      return false;
    }

  /* Compute and check data dependencies in both basic blocks.  */
  then_ddrs.create (1);
  else_ddrs.create (1);
  if (!compute_all_dependences (then_datarefs, &then_ddrs,
                                vNULL, false)
      || !compute_all_dependences (else_datarefs, &else_ddrs,
                                   vNULL, false))
    {
      free_dependence_relations (then_ddrs);
      free_dependence_relations (else_ddrs);
      free_data_refs (then_datarefs);
      free_data_refs (else_datarefs);
      return false;
    }
  blocks[0] = then_bb;
  blocks[1] = else_bb;
  blocks[2] = join_bb;
  renumber_gimple_stmt_uids_in_blocks (blocks, 3);
  /* Check that there are no read-after-write or write-after-write dependencies
     in THEN_BB.  */
  FOR_EACH_VEC_ELT (then_ddrs, i, ddr)
    {
      struct data_reference *dra = DDR_A (ddr);
      struct data_reference *drb = DDR_B (ddr);

      if (DDR_ARE_DEPENDENT (ddr) != chrec_known
          && ((DR_IS_READ (dra) && DR_IS_WRITE (drb)
               && gimple_uid (DR_STMT (dra)) > gimple_uid (DR_STMT (drb)))
              || (DR_IS_READ (drb) && DR_IS_WRITE (dra)
                  && gimple_uid (DR_STMT (drb)) > gimple_uid (DR_STMT (dra)))
              || (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))))
        {
          free_dependence_relations (then_ddrs);
          free_dependence_relations (else_ddrs);
          free_data_refs (then_datarefs);
          free_data_refs (else_datarefs);
          return false;
        }
    }
  /* Check that there are no read-after-write or write-after-write dependencies
     in ELSE_BB.  */
  FOR_EACH_VEC_ELT (else_ddrs, i, ddr)
    {
      struct data_reference *dra = DDR_A (ddr);
      struct data_reference *drb = DDR_B (ddr);

      if (DDR_ARE_DEPENDENT (ddr) != chrec_known
          && ((DR_IS_READ (dra) && DR_IS_WRITE (drb)
               && gimple_uid (DR_STMT (dra)) > gimple_uid (DR_STMT (drb)))
              || (DR_IS_READ (drb) && DR_IS_WRITE (dra)
                  && gimple_uid (DR_STMT (drb)) > gimple_uid (DR_STMT (dra)))
              || (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))))
        {
          free_dependence_relations (then_ddrs);
          free_dependence_relations (else_ddrs);
          free_data_refs (then_datarefs);
          free_data_refs (else_datarefs);
          return false;
        }
    }
  /* Sink stores with same LHS.  */
  FOR_EACH_VEC_ELT (then_stores, i, then_store)
    {
      else_store = else_stores[i];
      res = cond_if_else_store_replacement_1 (then_bb, else_bb, join_bb,
                                              then_store, else_store);
      ok = ok || res;
    }

  free_dependence_relations (then_ddrs);
  free_dependence_relations (else_ddrs);
  free_data_refs (then_datarefs);
  free_data_refs (else_datarefs);

  return ok;
}
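
/* For illustration only (hypothetical names): a case the dependence
   checks above must reject.  In

     if (cond)
       {
         a[0] = x;
         y = a[0] + 1;
       }
     else
       a[0] = z;

   sinking the store a[0] = x to the join block would move it past the
   read of a[0] in the then-block and change the value loaded into y.
   Only store pairs whose blocks contain no such read-after-write or
   write-after-write dependence are sunk.  */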
/* Return TRUE if STMT has a VUSE whose corresponding VDEF is in BB.  */

static bool
local_mem_dependence (gimple *stmt, basic_block bb)
{
  tree vuse = gimple_vuse (stmt);
  gimple *def;

  if (!vuse)
    return false;

  def = SSA_NAME_DEF_STMT (vuse);
  return (def && gimple_bb (def) == bb);
}
/* Given a "diamond" control-flow pattern where BB0 tests a condition,
   BB1 and BB2 are "then" and "else" blocks dependent on this test,
   and BB3 rejoins control flow following BB1 and BB2, look for
   opportunities to hoist loads as follows.  If BB3 contains a PHI of
   two loads, one each occurring in BB1 and BB2, and the loads are
   provably of adjacent fields in the same structure, then move both
   loads into BB0.  Of course this can only be done if there are no
   dependencies preventing such motion.

   One of the hoisted loads will always be speculative, so the
   transformation is currently conservative:

    - The fields must be strictly adjacent.
    - The two fields must occupy a single memory block that is
      guaranteed to not cross a page boundary.

   The last is difficult to prove, as such memory blocks should be
   aligned on the minimum of the stack alignment boundary and the
   alignment guaranteed by heap allocation interfaces.  Thus we rely
   on a parameter for the alignment value.

   Provided a good value is used for the last case, the first
   restriction could possibly be relaxed.  */
static void
hoist_adjacent_loads (basic_block bb0, basic_block bb1,
                      basic_block bb2, basic_block bb3)
{
  int param_align = PARAM_VALUE (PARAM_L1_CACHE_LINE_SIZE);
  unsigned param_align_bits = (unsigned) (param_align * BITS_PER_UNIT);
  gphi_iterator gsi;

  /* Walk the phis in bb3 looking for an opportunity.  We are looking
     for phis of two SSA names, one each of which is defined in bb1 and
     bb2.  */
  for (gsi = gsi_start_phis (bb3); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gphi *phi_stmt = gsi.phi ();
      gimple *def1, *def2;
      tree arg1, arg2, ref1, ref2, field1, field2;
      tree tree_offset1, tree_offset2, tree_size2, next;
      int offset1, offset2, size2;
      unsigned align1;
      gimple_stmt_iterator gsi2;
      basic_block bb_for_def1, bb_for_def2;
      if (gimple_phi_num_args (phi_stmt) != 2
          || virtual_operand_p (gimple_phi_result (phi_stmt)))
        continue;

      arg1 = gimple_phi_arg_def (phi_stmt, 0);
      arg2 = gimple_phi_arg_def (phi_stmt, 1);

      if (TREE_CODE (arg1) != SSA_NAME
          || TREE_CODE (arg2) != SSA_NAME
          || SSA_NAME_IS_DEFAULT_DEF (arg1)
          || SSA_NAME_IS_DEFAULT_DEF (arg2))
        continue;

      def1 = SSA_NAME_DEF_STMT (arg1);
      def2 = SSA_NAME_DEF_STMT (arg2);

      if ((gimple_bb (def1) != bb1 || gimple_bb (def2) != bb2)
          && (gimple_bb (def2) != bb1 || gimple_bb (def1) != bb2))
        continue;

      /* Check the mode of the arguments to be sure a conditional move
         can be generated for it.  */
      if (optab_handler (movcc_optab, TYPE_MODE (TREE_TYPE (arg1)))
          == CODE_FOR_nothing)
        continue;

      /* Both statements must be assignments whose RHS is a COMPONENT_REF.  */
      if (!gimple_assign_single_p (def1)
          || !gimple_assign_single_p (def2)
          || gimple_has_volatile_ops (def1)
          || gimple_has_volatile_ops (def2))
        continue;
      ref1 = gimple_assign_rhs1 (def1);
      ref2 = gimple_assign_rhs1 (def2);

      if (TREE_CODE (ref1) != COMPONENT_REF
          || TREE_CODE (ref2) != COMPONENT_REF)
        continue;

      /* The zeroth operand of the two component references must be
         identical.  It is not sufficient to compare get_base_address of
         the two references, because this could allow for different
         elements of the same array in the two trees.  It is not safe to
         assume that the existence of one array element implies the
         existence of a different one.  */
      if (!operand_equal_p (TREE_OPERAND (ref1, 0), TREE_OPERAND (ref2, 0), 0))
        continue;

      field1 = TREE_OPERAND (ref1, 1);
      field2 = TREE_OPERAND (ref2, 1);

      /* Check for field adjacency, and ensure field1 comes first.  */
      for (next = DECL_CHAIN (field1);
           next && TREE_CODE (next) != FIELD_DECL;
           next = DECL_CHAIN (next))
        ;

      if (next != field2)
        {
          for (next = DECL_CHAIN (field2);
               next && TREE_CODE (next) != FIELD_DECL;
               next = DECL_CHAIN (next))
            ;

          if (next != field1)
            continue;

          std::swap (field1, field2);
          std::swap (def1, def2);
        }
      bb_for_def1 = gimple_bb (def1);
      bb_for_def2 = gimple_bb (def2);

      /* Check for proper alignment of the first field.  */
      tree_offset1 = bit_position (field1);
      tree_offset2 = bit_position (field2);
      tree_size2 = DECL_SIZE (field2);

      if (!tree_fits_uhwi_p (tree_offset1)
          || !tree_fits_uhwi_p (tree_offset2)
          || !tree_fits_uhwi_p (tree_size2))
        continue;

      offset1 = tree_to_uhwi (tree_offset1);
      offset2 = tree_to_uhwi (tree_offset2);
      size2 = tree_to_uhwi (tree_size2);
      align1 = DECL_ALIGN (field1) % param_align_bits;

      if (offset1 % BITS_PER_UNIT != 0)
        continue;
      /* For profitability, the two field references should fit within
         a single cache line.  */
      if (align1 + offset2 - offset1 + size2 > param_align_bits)
        continue;

      /* The two expressions cannot be dependent upon vdefs defined
         in bb1/bb2.  */
      if (local_mem_dependence (def1, bb_for_def1)
          || local_mem_dependence (def2, bb_for_def2))
        continue;

      /* The conditions are satisfied; hoist the loads from bb1 and bb2 into
         bb0.  We hoist the first one first so that a cache miss is handled
         efficiently regardless of hardware cache-fill policy.  */
      gsi2 = gsi_for_stmt (def1);
      gsi_move_to_bb_end (&gsi2, bb0);
      gsi2 = gsi_for_stmt (def2);
      gsi_move_to_bb_end (&gsi2, bb0);

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file,
                   "\nHoisting adjacent loads from %d and %d into %d: \n",
                   bb_for_def1->index, bb_for_def2->index, bb0->index);
          print_gimple_stmt (dump_file, def1, 0, TDF_VOPS|TDF_MEMSYMS);
          print_gimple_stmt (dump_file, def2, 0, TDF_VOPS|TDF_MEMSYMS);
        }
    }
}
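
/* For illustration only (hypothetical types and names): the kind of
   source the hoisting above targets.  Both loads read adjacent fields
   of the same object, so moving them ahead of the branch lets the
   backend emit a conditional move:

     struct node { struct node *left, *right; };

     struct node *
     pick (struct node *n, int cond)
     {
       return cond ? n->left : n->right;
     }

   After hoisting, n->left and n->right are both loaded before the
   branch; the alignment and size checks against
   PARAM_L1_CACHE_LINE_SIZE keep the speculative second load within the
   same aligned block as the non-speculative one.  */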
/* Determine whether we should attempt to hoist adjacent loads out of
   diamond patterns in pass_phiopt.  Always hoist loads if
   -fhoist-adjacent-loads is specified and the target machine has
   both a conditional move instruction and a defined cache line size.  */

static bool
gate_hoist_loads (void)
{
  return (flag_hoist_adjacent_loads == 1
          && PARAM_VALUE (PARAM_L1_CACHE_LINE_SIZE)
          && HAVE_conditional_move);
}
/* This pass tries to replace an if-then-else block with an
   assignment.  We have four kinds of transformations.  Some of these
   transformations are also performed by the ifcvt RTL optimizer.

   Conditional Replacement
   -----------------------

   This transformation, implemented in conditional_replacement,
   replaces

     bb0:
      if (cond) goto bb2; else goto bb1;
     bb1:
     bb2:
      x = PHI <0 (bb1), 1 (bb0), ...>;

   with

     bb0:
      x' = cond;
      goto bb2;
     bb2:
      x = PHI <x' (bb0), ...>;

   We remove bb1 as it becomes unreachable.  This occurs often due to
   gimplification of conditionals.

   Value Replacement
   -----------------

   This transformation, implemented in value_replacement, replaces

     bb0:
      if (a != b) goto bb2; else goto bb1;
     bb1:
     bb2:
      x = PHI <a (bb1), b (bb0), ...>;

   with

     bb0:
     bb2:
      x = PHI <b (bb0), ...>;

   This opportunity can sometimes occur as a result of other
   optimizations.

   Another case caught by value replacement looks like this:

     bb0:
       t1 = a == CONST;
       t2 = b > c;
       t3 = t1 & t2;
       if (t3 != 0) goto bb1; else goto bb2;

   where the resulting PHI of CONST and a simplifies to a alone.

   ABS Replacement
   ---------------

   This transformation, implemented in abs_replacement, replaces

     bb0:
      if (a >= 0) goto bb2; else goto bb1;
     bb1:
      x = -a;
     bb2:
      x = PHI <x (bb1), a (bb0), ...>;

   with

     bb0:
      x' = ABS_EXPR <a>;
     bb2:
      x = PHI <x' (bb0), ...>;

   MIN/MAX Replacement
   -------------------

   This transformation, implemented in minmax_replacement, replaces

     bb0:
      if (a <= b) goto bb2; else goto bb1;
     bb1:
     bb2:
      x = PHI <b (bb1), a (bb0), ...>;

   with

     bb0:
      x' = MIN_EXPR (a, b)
     bb2:
      x = PHI <x' (bb0), ...>;

   A similar transformation is done for MAX_EXPR.

   This pass also performs a fifth transformation of a slightly different
   flavor.

   Factor conversion in COND_EXPR
   ------------------------------

   This transformation factors the conversion out of COND_EXPR with
   factor_out_conditional_conversion.  For example:

     if (a <= CST) goto <bb 3>; else goto <bb 4>;
     <bb 3>:
       tmp = (int) a;
     <bb 4>:
       tmp = PHI <tmp, CST>

   becomes

     if (a <= CST) goto <bb 3>; else goto <bb 4>;
     <bb 3>:
     <bb 4>:
       a' = PHI <a, CST>
       tmp = (int) a';

   Adjacent Load Hoisting
   ----------------------

   This transformation replaces

     bb0:
       if (...) goto bb2; else goto bb1;
     bb1:
       x1 = (<expr>).field1;
       goto bb3;
     bb2:
       x2 = (<expr>).field2;
     bb3:
       # x = PHI <x1, x2>;

   with

     bb0:
       x1 = (<expr>).field1;
       x2 = (<expr>).field2;
       if (...) goto bb2; else goto bb1;
     bb1:
       goto bb3;
     bb2:
     bb3:
       # x = PHI <x1, x2>;

   The purpose of this transformation is to enable generation of conditional
   move instructions such as Intel CMOVE or PowerPC ISEL.  Because one of
   the loads is speculative, the transformation is restricted to very
   specific cases to avoid introducing a page fault.  We are looking for
   the common case of

     if (...)
       x = y->left;
     else
       x = y->right;

   where left and right are typically adjacent pointers in a tree structure.  */
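
/* For illustration only (hypothetical function names): at the source
   level the transformations described above turn branchy idioms such as

     int flag (int a, int b) { return a != b ? 1 : 0; }
     int iabs (int a)        { return a >= 0 ? a : -a; }
     int imin (int a, int b) { return a <= b ? a : b; }

   into straight-line GIMPLE (a plain comparison assignment, an
   ABS_EXPR and a MIN_EXPR respectively) with no control flow, which
   later GIMPLE passes and the RTL if-conversion pass can optimize
   further.  */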
const pass_data pass_data_phiopt =
{
  GIMPLE_PASS, /* type */
  "phiopt", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_PHIOPT, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_phiopt : public gimple_opt_pass
{
public:
  pass_phiopt (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_phiopt, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_phiopt (m_ctxt); }
  virtual bool gate (function *) { return flag_ssa_phiopt; }
  virtual unsigned int execute (function *)
    {
      return tree_ssa_phiopt_worker (false, gate_hoist_loads ());
    }

}; // class pass_phiopt
gimple_opt_pass *
make_pass_phiopt (gcc::context *ctxt)
{
  return new pass_phiopt (ctxt);
}
const pass_data pass_data_cselim =
{
  GIMPLE_PASS, /* type */
  "cselim", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_PHIOPT, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_cselim : public gimple_opt_pass
{
public:
  pass_cselim (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_cselim, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return flag_tree_cselim; }
  virtual unsigned int execute (function *) { return tree_ssa_cs_elim (); }

}; // class pass_cselim
gimple_opt_pass *
make_pass_cselim (gcc::context *ctxt)
{
  return new pass_cselim (ctxt);
}