/* Optimization of PHI nodes by converting them into straightline code.
   Copyright (C) 2004-2024 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "insn-codes.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "tree-pass.h"
#include "ssa.h"
#include "optabs-tree.h"
#include "insn-config.h"
#include "gimple-pretty-print.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "tree-cfg.h"
#include "tree-data-ref.h"
#include "tree-scalar-evolution.h"
#include "tree-inline.h"
#include "case-cfn-macros.h"
#include "gimple-fold.h"
#include "internal-fn.h"
#include "gimple-range.h"
#include "gimple-match.h"
#include "dbgcnt.h"
#include "tree-ssa-propagate.h"
#include "tree-ssa-dce.h"
/* Return the singleton PHI in the SEQ of PHIs for edges E0 and E1.  */

static gphi *
single_non_singleton_phi_for_edges (gimple_seq seq, edge e0, edge e1)
{
  gimple_stmt_iterator i;
  gphi *phi = NULL;
  if (gimple_seq_singleton_p (seq))
    {
      phi = as_a <gphi *> (gsi_stmt (gsi_start (seq)));
      /* Never return virtual phis.  */
      if (virtual_operand_p (gimple_phi_result (phi)))
	return NULL;
      return phi;
    }
  for (i = gsi_start (seq); !gsi_end_p (i); gsi_next (&i))
    {
      gphi *p = as_a <gphi *> (gsi_stmt (i));
      /* If the PHI arguments are equal then we can skip this PHI.  */
      if (operand_equal_for_phi_arg_p (gimple_phi_arg_def (p, e0->dest_idx),
				       gimple_phi_arg_def (p, e1->dest_idx)))
	continue;

      /* Punt on virtual phis with different arguments from the edges.  */
      if (virtual_operand_p (gimple_phi_result (p)))
	return NULL;

      /* If we already have a PHI whose two edge arguments differ, then
	 there is no singleton for these PHIs; return NULL.  */
      if (phi)
	return NULL;

      phi = p;
    }
  return phi;
}
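/* An illustrative (hand-constructed) example for the function above:
   with edges E0 and E1 entering the join block and

     # x_3 = PHI <a_1(E0), a_1(E1)>   <- arguments equal, skipped
     # y_4 = PHI <b_2(E0), c_5(E1)>   <- the single "non-singleton" PHI

   the function returns y_4; had a second PHI also had differing
   arguments for E0/E1, it would return NULL instead.  */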
/* Replace PHI node element whose edge is E in block BB with variable NEW.
   Remove the edge from COND_BLOCK which does not lead to BB (COND_BLOCK
   is known to have two edges, one of which must reach BB).  */

static void
replace_phi_edge_with_variable (basic_block cond_block,
				edge e, gphi *phi, tree new_tree,
				bitmap dce_ssa_names = nullptr)
{
  basic_block bb = gimple_bb (phi);
  gimple_stmt_iterator gsi;
  tree phi_result = PHI_RESULT (phi);
  bool deleteboth = false;

  /* Duplicate range info if it is the only thing setting the target PHI.
     This is needed as later on, the new_tree will be replacing
     the assignment of the PHI.
     For example:
       bb1:
	 _4 = min<a_1, 255>
	 goto bb2

       # RANGE [-INF, 255]
       a_3 = PHI<_4(1)>
       bb3:

       use(a_3)
     And _4 gets propagated into the use of a_3, losing the range info.
     This can't be done for more than 2 incoming edges as the propagation
     won't work.
     The new_tree needs to be defined in the same basic block as the conditional.  */
  if (TREE_CODE (new_tree) == SSA_NAME
      && EDGE_COUNT (gimple_bb (phi)->preds) == 2
      && INTEGRAL_TYPE_P (TREE_TYPE (phi_result))
      && !SSA_NAME_RANGE_INFO (new_tree)
      && SSA_NAME_RANGE_INFO (phi_result)
      && gimple_bb (SSA_NAME_DEF_STMT (new_tree)) == cond_block
      && dbg_cnt (phiopt_edge_range))
    duplicate_ssa_name_range_info (new_tree, phi_result);

  /* Change the PHI argument to new.  */
  SET_USE (PHI_ARG_DEF_PTR (phi, e->dest_idx), new_tree);

  /* Remove the empty basic block.  */
  edge edge_to_remove = NULL, keep_edge = NULL;
  if (EDGE_SUCC (cond_block, 0)->dest == bb)
    {
      edge_to_remove = EDGE_SUCC (cond_block, 1);
      keep_edge = EDGE_SUCC (cond_block, 0);
    }
  else if (EDGE_SUCC (cond_block, 1)->dest == bb)
    {
      edge_to_remove = EDGE_SUCC (cond_block, 0);
      keep_edge = EDGE_SUCC (cond_block, 1);
    }
  else if ((keep_edge = find_edge (cond_block, e->src)))
    {
      basic_block bb1 = EDGE_SUCC (cond_block, 0)->dest;
      basic_block bb2 = EDGE_SUCC (cond_block, 1)->dest;
      if (single_pred_p (bb1) && single_pred_p (bb2)
	  && single_succ_p (bb1) && single_succ_p (bb2)
	  && empty_block_p (bb1) && empty_block_p (bb2))
	deleteboth = true;
    }
  else
    gcc_unreachable ();

  if (edge_to_remove && EDGE_COUNT (edge_to_remove->dest->preds) == 1)
    {
      e->flags |= EDGE_FALLTHRU;
      e->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
      e->probability = profile_probability::always ();
      delete_basic_block (edge_to_remove->dest);

      /* Eliminate the COND_EXPR at the end of COND_BLOCK.  */
      gsi = gsi_last_bb (cond_block);
      gsi_remove (&gsi, true);
    }
  else if (deleteboth)
    {
      basic_block bb1 = EDGE_SUCC (cond_block, 0)->dest;
      basic_block bb2 = EDGE_SUCC (cond_block, 1)->dest;

      edge newedge = redirect_edge_and_branch (keep_edge, bb);

      /* The new edge should be the same.  */
      gcc_assert (newedge == keep_edge);

      keep_edge->flags |= EDGE_FALLTHRU;
      keep_edge->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
      keep_edge->probability = profile_probability::always ();

      /* Copy the edge's phi entry from the old one.  */
      copy_phi_arg_into_existing_phi (e, keep_edge);

      /* Delete the old 2 empty basic blocks.  */
      delete_basic_block (bb1);
      delete_basic_block (bb2);

      /* Eliminate the COND_EXPR at the end of COND_BLOCK.  */
      gsi = gsi_last_bb (cond_block);
      gsi_remove (&gsi, true);
    }
  else
    {
      /* If there are other edges into the middle block make
	 CFG cleanup deal with the edge removal to avoid
	 updating dominators here in a non-trivial way.  */
      gcond *cond = as_a <gcond *> (*gsi_last_bb (cond_block));
      if (keep_edge->flags & EDGE_FALSE_VALUE)
	gimple_cond_make_false (cond);
      else if (keep_edge->flags & EDGE_TRUE_VALUE)
	gimple_cond_make_true (cond);
    }

  if (dce_ssa_names)
    simple_dce_from_worklist (dce_ssa_names);

  statistics_counter_event (cfun, "Replace PHI with variable", 1);

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file,
	     "COND_EXPR in block %d and PHI in block %d converted to straightline code.\n",
	     cond_block->index,
	     bb->index);
}
/* PR66726: Factor operations out of COND_EXPR.  If the arguments of the PHI
   stmt are the result of a conversion (CONVERT_STMT), factor out the
   conversion and perform it on the result of the PHI stmt instead.
   COND_STMT is the controlling predicate.
   Return the newly-created PHI, if any.  */
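/* A hand-written example (names are illustrative, not from any testcase):

     <bb 2>: if (a_5 != 0) goto <bb 3>; else goto <bb 4>;
     <bb 3>: _2 = (int) b_6;
     <bb 4>: # x_1 = PHI <_2(3), 0(2)>

   becomes

     <bb 4>: # _7 = PHI <b_6(3), 0(2)>
	     x_1 = (int) _7;

   i.e. the conversion is factored out of the PHI arguments and applied
   once to the new PHI result, with the constant 0 folded to the type
   of b_6.  */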
static gphi *
factor_out_conditional_operation (edge e0, edge e1, gphi *phi,
				  tree arg0, tree arg1, gimple *cond_stmt)
{
  gimple *arg0_def_stmt = NULL, *arg1_def_stmt = NULL, *new_stmt;
  tree new_arg0 = NULL_TREE, new_arg1 = NULL_TREE;
  tree temp, result;
  gphi *newphi;
  gimple_stmt_iterator gsi, gsi_for_def;
  location_t locus = gimple_location (phi);
  enum tree_code op_code;

  /* Handle only PHI statements with two arguments.  TODO: If all
     other arguments to PHI are INTEGER_CST or if their defining
     statements have the same unary operation, we can handle more
     than two arguments too.  */
  if (gimple_phi_num_args (phi) != 2)
    return NULL;

  /* First canonicalize to simplify tests.  */
  if (TREE_CODE (arg0) != SSA_NAME)
    {
      std::swap (arg0, arg1);
      std::swap (e0, e1);
    }

  if (TREE_CODE (arg0) != SSA_NAME
      || (TREE_CODE (arg1) != SSA_NAME
	  && TREE_CODE (arg1) != INTEGER_CST))
    return NULL;

  /* Check if arg0 is an SSA_NAME and the stmt which defines arg0 is
     an unary operation.  */
  arg0_def_stmt = SSA_NAME_DEF_STMT (arg0);
  if (!is_gimple_assign (arg0_def_stmt)
      || (gimple_assign_rhs_class (arg0_def_stmt) != GIMPLE_UNARY_RHS
	  && gimple_assign_rhs_code (arg0_def_stmt) != VIEW_CONVERT_EXPR))
    return NULL;

  /* Use the RHS as new_arg0.  */
  op_code = gimple_assign_rhs_code (arg0_def_stmt);
  new_arg0 = gimple_assign_rhs1 (arg0_def_stmt);
  if (op_code == VIEW_CONVERT_EXPR)
    {
      new_arg0 = TREE_OPERAND (new_arg0, 0);
      if (!is_gimple_reg_type (TREE_TYPE (new_arg0)))
	return NULL;
    }
  if (TREE_CODE (new_arg0) == SSA_NAME
      && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_arg0))
    return NULL;

  if (TREE_CODE (arg1) == SSA_NAME)
    {
      /* Check if arg1 is an SSA_NAME and the stmt which defines arg1
	 is an unary operation.  */
      arg1_def_stmt = SSA_NAME_DEF_STMT (arg1);
      if (!is_gimple_assign (arg1_def_stmt)
	  || gimple_assign_rhs_code (arg1_def_stmt) != op_code)
	return NULL;

      /* Either arg1_def_stmt or arg0_def_stmt should be conditional.  */
      if (dominated_by_p (CDI_DOMINATORS, gimple_bb (phi), gimple_bb (arg0_def_stmt))
	  && dominated_by_p (CDI_DOMINATORS,
			     gimple_bb (phi), gimple_bb (arg1_def_stmt)))
	return NULL;

      /* Use the RHS as new_arg1.  */
      new_arg1 = gimple_assign_rhs1 (arg1_def_stmt);
      if (op_code == VIEW_CONVERT_EXPR)
	new_arg1 = TREE_OPERAND (new_arg1, 0);
      if (TREE_CODE (new_arg1) == SSA_NAME
	  && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_arg1))
	return NULL;
    }
  else
    {
      /* TODO: handle more than just casts here.  */
      if (!gimple_assign_cast_p (arg0_def_stmt))
	return NULL;

      /* arg0_def_stmt should be conditional.  */
      if (dominated_by_p (CDI_DOMINATORS, gimple_bb (phi), gimple_bb (arg0_def_stmt)))
	return NULL;

      /* If arg1 is an INTEGER_CST, fold it to new type.  */
      if (INTEGRAL_TYPE_P (TREE_TYPE (new_arg0))
	  && (int_fits_type_p (arg1, TREE_TYPE (new_arg0))
	      || (TYPE_PRECISION (TREE_TYPE (new_arg0))
		  == TYPE_PRECISION (TREE_TYPE (arg1)))))
	{
	  if (gimple_assign_cast_p (arg0_def_stmt))
	    {
	      /* For the INTEGER_CST case, we are just moving the
		 conversion from one place to another, which can often
		 hurt as the conversion moves further away from the
		 statement that computes the value.  So, perform this
		 only if new_arg0 is an operand of COND_STMT, or
		 if arg0_def_stmt is the only non-debug stmt in
		 its basic block, because then it is possible this
		 could enable further optimizations (minmax replacement
		 etc.).
		 Note no-op conversions don't have this issue as
		 they will not generate any zero/sign extend in that case.  */
	      if ((TYPE_PRECISION (TREE_TYPE (new_arg0))
		   != TYPE_PRECISION (TREE_TYPE (arg1)))
		  && new_arg0 != gimple_cond_lhs (cond_stmt)
		  && new_arg0 != gimple_cond_rhs (cond_stmt)
		  && gimple_bb (arg0_def_stmt) == e0->src)
		{
		  gsi = gsi_for_stmt (arg0_def_stmt);
		  gsi_prev_nondebug (&gsi);
		  if (!gsi_end_p (gsi))
		    {
		      if (gassign *assign
			    = dyn_cast <gassign *> (gsi_stmt (gsi)))
			{
			  tree lhs = gimple_assign_lhs (assign);
			  enum tree_code ass_code
			    = gimple_assign_rhs_code (assign);
			  if (ass_code != MAX_EXPR && ass_code != MIN_EXPR)
			    return NULL;
			  if (lhs != gimple_assign_rhs1 (arg0_def_stmt))
			    return NULL;
			  gsi_prev_nondebug (&gsi);
			  if (!gsi_end_p (gsi))
			    return NULL;
			}
		      else
			return NULL;
		    }
		  gsi = gsi_for_stmt (arg0_def_stmt);
		  gsi_next_nondebug (&gsi);
		  if (!gsi_end_p (gsi))
		    return NULL;
		}
	      new_arg1 = fold_convert (TREE_TYPE (new_arg0), arg1);

	      /* Drop the overflow that fold_convert might add.  */
	      if (TREE_OVERFLOW (new_arg1))
		new_arg1 = drop_tree_overflow (new_arg1);
	    }
	  else
	    return NULL;
	}
      else
	return NULL;
    }

  /* If arg0/arg1 have > 1 use, then this transformation actually increases
     the number of expressions evaluated at runtime.  */
  if (!has_single_use (arg0)
      || (arg1_def_stmt && !has_single_use (arg1)))
    return NULL;

  /* If types of new_arg0 and new_arg1 are different, bail out.  */
  if (!types_compatible_p (TREE_TYPE (new_arg0), TREE_TYPE (new_arg1)))
    return NULL;

  /* Create a new PHI stmt.  */
  result = PHI_RESULT (phi);
  temp = make_ssa_name (TREE_TYPE (new_arg0), NULL);
  newphi = create_phi_node (temp, gimple_bb (phi));

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "PHI ");
      print_generic_expr (dump_file, gimple_phi_result (phi));
      fprintf (dump_file,
	       " changed to factor operation out from COND_EXPR.\n");
      fprintf (dump_file, "New stmt with OPERATION that defines ");
      print_generic_expr (dump_file, result);
      fprintf (dump_file, ".\n");
    }

  /* Remove the old operation(s) that have a single use.  */
  gsi_for_def = gsi_for_stmt (arg0_def_stmt);
  gsi_remove (&gsi_for_def, true);
  release_defs (arg0_def_stmt);

  if (arg1_def_stmt)
    {
      gsi_for_def = gsi_for_stmt (arg1_def_stmt);
      gsi_remove (&gsi_for_def, true);
      release_defs (arg1_def_stmt);
    }

  add_phi_arg (newphi, new_arg0, e0, locus);
  add_phi_arg (newphi, new_arg1, e1, locus);

  /* Create the operation stmt and insert it.  */
  if (op_code == VIEW_CONVERT_EXPR)
    {
      temp = fold_build1 (VIEW_CONVERT_EXPR, TREE_TYPE (result), temp);
      new_stmt = gimple_build_assign (result, temp);
    }
  else
    new_stmt = gimple_build_assign (result, op_code, temp);
  gsi = gsi_after_labels (gimple_bb (phi));
  gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);

  /* Remove the original PHI stmt.  */
  gsi = gsi_for_stmt (phi);
  gsi_remove (&gsi, true);

  statistics_counter_event (cfun, "factored out operation", 1);

  return newphi;
}
/* Return TRUE if the SEQ/OP pair should be allowed during early phiopt.
   Currently this is to allow MIN/MAX and ABS/NEGATE and constants.  */
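/* For instance (an illustrative sketch), during early phiopt a generated
   sequence like

     _5 = MIN_EXPR <a_1, b_2>;

   with OP being MIN_EXPR <_5, c_3> is accepted, while a sequence ending
   in an arbitrary call or other arithmetic is rejected and left for the
   late phiopt pass.  */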
static bool
phiopt_early_allow (gimple_seq &seq, gimple_match_op &op)
{
  /* Don't allow functions.  */
  if (!op.code.is_tree_code ())
    return false;
  tree_code code = (tree_code) op.code;

  /* For a non-empty sequence, only allow one statement, except for
     MIN/MAX, where up to two statements are allowed, each computing
     a MIN/MAX.  */
  if (!gimple_seq_empty_p (seq))
    {
      if (code == MIN_EXPR || code == MAX_EXPR)
	{
	  if (!gimple_seq_singleton_p (seq))
	    return false;

	  gimple *stmt = gimple_seq_first_stmt (seq);
	  /* Only allow assignments.  */
	  if (!is_gimple_assign (stmt))
	    return false;
	  code = gimple_assign_rhs_code (stmt);
	  return code == MIN_EXPR || code == MAX_EXPR;
	}
      /* Check to make sure op was already a SSA_NAME.  */
      if (code != SSA_NAME)
	return false;

      if (!gimple_seq_singleton_p (seq))
	return false;
      gimple *stmt = gimple_seq_first_stmt (seq);
      /* Only allow assignments.  */
      if (!is_gimple_assign (stmt))
	return false;
      if (gimple_assign_lhs (stmt) != op.ops[0])
	return false;
      code = gimple_assign_rhs_code (stmt);
    }

  switch (code)
    {
    case MIN_EXPR:
    case MAX_EXPR:
    case ABS_EXPR:
    case ABSU_EXPR:
    case NEGATE_EXPR:
    case SSA_NAME:
      return true;
    case INTEGER_CST:
    case REAL_CST:
    case VECTOR_CST:
    case FIXED_CST:
      return true;

    default:
      return false;
    }
}
/* gimple_simplify_phiopt is like gimple_simplify but designed for PHIOPT.
   Return NULL if nothing can be simplified, or the resulting simplified
   value with parts pushed if EARLY_P was true.  Also rejects non allowed
   tree codes if EARLY_P is set.
   Takes the comparison from COMP_STMT and two args, ARG0 and ARG1, and tries
   to simplify CMP ? ARG0 : ARG1.
   Also try to simplify (!CMP) ? ARG1 : ARG0 if the non-inverse failed.  */
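/* As an illustrative sketch: for COMP_STMT "if (a_1 < 0)", ARG0 "-a_1"
   and ARG1 "a_1", this builds the tree "a_1 < 0 ? -a_1 : a_1", which
   match-simplify reduces to ABS_EXPR <a_1>, returned with any needed
   preparation statements pushed onto SEQ.  */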
static tree
gimple_simplify_phiopt (bool early_p, tree type, gimple *comp_stmt,
			tree arg0, tree arg1,
			gimple_seq *seq)
{
  gimple_seq seq1 = NULL;
  enum tree_code comp_code = gimple_cond_code (comp_stmt);
  location_t loc = gimple_location (comp_stmt);
  tree cmp0 = gimple_cond_lhs (comp_stmt);
  tree cmp1 = gimple_cond_rhs (comp_stmt);
  /* To handle special cases like floating point comparison, it is easier and
     less error-prone to build a tree and gimplify it on the fly though it is
     less efficient.
     Don't use fold_build2 here as that might create (bool)a instead of just
     "a != 0".  */
  tree cond = build2_loc (loc, comp_code, boolean_type_node,
			  cmp0, cmp1);

  if (dump_file && (dump_flags & TDF_FOLDING))
    {
      fprintf (dump_file, "\nphiopt match-simplify trying:\n\t");
      print_generic_expr (dump_file, cond);
      fprintf (dump_file, " ? ");
      print_generic_expr (dump_file, arg0);
      fprintf (dump_file, " : ");
      print_generic_expr (dump_file, arg1);
      fprintf (dump_file, "\n");
    }

  gimple_match_op op (gimple_match_cond::UNCOND,
		      COND_EXPR, type, cond, arg0, arg1);

  if (op.resimplify (&seq1, follow_all_ssa_edges))
    {
      bool allowed = !early_p || phiopt_early_allow (seq1, op);
      tree result = maybe_push_res_to_seq (&op, &seq1);
      if (dump_file && (dump_flags & TDF_FOLDING))
	{
	  fprintf (dump_file, "\nphiopt match-simplify back:\n");
	  if (seq1)
	    print_gimple_seq (dump_file, seq1, 0, TDF_VOPS|TDF_MEMSYMS);
	  fprintf (dump_file, "result: ");
	  if (result)
	    print_generic_expr (dump_file, result);
	  else
	    fprintf (dump_file, " (none)");
	  fprintf (dump_file, "\n");
	  if (!allowed)
	    fprintf (dump_file, "rejected because early\n");
	}
      /* Early we want only to allow some generated tree codes.  */
      if (allowed && result)
	{
	  if (loc != UNKNOWN_LOCATION)
	    annotate_all_with_location (seq1, loc);
	  gimple_seq_add_seq_without_update (seq, seq1);
	  return result;
	}
    }
  gimple_seq_discard (seq1);
  seq1 = NULL;

  /* Try the inverted comparison, that is !COMP ? ARG1 : ARG0.  */
  comp_code = invert_tree_comparison (comp_code, HONOR_NANS (cmp0));

  if (comp_code == ERROR_MARK)
    return NULL;

  cond = build2_loc (loc,
		     comp_code, boolean_type_node,
		     cmp0, cmp1);

  if (dump_file && (dump_flags & TDF_FOLDING))
    {
      fprintf (dump_file, "\nphiopt match-simplify trying:\n\t");
      print_generic_expr (dump_file, cond);
      fprintf (dump_file, " ? ");
      print_generic_expr (dump_file, arg1);
      fprintf (dump_file, " : ");
      print_generic_expr (dump_file, arg0);
      fprintf (dump_file, "\n");
    }

  gimple_match_op op1 (gimple_match_cond::UNCOND,
		       COND_EXPR, type, cond, arg1, arg0);

  if (op1.resimplify (&seq1, follow_all_ssa_edges))
    {
      bool allowed = !early_p || phiopt_early_allow (seq1, op1);
      tree result = maybe_push_res_to_seq (&op1, &seq1);
      if (dump_file && (dump_flags & TDF_FOLDING))
	{
	  fprintf (dump_file, "\nphiopt match-simplify back:\n");
	  if (seq1)
	    print_gimple_seq (dump_file, seq1, 0, TDF_VOPS|TDF_MEMSYMS);
	  fprintf (dump_file, "result: ");
	  if (result)
	    print_generic_expr (dump_file, result);
	  else
	    fprintf (dump_file, " (none)");
	  fprintf (dump_file, "\n");
	  if (!allowed)
	    fprintf (dump_file, "rejected because early\n");
	}
      /* Early we want only to allow some generated tree codes.  */
      if (allowed && result)
	{
	  if (loc != UNKNOWN_LOCATION)
	    annotate_all_with_location (seq1, loc);
	  gimple_seq_add_seq_without_update (seq, seq1);
	  return result;
	}
    }
  gimple_seq_discard (seq1);

  return NULL;
}
/* empty_bb_or_one_feeding_into_p returns true if BB was an empty basic
   block, or if it has one cheap preparation statement that feeds into the
   PHI statement, in which case it sets STMT to that statement.  */
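/* An illustrative example: for a middle BB containing only

     _3 = __builtin_popcount (b_2);

   where _3 is used only by the PHI at the join point, the function
   returns true and sets STMT to that statement; a BB whose statement
   could trap or has a VUSE is rejected.  */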
static bool
empty_bb_or_one_feeding_into_p (basic_block bb,
				gimple *phi,
				gimple *&stmt)
{
  stmt = nullptr;
  gimple *stmt_to_move = nullptr;
  tree lhs;

  if (empty_block_p (bb))
    return true;

  if (!single_pred_p (bb))
    return false;

  /* The middle bb cannot have phi nodes as we don't
     move those assignments yet.  */
  if (!gimple_seq_empty_p (phi_nodes (bb)))
    return false;

  gimple_stmt_iterator gsi;

  gsi = gsi_start_nondebug_after_labels_bb (bb);
  while (!gsi_end_p (gsi))
    {
      gimple *s = gsi_stmt (gsi);
      gsi_next_nondebug (&gsi);
      /* Skip over Predict and nop statements.  */
      if (gimple_code (s) == GIMPLE_PREDICT
	  || gimple_code (s) == GIMPLE_NOP)
	continue;
      /* If there is more than one statement, return false.  */
      if (stmt_to_move)
	return false;
      stmt_to_move = s;
    }

  /* The only statement here was a Predict or a nop statement
     so return true.  */
  if (!stmt_to_move)
    return true;

  if (gimple_vuse (stmt_to_move))
    return false;

  if (gimple_could_trap_p (stmt_to_move)
      || gimple_has_side_effects (stmt_to_move))
    return false;

  ssa_op_iter it;
  tree use;
  FOR_EACH_SSA_TREE_OPERAND (use, stmt_to_move, it, SSA_OP_USE)
    if (ssa_name_maybe_undef_p (use))
      return false;

  /* Allow assignments and also some builtin/internal calls.
     As const calls don't match any of the above, yet they could
     still have some side-effects - they could contain
     gimple_could_trap_p statements, like floating point
     exceptions or integer division by zero.  See PR70586.
     FIXME: perhaps gimple_has_side_effects or gimple_could_trap_p
     should handle this.
     Allow some known builtin/internal calls that are known not to
     trap: logical functions (e.g. bswap and bit counting).  */
  if (!is_gimple_assign (stmt_to_move))
    {
      if (!is_gimple_call (stmt_to_move))
	return false;
      combined_fn cfn = gimple_call_combined_fn (stmt_to_move);
      switch (cfn)
	{
	default:
	  return false;
	case CFN_BUILT_IN_BSWAP16:
	case CFN_BUILT_IN_BSWAP32:
	case CFN_BUILT_IN_BSWAP64:
	case CFN_BUILT_IN_BSWAP128:
	CASE_CFN_FFS:
	CASE_CFN_PARITY:
	CASE_CFN_POPCOUNT:
	CASE_CFN_CLZ:
	CASE_CFN_CTZ:
	case CFN_BUILT_IN_CLRSB:
	case CFN_BUILT_IN_CLRSBL:
	case CFN_BUILT_IN_CLRSBLL:
	  lhs = gimple_call_lhs (stmt_to_move);
	  break;
	}
    }
  else
    lhs = gimple_assign_lhs (stmt_to_move);

  gimple *use_stmt;
  use_operand_p use_p;

  /* Allow only a statement which feeds into the other stmt.  */
  if (!lhs || TREE_CODE (lhs) != SSA_NAME
      || !single_imm_use (lhs, &use_p, &use_stmt)
      || use_stmt != phi)
    return false;

  stmt = stmt_to_move;
  return true;
}
/* Move STMT to before GSI and insert its defining
   name into INSERTED_EXPRS bitmap.  */

static void
move_stmt (gimple *stmt, gimple_stmt_iterator *gsi, auto_bitmap &inserted_exprs)
{
  if (!stmt)
    return;
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "statement un-sinked:\n");
      print_gimple_stmt (dump_file, stmt, 0,
			 TDF_VOPS|TDF_MEMSYMS);
    }

  tree name = gimple_get_lhs (stmt);
  // Mark the name to be renamed if there is one.
  bitmap_set_bit (inserted_exprs, SSA_NAME_VERSION (name));
  gimple_stmt_iterator gsi1 = gsi_for_stmt (stmt);
  gsi_move_before (&gsi1, gsi);
  reset_flow_sensitive_info (name);
}
/* RAII style class to temporarily remove flow sensitive
   info from ssa names defined by a gimple statement.  */
class auto_flow_sensitive
{
public:
  auto_flow_sensitive (gimple *s);
  ~auto_flow_sensitive ();
private:
  auto_vec<std::pair<tree, flow_sensitive_info_storage>, 2> stack;
};

/* Constructor for auto_flow_sensitive.  Saves
   off the ssa names' flow sensitive information
   that was defined by gimple statement S and
   resets it to be non-flow based ones.  */

auto_flow_sensitive::auto_flow_sensitive (gimple *s)
{
  if (!s)
    return;
  ssa_op_iter it;
  tree def;
  FOR_EACH_SSA_TREE_OPERAND (def, s, it, SSA_OP_DEF)
    {
      flow_sensitive_info_storage storage;
      storage.save_and_clear (def);
      stack.safe_push (std::make_pair (def, storage));
    }
}

/* Destructor, restores the flow sensitive information
   for the SSA names that had been saved off.  */

auto_flow_sensitive::~auto_flow_sensitive ()
{
  for (auto p : stack)
    p.second.restore (p.first);
}
/* The function match_simplify_replacement does the main work of doing the
   replacement using match and simplify.  Return true if the replacement is
   done.  Otherwise return false.
   BB is the basic block where the replacement is going to be done on.  ARG0
   is argument 0 from PHI.  Likewise for ARG1.  */
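/* A sketch of the overall effect (hand-written GIMPLE):

     <bb 2>: if (a_1 > b_2) goto <bb 4>; else goto <bb 3>;
     <bb 3>: ;; empty middle bb
     <bb 4>: # m_3 = PHI <a_1(2), b_2(3)>

   is replaced by straightline code in bb 2

     m_4 = MAX_EXPR <a_1, b_2>;

   with the PHI argument for the surviving edge rewritten to m_4.  */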
static bool
match_simplify_replacement (basic_block cond_bb, basic_block middle_bb,
			    basic_block middle_bb_alt,
			    edge e0, edge e1, gphi *phi,
			    tree arg0, tree arg1, bool early_p,
			    bool threeway_p)
{
  gimple *stmt;
  gimple_stmt_iterator gsi;
  edge true_edge, false_edge;
  gimple_seq seq = NULL;
  tree result;
  gimple *stmt_to_move = NULL;
  gimple *stmt_to_move_alt = NULL;
  tree arg_true, arg_false;

  /* Special case A ? B : B as this will always simplify to B.  */
  if (operand_equal_for_phi_arg_p (arg0, arg1))
    return false;

  /* If the basic block only has a cheap preparation statement,
     allow it and move it once the transformation is done.  */
  if (!empty_bb_or_one_feeding_into_p (middle_bb, phi, stmt_to_move))
    return false;

  if (threeway_p
      && middle_bb != middle_bb_alt
      && !empty_bb_or_one_feeding_into_p (middle_bb_alt, phi,
					  stmt_to_move_alt))
    return false;

  /* At this point we know we have a GIMPLE_COND with two successors.
     One successor is BB, the other successor is an empty block which
     falls through into BB.

     There is a single PHI node at the join point (BB).

     So, given the condition COND, and the two PHI arguments, match and simplify
     can happen on (COND) ? arg0 : arg1.  */

  stmt = last_nondebug_stmt (cond_bb);

  /* We need to know which is the true edge and which is the false
     edge so that we know when to invert the condition below.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);

  /* Forward the edges over the middle basic block.  */
  if (true_edge->dest == middle_bb)
    true_edge = EDGE_SUCC (true_edge->dest, 0);
  if (false_edge->dest == middle_bb)
    false_edge = EDGE_SUCC (false_edge->dest, 0);

  /* When THREEWAY_P then e1 will point to the edge of the final transition
     from middle-bb to end.  */
  if (true_edge == e0)
    {
      if (!threeway_p)
	gcc_assert (false_edge == e1);
      arg_true = arg0;
      arg_false = arg1;
    }
  else
    {
      gcc_assert (false_edge == e0);
      if (!threeway_p)
	gcc_assert (true_edge == e1);
      arg_true = arg1;
      arg_false = arg0;
    }

  /* Do not make conditional undefs unconditional.  */
  if ((TREE_CODE (arg_true) == SSA_NAME
       && ssa_name_maybe_undef_p (arg_true))
      || (TREE_CODE (arg_false) == SSA_NAME
	  && ssa_name_maybe_undef_p (arg_false)))
    return false;

  tree type = TREE_TYPE (gimple_phi_result (phi));
  {
    auto_flow_sensitive s1 (stmt_to_move);
    auto_flow_sensitive s_alt (stmt_to_move_alt);

    result = gimple_simplify_phiopt (early_p, type, stmt,
				     arg_true, arg_false,
				     &seq);
  }

  if (!result)
    return false;
  if (dump_file && (dump_flags & TDF_FOLDING))
    fprintf (dump_file, "accepted the phiopt match-simplify.\n");

  auto_bitmap exprs_maybe_dce;

  /* Mark the cond statements' lhs/rhs as maybe dce.  */
  if (TREE_CODE (gimple_cond_lhs (stmt)) == SSA_NAME
      && !SSA_NAME_IS_DEFAULT_DEF (gimple_cond_lhs (stmt)))
    bitmap_set_bit (exprs_maybe_dce,
		    SSA_NAME_VERSION (gimple_cond_lhs (stmt)));
  if (TREE_CODE (gimple_cond_rhs (stmt)) == SSA_NAME
      && !SSA_NAME_IS_DEFAULT_DEF (gimple_cond_rhs (stmt)))
    bitmap_set_bit (exprs_maybe_dce,
		    SSA_NAME_VERSION (gimple_cond_rhs (stmt)));

  gsi = gsi_last_bb (cond_bb);
  /* Insert the sequence generated from gimple_simplify_phiopt.  */
  if (seq)
    {
      // Mark the lhs of the new statements maybe for dce.
      gimple_stmt_iterator gsi1 = gsi_start (seq);
      for (; !gsi_end_p (gsi1); gsi_next (&gsi1))
	{
	  gimple *stmt = gsi_stmt (gsi1);
	  tree name = gimple_get_lhs (stmt);
	  if (name && TREE_CODE (name) == SSA_NAME)
	    bitmap_set_bit (exprs_maybe_dce, SSA_NAME_VERSION (name));
	}
      gsi_insert_seq_before (&gsi, seq, GSI_CONTINUE_LINKING);
    }

  /* If there was a statement to move, move it to right before
     the original conditional.  */
  move_stmt (stmt_to_move, &gsi, exprs_maybe_dce);
  move_stmt (stmt_to_move_alt, &gsi, exprs_maybe_dce);

  replace_phi_edge_with_variable (cond_bb, e1, phi, result, exprs_maybe_dce);

  /* Add Statistic here even though replace_phi_edge_with_variable already
     does it as we want to be able to count when match-simplify happens vs
     the others.  */
  statistics_counter_event (cfun, "match-simplify PHI replacement", 1);

  /* Note that we optimized this PHI.  */
  return true;
}
/* Update *ARG which is defined in STMT so that it contains the
   computed value if that seems profitable.  Return true if the
   statement is made dead by that rewriting.  */
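/* For example (illustrative), for

     arg_2 = &MEM[(struct S *)p_1].i;

   where field i is at offset 0, the address computation folds back to
   p_1, so *ARG is rewritten to p_1 and true is returned, making the
   middle-bb statement dead.  */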
static bool
jump_function_from_stmt (tree *arg, gimple *stmt)
{
  enum tree_code code = gimple_assign_rhs_code (stmt);
  if (code == ADDR_EXPR)
    {
      /* For arg = &p->i transform it to p, if possible.  */
      tree rhs1 = gimple_assign_rhs1 (stmt);
      poly_int64 offset;
      tree tem = get_addr_base_and_unit_offset (TREE_OPERAND (rhs1, 0),
						&offset);
      if (tem
	  && TREE_CODE (tem) == MEM_REF
	  && known_eq (mem_ref_offset (tem) + offset, 0))
	{
	  *arg = TREE_OPERAND (tem, 0);
	  return true;
	}
    }
  /* TODO: Much like IPA-CP jump-functions we want to handle constant
     additions symbolically here, and we'd need to update the comparison
     code that compares the arg + cst tuples in our caller.  For now the
     code above exactly handles the VEC_BASE pattern from vec.h.  */
  return false;
}
/* RHS is a source argument in a BIT_AND_EXPR which feeds a conditional
   of the form SSA_NAME NE 0.

   If RHS is fed by a simple EQ_EXPR comparison of two values, see if
   the two input values of the EQ_EXPR match arg0 and arg1.

   If so update *code and return TRUE.  Otherwise return FALSE.  */
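/* Illustrative example: with ARG0 = a_1, ARG1 = b_2 and a conditional

     _3 = a_1 == b_2;
     _5 = _3 & _4;
     if (_5 != 0) ...

   calling this function on RHS = _3 matches the EQ_EXPR feeding the
   BIT_AND_EXPR, sets *CODE to EQ_EXPR and returns true.  */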
static bool
rhs_is_fed_for_value_replacement (const_tree arg0, const_tree arg1,
				  enum tree_code *code, const_tree rhs)
{
  /* Obviously if RHS is not an SSA_NAME, we can't look at the defining
     statement.  */
  if (TREE_CODE (rhs) == SSA_NAME)
    {
      gimple *def1 = SSA_NAME_DEF_STMT (rhs);

      /* Verify the defining statement has an EQ_EXPR on the RHS.  */
      if (is_gimple_assign (def1) && gimple_assign_rhs_code (def1) == EQ_EXPR)
	{
	  /* Finally verify the source operands of the EQ_EXPR are equal
	     to arg0 and arg1.  */
	  tree op0 = gimple_assign_rhs1 (def1);
	  tree op1 = gimple_assign_rhs2 (def1);
	  if ((operand_equal_for_phi_arg_p (arg0, op0)
	       && operand_equal_for_phi_arg_p (arg1, op1))
	      || (operand_equal_for_phi_arg_p (arg0, op1)
		  && operand_equal_for_phi_arg_p (arg1, op0)))
	    {
	      /* We will perform the optimization.  */
	      *code = gimple_assign_rhs_code (def1);
	      return true;
	    }
	}
    }
  return false;
}
/* Return TRUE if arg0/arg1 are equal to the rhs/lhs or lhs/rhs of COND.

   Also return TRUE if arg0/arg1 are equal to the source arguments of an
   EQ comparison feeding a BIT_AND_EXPR which feeds COND.

   Return FALSE otherwise.  */
static bool
operand_equal_for_value_replacement (const_tree arg0, const_tree arg1,
				     enum tree_code *code, gimple *cond)
{
  gimple *def;
  tree lhs = gimple_cond_lhs (cond);
  tree rhs = gimple_cond_rhs (cond);

  if ((operand_equal_for_phi_arg_p (arg0, lhs)
       && operand_equal_for_phi_arg_p (arg1, rhs))
      || (operand_equal_for_phi_arg_p (arg1, lhs)
	  && operand_equal_for_phi_arg_p (arg0, rhs)))
    return true;

  /* Now handle more complex case where we have an EQ comparison
     which feeds a BIT_AND_EXPR which feeds COND.

     First verify that COND is of the form SSA_NAME NE 0.  */
  if (*code != NE_EXPR || !integer_zerop (rhs)
      || TREE_CODE (lhs) != SSA_NAME)
    return false;

  /* Now ensure that SSA_NAME is set by a BIT_AND_EXPR.  */
  def = SSA_NAME_DEF_STMT (lhs);
  if (!is_gimple_assign (def) || gimple_assign_rhs_code (def) != BIT_AND_EXPR)
    return false;

  /* Now verify arg0/arg1 correspond to the source arguments of an
     EQ comparison feeding the BIT_AND_EXPR.  */

  tree tmp = gimple_assign_rhs1 (def);
  if (rhs_is_fed_for_value_replacement (arg0, arg1, code, tmp))
    return true;

  tmp = gimple_assign_rhs2 (def);
  if (rhs_is_fed_for_value_replacement (arg0, arg1, code, tmp))
    return true;

  return false;
}
/* Returns true if ARG is a neutral element for operation CODE
   on the RIGHT side.  */
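/* For example: 0 is the neutral element for PLUS_EXPR on either side
   (x + 0 == 0 + x == x), while for MINUS_EXPR it is neutral only on
   the RIGHT (x - 0 == x, but 0 - x != x in general).  */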
static bool
neutral_element_p (tree_code code, tree arg, bool right)
{
  switch (code)
    {
    case PLUS_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
      return integer_zerop (arg);

    case LROTATE_EXPR:
    case RROTATE_EXPR:
    case LSHIFT_EXPR:
    case RSHIFT_EXPR:
    case MINUS_EXPR:
    case POINTER_PLUS_EXPR:
      return right && integer_zerop (arg);

    case MULT_EXPR:
      return integer_onep (arg);

    case TRUNC_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case EXACT_DIV_EXPR:
      return right && integer_onep (arg);

    case BIT_AND_EXPR:
      return integer_all_onesp (arg);

    default:
      return false;
    }
}
/* Returns true if ARG is an absorbing element for operation CODE.  */
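/* For example: 0 absorbs MULT_EXPR on either side (x * 0 == 0), while
   for shifts it absorbs only on the LEFT (0 << x == 0); for division
   and modulo, 0 / x == 0 additionally requires x to be known nonzero,
   hence the RVAL argument.  */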
static bool
absorbing_element_p (tree_code code, tree arg, bool right, tree rval)
{
  switch (code)
    {
    case BIT_IOR_EXPR:
      return integer_all_onesp (arg);

    case MULT_EXPR:
    case BIT_AND_EXPR:
      return integer_zerop (arg);

    case LSHIFT_EXPR:
    case RSHIFT_EXPR:
    case LROTATE_EXPR:
    case RROTATE_EXPR:
      return !right && integer_zerop (arg);

    case TRUNC_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case EXACT_DIV_EXPR:
    case TRUNC_MOD_EXPR:
    case CEIL_MOD_EXPR:
    case FLOOR_MOD_EXPR:
    case ROUND_MOD_EXPR:
      return (!right
	      && integer_zerop (arg)
	      && tree_single_nonzero_warnv_p (rval, NULL));

    default:
      return false;
    }
}
/* The function value_replacement does the main work of doing the value
   replacement.  Return non-zero if the replacement is done.  Otherwise return
   0.  If we remove the middle basic block, return 2.
   BB is the basic block where the replacement is going to be done on.  ARG0
   is argument 0 from the PHI.  Likewise for ARG1.  */
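/* An illustrative example of the basic transformation:

     if (x_1 != 0) goto <bb 3>; else goto <bb 4>;
     <bb 3>: _5 = x_1 + y_2;
     <bb 4>: # r_6 = PHI <_5(3), y_2(2)>

   Since x_1 + y_2 equals y_2 exactly when x_1 == 0 (0 is the neutral
   element of PLUS_EXPR), the condition can be dropped and r computed
   unconditionally as x_1 + y_2.  */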
static int
value_replacement (basic_block cond_bb, basic_block middle_bb,
		   edge e0, edge e1, gphi *phi, tree arg0, tree arg1)
{
  gimple_stmt_iterator gsi;
  edge true_edge, false_edge;
  enum tree_code code;
  bool empty_or_with_defined_p = true;

  /* If the type says honor signed zeros we cannot do this
     optimization.  */
  if (HONOR_SIGNED_ZEROS (arg1))
    return 0;

  /* If there is a statement in MIDDLE_BB that defines one of the PHI
     arguments, then adjust arg0 or arg1.  */
  gsi = gsi_start_nondebug_after_labels_bb (middle_bb);
  while (!gsi_end_p (gsi))
    {
      gimple *stmt = gsi_stmt (gsi);
      tree lhs;
      gsi_next_nondebug (&gsi);
      if (!is_gimple_assign (stmt))
	{
	  if (gimple_code (stmt) != GIMPLE_PREDICT
	      && gimple_code (stmt) != GIMPLE_NOP)
	    empty_or_with_defined_p = false;
	  continue;
	}
      /* Now try to adjust arg0 or arg1 according to the computation
	 in the statement.  */
      lhs = gimple_assign_lhs (stmt);
      if (!(lhs == arg0
	    && jump_function_from_stmt (&arg0, stmt))
	  || (lhs == arg1
	      && jump_function_from_stmt (&arg1, stmt)))
	empty_or_with_defined_p = false;
    }

  gcond *cond = as_a <gcond *> (*gsi_last_bb (cond_bb));
  code = gimple_cond_code (cond);

  /* This transformation is only valid for equality comparisons.  */
  if (code != NE_EXPR && code != EQ_EXPR)
    return 0;

  /* We need to know which is the true edge and which is the false
     edge so that we know if have abs or negative abs.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);

  /* At this point we know we have a COND_EXPR with two successors.
     One successor is BB, the other successor is an empty block which
     falls through into BB.

     The condition for the COND_EXPR is known to be NE_EXPR or EQ_EXPR.

     There is a single PHI node at the join point (BB) with two arguments.

     We now need to verify that the two arguments in the PHI node match
     the two arguments to the equality comparison.  */

  bool equal_p = operand_equal_for_value_replacement (arg0, arg1, &code, cond);
  bool maybe_equal_p = false;
  if (!equal_p
      && empty_or_with_defined_p
      && TREE_CODE (gimple_cond_rhs (cond)) == INTEGER_CST
      && (operand_equal_for_phi_arg_p (gimple_cond_lhs (cond), arg0)
	  ? TREE_CODE (arg1) == INTEGER_CST
	  : (operand_equal_for_phi_arg_p (gimple_cond_lhs (cond), arg1)
	     && TREE_CODE (arg0) == INTEGER_CST)))
    maybe_equal_p = true;
  if (equal_p || maybe_equal_p)
    {
      edge e;
      tree arg;

      /* For NE_EXPR, we want to build an assignment result = arg where
	 arg is the PHI argument associated with the true edge.  For
	 EQ_EXPR we want the PHI argument associated with the false edge.  */
      e = (code == NE_EXPR ? true_edge : false_edge);

      /* Unfortunately, E may not reach BB (it may instead have gone to
	 OTHER_BLOCK).  If that is the case, then we want the single outgoing
	 edge from OTHER_BLOCK which reaches BB and represents the desired
	 path from COND_BLOCK.  */
      if (e->dest == middle_bb)
	e = single_succ_edge (e->dest);

      /* Now we know the incoming edge to BB that has the argument for the
	 RHS of our new assignment statement.  */
      if (e0 == e)
	arg = arg0;
      else
	arg = arg1;

      /* If the middle basic block was empty or is defining the
	 PHI arguments and this is a single phi where the args are different
	 for the edges e0 and e1 then we can remove the middle basic block.  */
      if (empty_or_with_defined_p
	  && single_non_singleton_phi_for_edges (phi_nodes (gimple_bb (phi)),
						 e0, e1) == phi)
	{
	  use_operand_p use_p;
	  gimple *use_stmt;

	  /* Even if arg0/arg1 isn't equal to second operand of cond, we
	     can optimize away the bb if we can prove it doesn't care whether
	     phi result is arg0/arg1 or second operand of cond.  Consider:
	     <bb 2> [local count: 118111600]:
	     if (i_2(D) == 4)
	       goto <bb 4>; [97.00%]
	     else
	       goto <bb 3>; [3.00%]

	     <bb 3> [local count: 3540129]:

	     <bb 4> [local count: 118111600]:
	     # i_6 = PHI <i_2(D)(3), 6(2)>
	     _3 = i_6 != 0;

	     Here, carg is 4, oarg is 6, crhs is 0, and because
	     (4 != 0) == (6 != 0), we don't care if i_6 is 4 or 6, both
	     have the same outcome.  So, we can optimize this to:
	     _3 = i_2(D) != 0;
	     If the single imm use of phi result is >, >=, < or <=, similarly
	     we can check if both carg and oarg compare the same against
	     crhs using ccode.  */
	  if (maybe_equal_p
	      && TREE_CODE (arg) != INTEGER_CST
	      && single_imm_use (gimple_phi_result (phi), &use_p, &use_stmt))
	    {
	      enum tree_code ccode = ERROR_MARK;
	      tree clhs = NULL_TREE, crhs = NULL_TREE;
	      tree carg = gimple_cond_rhs (cond);
	      tree oarg = e0 == e ? arg1 : arg0;
	      if (is_gimple_assign (use_stmt)
		  && (TREE_CODE_CLASS (gimple_assign_rhs_code (use_stmt))
		      == tcc_comparison))
		{
		  ccode = gimple_assign_rhs_code (use_stmt);
		  clhs = gimple_assign_rhs1 (use_stmt);
		  crhs = gimple_assign_rhs2 (use_stmt);
		}
	      else if (gimple_code (use_stmt) == GIMPLE_COND)
		{
		  ccode = gimple_cond_code (use_stmt);
		  clhs = gimple_cond_lhs (use_stmt);
		  crhs = gimple_cond_rhs (use_stmt);
		}
	      if (ccode != ERROR_MARK
		  && clhs == gimple_phi_result (phi)
		  && TREE_CODE (crhs) == INTEGER_CST)
		switch (ccode)
		  {
		  case EQ_EXPR:
		  case NE_EXPR:
		    if (!tree_int_cst_equal (crhs, carg)
			&& !tree_int_cst_equal (crhs, oarg))
		      equal_p = true;
		    break;
		  case GT_EXPR:
		    if (tree_int_cst_lt (crhs, carg)
			== tree_int_cst_lt (crhs, oarg))
		      equal_p = true;
		    break;
		  case GE_EXPR:
		    if (tree_int_cst_le (crhs, carg)
			== tree_int_cst_le (crhs, oarg))
		      equal_p = true;
		    break;
		  case LT_EXPR:
		    if (tree_int_cst_lt (carg, crhs)
			== tree_int_cst_lt (oarg, crhs))
		      equal_p = true;
		    break;
		  case LE_EXPR:
		    if (tree_int_cst_le (carg, crhs)
			== tree_int_cst_le (oarg, crhs))
		      equal_p = true;
		    break;
		  default:
		    break;
		  }
	      if (equal_p)
		{
		  tree phires = gimple_phi_result (phi);
		  if (SSA_NAME_RANGE_INFO (phires))
		    {
		      /* After the optimization PHI result can have value
			 which it couldn't have previously.  */
		      int_range_max r;
		      if (get_global_range_query ()->range_of_expr (r, phires,
								    phi))
			{
			  wide_int warg = wi::to_wide (carg);
			  int_range<2> tmp (TREE_TYPE (carg), warg, warg);
			  r.union_ (tmp);
			  reset_flow_sensitive_info (phires);
			  set_range_info (phires, r);
			}
		      else
			reset_flow_sensitive_info (phires);
		    }
		}
	      if (equal_p && MAY_HAVE_DEBUG_BIND_STMTS)
		{
		  imm_use_iterator imm_iter;
		  tree phires = gimple_phi_result (phi);
		  tree temp = NULL_TREE;
		  bool reset_p = false;

		  /* Add # DEBUG D#1 => arg != carg ? arg : oarg.  */
		  FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, phires)
		    {
		      if (!is_gimple_debug (use_stmt))
			continue;
		      if (temp == NULL_TREE)
			{
			  if (!single_pred_p (middle_bb)
			      || EDGE_COUNT (gimple_bb (phi)->preds) != 2)
			    {
			      /* But only if middle_bb has a single
				 predecessor and phi bb has two, otherwise
				 we could use a SSA_NAME not usable in that
				 place or wrong-debug.  */
			      reset_p = true;
			      break;
			    }
			  gimple_stmt_iterator gsi
			    = gsi_after_labels (gimple_bb (phi));
			  tree type = TREE_TYPE (phires);
			  temp = build_debug_expr_decl (type);
			  tree t = build2 (NE_EXPR, boolean_type_node,
					   arg, carg);
			  t = build3 (COND_EXPR, type, t, arg, oarg);
			  gimple *g = gimple_build_debug_bind (temp, t, phi);
			  gsi_insert_before (&gsi, g, GSI_SAME_STMT);
			}
		      FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
			replace_exp (use_p, temp);
		      update_stmt (use_stmt);
		    }
		  if (reset_p)
		    reset_debug_uses (phi);
		}
	    }
	  if (equal_p)
	    {
	      replace_phi_edge_with_variable (cond_bb, e1, phi, arg);
	      /* Note that we optimized this PHI.  */
	      return 2;
	    }
	}
      else if (equal_p)
	{
	  if (!single_pred_p (middle_bb))
	    return 0;
	  statistics_counter_event (cfun, "Replace PHI with "
				    "variable/value_replacement", 1);

	  /* Replace the PHI arguments with arg.  */
	  SET_PHI_ARG_DEF (phi, e0->dest_idx, arg);
	  SET_PHI_ARG_DEF (phi, e1->dest_idx, arg);
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "PHI ");
	      print_generic_expr (dump_file, gimple_phi_result (phi));
	      fprintf (dump_file, " reduced for COND_EXPR in block %d to ",
		       cond_bb->index);
	      print_generic_expr (dump_file, arg);
	      fprintf (dump_file, ".\n");
	    }
	  return 1;
	}
      return 0;
    }

  if (!single_pred_p (middle_bb))
    return 0;

  /* Now optimize (x != 0) ? x + y : y to just x + y.  */
  gsi = gsi_last_nondebug_bb (middle_bb);
  if (gsi_end_p (gsi))
    return 0;

  gimple *assign = gsi_stmt (gsi);
  if (!is_gimple_assign (assign)
      || (!INTEGRAL_TYPE_P (TREE_TYPE (arg0))
	  && !POINTER_TYPE_P (TREE_TYPE (arg0))))
    return 0;

  if (gimple_assign_rhs_class (assign) != GIMPLE_BINARY_RHS)
    {
      /* If last stmt of the middle_bb is a conversion, handle it like
	 a preparation statement through constant evaluation with
	 checking for UB.  */
      enum tree_code sc = gimple_assign_rhs_code (assign);
      if (CONVERT_EXPR_CODE_P (sc))
	assign = NULL;
      else
	return 0;
    }

  /* Punt if there are (degenerate) PHIs in middle_bb, there should not be.  */
  if (!gimple_seq_empty_p (phi_nodes (middle_bb)))
    return 0;

  /* Allow up to 2 cheap preparation statements that prepare argument
     for assign, e.g.:
      if (y_4 != 0)
	goto <bb 3>;
      else
	goto <bb 4>;
     <bb 3>:
      _1 = (int) y_4;
      iftmp.0_6 = x_5(D) r<< _1;
     <bb 4>:
      # iftmp.0_2 = PHI <iftmp.0_6(3), x_5(D)(2)>
     or:
      if (y_3(D) == 0)
	goto <bb 4>;
      else
	goto <bb 3>;
     <bb 3>:
      y_4 = y_3(D) & 31;
      _1 = (int) y_4;
      _6 = x_5(D) r<< _1;
     <bb 4>:
      # _2 = PHI <x_5(D)(2), _6(3)>  */
  gimple *prep_stmt[2] = { NULL, NULL };
  int prep_cnt;
  for (prep_cnt = 0; ; prep_cnt++)
    {
      if (prep_cnt || assign)
	gsi_prev_nondebug (&gsi);
      if (gsi_end_p (gsi))
	break;

      gimple *g = gsi_stmt (gsi);
      if (gimple_code (g) == GIMPLE_LABEL)
	break;

      if (prep_cnt == 2 || !is_gimple_assign (g))
	return 0;

      tree lhs = gimple_assign_lhs (g);
      tree rhs1 = gimple_assign_rhs1 (g);
      use_operand_p use_p;
      gimple *use_stmt;
      if (TREE_CODE (lhs) != SSA_NAME
	  || TREE_CODE (rhs1) != SSA_NAME
	  || !INTEGRAL_TYPE_P (TREE_TYPE (lhs))
	  || !INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
	  || !single_imm_use (lhs, &use_p, &use_stmt)
	  || ((prep_cnt || assign)
	      && use_stmt != (prep_cnt ? prep_stmt[prep_cnt - 1] : assign)))
	return 0;
      switch (gimple_assign_rhs_code (g))
	{
	CASE_CONVERT:
	  break;
	case PLUS_EXPR:
	case BIT_AND_EXPR:
	case BIT_IOR_EXPR:
	case BIT_XOR_EXPR:
	  if (TREE_CODE (gimple_assign_rhs2 (g)) != INTEGER_CST)
	    return 0;
	  break;
	default:
	  return 0;
	}
      prep_stmt[prep_cnt] = g;
    }

  /* Only transform if it removes the condition.  */
  if (!single_non_singleton_phi_for_edges (phi_nodes (gimple_bb (phi)), e0, e1))
    return 0;

  /* Size-wise, this is always profitable.  */
  if (optimize_bb_for_speed_p (cond_bb)
      /* The special case is useless if it has a low probability.  */
      && profile_status_for_fn (cfun) != PROFILE_ABSENT
      && EDGE_PRED (middle_bb, 0)->probability < profile_probability::even ()
      /* If assign is cheap, there is no point avoiding it.  */
      && estimate_num_insns_seq (bb_seq (middle_bb), &eni_time_weights)
	 >= 3 * estimate_num_insns (cond, &eni_time_weights))
    return 0;

  tree cond_lhs = gimple_cond_lhs (cond);
  tree cond_rhs = gimple_cond_rhs (cond);

  /* Propagate the cond_rhs constant through preparation stmts,
     make sure UB isn't invoked while doing that.  */
  for (int i = prep_cnt - 1; i >= 0; --i)
    {
      gimple *g = prep_stmt[i];
      tree grhs1 = gimple_assign_rhs1 (g);
      if (!operand_equal_for_phi_arg_p (cond_lhs, grhs1))
	return 0;
      cond_lhs = gimple_assign_lhs (g);
      cond_rhs = fold_convert (TREE_TYPE (grhs1), cond_rhs);
      if (TREE_CODE (cond_rhs) != INTEGER_CST
	  || TREE_OVERFLOW (cond_rhs))
	return 0;
      if (gimple_assign_rhs_class (g) == GIMPLE_BINARY_RHS)
	{
	  cond_rhs = int_const_binop (gimple_assign_rhs_code (g), cond_rhs,
				      gimple_assign_rhs2 (g));
	  if (TREE_OVERFLOW (cond_rhs))
	    return 0;
	}
      cond_rhs = fold_convert (TREE_TYPE (cond_lhs), cond_rhs);
      if (TREE_CODE (cond_rhs) != INTEGER_CST
	  || TREE_OVERFLOW (cond_rhs))
	return 0;
    }

  tree lhs, rhs1, rhs2;
  enum tree_code code_def;
  if (assign)
    {
      lhs = gimple_assign_lhs (assign);
      rhs1 = gimple_assign_rhs1 (assign);
      rhs2 = gimple_assign_rhs2 (assign);
      code_def = gimple_assign_rhs_code (assign);
    }
  else
    {
      gcc_assert (prep_cnt > 0);
      lhs = cond_lhs;
      rhs1 = NULL_TREE;
      rhs2 = NULL_TREE;
      code_def = ERROR_MARK;
    }

  if (((code == NE_EXPR && e1 == false_edge)
       || (code == EQ_EXPR && e1 == true_edge))
      && arg0 == lhs
      && ((assign == NULL
	   && operand_equal_for_phi_arg_p (arg1, cond_rhs))
	  || (assign
	      && arg1 == rhs1
	      && operand_equal_for_phi_arg_p (rhs2, cond_lhs)
	      && neutral_element_p (code_def, cond_rhs, true))
	  || (assign
	      && arg1 == rhs2
	      && operand_equal_for_phi_arg_p (rhs1, cond_lhs)
	      && neutral_element_p (code_def, cond_rhs, false))
	  || (assign
	      && operand_equal_for_phi_arg_p (arg1, cond_rhs)
	      && ((operand_equal_for_phi_arg_p (rhs2, cond_lhs)
		   && absorbing_element_p (code_def, cond_rhs, true, rhs2))
		  || (operand_equal_for_phi_arg_p (rhs1, cond_lhs)
		      && absorbing_element_p (code_def,
					      cond_rhs, false, rhs2))))))
    {
      gsi = gsi_for_stmt (cond);
      /* Moving ASSIGN might change VR of lhs, e.g. when moving u_6
	 def-stmt in:
	 if (n_5 != 0)
	   goto <bb 3>;
	 else
	   goto <bb 4>;

	 <bb 3>:
	 # RANGE [0, 4294967294]
	 u_6 = n_5 + 4294967295;

	 <bb 4>:
	 # u_3 = PHI <u_6(3), 4294967295(2)>  */
      reset_flow_sensitive_info (lhs);
      gimple_stmt_iterator gsi_from;
      for (int i = prep_cnt - 1; i >= 0; --i)
	{
	  tree plhs = gimple_assign_lhs (prep_stmt[i]);
	  reset_flow_sensitive_info (plhs);
	  gsi_from = gsi_for_stmt (prep_stmt[i]);
	  gsi_move_before (&gsi_from, &gsi);
	}
      if (assign)
	{
	  gsi_from = gsi_for_stmt (assign);
	  gsi_move_before (&gsi_from, &gsi);
	}
      replace_phi_edge_with_variable (cond_bb, e1, phi, lhs);
      return 2;
    }

  return 0;
}
/* If VAR is an SSA_NAME that points to a BIT_NOT_EXPR then return the TREE for
   the value being inverted.  */
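/* For example (illustrative): given

     v_2 = ~w_1;

   strip_bit_not (v_2) returns w_1; for anything that is not an
   SSA_NAME defined by a BIT_NOT_EXPR it returns NULL_TREE.  */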
static tree
strip_bit_not (tree var)
{
  if (TREE_CODE (var) != SSA_NAME)
    return NULL_TREE;

  gimple *assign = SSA_NAME_DEF_STMT (var);
  if (gimple_code (assign) != GIMPLE_ASSIGN)
    return NULL_TREE;

  if (gimple_assign_rhs_code (assign) != BIT_NOT_EXPR)
    return NULL_TREE;

  return gimple_assign_rhs1 (assign);
}
/* Invert a MIN to a MAX or a MAX to a MIN expression CODE.  */
static enum tree_code
invert_minmax_code (enum tree_code code)
{
  switch (code)
    {
    case MIN_EXPR:
      return MAX_EXPR;
    case MAX_EXPR:
      return MIN_EXPR;
    default:
      gcc_unreachable ();
    }
}
/* The function minmax_replacement does the main work of doing the minmax
   replacement.  Return true if the replacement is done.  Otherwise return
   false.
   BB is the basic block where the replacement is going to be done on.  ARG0
   is argument 0 from the PHI.  Likewise for ARG1.

   If THREEWAY_P then expect the BB to be laid out in diamond shape with each
   BB containing only a MIN or MAX expression.  */
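/* An illustrative example of the simple (non-diamond) shape:

     if (a_1 < b_2) goto <bb 4>; else goto <bb 3>;
     <bb 3>: ;; empty
     <bb 4>: # r_3 = PHI <a_1(2), b_2(3)>

   is turned into

     r_4 = MIN_EXPR <a_1, b_2>;

   and with THREEWAY_P a diamond where both arms compute a MIN/MAX
   against a common bound is collapsed similarly.  */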
1666 minmax_replacement (basic_block cond_bb
, basic_block middle_bb
, basic_block alt_middle_bb
,
1667 edge e0
, edge e1
, gphi
*phi
, tree arg0
, tree arg1
, bool threeway_p
)
1670 edge true_edge
, false_edge
;
1671 enum tree_code minmax
, ass_code
;
1672 tree smaller
, larger
, arg_true
, arg_false
;
1673 gimple_stmt_iterator gsi
, gsi_from
;
1675 tree type
= TREE_TYPE (PHI_RESULT (phi
));
1677 gcond
*cond
= as_a
<gcond
*> (*gsi_last_bb (cond_bb
));
1678 enum tree_code cmp
= gimple_cond_code (cond
);
1679 tree rhs
= gimple_cond_rhs (cond
);
1681 /* Turn EQ/NE of extreme values to order comparisons. */
1682 if ((cmp
== NE_EXPR
|| cmp
== EQ_EXPR
)
1683 && TREE_CODE (rhs
) == INTEGER_CST
1684 && INTEGRAL_TYPE_P (TREE_TYPE (rhs
)))
1686 if (wi::eq_p (wi::to_wide (rhs
), wi::min_value (TREE_TYPE (rhs
))))
1688 cmp
= (cmp
== EQ_EXPR
) ? LT_EXPR
: GE_EXPR
;
1689 rhs
= wide_int_to_tree (TREE_TYPE (rhs
),
1690 wi::min_value (TREE_TYPE (rhs
)) + 1);
1692 else if (wi::eq_p (wi::to_wide (rhs
), wi::max_value (TREE_TYPE (rhs
))))
1694 cmp
= (cmp
== EQ_EXPR
) ? GT_EXPR
: LE_EXPR
;
1695 rhs
= wide_int_to_tree (TREE_TYPE (rhs
),
1696 wi::max_value (TREE_TYPE (rhs
)) - 1);
1700 /* This transformation is only valid for order comparisons. Record which
1701 operand is smaller/larger if the result of the comparison is true. */
1702 tree alt_smaller
= NULL_TREE
;
1703 tree alt_larger
= NULL_TREE
;
1704 if (cmp
== LT_EXPR
|| cmp
== LE_EXPR
)
1706 smaller
= gimple_cond_lhs (cond
);
1708 /* If we have smaller < CST it is equivalent to smaller <= CST-1.
1709 Likewise smaller <= CST is equivalent to smaller < CST+1. */
1710 if (TREE_CODE (larger
) == INTEGER_CST
1711 && INTEGRAL_TYPE_P (TREE_TYPE (larger
)))
1715 wi::overflow_type overflow
;
1716 wide_int alt
= wi::sub (wi::to_wide (larger
), 1,
1717 TYPE_SIGN (TREE_TYPE (larger
)),
1720 alt_larger
= wide_int_to_tree (TREE_TYPE (larger
), alt
);
1724 wi::overflow_type overflow
;
1725 wide_int alt
= wi::add (wi::to_wide (larger
), 1,
1726 TYPE_SIGN (TREE_TYPE (larger
)),
1729 alt_larger
= wide_int_to_tree (TREE_TYPE (larger
), alt
);
1733 else if (cmp
== GT_EXPR
|| cmp
== GE_EXPR
)
1736 larger
= gimple_cond_lhs (cond
);
1737 /* If we have larger > CST it is equivalent to larger >= CST+1.
1738 Likewise larger >= CST is equivalent to larger > CST-1. */
1739 if (TREE_CODE (smaller
) == INTEGER_CST
1740 && INTEGRAL_TYPE_P (TREE_TYPE (smaller
)))
1742 wi::overflow_type overflow
;
1745 wide_int alt
= wi::add (wi::to_wide (smaller
), 1,
1746 TYPE_SIGN (TREE_TYPE (smaller
)),
1749 alt_smaller
= wide_int_to_tree (TREE_TYPE (smaller
), alt
);
1753 wide_int alt
= wi::sub (wi::to_wide (smaller
), 1,
1754 TYPE_SIGN (TREE_TYPE (smaller
)),
1757 alt_smaller
= wide_int_to_tree (TREE_TYPE (smaller
), alt
);
1764 /* Handle the special case of (signed_type)x < 0 being equivalent
1765 to x > MAX_VAL(signed_type) and (signed_type)x >= 0 equivalent
1766 to x <= MAX_VAL(signed_type). */
1767 if ((cmp
== GE_EXPR
|| cmp
== LT_EXPR
)
1768 && INTEGRAL_TYPE_P (type
)
1769 && TYPE_UNSIGNED (type
)
1770 && integer_zerop (rhs
))
1772 tree op
= gimple_cond_lhs (cond
);
1773 if (TREE_CODE (op
) == SSA_NAME
1774 && INTEGRAL_TYPE_P (TREE_TYPE (op
))
1775 && !TYPE_UNSIGNED (TREE_TYPE (op
)))
1777 gimple
*def_stmt
= SSA_NAME_DEF_STMT (op
);
1778 if (gimple_assign_cast_p (def_stmt
))
1780 tree op1
= gimple_assign_rhs1 (def_stmt
);
1781 if (INTEGRAL_TYPE_P (TREE_TYPE (op1
))
1782 && TYPE_UNSIGNED (TREE_TYPE (op1
))
1783 && (TYPE_PRECISION (TREE_TYPE (op
))
1784 == TYPE_PRECISION (TREE_TYPE (op1
)))
1785 && useless_type_conversion_p (type
, TREE_TYPE (op1
)))
1787 wide_int w1
= wi::max_value (TREE_TYPE (op
));
1788 wide_int w2
= wi::add (w1
, 1);
1792 smaller
= wide_int_to_tree (TREE_TYPE (op1
), w1
);
1793 alt_smaller
= wide_int_to_tree (TREE_TYPE (op1
), w2
);
1794 alt_larger
= NULL_TREE
;
1799 larger
= wide_int_to_tree (TREE_TYPE (op1
), w1
);
1800 alt_larger
= wide_int_to_tree (TREE_TYPE (op1
), w2
);
1801 alt_smaller
= NULL_TREE
;
1808 /* We need to know which is the true edge and which is the false
1809 edge so that we know if have abs or negative abs. */
1810 extract_true_false_edges_from_block (cond_bb
, &true_edge
, &false_edge
);
1812 /* Forward the edges over the middle basic block. */
1813 if (true_edge
->dest
== middle_bb
)
1814 true_edge
= EDGE_SUCC (true_edge
->dest
, 0);
1815 if (false_edge
->dest
== middle_bb
)
1816 false_edge
= EDGE_SUCC (false_edge
->dest
, 0);
1818 /* When THREEWAY_P then e1 will point to the edge of the final transition
1819 from middle-bb to end. */
1820 if (true_edge
== e0
)
1823 gcc_assert (false_edge
== e1
);
1829 gcc_assert (false_edge
== e0
);
1831 gcc_assert (true_edge
== e1
);
1836 if (empty_block_p (middle_bb
)
1838 || empty_block_p (alt_middle_bb
)))
1840 if ((operand_equal_for_phi_arg_p (arg_true
, smaller
)
1842 && operand_equal_for_phi_arg_p (arg_true
, alt_smaller
)))
1843 && (operand_equal_for_phi_arg_p (arg_false
, larger
)
1845 && operand_equal_for_phi_arg_p (arg_true
, alt_larger
))))
1849 if (smaller < larger)
1855 else if ((operand_equal_for_phi_arg_p (arg_false
, smaller
)
1857 && operand_equal_for_phi_arg_p (arg_false
, alt_smaller
)))
1858 && (operand_equal_for_phi_arg_p (arg_true
, larger
)
1860 && operand_equal_for_phi_arg_p (arg_true
, alt_larger
))))
1865 else if (HONOR_NANS (type
) || HONOR_SIGNED_ZEROS (type
))
1866 /* The optimization may be unsafe due to NaNs. */
1868 else if (middle_bb
!= alt_middle_bb
&& threeway_p
)
1870 /* Recognize the following case:
1872 if (smaller < larger)
1873 a = MIN (smaller, c);
1875 b = MIN (larger, c);
1878 This is equivalent to
1880 a = MIN (smaller, c);
1881 x = MIN (larger, a); */
1883 gimple
*assign
= last_and_only_stmt (middle_bb
);
1884 tree lhs
, op0
, op1
, bound
;
1885 tree alt_lhs
, alt_op0
, alt_op1
;
1886 bool invert
= false;
1888 /* When THREEWAY_P then e1 will point to the edge of the final transition
1889 from middle-bb to end. */
1890 if (true_edge
== e0
)
1891 gcc_assert (false_edge
== EDGE_PRED (e1
->src
, 0));
1893 gcc_assert (true_edge
== EDGE_PRED (e1
->src
, 0));
1895 bool valid_minmax_p
= false;
1896 gimple_stmt_iterator it1
1897 = gsi_start_nondebug_after_labels_bb (middle_bb
);
1898 gimple_stmt_iterator it2
1899 = gsi_start_nondebug_after_labels_bb (alt_middle_bb
);
1900 if (gsi_one_nondebug_before_end_p (it1
)
1901 && gsi_one_nondebug_before_end_p (it2
))
1903 gimple
*stmt1
= gsi_stmt (it1
);
1904 gimple
*stmt2
= gsi_stmt (it2
);
1905 if (is_gimple_assign (stmt1
) && is_gimple_assign (stmt2
))
1907 enum tree_code code1
= gimple_assign_rhs_code (stmt1
);
1908 enum tree_code code2
= gimple_assign_rhs_code (stmt2
);
1909 valid_minmax_p
= (code1
== MIN_EXPR
|| code1
== MAX_EXPR
)
1910 && (code2
== MIN_EXPR
|| code2
== MAX_EXPR
);
1914 if (!valid_minmax_p
)
1918 || gimple_code (assign
) != GIMPLE_ASSIGN
)
1921 lhs
= gimple_assign_lhs (assign
);
1922 ass_code
= gimple_assign_rhs_code (assign
);
1923 if (ass_code
!= MAX_EXPR
&& ass_code
!= MIN_EXPR
)
1926 op0
= gimple_assign_rhs1 (assign
);
1927 op1
= gimple_assign_rhs2 (assign
);
1929 assign
= last_and_only_stmt (alt_middle_bb
);
1931 || gimple_code (assign
) != GIMPLE_ASSIGN
)
1934 alt_lhs
= gimple_assign_lhs (assign
);
1935 if (ass_code
!= gimple_assign_rhs_code (assign
))
1938 if (!operand_equal_for_phi_arg_p (lhs
, arg_true
)
1939 || !operand_equal_for_phi_arg_p (alt_lhs
, arg_false
))
1942 alt_op0
= gimple_assign_rhs1 (assign
);
1943 alt_op1
= gimple_assign_rhs2 (assign
);
1945 if ((operand_equal_for_phi_arg_p (op0
, smaller
)
1947 && operand_equal_for_phi_arg_p (op0
, alt_smaller
)))
1948 && (operand_equal_for_phi_arg_p (alt_op0
, larger
)
1950 && operand_equal_for_phi_arg_p (alt_op0
, alt_larger
))))
1952 /* We got here if the condition is true, i.e., SMALLER < LARGER. */
1953 if (!operand_equal_for_phi_arg_p (op1
, alt_op1
))
1956 if ((arg0
= strip_bit_not (op0
)) != NULL
1957 && (arg1
= strip_bit_not (alt_op0
)) != NULL
1958 && (bound
= strip_bit_not (op1
)) != NULL
)
1961 ass_code
= invert_minmax_code (ass_code
);
1972 else if ((operand_equal_for_phi_arg_p (op0
, larger
)
1974 && operand_equal_for_phi_arg_p (op0
, alt_larger
)))
1975 && (operand_equal_for_phi_arg_p (alt_op0
, smaller
)
1977 && operand_equal_for_phi_arg_p (alt_op0
, alt_smaller
))))
1979 /* We got here if the condition is true, i.e., SMALLER > LARGER. */
1980 if (!operand_equal_for_phi_arg_p (op1
, alt_op1
))
1983 if ((arg0
= strip_bit_not (op0
)) != NULL
1984 && (arg1
= strip_bit_not (alt_op0
)) != NULL
1985 && (bound
= strip_bit_not (op1
)) != NULL
)
1988 ass_code
= invert_minmax_code (ass_code
);
2002 /* Emit the statement to compute min/max. */
2003 location_t locus
= gimple_location (last_nondebug_stmt (cond_bb
));
2004 gimple_seq stmts
= NULL
;
2005 tree phi_result
= PHI_RESULT (phi
);
2006 result
= gimple_build (&stmts
, locus
, minmax
, TREE_TYPE (phi_result
),
2008 result
= gimple_build (&stmts
, locus
, ass_code
, TREE_TYPE (phi_result
),
2011 result
= gimple_build (&stmts
, locus
, BIT_NOT_EXPR
, TREE_TYPE (phi_result
),
2014 gsi
= gsi_last_bb (cond_bb
);
2015 gsi_insert_seq_before (&gsi
, stmts
, GSI_NEW_STMT
);
2017 replace_phi_edge_with_variable (cond_bb
, e1
, phi
, result
);
  else if (!threeway_p
	   || empty_block_p (alt_middle_bb))
    {
      /* Recognize the following case, assuming d <= u:

	 if (a <= u)
	   b = MAX (a, d);
	 x = PHI <b, u>

	 This is equivalent to

	 b = MAX (a, d);
	 x = MIN (b, u);  */

      gimple *assign = last_and_only_stmt (middle_bb);
      tree lhs, op0, op1, bound;

      if (!single_pred_p (middle_bb))
	return false;

      if (!assign
	  || gimple_code (assign) != GIMPLE_ASSIGN)
	return false;

      lhs = gimple_assign_lhs (assign);
      ass_code = gimple_assign_rhs_code (assign);
      if (ass_code != MAX_EXPR && ass_code != MIN_EXPR)
	return false;
      op0 = gimple_assign_rhs1 (assign);
      op1 = gimple_assign_rhs2 (assign);

      if (true_edge->src == middle_bb)
	{
	  /* We got here if the condition is true, i.e., SMALLER < LARGER.  */
	  if (!operand_equal_for_phi_arg_p (lhs, arg_true))
	    return false;

	  if (operand_equal_for_phi_arg_p (arg_false, larger)
	      || (alt_larger
		  && operand_equal_for_phi_arg_p (arg_false, alt_larger)))
	    {
	      /* Case

		 if (smaller < larger)
		   {
		     r' = MAX_EXPR (smaller, bound)
		   }
		 r = PHI <r', larger>  --> to be turned to MIN_EXPR.  */
	      if (ass_code != MAX_EXPR)
		return false;

	      minmax = MIN_EXPR;
	      if (operand_equal_for_phi_arg_p (op0, smaller)
		  || (alt_smaller
		      && operand_equal_for_phi_arg_p (op0, alt_smaller)))
		bound = op1;
	      else if (operand_equal_for_phi_arg_p (op1, smaller)
		       || (alt_smaller
			   && operand_equal_for_phi_arg_p (op1, alt_smaller)))
		bound = op0;
	      else
		return false;

	      /* We need BOUND <= LARGER.  */
	      if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
						  bound, larger)))
		return false;
	    }
	  else if (operand_equal_for_phi_arg_p (arg_false, smaller)
		   || (alt_smaller
		       && operand_equal_for_phi_arg_p (arg_false,
						       alt_smaller)))
	    {
	      /* Case

		 if (smaller < larger)
		   {
		     r' = MIN_EXPR (larger, bound)
		   }
		 r = PHI <r', smaller>  --> to be turned to MAX_EXPR.  */
	      if (ass_code != MIN_EXPR)
		return false;

	      minmax = MAX_EXPR;
	      if (operand_equal_for_phi_arg_p (op0, larger)
		  || (alt_larger
		      && operand_equal_for_phi_arg_p (op0, alt_larger)))
		bound = op1;
	      else if (operand_equal_for_phi_arg_p (op1, larger)
		       || (alt_larger
			   && operand_equal_for_phi_arg_p (op1, alt_larger)))
		bound = op0;
	      else
		return false;

	      /* We need BOUND >= SMALLER.  */
	      if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
						  bound, smaller)))
		return false;
	    }
	  else
	    return false;
	}
      else
	{
	  /* We got here if the condition is false, i.e., SMALLER > LARGER.  */
	  if (!operand_equal_for_phi_arg_p (lhs, arg_false))
	    return false;

	  if (operand_equal_for_phi_arg_p (arg_true, larger)
	      || (alt_larger
		  && operand_equal_for_phi_arg_p (arg_true, alt_larger)))
	    {
	      /* Case

		 if (smaller > larger)
		   {
		     r' = MIN_EXPR (smaller, bound)
		   }
		 r = PHI <r', larger>  --> to be turned to MAX_EXPR.  */
	      if (ass_code != MIN_EXPR)
		return false;

	      minmax = MAX_EXPR;
	      if (operand_equal_for_phi_arg_p (op0, smaller)
		  || (alt_smaller
		      && operand_equal_for_phi_arg_p (op0, alt_smaller)))
		bound = op1;
	      else if (operand_equal_for_phi_arg_p (op1, smaller)
		       || (alt_smaller
			   && operand_equal_for_phi_arg_p (op1, alt_smaller)))
		bound = op0;
	      else
		return false;

	      /* We need BOUND >= LARGER.  */
	      if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
						  bound, larger)))
		return false;
	    }
	  else if (operand_equal_for_phi_arg_p (arg_true, smaller)
		   || (alt_smaller
		       && operand_equal_for_phi_arg_p (arg_true,
						       alt_smaller)))
	    {
	      /* Case

		 if (smaller > larger)
		   {
		     r' = MAX_EXPR (larger, bound)
		   }
		 r = PHI <r', smaller>  --> to be turned to MIN_EXPR.  */
	      if (ass_code != MAX_EXPR)
		return false;

	      minmax = MIN_EXPR;
	      if (operand_equal_for_phi_arg_p (op0, larger))
		bound = op1;
	      else if (operand_equal_for_phi_arg_p (op1, larger))
		bound = op0;
	      else
		return false;

	      /* We need BOUND <= SMALLER.  */
	      if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
						  bound, smaller)))
		return false;
	    }
	  else
	    return false;
	}

      /* Move the statement from the middle block.  */
      gsi = gsi_last_bb (cond_bb);
      gsi_from = gsi_last_nondebug_bb (middle_bb);
      reset_flow_sensitive_info (SINGLE_SSA_TREE_OPERAND (gsi_stmt (gsi_from),
							  SSA_OP_DEF));
      gsi_move_before (&gsi_from, &gsi);
    }
  else
    return false;
  /* Emit the statement to compute min/max.  */
  gimple_seq stmts = NULL;
  tree phi_result = PHI_RESULT (phi);

  /* When we can't use a MIN/MAX_EXPR still make sure the expression
     stays in a form to be recognized by ISA that map to IEEE x > y ? x : y
     semantics (that's not IEEE max semantics).  */
  if (HONOR_NANS (type) || HONOR_SIGNED_ZEROS (type))
    {
      result = gimple_build (&stmts, cmp, boolean_type_node,
			     gimple_cond_lhs (cond), rhs);
      result = gimple_build (&stmts, COND_EXPR, TREE_TYPE (phi_result),
			     result, arg_true, arg_false);
    }
  else
    result = gimple_build (&stmts, minmax, TREE_TYPE (phi_result),
			   arg0, arg1);

  gsi = gsi_last_bb (cond_bb);
  gsi_insert_seq_before (&gsi, stmts, GSI_NEW_STMT);

  replace_phi_edge_with_variable (cond_bb, e1, phi, result);

  return true;
}
/* Attempt to optimize (x <=> y) cmp 0 and similar comparisons.
   For strong ordering <=> try to match something like:
    <bb 2> :  // cond3_bb (== cond2_bb)
    if (x_4(D) != y_5(D))
      goto <bb 3>;
    else
      goto <bb 6>;

    <bb 3> :  // cond_bb
    if (x_4(D) < y_5(D))
      goto <bb 6>;
    else
      goto <bb 4>;

    <bb 4> :  // middle_bb

    <bb 6> :  // phi_bb
    # iftmp.0_2 = PHI <1(4), 0(2), -1(3)>
    _1 = iftmp.0_2 == 0;

   and for partial ordering <=> something like:

    <bb 2> :  // cond3_bb
    if (a_3(D) == b_5(D))
      goto <bb 6>; [50.00%]
    else
      goto <bb 3>; [50.00%]

    <bb 3> [local count: 536870913]:  // cond2_bb
    if (a_3(D) < b_5(D))
      goto <bb 6>; [50.00%]
    else
      goto <bb 4>; [50.00%]

    <bb 4> [local count: 268435456]:  // cond_bb
    if (a_3(D) > b_5(D))
      goto <bb 6>; [50.00%]
    else
      goto <bb 5>; [50.00%]

    <bb 5> [local count: 134217728]:  // middle_bb

    <bb 6> [local count: 1073741824]:  // phi_bb
    # SR.27_4 = PHI <0(2), -1(3), 1(4), 2(5)>
    _2 = SR.27_4 > 0;  */
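/* A source-level sketch of what this achieves (illustrative only, not
   taken from the testsuite): for C++20 code like

     auto res = x <=> y;   // materialized as a PHI of {-1, 0, 1}, or
			   // {-1, 0, 1, 2} for partial ordering
     bool b = res == 0;    // or a <, <=, >, >= test against -1/0/1

   the comparison of the PHI result against a small constant is rewritten
   to compare the original operands directly, here `b = (x == y)`, which
   lets the three-way comparison and its control flow be removed.  */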
static bool
spaceship_replacement (basic_block cond_bb, basic_block middle_bb,
		       edge e0, edge e1, gphi *phi,
		       tree arg0, tree arg1)
{
  tree phires = PHI_RESULT (phi);
  if (!INTEGRAL_TYPE_P (TREE_TYPE (phires))
      || TYPE_UNSIGNED (TREE_TYPE (phires))
      || !tree_fits_shwi_p (arg0)
      || !tree_fits_shwi_p (arg1)
      || !IN_RANGE (tree_to_shwi (arg0), -1, 2)
      || !IN_RANGE (tree_to_shwi (arg1), -1, 2))
    return false;

  basic_block phi_bb = gimple_bb (phi);
  gcc_assert (phi_bb == e0->dest && phi_bb == e1->dest);
  if (!IN_RANGE (EDGE_COUNT (phi_bb->preds), 3, 4))
    return false;

  use_operand_p use_p;
  gimple *use_stmt;
  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (phires))
    return false;
  if (!single_imm_use (phires, &use_p, &use_stmt))
    return false;

  gimple *orig_use_stmt = use_stmt;
  tree orig_use_lhs = NULL_TREE;
  int prec = TYPE_PRECISION (TREE_TYPE (phires));
  bool is_cast = false;

  /* Deal with the case when match.pd has rewritten the (res & ~1) == 0
     into res <= 1 and has left a type-cast for signed types.  */
  if (gimple_assign_cast_p (use_stmt))
    {
      orig_use_lhs = gimple_assign_lhs (use_stmt);
      /* match.pd would have only done this for a signed type,
	 so the conversion must be to an unsigned one.  */
      tree ty1 = TREE_TYPE (gimple_assign_rhs1 (use_stmt));
      tree ty2 = TREE_TYPE (orig_use_lhs);

      if (!TYPE_UNSIGNED (ty2) || !INTEGRAL_TYPE_P (ty2))
	return false;
      if (TYPE_PRECISION (ty1) > TYPE_PRECISION (ty2))
	return false;
      if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (orig_use_lhs))
	return false;
      if (!single_imm_use (orig_use_lhs, &use_p, &use_stmt))
	return false;

      is_cast = true;
    }
  else if (is_gimple_assign (use_stmt)
	   && gimple_assign_rhs_code (use_stmt) == BIT_AND_EXPR
	   && TREE_CODE (gimple_assign_rhs2 (use_stmt)) == INTEGER_CST
	   && (wi::to_wide (gimple_assign_rhs2 (use_stmt))
	       == wi::shifted_mask (1, prec - 1, false, prec)))
    {
      /* For partial_ordering result operator>= with unspec as second
	 argument is (res & 1) == res, folded by match.pd into
	 (res & ~1) == 0.  */
      orig_use_lhs = gimple_assign_lhs (use_stmt);
      if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (orig_use_lhs))
	return false;
      if (!single_imm_use (orig_use_lhs, &use_p, &use_stmt))
	return false;
    }

  enum tree_code cmp;
  tree lhs, rhs;
  if (gimple_code (use_stmt) == GIMPLE_COND)
    {
      cmp = gimple_cond_code (use_stmt);
      lhs = gimple_cond_lhs (use_stmt);
      rhs = gimple_cond_rhs (use_stmt);
    }
  else if (is_gimple_assign (use_stmt))
    {
      if (gimple_assign_rhs_class (use_stmt) == GIMPLE_BINARY_RHS)
	{
	  cmp = gimple_assign_rhs_code (use_stmt);
	  lhs = gimple_assign_rhs1 (use_stmt);
	  rhs = gimple_assign_rhs2 (use_stmt);
	}
      else if (gimple_assign_rhs_code (use_stmt) == COND_EXPR)
	{
	  tree cond = gimple_assign_rhs1 (use_stmt);
	  if (!COMPARISON_CLASS_P (cond))
	    return false;
	  cmp = TREE_CODE (cond);
	  lhs = TREE_OPERAND (cond, 0);
	  rhs = TREE_OPERAND (cond, 1);
	}
      else
	return false;
    }
  else
    return false;

  switch (cmp)
    {
    case EQ_EXPR:
    case NE_EXPR:
    case LT_EXPR:
    case GT_EXPR:
    case LE_EXPR:
    case GE_EXPR:
      break;
    default:
      return false;
    }

  if (lhs != (orig_use_lhs ? orig_use_lhs : phires)
      || !tree_fits_shwi_p (rhs)
      || !IN_RANGE (tree_to_shwi (rhs), -1, 1))
    return false;

  if (is_cast)
    {
      if (TREE_CODE (rhs) != INTEGER_CST)
	return false;
      /* As for -ffast-math we assume the 2 return to be
	 impossible, canonicalize (unsigned) res <= 1U or
	 (unsigned) res < 2U into res >= 0 and (unsigned) res > 1U
	 or (unsigned) res >= 2U as res < 0.  */
      switch (cmp)
	{
	case LE_EXPR:
	  if (!integer_onep (rhs))
	    return false;
	  cmp = GE_EXPR;
	  break;
	case LT_EXPR:
	  if (wi::ne_p (wi::to_widest (rhs), 2))
	    return false;
	  cmp = GE_EXPR;
	  break;
	case GT_EXPR:
	  if (!integer_onep (rhs))
	    return false;
	  cmp = LT_EXPR;
	  break;
	case GE_EXPR:
	  if (wi::ne_p (wi::to_widest (rhs), 2))
	    return false;
	  cmp = LT_EXPR;
	  break;
	default:
	  return false;
	}
      rhs = build_zero_cst (TREE_TYPE (phires));
    }
  else if (orig_use_lhs)
    {
      if ((cmp != EQ_EXPR && cmp != NE_EXPR) || !integer_zerop (rhs))
	return false;
      /* As for -ffast-math we assume the 2 return to be
	 impossible, canonicalize (res & ~1) == 0 into
	 res >= 0 and (res & ~1) != 0 as res < 0.  */
      cmp = cmp == EQ_EXPR ? GE_EXPR : LT_EXPR;
    }

  if (!empty_block_p (middle_bb))
    return false;

  gcond *cond1 = as_a <gcond *> (*gsi_last_bb (cond_bb));
  enum tree_code cmp1 = gimple_cond_code (cond1);
  switch (cmp1)
    {
    case LT_EXPR:
    case LE_EXPR:
    case GT_EXPR:
    case GE_EXPR:
      break;
    default:
      return false;
    }
  tree lhs1 = gimple_cond_lhs (cond1);
  tree rhs1 = gimple_cond_rhs (cond1);
  /* The optimization may be unsafe due to NaNs.  */
  if (HONOR_NANS (TREE_TYPE (lhs1)))
    return false;
  if (TREE_CODE (lhs1) == SSA_NAME && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs1))
    return false;
  if (TREE_CODE (rhs1) == SSA_NAME && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs1))
    return false;

  if (!single_pred_p (cond_bb) || !cond_only_block_p (cond_bb))
    return false;

  basic_block cond2_bb = single_pred (cond_bb);
  if (EDGE_COUNT (cond2_bb->succs) != 2)
    return false;
  edge cond2_phi_edge;
  if (EDGE_SUCC (cond2_bb, 0)->dest == cond_bb)
    {
      if (EDGE_SUCC (cond2_bb, 1)->dest != phi_bb)
	return false;
      cond2_phi_edge = EDGE_SUCC (cond2_bb, 1);
    }
  else if (EDGE_SUCC (cond2_bb, 0)->dest != phi_bb)
    return false;
  else
    cond2_phi_edge = EDGE_SUCC (cond2_bb, 0);
  tree arg2 = gimple_phi_arg_def (phi, cond2_phi_edge->dest_idx);
  if (!tree_fits_shwi_p (arg2))
    return false;
  gcond *cond2 = safe_dyn_cast <gcond *> (*gsi_last_bb (cond2_bb));
  if (!cond2)
    return false;
  enum tree_code cmp2 = gimple_cond_code (cond2);
  tree lhs2 = gimple_cond_lhs (cond2);
  tree rhs2 = gimple_cond_rhs (cond2);
  if (lhs2 == lhs1)
    {
      if (!operand_equal_p (rhs2, rhs1, 0))
	{
	  if ((cmp2 == EQ_EXPR || cmp2 == NE_EXPR)
	      && TREE_CODE (rhs1) == INTEGER_CST
	      && TREE_CODE (rhs2) == INTEGER_CST)
	    {
	      /* For integers, we can have cond2 x == 5
		 and cond1 x < 5, x <= 4, x <= 5, x < 6,
		 x > 5, x >= 6, x >= 5 or x > 4.  */
	      if (tree_int_cst_lt (rhs1, rhs2))
		{
		  if (wi::ne_p (wi::to_wide (rhs1) + 1, wi::to_wide (rhs2)))
		    return false;
		  if (cmp1 == LE_EXPR)
		    cmp1 = LT_EXPR;
		  else if (cmp1 == GT_EXPR)
		    cmp1 = GE_EXPR;
		  else
		    return false;
		}
	      else
		{
		  gcc_checking_assert (tree_int_cst_lt (rhs2, rhs1));
		  if (wi::ne_p (wi::to_wide (rhs2) + 1, wi::to_wide (rhs1)))
		    return false;
		  if (cmp1 == LT_EXPR)
		    cmp1 = LE_EXPR;
		  else if (cmp1 == GE_EXPR)
		    cmp1 = GT_EXPR;
		  else
		    return false;
		}
	      rhs1 = rhs2;
	    }
	  else
	    return false;
	}
    }
  else if (lhs2 == rhs1)
    {
      if (rhs2 != lhs1)
	return false;
    }
  else
    return false;

  tree arg3 = arg2;
  basic_block cond3_bb = cond2_bb;
  edge cond3_phi_edge = cond2_phi_edge;
  gcond *cond3 = cond2;
  enum tree_code cmp3 = cmp2;
  tree lhs3 = lhs2;
  tree rhs3 = rhs2;
  if (EDGE_COUNT (phi_bb->preds) == 4)
    {
      if (absu_hwi (tree_to_shwi (arg2)) != 1)
	return false;
      if (e1->flags & EDGE_TRUE_VALUE)
	{
	  if (tree_to_shwi (arg0) != 2
	      || absu_hwi (tree_to_shwi (arg1)) != 1
	      || wi::to_widest (arg1) == wi::to_widest (arg2))
	    return false;
	}
      else if (tree_to_shwi (arg1) != 2
	       || absu_hwi (tree_to_shwi (arg0)) != 1
	       || wi::to_widest (arg0) == wi::to_widest (arg1))
	return false;
      switch (cmp2)
	{
	case LT_EXPR:
	case LE_EXPR:
	case GT_EXPR:
	case GE_EXPR:
	  break;
	default:
	  return false;
	}
      /* if (x < y) goto phi_bb; else fallthru;
	 if (x > y) goto phi_bb; else fallthru;
	 phi_bb:;
	 is ok, but if x and y are swapped in one of the comparisons,
	 or the comparisons are the same and operands not swapped,
	 or the true and false edges are swapped, it is not.  */
      if ((lhs2 == lhs1)
	  ^ (((cond2_phi_edge->flags
	       & ((cmp2 == LT_EXPR || cmp2 == LE_EXPR)
		  ? EDGE_TRUE_VALUE : EDGE_FALSE_VALUE)) != 0)
	     != ((e1->flags
		  & ((cmp1 == LT_EXPR || cmp1 == LE_EXPR)
		     ? EDGE_TRUE_VALUE : EDGE_FALSE_VALUE)) != 0)))
	return false;
      if (!single_pred_p (cond2_bb) || !cond_only_block_p (cond2_bb))
	return false;
      cond3_bb = single_pred (cond2_bb);
      if (EDGE_COUNT (cond2_bb->succs) != 2)
	return false;
      if (EDGE_SUCC (cond3_bb, 0)->dest == cond2_bb)
	{
	  if (EDGE_SUCC (cond3_bb, 1)->dest != phi_bb)
	    return false;
	  cond3_phi_edge = EDGE_SUCC (cond3_bb, 1);
	}
      else if (EDGE_SUCC (cond3_bb, 0)->dest != phi_bb)
	return false;
      else
	cond3_phi_edge = EDGE_SUCC (cond3_bb, 0);
      arg3 = gimple_phi_arg_def (phi, cond3_phi_edge->dest_idx);
      cond3 = safe_dyn_cast <gcond *> (*gsi_last_bb (cond3_bb));
      if (!cond3)
	return false;
      cmp3 = gimple_cond_code (cond3);
      lhs3 = gimple_cond_lhs (cond3);
      rhs3 = gimple_cond_rhs (cond3);
      if (lhs3 == lhs1)
	{
	  if (!operand_equal_p (rhs3, rhs1, 0))
	    return false;
	}
      else if (lhs3 == rhs1)
	{
	  if (rhs3 != lhs1)
	    return false;
	}
      else
	return false;
    }
  else if (absu_hwi (tree_to_shwi (arg0)) != 1
	   || absu_hwi (tree_to_shwi (arg1)) != 1
	   || wi::to_widest (arg0) == wi::to_widest (arg1))
    return false;

  if (!integer_zerop (arg3) || (cmp3 != EQ_EXPR && cmp3 != NE_EXPR))
    return false;
  if ((cond3_phi_edge->flags & (cmp3 == EQ_EXPR
				? EDGE_TRUE_VALUE : EDGE_FALSE_VALUE)) == 0)
    return false;

  /* lhs1 one_cmp rhs1 results in phires of 1.  */
  enum tree_code one_cmp;
  if ((cmp1 == LT_EXPR || cmp1 == LE_EXPR)
      ^ (!integer_onep ((e1->flags & EDGE_TRUE_VALUE) ? arg1 : arg0)))
    one_cmp = LT_EXPR;
  else
    one_cmp = GT_EXPR;

  enum tree_code res_cmp;
  switch (cmp)
    {
    case EQ_EXPR:
      if (integer_zerop (rhs))
	res_cmp = EQ_EXPR;
      else if (integer_minus_onep (rhs))
	res_cmp = one_cmp == LT_EXPR ? GT_EXPR : LT_EXPR;
      else if (integer_onep (rhs))
	res_cmp = one_cmp;
      else
	return false;
      break;
    case NE_EXPR:
      if (integer_zerop (rhs))
	res_cmp = NE_EXPR;
      else if (integer_minus_onep (rhs))
	res_cmp = one_cmp == LT_EXPR ? LE_EXPR : GE_EXPR;
      else if (integer_onep (rhs))
	res_cmp = one_cmp == LT_EXPR ? GE_EXPR : LE_EXPR;
      else
	return false;
      break;
    case LT_EXPR:
      if (integer_onep (rhs))
	res_cmp = one_cmp == LT_EXPR ? GE_EXPR : LE_EXPR;
      else if (integer_zerop (rhs))
	res_cmp = one_cmp == LT_EXPR ? GT_EXPR : LT_EXPR;
      else
	return false;
      break;
    case LE_EXPR:
      if (integer_zerop (rhs))
	res_cmp = one_cmp == LT_EXPR ? GE_EXPR : LE_EXPR;
      else if (integer_minus_onep (rhs))
	res_cmp = one_cmp == LT_EXPR ? GT_EXPR : LT_EXPR;
      else
	return false;
      break;
    case GT_EXPR:
      if (integer_minus_onep (rhs))
	res_cmp = one_cmp == LT_EXPR ? LE_EXPR : GE_EXPR;
      else if (integer_zerop (rhs))
	res_cmp = one_cmp;
      else
	return false;
      break;
    case GE_EXPR:
      if (integer_zerop (rhs))
	res_cmp = one_cmp == LT_EXPR ? LE_EXPR : GE_EXPR;
      else if (integer_onep (rhs))
	res_cmp = one_cmp;
      else
	return false;
      break;
    default:
      gcc_unreachable ();
    }

  if (gimple_code (use_stmt) == GIMPLE_COND)
    {
      gcond *use_cond = as_a <gcond *> (use_stmt);
      gimple_cond_set_code (use_cond, res_cmp);
      gimple_cond_set_lhs (use_cond, lhs1);
      gimple_cond_set_rhs (use_cond, rhs1);
    }
  else if (gimple_assign_rhs_class (use_stmt) == GIMPLE_BINARY_RHS)
    {
      gimple_assign_set_rhs_code (use_stmt, res_cmp);
      gimple_assign_set_rhs1 (use_stmt, lhs1);
      gimple_assign_set_rhs2 (use_stmt, rhs1);
    }
  else
    {
      tree cond = build2 (res_cmp, TREE_TYPE (gimple_assign_rhs1 (use_stmt)),
			  lhs1, rhs1);
      gimple_assign_set_rhs1 (use_stmt, cond);
    }
  update_stmt (use_stmt);

  if (MAY_HAVE_DEBUG_BIND_STMTS)
    {
      use_operand_p use_p;
      imm_use_iterator iter;
      bool has_debug_uses = false;
      bool has_cast_debug_uses = false;
      FOR_EACH_IMM_USE_FAST (use_p, iter, phires)
	{
	  gimple *use_stmt = USE_STMT (use_p);
	  if (orig_use_lhs && use_stmt == orig_use_stmt)
	    continue;
	  gcc_assert (is_gimple_debug (use_stmt));
	  has_debug_uses = true;
	  break;
	}
      if (orig_use_lhs)
	{
	  if (!has_debug_uses || is_cast)
	    FOR_EACH_IMM_USE_FAST (use_p, iter, orig_use_lhs)
	      {
		gimple *use_stmt = USE_STMT (use_p);
		gcc_assert (is_gimple_debug (use_stmt));
		has_debug_uses = true;
		if (is_cast)
		  has_cast_debug_uses = true;
	      }
	  gimple_stmt_iterator gsi = gsi_for_stmt (orig_use_stmt);
	  tree zero = build_zero_cst (TREE_TYPE (orig_use_lhs));
	  gimple_assign_set_rhs_with_ops (&gsi, INTEGER_CST, zero);
	  update_stmt (orig_use_stmt);
	}

      if (has_debug_uses)
	{
	  /* If there are debug uses, emit something like:
	     # DEBUG D#1 => i_2(D) > j_3(D) ? 1 : -1
	     # DEBUG D#2 => i_2(D) == j_3(D) ? 0 : D#1
	     where > stands for the comparison that yielded 1
	     and replace debug uses of phi result with that D#2.
	     Ignore the value of 2, because if NaNs aren't expected,
	     all floating point numbers should be comparable.  */
	  gimple_stmt_iterator gsi = gsi_after_labels (gimple_bb (phi));
	  tree type = TREE_TYPE (phires);
	  tree temp1 = build_debug_expr_decl (type);
	  tree t = build2 (one_cmp, boolean_type_node, lhs1, rhs2);
	  t = build3 (COND_EXPR, type, t, build_one_cst (type),
		      build_int_cst (type, -1));
	  gimple *g = gimple_build_debug_bind (temp1, t, phi);
	  gsi_insert_before (&gsi, g, GSI_SAME_STMT);
	  tree temp2 = build_debug_expr_decl (type);
	  t = build2 (EQ_EXPR, boolean_type_node, lhs1, rhs2);
	  t = build3 (COND_EXPR, type, t, build_zero_cst (type), temp1);
	  g = gimple_build_debug_bind (temp2, t, phi);
	  gsi_insert_before (&gsi, g, GSI_SAME_STMT);
	  replace_uses_by (phires, temp2);
	  if (orig_use_lhs)
	    {
	      if (has_cast_debug_uses)
		{
		  tree temp3 = make_node (DEBUG_EXPR_DECL);
		  DECL_ARTIFICIAL (temp3) = 1;
		  TREE_TYPE (temp3) = TREE_TYPE (orig_use_lhs);
		  SET_DECL_MODE (temp3, TYPE_MODE (type));
		  t = fold_convert (TREE_TYPE (temp3), temp2);
		  g = gimple_build_debug_bind (temp3, t, phi);
		  gsi_insert_before (&gsi, g, GSI_SAME_STMT);
		  replace_uses_by (orig_use_lhs, temp3);
		}
	      else
		replace_uses_by (orig_use_lhs, temp2);
	    }
	}
    }

  if (orig_use_lhs)
    {
      gimple_stmt_iterator gsi = gsi_for_stmt (orig_use_stmt);
      gsi_remove (&gsi, true);
    }

  gimple_stmt_iterator psi = gsi_for_stmt (phi);
  remove_phi_node (&psi, true);
  statistics_counter_event (cfun, "spaceship replacement", 1);

  return true;
}
/* Optimize x ? __builtin_fun (x) : C, where C is __builtin_fun (0).
   Convert

   <bb 2>
   if (b_4(D) != 0)
   goto <bb 3>
   else
   goto <bb 4>

   <bb 3>
   _2 = (unsigned long) b_4(D);
   _9 = __builtin_popcountl (_2);
   OR
   _9 = __builtin_popcountl (b_4(D));

   <bb 4>
   c_12 = PHI <0(2), _9(3)>

   Into
   <bb 2>
   _2 = (unsigned long) b_4(D);
   _9 = __builtin_popcountl (_2);
   OR
   _9 = __builtin_popcountl (b_4(D));

   <bb 4>
   c_12 = PHI <_9(2)>

   Similarly for __builtin_clz or __builtin_ctz if
   C?Z_DEFINED_VALUE_AT_ZERO is 2, optab is present and
   instead of 0 above it uses the value from that macro.  */
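/* A source-level sketch of the pattern (illustrative only):

     int f (unsigned long b)
     {
       return b != 0 ? __builtin_popcountl (b) : 0;
     }

   Since __builtin_popcountl (0) is 0, the guard is redundant and the
   call can be made unconditional.  */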
static bool
cond_removal_in_builtin_zero_pattern (basic_block cond_bb,
				      basic_block middle_bb,
				      edge e1, edge e2, gphi *phi,
				      tree arg0, tree arg1)
{
  gimple_stmt_iterator gsi, gsi_from;
  gimple *call;
  gimple *cast = NULL;
  tree lhs, arg;

  /* Check that
     _2 = (unsigned long) b_4(D);
     _9 = __builtin_popcountl (_2);
     OR
     _9 = __builtin_popcountl (b_4(D));
     are the only stmts in the middle_bb.  */

  gsi = gsi_start_nondebug_after_labels_bb (middle_bb);
  if (gsi_end_p (gsi))
    return false;
  cast = gsi_stmt (gsi);
  gsi_next_nondebug (&gsi);
  if (!gsi_end_p (gsi))
    {
      call = gsi_stmt (gsi);
      gsi_next_nondebug (&gsi);
      if (!gsi_end_p (gsi))
	return false;
    }
  else
    {
      call = cast;
      cast = NULL;
    }

  /* Check that we have a popcount/clz/ctz builtin.  */
  if (!is_gimple_call (call))
    return false;

  lhs = gimple_get_lhs (call);

  if (lhs == NULL_TREE)
    return false;

  combined_fn cfn = gimple_call_combined_fn (call);
  if (gimple_call_num_args (call) != 1
      && (gimple_call_num_args (call) != 2
	  || (cfn != CFN_CLZ && cfn != CFN_CTZ)))
    return false;

  arg = gimple_call_arg (call, 0);

  internal_fn ifn = IFN_LAST;
  int val = 0;
  bool any_val = false;
  switch (cfn)
    {
    case CFN_BUILT_IN_BSWAP16:
    case CFN_BUILT_IN_BSWAP32:
    case CFN_BUILT_IN_BSWAP64:
    case CFN_BUILT_IN_BSWAP128:
    CASE_CFN_FFS:
    CASE_CFN_PARITY:
    CASE_CFN_POPCOUNT:
      break;
    CASE_CFN_CLZ:
      if (INTEGRAL_TYPE_P (TREE_TYPE (arg)))
	{
	  tree type = TREE_TYPE (arg);
	  if (TREE_CODE (type) == BITINT_TYPE)
	    return false;
	  if (gimple_call_num_args (call) == 1)
	    any_val = true;
	  else
	    {
	      if (!tree_fits_shwi_p (gimple_call_arg (call, 1)))
		return false;
	      HOST_WIDE_INT at_zero = tree_to_shwi (gimple_call_arg (call, 1));
	      if ((int) at_zero != at_zero)
		return false;
	      ifn = IFN_CLZ;
	      val = at_zero;
	      break;
	    }
	  if (direct_internal_fn_supported_p (IFN_CLZ, type, OPTIMIZE_FOR_BOTH)
	      && CLZ_DEFINED_VALUE_AT_ZERO (SCALAR_INT_TYPE_MODE (type),
					    val) == 2)
	    {
	      ifn = IFN_CLZ;
	      break;
	    }
	}
      return false;
    CASE_CFN_CTZ:
      if (INTEGRAL_TYPE_P (TREE_TYPE (arg)))
	{
	  tree type = TREE_TYPE (arg);
	  if (TREE_CODE (type) == BITINT_TYPE)
	    return false;
	  if (gimple_call_num_args (call) == 1)
	    any_val = true;
	  else
	    {
	      if (!tree_fits_shwi_p (gimple_call_arg (call, 1)))
		return false;
	      HOST_WIDE_INT at_zero = tree_to_shwi (gimple_call_arg (call, 1));
	      if ((int) at_zero != at_zero)
		return false;
	      ifn = IFN_CTZ;
	      val = at_zero;
	      break;
	    }
	  if (direct_internal_fn_supported_p (IFN_CTZ, type, OPTIMIZE_FOR_BOTH)
	      && CTZ_DEFINED_VALUE_AT_ZERO (SCALAR_INT_TYPE_MODE (type),
					    val) == 2)
	    {
	      ifn = IFN_CTZ;
	      break;
	    }
	}
      return false;
    case CFN_BUILT_IN_CLRSB:
      val = TYPE_PRECISION (integer_type_node) - 1;
      break;
    case CFN_BUILT_IN_CLRSBL:
      val = TYPE_PRECISION (long_integer_type_node) - 1;
      break;
    case CFN_BUILT_IN_CLRSBLL:
      val = TYPE_PRECISION (long_long_integer_type_node) - 1;
      break;
    default:
      return false;
    }

  if (cast)
    {
      /* We have a cast stmt feeding popcount/clz/ctz builtin.  */
      /* Check that we have a cast prior to that.  */
      if (gimple_code (cast) != GIMPLE_ASSIGN
	  || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (cast)))
	return false;
      /* Result of the cast stmt is the argument to the builtin.  */
      if (arg != gimple_assign_lhs (cast))
	return false;
      arg = gimple_assign_rhs1 (cast);
    }

  gcond *cond = dyn_cast <gcond *> (*gsi_last_bb (cond_bb));

  /* Cond_bb has a check for b_4 [!=|==] 0 before calling the popcount/clz/ctz
     builtin.  */
  if (!cond
      || (gimple_cond_code (cond) != NE_EXPR
	  && gimple_cond_code (cond) != EQ_EXPR)
      || !integer_zerop (gimple_cond_rhs (cond))
      || arg != gimple_cond_lhs (cond))
    return false;

  /* Canonicalize.  */
  if ((e2->flags & EDGE_TRUE_VALUE
       && gimple_cond_code (cond) == NE_EXPR)
      || (e1->flags & EDGE_TRUE_VALUE
	  && gimple_cond_code (cond) == EQ_EXPR))
    {
      std::swap (arg0, arg1);
      std::swap (e1, e2);
    }

  /* Check PHI arguments.  */
  if (lhs != arg0
      || TREE_CODE (arg1) != INTEGER_CST)
    return false;
  if (any_val)
    {
      if (!tree_fits_shwi_p (arg1))
	return false;
      HOST_WIDE_INT at_zero = tree_to_shwi (arg1);
      if ((int) at_zero != at_zero)
	return false;
      val = at_zero;
    }
  else if (wi::to_wide (arg1) != val)
    return false;

  /* And insert the popcount/clz/ctz builtin and cast stmt before the
     cond_bb.  */
  gsi = gsi_last_bb (cond_bb);
  if (cast)
    {
      gsi_from = gsi_for_stmt (cast);
      gsi_move_before (&gsi_from, &gsi);
      reset_flow_sensitive_info (gimple_get_lhs (cast));
    }
  gsi_from = gsi_for_stmt (call);
  if (ifn == IFN_LAST
      || (gimple_call_internal_p (call) && gimple_call_num_args (call) == 2))
    gsi_move_before (&gsi_from, &gsi);
  else
    {
      /* For __builtin_c[lt]z* force .C[LT]Z ifn, because only
	 the latter is well defined at zero.  */
      call = gimple_build_call_internal (ifn, 2, gimple_call_arg (call, 0),
					 build_int_cst (integer_type_node,
							val));
      gimple_call_set_lhs (call, lhs);
      gsi_insert_before (&gsi, call, GSI_SAME_STMT);
      gsi_remove (&gsi_from, true);
    }
  reset_flow_sensitive_info (lhs);

  /* Now update the PHI and remove unneeded bbs.  */
  replace_phi_edge_with_variable (cond_bb, e2, phi, lhs);
  return true;
}
/* Auxiliary functions to determine the set of memory accesses which
   can't trap because they are preceded by accesses to the same memory
   portion.  We do that for MEM_REFs, so we only need to track
   the SSA_NAME of the pointer indirectly referenced.  The algorithm
   simply is a walk over all instructions in dominator order.  When
   we see a MEM_REF we determine if we've already seen a same
   ref anywhere up to the root of the dominator tree.  If we do the
   current access can't trap.  If we don't see any dominating access
   the current access might trap, but might also make later accesses
   non-trapping, so we remember it.  We need to be careful with loads
   or stores, for instance a load might not trap, while a store would,
   so if we see a dominating read access this doesn't mean that a later
   write access would not trap.  Hence we also need to differentiate the
   type of access(es) seen.

   ??? We currently are very conservative and assume that a load might
   trap even if a store doesn't (write-only memory).  This probably is
   overly conservative.

   We currently support a special case that for !TREE_ADDRESSABLE automatic
   variables, it could ignore whether something is a load or store because the
   local stack should be always writable.  */
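/* An illustrative C sketch of the idea:

     *p = 0;	  // might trap; remembered in the hash table
     if (cond)
       *p = 1;	  // a same-sized store to *p dominates it: cannot trap

   The second store is proven non-trapping because an access of the same
   kind and size to the same address was already executed on every path
   leading to it.  */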
/* A hash-table of references (MEM_REF/ARRAY_REF/COMPONENT_REF), and in which
   basic block an *_REF through it was seen, which would constitute a
   no-trap region for same accesses.

   Size is needed to support 2 MEM_REFs of different types, like
   MEM<double>(s_1) and MEM<long>(s_1), which would compare equal with
   OEP_ADDRESS_OF.  */
struct ref_to_bb
{
  tree exp;
  HOST_WIDE_INT size;
  unsigned int phase;
  basic_block bb;
};
/* Hashtable helpers.  */

struct refs_hasher : free_ptr_hash<ref_to_bb>
{
  static inline hashval_t hash (const ref_to_bb *);
  static inline bool equal (const ref_to_bb *, const ref_to_bb *);
};
/* Used for quick clearing of the hash-table when we see calls.
   Hash entries with phase < nt_call_phase are invalid.  */
static unsigned int nt_call_phase;

/* The hash function.  */

inline hashval_t
refs_hasher::hash (const ref_to_bb *n)
{
  inchash::hash hstate;
  inchash::add_expr (n->exp, hstate, OEP_ADDRESS_OF);
  hstate.add_hwi (n->size);
  return hstate.end ();
}
/* The equality function of *P1 and *P2.  */

inline bool
refs_hasher::equal (const ref_to_bb *n1, const ref_to_bb *n2)
{
  return operand_equal_p (n1->exp, n2->exp, OEP_ADDRESS_OF)
	 && n1->size == n2->size;
}
class nontrapping_dom_walker : public dom_walker
{
public:
  nontrapping_dom_walker (cdi_direction direction, hash_set<tree> *ps)
    : dom_walker (direction), m_nontrapping (ps), m_seen_refs (128)
  {}

  edge before_dom_children (basic_block) final override;
  void after_dom_children (basic_block) final override;

private:

  /* We see the expression EXP in basic block BB.  If it's an interesting
     expression (a MEM_REF through an SSA_NAME) possibly insert the
     expression into the set NONTRAP or the hash table of seen expressions.
     STORE is true if this expression is on the LHS, otherwise it's on
     the RHS.  */
  void add_or_mark_expr (basic_block, tree, bool);

  hash_set<tree> *m_nontrapping;

  /* The hash table for remembering what we've seen.  */
  hash_table<refs_hasher> m_seen_refs;
};
/* Called by walk_dominator_tree, when entering the block BB.  */

edge
nontrapping_dom_walker::before_dom_children (basic_block bb)
{
  edge e;
  edge_iterator ei;
  gimple_stmt_iterator gsi;

  /* If we haven't seen all our predecessors, clear the hash-table.  */
  FOR_EACH_EDGE (e, ei, bb->preds)
    if ((((size_t)e->src->aux) & 2) == 0)
      {
	nt_call_phase++;
	break;
      }

  /* Mark this BB as being on the path to dominator root and as visited.  */
  bb->aux = (void*)(1 | 2);

  /* And walk the statements in order.  */
  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple *stmt = gsi_stmt (gsi);

      if ((gimple_code (stmt) == GIMPLE_ASM && gimple_vdef (stmt))
	  || (is_gimple_call (stmt)
	      && (!nonfreeing_call_p (stmt) || !nonbarrier_call_p (stmt))))
	nt_call_phase++;
      else if (gimple_assign_single_p (stmt) && !gimple_has_volatile_ops (stmt))
	{
	  add_or_mark_expr (bb, gimple_assign_lhs (stmt), true);
	  add_or_mark_expr (bb, gimple_assign_rhs1 (stmt), false);
	}
    }
  return NULL;
}
/* Called by walk_dominator_tree, when basic block BB is exited.  */

void
nontrapping_dom_walker::after_dom_children (basic_block bb)
{
  /* This BB isn't on the path to dominator root anymore.  */
  bb->aux = (void*)2;
}
/* We see the expression EXP in basic block BB.  If it's an interesting
   expression of:
     1) MEM_REF
     2) ARRAY_REF
     3) COMPONENT_REF
   possibly insert the expression into the set NONTRAP or the hash table
   of seen expressions.  STORE is true if this expression is on the LHS,
   otherwise it's on the RHS.  */
void
nontrapping_dom_walker::add_or_mark_expr (basic_block bb, tree exp, bool store)
{
  HOST_WIDE_INT size;

  if ((TREE_CODE (exp) == MEM_REF || TREE_CODE (exp) == ARRAY_REF
       || TREE_CODE (exp) == COMPONENT_REF)
      && (size = int_size_in_bytes (TREE_TYPE (exp))) > 0)
    {
      struct ref_to_bb map;
      ref_to_bb **slot;
      struct ref_to_bb *r2bb;
      basic_block found_bb = 0;

      if (!store)
	{
	  tree base = get_base_address (exp);
	  /* Only record a LOAD of a local variable without address-taken, as
	     the local stack is always writable.  This allows cselim on a STORE
	     with a dominating LOAD.  */
	  if (!auto_var_p (base) || TREE_ADDRESSABLE (base))
	    return;
	}

      /* Try to find the last seen *_REF, which can trap.  */
      map.exp = exp;
      map.size = size;
      slot = m_seen_refs.find_slot (&map, INSERT);
      r2bb = *slot;
      if (r2bb && r2bb->phase >= nt_call_phase)
	found_bb = r2bb->bb;

      /* If we've found a trapping *_REF, _and_ it dominates EXP
	 (it's in a basic block on the path from us to the dominator root)
	 then we can't trap.  */
      if (found_bb && (((size_t)found_bb->aux) & 1) == 1)
	m_nontrapping->add (exp);
      else
	{
	  /* EXP might trap, so insert it into the hash table.  */
	  if (r2bb)
	    r2bb->phase = nt_call_phase;
	  else
	    {
	      r2bb = XNEW (struct ref_to_bb);
	      r2bb->phase = nt_call_phase;
	      r2bb->exp = exp;
	      r2bb->size = size;
	      *slot = r2bb;
	    }
	  r2bb->bb = bb;
	}
    }
}
/* This is the entry point of gathering non trapping memory accesses.
   It will do a dominator walk over the whole function, and it will
   make use of the bb->aux pointers.  It returns a set of trees
   (the MEM_REFs itself) which can't trap.  */
static hash_set<tree> *
get_non_trapping (void)
{
  nt_call_phase = 0;
  hash_set<tree> *nontrap = new hash_set<tree>;

  nontrapping_dom_walker (CDI_DOMINATORS, nontrap)
    .walk (cfun->cfg->x_entry_block_ptr);

  clear_aux_for_blocks ();
  return nontrap;
}
/* Do the main work of conditional store replacement.  We already know
   that the recognized pattern looks like so:

   split:
     if (cond) goto MIDDLE_BB; else goto JOIN_BB (edge E1)
   MIDDLE_BB:
     something
     fallthrough (edge E0)
   JOIN_BB:
     some more

   We check that MIDDLE_BB contains only one store, that that store
   doesn't trap (not via NOTRAP, but via checking if an access to the same
   memory location dominates us, or the store is to a local addressable
   object) and that the store has a "simple" RHS.  */
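/* A source-level sketch (illustrative): the guarded store in

     if (cond)
       *p = x;

   becomes an unconditional store of a conditionally selected value,

     tmp = *p;		// load inserted on the edge that skipped the store
     tmp2 = cond ? x : tmp;
     *p = tmp2;

   which is only valid because either a dominating access proves *p
   cannot trap or *p is a local variable whose address is not taken.  */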
static bool
cond_store_replacement (basic_block middle_bb, basic_block join_bb,
			edge e0, edge e1, hash_set<tree> *nontrap)
{
  gimple *assign = last_and_only_stmt (middle_bb);
  tree lhs, rhs, name, name2;
  gphi *newphi;
  gimple *new_stmt;
  gimple_stmt_iterator gsi;
  location_t locus;

  /* Check if middle_bb contains of only one store.  */
  if (!assign
      || !gimple_assign_single_p (assign)
      || gimple_has_volatile_ops (assign))
    return false;

  /* And no PHI nodes so all uses in the single stmt are also
     available where we insert to.  */
  if (!gimple_seq_empty_p (phi_nodes (middle_bb)))
    return false;

  locus = gimple_location (assign);
  lhs = gimple_assign_lhs (assign);
  rhs = gimple_assign_rhs1 (assign);
  if ((!REFERENCE_CLASS_P (lhs)
       && !DECL_P (lhs))
      || !is_gimple_reg_type (TREE_TYPE (lhs)))
    return false;

  /* Prove that we can move the store down.  We could also check
     TREE_THIS_NOTRAP here, but in that case we also could move stores,
     whose value is not available readily, which we want to avoid.  */
  if (!nontrap->contains (lhs))
    {
      /* If LHS is an access to a local variable without address-taken
	 (or when we allow data races) and known not to trap, we could
	 always safely move down the store.  */
      tree base = get_base_address (lhs);
      if (!auto_var_p (base)
	  || (TREE_ADDRESSABLE (base) && !flag_store_data_races)
	  || tree_could_trap_p (lhs))
	return false;
    }

  /* Now we've checked the constraints, so do the transformation:
     1) Remove the single store.  */
  gsi = gsi_for_stmt (assign);
  unlink_stmt_vdef (assign);
  gsi_remove (&gsi, true);
  release_defs (assign);

  /* Make both store and load use alias-set zero as we have to
     deal with the case of the store being a conditional change
     of the dynamic type.  */
  lhs = unshare_expr (lhs);
  tree *basep = &lhs;
  while (handled_component_p (*basep))
    basep = &TREE_OPERAND (*basep, 0);
  if (TREE_CODE (*basep) == MEM_REF
      || TREE_CODE (*basep) == TARGET_MEM_REF)
    TREE_OPERAND (*basep, 1)
      = fold_convert (ptr_type_node, TREE_OPERAND (*basep, 1));
  else
    *basep = build2 (MEM_REF, TREE_TYPE (*basep),
		     build_fold_addr_expr (*basep),
		     build_zero_cst (ptr_type_node));

  /* 2) Insert a load from the memory of the store to the temporary
	on the edge which did not contain the store.  */
  name = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
  new_stmt = gimple_build_assign (name, lhs);
  gimple_set_location (new_stmt, locus);
  lhs = unshare_expr (lhs);

  /* Set the no-warning bit on the rhs of the load to avoid uninit
     warnings.  */
  tree rhs1 = gimple_assign_rhs1 (new_stmt);
  suppress_warning (rhs1, OPT_Wuninitialized);

  gsi_insert_on_edge (e1, new_stmt);

  /* 3) Create a PHI node at the join block, with one argument
	holding the old RHS, and the other holding the temporary
	where we stored the old memory contents.  */
  name2 = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
  newphi = create_phi_node (name2, join_bb);
  add_phi_arg (newphi, rhs, e0, locus);
  add_phi_arg (newphi, name, e1, locus);

  new_stmt = gimple_build_assign (lhs, PHI_RESULT (newphi));

  /* 4) Insert that PHI node.  */
  gsi = gsi_after_labels (join_bb);
  if (gsi_end_p (gsi))
    {
      gsi = gsi_last_bb (join_bb);
      gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
    }
  else
    gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nConditional store replacement happened!");
      fprintf (dump_file, "\nReplaced the store with a load.");
      fprintf (dump_file, "\nInserted a new PHI statement in joint block:\n");
      print_gimple_stmt (dump_file, new_stmt, 0, TDF_VOPS|TDF_MEMSYMS);
    }
  statistics_counter_event (cfun, "conditional store replacement", 1);

  return true;
}
/* Do the main work of conditional store replacement.  */

static bool
cond_if_else_store_replacement_1 (basic_block then_bb, basic_block else_bb,
				  basic_block join_bb, gimple *then_assign,
				  gimple *else_assign)
{
  tree lhs_base, lhs, then_rhs, else_rhs, name;
  location_t then_locus, else_locus;
  gimple_stmt_iterator gsi;
  gphi *newphi;
  gimple *new_stmt;

  if (then_assign == NULL
      || !gimple_assign_single_p (then_assign)
      || gimple_clobber_p (then_assign)
      || gimple_has_volatile_ops (then_assign)
      || else_assign == NULL
      || !gimple_assign_single_p (else_assign)
      || gimple_clobber_p (else_assign)
      || gimple_has_volatile_ops (else_assign))
    return false;

  lhs = gimple_assign_lhs (then_assign);
  if (!is_gimple_reg_type (TREE_TYPE (lhs))
      || !operand_equal_p (lhs, gimple_assign_lhs (else_assign), 0))
    return false;

  lhs_base = get_base_address (lhs);
  if (lhs_base == NULL_TREE
      || (!DECL_P (lhs_base) && TREE_CODE (lhs_base) != MEM_REF))
    return false;

  then_rhs = gimple_assign_rhs1 (then_assign);
  else_rhs = gimple_assign_rhs1 (else_assign);
  then_locus = gimple_location (then_assign);
  else_locus = gimple_location (else_assign);

  /* Now we've checked the constraints, so do the transformation:
     1) Remove the stores.  */
  gsi = gsi_for_stmt (then_assign);
  unlink_stmt_vdef (then_assign);
  gsi_remove (&gsi, true);
  release_defs (then_assign);

  gsi = gsi_for_stmt (else_assign);
  unlink_stmt_vdef (else_assign);
  gsi_remove (&gsi, true);
  release_defs (else_assign);

  /* 2) Create a PHI node at the join block, with one argument
	holding the old RHS, and the other holding the temporary
	where we stored the old memory contents.  */
  name = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
  newphi = create_phi_node (name, join_bb);
  add_phi_arg (newphi, then_rhs, EDGE_SUCC (then_bb, 0), then_locus);
  add_phi_arg (newphi, else_rhs, EDGE_SUCC (else_bb, 0), else_locus);

  new_stmt = gimple_build_assign (lhs, PHI_RESULT (newphi));

  /* 3) Insert that PHI node.  */
  gsi = gsi_after_labels (join_bb);
  if (gsi_end_p (gsi))
    {
      gsi = gsi_last_bb (join_bb);
      gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
    }
  else
    gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);

  statistics_counter_event (cfun, "if-then-else store replacement", 1);

  return true;
}
/* Return the single store in BB with VDEF or NULL if there are
   other stores in the BB or loads following the store.  */

static gimple *
single_trailing_store_in_bb (basic_block bb, tree vdef)
{
  if (SSA_NAME_IS_DEFAULT_DEF (vdef))
    return NULL;
  gimple *store = SSA_NAME_DEF_STMT (vdef);
  if (gimple_bb (store) != bb
      || gimple_code (store) == GIMPLE_PHI)
    return NULL;

  /* Verify there is no other store in this BB.  */
  if (!SSA_NAME_IS_DEFAULT_DEF (gimple_vuse (store))
      && gimple_bb (SSA_NAME_DEF_STMT (gimple_vuse (store))) == bb
      && gimple_code (SSA_NAME_DEF_STMT (gimple_vuse (store))) != GIMPLE_PHI)
    return NULL;

  /* Verify there is no load or store after the store.  */
  use_operand_p use_p;
  imm_use_iterator imm_iter;
  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, gimple_vdef (store))
    if (USE_STMT (use_p) != store
	&& gimple_bb (USE_STMT (use_p)) == bb)
      return NULL;

  return store;
}
/* Conditional store replacement.  We already know
   that the recognized pattern looks like so:

   split:
     if (cond) goto THEN_BB; else goto ELSE_BB (edge E1)
   THEN_BB:
     ...
     X = Y;
     ...
     goto JOIN_BB;
   ELSE_BB:
     ...
     X = Z;
     ...
     fallthrough (edge E0)
   JOIN_BB:
     some more

   We check that it is safe to sink the store to JOIN_BB by verifying that
   there are no read-after-write or write-after-write dependencies in
   THEN_BB and ELSE_BB.  */
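/* A source-level sketch (illustrative): both branches store to the same
   location, so the store can be sunk into the join block:

     if (cond)			   tmp = cond ? x : y;
       *p = x;		  ==>	   *p = tmp;
     else
       *p = y;

   No trap-safety proof is needed, as *p is written on every path; only
   the dependence checks described above must succeed.  */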
static bool
cond_if_else_store_replacement (basic_block then_bb, basic_block else_bb,
				basic_block join_bb)
{
  vec<data_reference_p> then_datarefs, else_datarefs;
  vec<ddr_p> then_ddrs, else_ddrs;
  gimple *then_store, *else_store;
  bool found, ok = false, res;
  struct data_dependence_relation *ddr;
  data_reference_p then_dr, else_dr;
  int i, j;
  tree then_lhs, else_lhs;
  basic_block blocks[3];

  /* Handle the case with single store in THEN_BB and ELSE_BB.  That is
     cheap enough to always handle as it allows us to elide dependence
     checking.  */
  gphi *vphi = NULL;
  for (gphi_iterator si = gsi_start_phis (join_bb); !gsi_end_p (si);
       gsi_next (&si))
    if (virtual_operand_p (gimple_phi_result (si.phi ())))
      {
	vphi = si.phi ();
	break;
      }
  if (!vphi)
    return false;
  tree then_vdef = PHI_ARG_DEF_FROM_EDGE (vphi, single_succ_edge (then_bb));
  tree else_vdef = PHI_ARG_DEF_FROM_EDGE (vphi, single_succ_edge (else_bb));
  gimple *then_assign = single_trailing_store_in_bb (then_bb, then_vdef);
  if (then_assign)
    {
      gimple *else_assign = single_trailing_store_in_bb (else_bb, else_vdef);
      if (else_assign)
	return cond_if_else_store_replacement_1 (then_bb, else_bb, join_bb,
						 then_assign, else_assign);
    }

  /* If either vectorization or if-conversion is disabled then do
     not sink any stores.  */
  if (param_max_stores_to_sink == 0
      || (!flag_tree_loop_vectorize && !flag_tree_slp_vectorize)
      || !flag_tree_loop_if_convert)
    return false;

  /* Find data references.  */
  then_datarefs.create (1);
  else_datarefs.create (1);
  if ((find_data_references_in_bb (NULL, then_bb, &then_datarefs)
       == chrec_dont_know)
      || !then_datarefs.length ()
      || (find_data_references_in_bb (NULL, else_bb, &else_datarefs)
	  == chrec_dont_know)
      || !else_datarefs.length ())
    {
      free_data_refs (then_datarefs);
      free_data_refs (else_datarefs);
      return false;
    }

  /* Find pairs of stores with equal LHS.  */
  auto_vec<gimple *, 1> then_stores, else_stores;
  FOR_EACH_VEC_ELT (then_datarefs, i, then_dr)
    {
      if (DR_IS_READ (then_dr))
	continue;

      then_store = DR_STMT (then_dr);
      then_lhs = gimple_get_lhs (then_store);
      if (then_lhs == NULL_TREE)
	continue;
      found = false;

      FOR_EACH_VEC_ELT (else_datarefs, j, else_dr)
	{
	  if (DR_IS_READ (else_dr))
	    continue;

	  else_store = DR_STMT (else_dr);
	  else_lhs = gimple_get_lhs (else_store);
	  if (else_lhs == NULL_TREE)
	    continue;

	  if (operand_equal_p (then_lhs, else_lhs, 0))
	    {
	      found = true;
	      break;
	    }
	}

      if (!found)
	continue;

      then_stores.safe_push (then_store);
      else_stores.safe_push (else_store);
    }

  /* No pairs of stores found.  */
  if (!then_stores.length ()
      || then_stores.length () > (unsigned) param_max_stores_to_sink)
    {
      free_data_refs (then_datarefs);
      free_data_refs (else_datarefs);
      return false;
    }

  /* Compute and check data dependencies in both basic blocks.  */
  then_ddrs.create (1);
  else_ddrs.create (1);
  if (!compute_all_dependences (then_datarefs, &then_ddrs,
				vNULL, false)
      || !compute_all_dependences (else_datarefs, &else_ddrs,
				   vNULL, false))
    {
      free_dependence_relations (then_ddrs);
      free_dependence_relations (else_ddrs);
      free_data_refs (then_datarefs);
      free_data_refs (else_datarefs);
      return false;
    }
  blocks[0] = then_bb;
  blocks[1] = else_bb;
  blocks[2] = join_bb;
  renumber_gimple_stmt_uids_in_blocks (blocks, 3);

  /* Check that there are no read-after-write or write-after-write dependencies
     in THEN_BB.  */
  FOR_EACH_VEC_ELT (then_ddrs, i, ddr)
    {
      struct data_reference *dra = DDR_A (ddr);
      struct data_reference *drb = DDR_B (ddr);

      if (DDR_ARE_DEPENDENT (ddr) != chrec_known
	  && ((DR_IS_READ (dra) && DR_IS_WRITE (drb)
	       && gimple_uid (DR_STMT (dra)) > gimple_uid (DR_STMT (drb)))
	      || (DR_IS_READ (drb) && DR_IS_WRITE (dra)
		  && gimple_uid (DR_STMT (drb)) > gimple_uid (DR_STMT (dra)))
	      || (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))))
	{
	  free_dependence_relations (then_ddrs);
	  free_dependence_relations (else_ddrs);
	  free_data_refs (then_datarefs);
	  free_data_refs (else_datarefs);
	  return false;
	}
    }

  /* Check that there are no read-after-write or write-after-write dependencies
     in ELSE_BB.  */
  FOR_EACH_VEC_ELT (else_ddrs, i, ddr)
    {
      struct data_reference *dra = DDR_A (ddr);
      struct data_reference *drb = DDR_B (ddr);

      if (DDR_ARE_DEPENDENT (ddr) != chrec_known
	  && ((DR_IS_READ (dra) && DR_IS_WRITE (drb)
	       && gimple_uid (DR_STMT (dra)) > gimple_uid (DR_STMT (drb)))
	      || (DR_IS_READ (drb) && DR_IS_WRITE (dra)
		  && gimple_uid (DR_STMT (drb)) > gimple_uid (DR_STMT (dra)))
	      || (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))))
	{
	  free_dependence_relations (then_ddrs);
	  free_dependence_relations (else_ddrs);
	  free_data_refs (then_datarefs);
	  free_data_refs (else_datarefs);
	  return false;
	}
    }

  /* Sink stores with same LHS.  */
  FOR_EACH_VEC_ELT (then_stores, i, then_store)
    {
      else_store = else_stores[i];
      res = cond_if_else_store_replacement_1 (then_bb, else_bb, join_bb,
					      then_store, else_store);
      ok = ok || res;
    }

  free_dependence_relations (then_ddrs);
  free_dependence_relations (else_ddrs);
  free_data_refs (then_datarefs);
  free_data_refs (else_datarefs);

  return ok;
}
/* Return TRUE if STMT has a VUSE whose corresponding VDEF is in BB.  */

static bool
local_mem_dependence (gimple *stmt, basic_block bb)
{
  tree vuse = gimple_vuse (stmt);
  gimple *def;

  if (!vuse)
    return false;

  def = SSA_NAME_DEF_STMT (vuse);
  return (def && gimple_bb (def) == bb);
}
3732 /* Given a "diamond" control-flow pattern where BB0 tests a condition,
3733 BB1 and BB2 are "then" and "else" blocks dependent on this test,
3734 and BB3 rejoins control flow following BB1 and BB2, look for
3735 opportunities to hoist loads as follows. If BB3 contains a PHI of
3736 two loads, one each occurring in BB1 and BB2, and the loads are
3737 provably of adjacent fields in the same structure, then move both
3738 loads into BB0. Of course this can only be done if there are no
3739 dependencies preventing such motion.
3741 One of the hoisted loads will always be speculative, so the
3742 transformation is currently conservative:
3744 - The fields must be strictly adjacent.
3745 - The two fields must occupy a single memory block that is
3746 guaranteed to not cross a page boundary.
3748 The last is difficult to prove, as such memory blocks should be
3749 aligned on the minimum of the stack alignment boundary and the
3750 alignment guaranteed by heap allocation interfaces. Thus we rely
3751 on a parameter for the alignment value.
3753 Provided a good value is used for the last case, the first
3754 restriction could possibly be relaxed. */
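/* A source-level sketch (illustrative, assuming the two fields are
   adjacent members of one object):

     struct node { struct node *left; struct node *right; };

     n = cond ? v->left : v->right;

   Both loads are hoisted above the branch so the selection can become a
   conditional move; one of them then executes speculatively, which is
   acceptable only because the adjacent field belongs to the same object
   and the pair fits within one suitably aligned block.  */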
static void
hoist_adjacent_loads (basic_block bb0, basic_block bb1,
		      basic_block bb2, basic_block bb3)
{
  unsigned HOST_WIDE_INT param_align = param_l1_cache_line_size;
  unsigned HOST_WIDE_INT param_align_bits = param_align * BITS_PER_UNIT;
  gphi_iterator gsi;

  /* Walk the phis in bb3 looking for an opportunity.  We are looking
     for phis of two SSA names, one each of which is defined in bb1 and
     bb2.  */
  for (gsi = gsi_start_phis (bb3); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gphi *phi_stmt = gsi.phi ();
      gimple *def1, *def2;
      tree arg1, arg2, ref1, ref2, field1, field2;
      tree tree_offset1, tree_offset2, tree_size2, next;
      unsigned HOST_WIDE_INT offset1, offset2, size2, align1;
      gimple_stmt_iterator gsi2;
      basic_block bb_for_def1, bb_for_def2;

      if (gimple_phi_num_args (phi_stmt) != 2
	  || virtual_operand_p (gimple_phi_result (phi_stmt)))
	continue;

      arg1 = gimple_phi_arg_def (phi_stmt, 0);
      arg2 = gimple_phi_arg_def (phi_stmt, 1);

      if (TREE_CODE (arg1) != SSA_NAME
	  || TREE_CODE (arg2) != SSA_NAME
	  || SSA_NAME_IS_DEFAULT_DEF (arg1)
	  || SSA_NAME_IS_DEFAULT_DEF (arg2))
	continue;

      def1 = SSA_NAME_DEF_STMT (arg1);
      def2 = SSA_NAME_DEF_STMT (arg2);

      if ((gimple_bb (def1) != bb1 || gimple_bb (def2) != bb2)
	  && (gimple_bb (def2) != bb1 || gimple_bb (def1) != bb2))
	continue;

      /* Check the mode of the arguments to be sure a conditional move
	 can be generated for it.  */
      if (optab_handler (movcc_optab, TYPE_MODE (TREE_TYPE (arg1)))
	  == CODE_FOR_nothing)
	continue;

      /* Both statements must be assignments whose RHS is a COMPONENT_REF.  */
      if (!gimple_assign_single_p (def1)
	  || !gimple_assign_single_p (def2)
	  || gimple_has_volatile_ops (def1)
	  || gimple_has_volatile_ops (def2))
	continue;

      ref1 = gimple_assign_rhs1 (def1);
      ref2 = gimple_assign_rhs1 (def2);

      if (TREE_CODE (ref1) != COMPONENT_REF
	  || TREE_CODE (ref2) != COMPONENT_REF)
	continue;

      /* The zeroth operand of the two component references must be
	 identical.  It is not sufficient to compare get_base_address of
	 the two references, because this could allow for different
	 elements of the same array in the two trees.  It is not safe to
	 assume that the existence of one array element implies the
	 existence of a different one.  */
      if (!operand_equal_p (TREE_OPERAND (ref1, 0), TREE_OPERAND (ref2, 0), 0))
	continue;

      field1 = TREE_OPERAND (ref1, 1);
      field2 = TREE_OPERAND (ref2, 1);

      /* Check for field adjacency, and ensure field1 comes first.  */
      for (next = DECL_CHAIN (field1);
	   next && TREE_CODE (next) != FIELD_DECL;
	   next = DECL_CHAIN (next))
	;

      if (next != field2)
	{
	  for (next = DECL_CHAIN (field2);
	       next && TREE_CODE (next) != FIELD_DECL;
	       next = DECL_CHAIN (next))
	    ;

	  if (next != field1)
	    continue;

	  std::swap (field1, field2);
	  std::swap (def1, def2);
	}

      bb_for_def1 = gimple_bb (def1);
      bb_for_def2 = gimple_bb (def2);

      /* Check for proper alignment of the first field.  */
      tree_offset1 = bit_position (field1);
      tree_offset2 = bit_position (field2);
      tree_size2 = DECL_SIZE (field2);

      if (!tree_fits_uhwi_p (tree_offset1)
	  || !tree_fits_uhwi_p (tree_offset2)
	  || !tree_fits_uhwi_p (tree_size2))
	continue;

      offset1 = tree_to_uhwi (tree_offset1);
      offset2 = tree_to_uhwi (tree_offset2);
      size2 = tree_to_uhwi (tree_size2);
      align1 = DECL_ALIGN (field1) % param_align_bits;

      if (offset1 % BITS_PER_UNIT != 0)
	continue;

      /* For profitability, the two field references should fit within
	 a single cache line.  */
      if (align1 + offset2 - offset1 + size2 > param_align_bits)
	continue;

      /* The two expressions cannot be dependent upon vdefs defined
	 in bb1/bb2.  */
      if (local_mem_dependence (def1, bb_for_def1)
	  || local_mem_dependence (def2, bb_for_def2))
	continue;

      /* The conditions are satisfied; hoist the loads from bb1 and bb2 into
	 bb0.  We hoist the first one first so that a cache miss is handled
	 efficiently regardless of hardware cache-fill policy.  */
      gsi2 = gsi_for_stmt (def1);
      gsi_move_to_bb_end (&gsi2, bb0);
      gsi2 = gsi_for_stmt (def2);
      gsi_move_to_bb_end (&gsi2, bb0);
      statistics_counter_event (cfun, "hoisted loads", 1);

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file,
		   "\nHoisting adjacent loads from %d and %d into %d: \n",
		   bb_for_def1->index, bb_for_def2->index, bb0->index);
	  print_gimple_stmt (dump_file, def1, 0, TDF_VOPS|TDF_MEMSYMS);
	  print_gimple_stmt (dump_file, def2, 0, TDF_VOPS|TDF_MEMSYMS);
	}
    }
}
/* Determine whether we should attempt to hoist adjacent loads out of
   diamond patterns in pass_phiopt.  Always hoist loads if
   -fhoist-adjacent-loads is specified and the target machine has
   both a conditional move instruction and a defined cache line size.  */

static bool
gate_hoist_loads (void)
{
  return (flag_hoist_adjacent_loads == 1
	  && param_l1_cache_line_size
	  && HAVE_conditional_move);
}
/* This pass tries to replace an if-then-else block with an
   assignment.  We have different kinds of transformations.
   Some of these transformations are also performed by the ifcvt
   RTL optimizer.

   PHI-OPT using Match-and-simplify infrastructure
   -----------------------------------------------

   The PHI-OPT pass will try to use match-and-simplify infrastructure
   (gimple_simplify) to do transformations.  This is implemented in
   match_simplify_replacement.

   The way it works is it replaces:
     bb0:
      if (cond) goto bb2; else goto bb1;
     bb1:
     bb2:
      x = PHI <a (bb1), b (bb0), ...>;

   with a statement if it gets simplified from `cond ? b : a`.
     bb0:
      x1 = cond ? b : a;
     bb2:
      x = PHI <a (bb1), x1 (bb0), ...>;
   Bb1 might be removed as it becomes unreachable when doing the replacement.
   Though bb1 does not have to be considered a forwarding basic block from bb0.

   Will try to see if `(!cond) ? a : b` gets simplified (iff !cond simplifies);
   this is done not to have an explosion of patterns in match.pd.
   Note bb1 does not need to be completely empty, it can contain
   one statement which is known not to trap.

   It also can handle the case where we have two forwarding bbs (diamond):
     bb0:
      if (cond) goto bb2; else goto bb1;
     bb1: goto bb3;
     bb2: goto bb3;
     bb3:
      x = PHI <a (bb1), b (bb2), ...>;
   And that is replaced with a statement if it is simplified
   from `cond ? b : a`.
   Again bb1 and bb2 do not have to be completely empty but
   each can contain one statement which is known not to trap.
   But in this case bb1/bb2 can only be forwarding basic blocks.

   This fully replaces the old "Conditional Replacement" and
   "ABS Replacement" transformations as they are now
   implemented in match.pd.
   Some parts of the "MIN/MAX Replacement" are re-implemented in match.pd.
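   As a small source-level illustration (a sketch, not from the GCC
   sources or testsuite):

     x = cond ? 0 : 1;

   is handed to gimple_simplify, which can fold it to use the inverted
   condition directly, e.g. `x = (int) !cond`, so the PHI and the middle
   basic block disappear.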
   Value Replacement
   -----------------

   This transformation, implemented in value_replacement, replaces

     bb0:
       if (a != b) goto bb2; else goto bb1;
     bb1:
     bb2:
       x = PHI <a (bb1), b (bb0), ...>;

   with

     bb0:
     bb2:
       x = PHI <b (bb0), ...>;

   This opportunity can sometimes occur as a result of other
   optimizations.

   Another case caught by value replacement looks like this:

     bb0:
       t1 = a == CONST;
       t2 = b > c;
       t3 = t1 & t2;
       if (t3 != 0) goto bb1; else goto bb2;
     bb1:
     bb2:
       x = PHI (CONST, a)

   Gets replaced with:
     bb0:
     bb2:
       t1 = a == CONST;
       t2 = b > c;
       t3 = t1 & t2;
       x = a;
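   As a source-level illustration (a sketch, not from the GCC sources):

     int f (int a, int b)
     {
       return a != b ? b : a;
     }

   On the path where a == b holds the two values are interchangeable, so
   both PHI arguments are really b and the body simplifies to `return b;`.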
   MIN/MAX Replacement
   -------------------

   This transformation, minmax_replacement replaces

     bb0:
       if (a <= b) goto bb2; else goto bb1;
     bb1:
     bb2:
       x = PHI <b (bb1), a (bb0), ...>;

   with

     bb0:
       x' = MIN_EXPR (a, b)
     bb2:
       x = PHI <x' (bb0), ...>;

   A similar transformation is done for MAX_EXPR.
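   As a source-level illustration (a sketch, not from the GCC sources):

     int f (int a, int b)
     {
       return a <= b ? a : b;
     }

   becomes the branch-free `return MIN (a, b);`, i.e. a single MIN_EXPR
   with the conditional and the PHI removed.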
   This pass also performs a fifth transformation of a slightly different
   flavor.

   Factor operations in COND_EXPR
   ------------------------------

   This transformation factors the unary operations out of COND_EXPR with
   factor_out_conditional_operation.

   For example:
     if (a <= CST) goto <bb 3>; else goto <bb 4>;
     <bb 3>:
     tmp = (int) a;
     <bb 4>:
     tmp = PHI <tmp, CST>

   Into:
     if (a <= CST) goto <bb 3>; else goto <bb 4>;
     <bb 3>:
     <bb 4>:
     a' = PHI <a, CST>
     tmp = (int) a';
   Adjacent Load Hoisting
   ----------------------

   This transformation replaces

     bb0:
       if (...) goto bb2; else goto bb1;
     bb1:
       x1 = (<expr>).field1;
       goto bb3;
     bb2:
       x2 = (<expr>).field2;
     bb3:
       # x = PHI <x1, x2>;

   with

     bb0:
       x1 = (<expr>).field1;
       x2 = (<expr>).field2;
       if (...) goto bb2; else goto bb1;
     bb1:
       goto bb3;
     bb2:
     bb3:
       # x = PHI <x1, x2>;

   The purpose of this transformation is to enable generation of conditional
   move instructions such as Intel CMOVE or PowerPC ISEL.  Because one of
   the loads is speculative, the transformation is restricted to very
   specific cases to avoid introducing a page fault.  We are looking for
   the common pattern:

     if (...)
       x = y->left;
     else
       x = y->right;

   where left and right are typically adjacent pointers in a tree structure.  */
namespace {

const pass_data pass_data_phiopt =
{
  GIMPLE_PASS, /* type */
  "phiopt", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_PHIOPT, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_phiopt : public gimple_opt_pass
{
public:
  pass_phiopt (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_phiopt, ctxt), early_p (false)
  {}

  /* opt_pass methods: */
  opt_pass * clone () final override { return new pass_phiopt (m_ctxt); }
  void set_pass_param (unsigned n, bool param) final override
    {
      gcc_assert (n == 0);
      early_p = param;
    }
  bool gate (function *) final override { return flag_ssa_phiopt; }
  unsigned int execute (function *) final override;

private:
  bool early_p;
}; // class pass_phiopt

} // anon namespace
gimple_opt_pass *
make_pass_phiopt (gcc::context *ctxt)
{
  return new pass_phiopt (ctxt);
}
unsigned int
pass_phiopt::execute (function *)
{
  bool do_hoist_loads = !early_p ? gate_hoist_loads () : false;
  basic_block bb;
  basic_block *bb_order;
  unsigned n, i;
  bool cfgchanged = false;

  calculate_dominance_info (CDI_DOMINATORS);
  mark_ssa_maybe_undefs ();

  /* Search every basic block for COND_EXPR we may be able to optimize.

     We walk the blocks in order that guarantees that a block with
     a single predecessor is processed before the predecessor.
     This ensures that we collapse inner ifs before visiting the
     outer ones, and also that we do not try to visit a removed
     block.  */
  bb_order = single_pred_before_succ_order ();
  n = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS;

  for (i = 0; i < n; i++)
    {
      gphi *phi;
      basic_block bb1, bb2;
      edge e1, e2;
      tree arg0, arg1;
      bool diamond_p = false;

      bb = bb_order[i];

      /* Check to see if the last statement is a GIMPLE_COND.  */
      gcond *cond_stmt = safe_dyn_cast <gcond *> (*gsi_last_bb (bb));
      if (!cond_stmt)
	continue;

      e1 = EDGE_SUCC (bb, 0);
      bb1 = e1->dest;
      e2 = EDGE_SUCC (bb, 1);
      bb2 = e2->dest;

      /* We cannot do the optimization on abnormal edges.  */
      if ((e1->flags & EDGE_ABNORMAL) != 0
	  || (e2->flags & EDGE_ABNORMAL) != 0)
	continue;

      /* Nothing to do if either bb1 or bb2 has no successors.  */
      if (EDGE_COUNT (bb1->succs) == 0
	  || EDGE_COUNT (bb2->succs) == 0)
	continue;

      /* Find the bb which is the fall through to the other.  */
      if (EDGE_SUCC (bb1, 0)->dest == bb2)
	;
      else if (EDGE_SUCC (bb2, 0)->dest == bb1)
	{
	  std::swap (bb1, bb2);
	  std::swap (e1, e2);
	}
      else if (EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest
	       && single_succ_p (bb2))
	{
	  diamond_p = true;
	  e2 = EDGE_SUCC (bb2, 0);
	  /* Make sure bb2 is just a fall through.  */
	  if ((e2->flags & EDGE_FALLTHRU) == 0)
	    continue;
	}
      else
	continue;

      e1 = EDGE_SUCC (bb1, 0);

      /* Make sure that bb1 is just a fall through.  */
      if (!single_succ_p (bb1)
	  || (e1->flags & EDGE_FALLTHRU) == 0)
	continue;

      if (diamond_p)
	{
	  basic_block bb3 = e1->dest;

	  if (!single_pred_p (bb1)
	      || !single_pred_p (bb2))
	    continue;

	  if (do_hoist_loads
	      && !FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (cond_stmt)))
	      && EDGE_COUNT (bb->succs) == 2
	      && EDGE_COUNT (bb3->preds) == 2
	      /* If one edge or the other is dominant, a conditional move
		 is likely to perform worse than the well-predicted branch.  */
	      && !predictable_edge_p (EDGE_SUCC (bb, 0))
	      && !predictable_edge_p (EDGE_SUCC (bb, 1)))
	    hoist_adjacent_loads (bb, bb1, bb2, bb3);
	}

      gimple_stmt_iterator gsi;
      bool candorest = true;

      /* Check that we're looking for nested phis.  */
      basic_block merge = diamond_p ? EDGE_SUCC (bb2, 0)->dest : bb2;
      gimple_seq phis = phi_nodes (merge);

      /* Value replacement can work with more than one PHI
	 so try that first.  */
      if (!early_p && !diamond_p)
	for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
	  {
	    phi = as_a <gphi *> (gsi_stmt (gsi));
	    arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
	    arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
	    if (value_replacement (bb, bb1, e1, e2, phi, arg0, arg1) == 2)
	      {
		candorest = false;
		cfgchanged = true;
		break;
	      }
	  }

      if (!candorest)
	continue;

      phi = single_non_singleton_phi_for_edges (phis, e1, e2);
      if (!phi)
	continue;

      arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
      arg1 = gimple_phi_arg_def (phi, e2->dest_idx);

      /* Something is wrong if we cannot find the arguments in the PHI
	 node.  */
      gcc_assert (arg0 != NULL_TREE && arg1 != NULL_TREE);

      if (single_pred_p (bb1)
	  && EDGE_COUNT (merge->preds) == 2)
	{
	  gphi *newphi = phi;
	  while (newphi)
	    {
	      phi = newphi;
	      /* factor_out_conditional_operation may create a new PHI in
		 BB2 and eliminate an existing PHI in BB2.  Recompute values
		 that may be affected by that change.  */
	      arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
	      arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
	      gcc_assert (arg0 != NULL_TREE && arg1 != NULL_TREE);
	      newphi = factor_out_conditional_operation (e1, e2, phi,
							 arg0, arg1);
	    }
	}

      /* Do the replacement of conditional if it can be done.  */
      if (match_simplify_replacement (bb, bb1, bb2, e1, e2, phi,
				      arg0, arg1, early_p, diamond_p))
	cfgchanged = true;
      else if (!early_p
	       && !diamond_p
	       && single_pred_p (bb1)
	       && cond_removal_in_builtin_zero_pattern (bb, bb1, e1, e2,
							phi, arg0, arg1))
	cfgchanged = true;
      else if (minmax_replacement (bb, bb1, bb2, e1, e2, phi, arg0, arg1,
				   diamond_p))
	cfgchanged = true;
      else if (single_pred_p (bb1)
	       && !diamond_p
	       && spaceship_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
	cfgchanged = true;
    }

  free (bb_order);

  if (cfgchanged)
    return TODO_cleanup_cfg;
  return 0;
}
/* This pass tries to transform conditional stores into unconditional
   ones, enabling further simplifications with the simpler then and else
   blocks.  In particular it replaces this:

     bb0:
       if (cond) goto bb2; else goto bb1;
     bb1:
       *p = RHS;
     bb2:

   with

     bb0:
       if (cond) goto bb1; else goto bb2;
     bb1:
       condtmp' = *p;
     bb2:
       condtmp = PHI <RHS, condtmp'>
       *p = condtmp;

   This transformation can only be done under several constraints,
   documented below.  It also replaces:

     bb0:
       if (cond) goto bb2; else goto bb1;
     bb1:
       *p = RHS1;
       goto bb3;
     bb2:
       *p = RHS2;
     bb3:

   with

     bb0:
       if (cond) goto bb3; else goto bb1;
     bb1:
     bb3:
       condtmp = PHI <RHS1, RHS2>
       *p = condtmp;  */
namespace {

const pass_data pass_data_cselim =
{
  GIMPLE_PASS, /* type */
  "cselim", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_PHIOPT, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_cselim : public gimple_opt_pass
{
public:
  pass_cselim (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_cselim, ctxt)
  {}

  /* opt_pass methods: */
  bool gate (function *) final override { return flag_tree_cselim; }
  unsigned int execute (function *) final override;
}; // class pass_cselim

} // anon namespace
gimple_opt_pass *
make_pass_cselim (gcc::context *ctxt)
{
  return new pass_cselim (ctxt);
}
*)
4394 basic_block
*bb_order
;
4396 bool cfgchanged
= false;
4397 hash_set
<tree
> *nontrap
= 0;
4400 /* ??? We are not interested in loop related info, but the following
4401 will create it, ICEing as we didn't init loops with pre-headers.
4402 An interfacing issue of find_data_references_in_bb. */
4403 loop_optimizer_init (LOOPS_NORMAL
);
4406 calculate_dominance_info (CDI_DOMINATORS
);
4408 /* Calculate the set of non-trapping memory accesses. */
4409 nontrap
= get_non_trapping ();
4411 /* Search every basic block for COND_EXPR we may be able to optimize.
4413 We walk the blocks in order that guarantees that a block with
4414 a single predecessor is processed before the predecessor.
4415 This ensures that we collapse inner ifs before visiting the
4416 outer ones, and also that we do not try to visit a removed
4418 bb_order
= single_pred_before_succ_order ();
4419 n
= n_basic_blocks_for_fn (cfun
) - NUM_FIXED_BLOCKS
;
4421 for (i
= 0; i
< n
; i
++)
4423 basic_block bb1
, bb2
;
4425 bool diamond_p
= false;
4429 /* Check to see if the last statement is a GIMPLE_COND. */
4430 gcond
*cond_stmt
= safe_dyn_cast
<gcond
*> (*gsi_last_bb (bb
));
4434 e1
= EDGE_SUCC (bb
, 0);
4436 e2
= EDGE_SUCC (bb
, 1);
4439 /* We cannot do the optimization on abnormal edges. */
4440 if ((e1
->flags
& EDGE_ABNORMAL
) != 0
4441 || (e2
->flags
& EDGE_ABNORMAL
) != 0)
4444 /* If either bb1's succ or bb2 or bb2's succ is non NULL. */
4445 if (EDGE_COUNT (bb1
->succs
) == 0
4446 || EDGE_COUNT (bb2
->succs
) == 0)
4449 /* Find the bb which is the fall through to the other. */
4450 if (EDGE_SUCC (bb1
, 0)->dest
== bb2
)
4452 else if (EDGE_SUCC (bb2
, 0)->dest
== bb1
)
4454 std::swap (bb1
, bb2
);
4457 else if (EDGE_SUCC (bb1
, 0)->dest
== EDGE_SUCC (bb2
, 0)->dest
4458 && single_succ_p (bb2
))
4461 e2
= EDGE_SUCC (bb2
, 0);
4462 /* Make sure bb2 is just a fall through. */
4463 if ((e2
->flags
& EDGE_FALLTHRU
) == 0)
4469 e1
= EDGE_SUCC (bb1
, 0);
4471 /* Make sure that bb1 is just a fall through. */
4472 if (!single_succ_p (bb1
)
4473 || (e1
->flags
& EDGE_FALLTHRU
) == 0)
4478 basic_block bb3
= e1
->dest
;
4480 /* Only handle sinking of store from 2 bbs only,
4481 The middle bbs don't need to come from the
4482 if always since we are sinking rather than
4484 if (EDGE_COUNT (bb3
->preds
) != 2)
4486 if (cond_if_else_store_replacement (bb1
, bb2
, bb3
))
4491 /* Also make sure that bb1 only have one predecessor and that it
4493 if (!single_pred_p (bb1
)
4494 || single_pred (bb1
) != bb
)
4497 /* bb1 is the middle block, bb2 the join block, bb the split block,
4498 e1 the fallthrough edge from bb1 to bb2. We can't do the
4499 optimization if the join block has more than two predecessors. */
4500 if (EDGE_COUNT (bb2
->preds
) > 2)
4502 if (cond_store_replacement (bb1
, bb2
, e1
, e2
, nontrap
))
4509 /* If the CFG has changed, we should cleanup the CFG. */
4512 /* In cond-store replacement we have added some loads on edges
4513 and new VOPS (as we moved the store, and created a load). */
4514 gsi_commit_edge_inserts ();
4515 todo
= TODO_cleanup_cfg
| TODO_update_ssa_only_virtuals
;
4518 loop_optimizer_finalize ();