gcc/tree-ssa-phiopt.cc

/* Optimization of PHI nodes by converting them into straightline code.
   Copyright (C) 2004-2023 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "insn-codes.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "tree-pass.h"
#include "ssa.h"
#include "tree-ssa.h"
#include "optabs-tree.h"
#include "insn-config.h"
#include "gimple-pretty-print.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "cfganal.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "tree-cfg.h"
#include "tree-dfa.h"
#include "domwalk.h"
#include "cfgloop.h"
#include "tree-data-ref.h"
#include "tree-scalar-evolution.h"
#include "tree-inline.h"
#include "case-cfn-macros.h"
#include "tree-eh.h"
#include "gimple-fold.h"
#include "internal-fn.h"
#include "gimple-range.h"
#include "gimple-match.h"
#include "dbgcnt.h"
#include "tree-ssa-propagate.h"
#include "tree-ssa-dce.h"

/* Return the singleton PHI in the SEQ of PHIs for edges E0 and E1.  */

static gphi *
single_non_singleton_phi_for_edges (gimple_seq seq, edge e0, edge e1)
{
  gimple_stmt_iterator i;
  gphi *phi = NULL;
  if (gimple_seq_singleton_p (seq))
    {
      phi = as_a <gphi *> (gsi_stmt (gsi_start (seq)));
      /* Never return virtual phis.  */
      if (virtual_operand_p (gimple_phi_result (phi)))
	return NULL;
      return phi;
    }
  for (i = gsi_start (seq); !gsi_end_p (i); gsi_next (&i))
    {
      gphi *p = as_a <gphi *> (gsi_stmt (i));
      /* If the PHI arguments are equal then we can skip this PHI.  */
      if (operand_equal_for_phi_arg_p (gimple_phi_arg_def (p, e0->dest_idx),
				       gimple_phi_arg_def (p, e1->dest_idx)))
	continue;

      /* Punt on virtual phis with different arguments from the edges.  */
      if (virtual_operand_p (gimple_phi_result (p)))
	return NULL;

      /* If we already found a PHI whose two edge arguments differ, then
	 there is no singleton for these PHIs; return NULL.  */
      if (phi)
	return NULL;

      phi = p;
    }
  return phi;
}
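
/* For exposition (added commentary; SSA names and block numbers are
   invented): given the join block PHIs

     # x_3 = PHI <a_1(3), a_1(4)>
     # y_4 = PHI <b_5(3), c_6(4)>

   the first PHI has equal arguments on both edges and is skipped, so
   y_4's PHI is the single "non-singleton" PHI that gets returned.  If a
   second PHI with differing arguments existed, NULL would be returned
   instead.  */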

/* Replace PHI node element whose edge is E in block BB with variable NEW.
   Remove the edge from COND_BLOCK which does not lead to BB (COND_BLOCK
   is known to have two edges, one of which must reach BB).  */

static void
replace_phi_edge_with_variable (basic_block cond_block,
				edge e, gphi *phi, tree new_tree,
				bitmap dce_ssa_names = nullptr)
{
  basic_block bb = gimple_bb (phi);
  gimple_stmt_iterator gsi;
  tree phi_result = PHI_RESULT (phi);
  bool deleteboth = false;

  /* Duplicate range info if it is the only thing setting the target PHI.
     This is needed as later on, the new_tree will be replacing
     the assignment of the PHI.
     For an example:
     bb1:
     _4 = min<a_1, 255>
     goto bb2

     # RANGE [-INF, 255]
     a_3 = PHI<_4(1)>
     bb3:

     use(a_3)
     And _4 gets propagated into the use of a_3, losing the range info.
     This can't be done for more than 2 incoming edges as the propagation
     won't happen.
     The new_tree needs to be defined in the same basic block as the conditional.  */
  if (TREE_CODE (new_tree) == SSA_NAME
      && EDGE_COUNT (gimple_bb (phi)->preds) == 2
      && INTEGRAL_TYPE_P (TREE_TYPE (phi_result))
      && !SSA_NAME_RANGE_INFO (new_tree)
      && SSA_NAME_RANGE_INFO (phi_result)
      && gimple_bb (SSA_NAME_DEF_STMT (new_tree)) == cond_block
      && dbg_cnt (phiopt_edge_range))
    duplicate_ssa_name_range_info (new_tree, phi_result);

  /* Change the PHI argument to new.  */
  SET_USE (PHI_ARG_DEF_PTR (phi, e->dest_idx), new_tree);

  /* Remove the empty basic block.  */
  edge edge_to_remove = NULL, keep_edge = NULL;
  if (EDGE_SUCC (cond_block, 0)->dest == bb)
    {
      edge_to_remove = EDGE_SUCC (cond_block, 1);
      keep_edge = EDGE_SUCC (cond_block, 0);
    }
  else if (EDGE_SUCC (cond_block, 1)->dest == bb)
    {
      edge_to_remove = EDGE_SUCC (cond_block, 0);
      keep_edge = EDGE_SUCC (cond_block, 1);
    }
  else if ((keep_edge = find_edge (cond_block, e->src)))
    {
      basic_block bb1 = EDGE_SUCC (cond_block, 0)->dest;
      basic_block bb2 = EDGE_SUCC (cond_block, 1)->dest;
      if (single_pred_p (bb1) && single_pred_p (bb2)
	  && single_succ_p (bb1) && single_succ_p (bb2)
	  && empty_block_p (bb1) && empty_block_p (bb2))
	deleteboth = true;
    }
  else
    gcc_unreachable ();

  if (edge_to_remove && EDGE_COUNT (edge_to_remove->dest->preds) == 1)
    {
      e->flags |= EDGE_FALLTHRU;
      e->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
      e->probability = profile_probability::always ();
      delete_basic_block (edge_to_remove->dest);

      /* Eliminate the COND_EXPR at the end of COND_BLOCK.  */
      gsi = gsi_last_bb (cond_block);
      gsi_remove (&gsi, true);
    }
  else if (deleteboth)
    {
      basic_block bb1 = EDGE_SUCC (cond_block, 0)->dest;
      basic_block bb2 = EDGE_SUCC (cond_block, 1)->dest;

      edge newedge = redirect_edge_and_branch (keep_edge, bb);

      /* The new edge should be the same.  */
      gcc_assert (newedge == keep_edge);

      keep_edge->flags |= EDGE_FALLTHRU;
      keep_edge->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
      keep_edge->probability = profile_probability::always ();

      /* Copy the edge's phi entry from the old one.  */
      copy_phi_arg_into_existing_phi (e, keep_edge);

      /* Delete the old 2 empty basic blocks.  */
      delete_basic_block (bb1);
      delete_basic_block (bb2);

      /* Eliminate the COND_EXPR at the end of COND_BLOCK.  */
      gsi = gsi_last_bb (cond_block);
      gsi_remove (&gsi, true);
    }
  else
    {
      /* If there are other edges into the middle block make
	 CFG cleanup deal with the edge removal to avoid
	 updating dominators here in a non-trivial way.  */
      gcond *cond = as_a <gcond *> (*gsi_last_bb (cond_block));
      if (keep_edge->flags & EDGE_FALSE_VALUE)
	gimple_cond_make_false (cond);
      else if (keep_edge->flags & EDGE_TRUE_VALUE)
	gimple_cond_make_true (cond);
    }

  if (dce_ssa_names)
    simple_dce_from_worklist (dce_ssa_names);

  statistics_counter_event (cfun, "Replace PHI with variable", 1);

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file,
	     "COND_EXPR in block %d and PHI in block %d converted to straightline code.\n",
	     cond_block->index,
	     bb->index);
}
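
/* For exposition (added commentary; names and block numbers are
   invented), the simplest shape this function operates on:

     <bb 2>:
       if (a_1 > 0)
	 goto <bb 3>;
       else
	 goto <bb 4>;
     <bb 3>:			<- empty middle block
     <bb 4>:
       # x_2 = PHI <a_1(2), 0(3)>

   The PHI argument on edge E is replaced with NEW_TREE, the GIMPLE_COND
   in COND_BLOCK is removed, the now-unused arm is deleted, and the
   surviving edge is turned into a fallthru; later CFG cleanup collapses
   the degenerate PHI.  */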

/* PR66726: Factor operations out of COND_EXPR.  If the arguments of the PHI
   stmt are conversions, factor the conversion out and apply it to the
   result of the PHI stmt instead.  COND_STMT is the controlling predicate.
   Return the newly-created PHI, if any.  */

static gphi *
factor_out_conditional_operation (edge e0, edge e1, gphi *phi,
				  tree arg0, tree arg1, gimple *cond_stmt)
{
  gimple *arg0_def_stmt = NULL, *arg1_def_stmt = NULL, *new_stmt;
  tree new_arg0 = NULL_TREE, new_arg1 = NULL_TREE;
  tree temp, result;
  gphi *newphi;
  gimple_stmt_iterator gsi, gsi_for_def;
  location_t locus = gimple_location (phi);
  enum tree_code op_code;

  /* Handle only PHI statements with two arguments.  TODO: If all
     other arguments to PHI are INTEGER_CST or if their defining
     statements have the same unary operation, we can handle more
     than two arguments too.  */
  if (gimple_phi_num_args (phi) != 2)
    return NULL;

  /* First canonicalize to simplify tests.  */
  if (TREE_CODE (arg0) != SSA_NAME)
    {
      std::swap (arg0, arg1);
      std::swap (e0, e1);
    }

  if (TREE_CODE (arg0) != SSA_NAME
      || (TREE_CODE (arg1) != SSA_NAME
	  && TREE_CODE (arg1) != INTEGER_CST))
    return NULL;

  /* Check if arg0 is an SSA_NAME and the stmt which defines arg0 is
     a unary operation.  */
  arg0_def_stmt = SSA_NAME_DEF_STMT (arg0);
  if (!is_gimple_assign (arg0_def_stmt)
      || (gimple_assign_rhs_class (arg0_def_stmt) != GIMPLE_UNARY_RHS
	  && gimple_assign_rhs_code (arg0_def_stmt) != VIEW_CONVERT_EXPR))
    return NULL;

  /* Use the RHS as new_arg0.  */
  op_code = gimple_assign_rhs_code (arg0_def_stmt);
  new_arg0 = gimple_assign_rhs1 (arg0_def_stmt);
  if (op_code == VIEW_CONVERT_EXPR)
    {
      new_arg0 = TREE_OPERAND (new_arg0, 0);
      if (!is_gimple_reg_type (TREE_TYPE (new_arg0)))
	return NULL;
    }
  if (TREE_CODE (new_arg0) == SSA_NAME
      && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_arg0))
    return NULL;

  if (TREE_CODE (arg1) == SSA_NAME)
    {
      /* Check if arg1 is an SSA_NAME and the stmt which defines arg1
	 is a unary operation.  */
      arg1_def_stmt = SSA_NAME_DEF_STMT (arg1);
      if (!is_gimple_assign (arg1_def_stmt)
	  || gimple_assign_rhs_code (arg1_def_stmt) != op_code)
	return NULL;

      /* Either arg1_def_stmt or arg0_def_stmt should be conditional.  */
      if (dominated_by_p (CDI_DOMINATORS, gimple_bb (phi), gimple_bb (arg0_def_stmt))
	  && dominated_by_p (CDI_DOMINATORS,
			     gimple_bb (phi), gimple_bb (arg1_def_stmt)))
	return NULL;

      /* Use the RHS as new_arg1.  */
      new_arg1 = gimple_assign_rhs1 (arg1_def_stmt);
      if (op_code == VIEW_CONVERT_EXPR)
	new_arg1 = TREE_OPERAND (new_arg1, 0);
      if (TREE_CODE (new_arg1) == SSA_NAME
	  && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_arg1))
	return NULL;
    }
  else
    {
      /* TODO: handle more than just casts here.  */
      if (!gimple_assign_cast_p (arg0_def_stmt))
	return NULL;

      /* arg0_def_stmt should be conditional.  */
      if (dominated_by_p (CDI_DOMINATORS, gimple_bb (phi), gimple_bb (arg0_def_stmt)))
	return NULL;
      /* If arg1 is an INTEGER_CST, fold it to new type.  */
      if (INTEGRAL_TYPE_P (TREE_TYPE (new_arg0))
	  && int_fits_type_p (arg1, TREE_TYPE (new_arg0)))
	{
	  if (gimple_assign_cast_p (arg0_def_stmt))
	    {
	      /* For the INTEGER_CST case, we are just moving the
		 conversion from one place to another, which can often
		 hurt as the conversion moves further away from the
		 statement that computes the value.  So, perform this
		 only if new_arg0 is an operand of COND_STMT, or
		 if arg0_def_stmt is the only non-debug stmt in
		 its basic block, because then it is possible this
		 could enable further optimizations (minmax replacement
		 etc.).  See PR71016.  */
	      if (new_arg0 != gimple_cond_lhs (cond_stmt)
		  && new_arg0 != gimple_cond_rhs (cond_stmt)
		  && gimple_bb (arg0_def_stmt) == e0->src)
		{
		  gsi = gsi_for_stmt (arg0_def_stmt);
		  gsi_prev_nondebug (&gsi);
		  if (!gsi_end_p (gsi))
		    {
		      if (gassign *assign
			    = dyn_cast <gassign *> (gsi_stmt (gsi)))
			{
			  tree lhs = gimple_assign_lhs (assign);
			  enum tree_code ass_code
			    = gimple_assign_rhs_code (assign);
			  if (ass_code != MAX_EXPR && ass_code != MIN_EXPR)
			    return NULL;
			  if (lhs != gimple_assign_rhs1 (arg0_def_stmt))
			    return NULL;
			  gsi_prev_nondebug (&gsi);
			  if (!gsi_end_p (gsi))
			    return NULL;
			}
		      else
			return NULL;
		    }
		  gsi = gsi_for_stmt (arg0_def_stmt);
		  gsi_next_nondebug (&gsi);
		  if (!gsi_end_p (gsi))
		    return NULL;
		}
	      new_arg1 = fold_convert (TREE_TYPE (new_arg0), arg1);
	    }
	  else
	    return NULL;
	}
      else
	return NULL;
    }

  /* If arg0/arg1 have > 1 use, then this transformation actually increases
     the number of expressions evaluated at runtime.  */
  if (!has_single_use (arg0)
      || (arg1_def_stmt && !has_single_use (arg1)))
    return NULL;

  /* If the types of new_arg0 and new_arg1 are different, bail out.  */
  if (!types_compatible_p (TREE_TYPE (new_arg0), TREE_TYPE (new_arg1)))
    return NULL;

  /* Create a new PHI stmt.  */
  result = PHI_RESULT (phi);
  temp = make_ssa_name (TREE_TYPE (new_arg0), NULL);
  newphi = create_phi_node (temp, gimple_bb (phi));

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "PHI ");
      print_generic_expr (dump_file, gimple_phi_result (phi));
      fprintf (dump_file,
	       " changed to factor operation out from COND_EXPR.\n");
      fprintf (dump_file, "New stmt with OPERATION that defines ");
      print_generic_expr (dump_file, result);
      fprintf (dump_file, ".\n");
    }

  /* Remove the old operation(s) that have a single use.  */
  gsi_for_def = gsi_for_stmt (arg0_def_stmt);
  gsi_remove (&gsi_for_def, true);
  release_defs (arg0_def_stmt);

  if (arg1_def_stmt)
    {
      gsi_for_def = gsi_for_stmt (arg1_def_stmt);
      gsi_remove (&gsi_for_def, true);
      release_defs (arg1_def_stmt);
    }

  add_phi_arg (newphi, new_arg0, e0, locus);
  add_phi_arg (newphi, new_arg1, e1, locus);

  /* Create the operation stmt and insert it.  */
  if (op_code == VIEW_CONVERT_EXPR)
    {
      temp = fold_build1 (VIEW_CONVERT_EXPR, TREE_TYPE (result), temp);
      new_stmt = gimple_build_assign (result, temp);
    }
  else
    new_stmt = gimple_build_assign (result, op_code, temp);
  gsi = gsi_after_labels (gimple_bb (phi));
  gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);

  /* Remove the original PHI stmt.  */
  gsi = gsi_for_stmt (phi);
  gsi_remove (&gsi, true);

  statistics_counter_event (cfun, "factored out operation", 1);

  return newphi;
}
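
/* For exposition (added commentary; the source-level names are
   invented), the factoring above turns

     short f (int a, int b, int c)
     {
       return a > 0 ? (short) b : (short) c;   /* one cast per arm */
     }

   i.e. a join of two converted values

     # t_5 = PHI <(short) b, (short) c>

   into a single conversion of the joined value

     # t_5 = PHI <b, c>
     result = (short) t_5;

   so only one conversion statement survives past the join point.  */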

/* Return TRUE if the SEQ/OP pair should be allowed during early phiopt.
   Currently this is to allow MIN/MAX and ABS/NEGATE and constants.  */
static bool
phiopt_early_allow (gimple_seq &seq, gimple_match_op &op)
{
  /* Don't allow functions.  */
  if (!op.code.is_tree_code ())
    return false;
  tree_code code = (tree_code) op.code;

  /* For a non-empty sequence, only allow one statement, except for
     MIN/MAX, where at most two statements are allowed, each of them
     a MIN/MAX.  */
  if (!gimple_seq_empty_p (seq))
    {
      if (code == MIN_EXPR || code == MAX_EXPR)
	{
	  if (!gimple_seq_singleton_p (seq))
	    return false;

	  gimple *stmt = gimple_seq_first_stmt (seq);
	  /* Only allow assignments.  */
	  if (!is_gimple_assign (stmt))
	    return false;
	  code = gimple_assign_rhs_code (stmt);
	  return code == MIN_EXPR || code == MAX_EXPR;
	}
      /* Check to make sure op was already an SSA_NAME.  */
      if (code != SSA_NAME)
	return false;
      if (!gimple_seq_singleton_p (seq))
	return false;
      gimple *stmt = gimple_seq_first_stmt (seq);
      /* Only allow assignments.  */
      if (!is_gimple_assign (stmt))
	return false;
      if (gimple_assign_lhs (stmt) != op.ops[0])
	return false;
      code = gimple_assign_rhs_code (stmt);
    }

  switch (code)
    {
    case MIN_EXPR:
    case MAX_EXPR:
    case ABS_EXPR:
    case ABSU_EXPR:
    case NEGATE_EXPR:
    case SSA_NAME:
      return true;
    case INTEGER_CST:
    case REAL_CST:
    case VECTOR_CST:
    case FIXED_CST:
      return true;
    default:
      return false;
    }
}
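
/* For exposition (added note): early phiopt accepts, e.g.,

     x = a < 0 ? -a : a;   ->   x = ABS_EXPR <a>;

   because the simplified form is a single allowed code (ABS/NEG,
   MIN/MAX, a constant, or an existing SSA name, possibly with one extra
   MIN/MAX statement).  Anything that expands into other or longer
   statement sequences is deferred to the late phiopt pass.  */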

/* gimple_simplify_phiopt is like gimple_simplify but designed for PHIOPT.
   Return NULL if nothing can be simplified; otherwise return the simplified
   result, with any needed statements pushed to SEQ.  Also reject tree codes
   that are not allowed when EARLY_P is set.
   Takes the comparison from COMP_STMT and two args, ARG0 and ARG1, and tries
   to simplify CMP ? ARG0 : ARG1.
   Also try to simplify (!CMP) ? ARG1 : ARG0 if the non-inverse failed.  */
static tree
gimple_simplify_phiopt (bool early_p, tree type, gimple *comp_stmt,
			tree arg0, tree arg1,
			gimple_seq *seq)
{
  gimple_seq seq1 = NULL;
  enum tree_code comp_code = gimple_cond_code (comp_stmt);
  location_t loc = gimple_location (comp_stmt);
  tree cmp0 = gimple_cond_lhs (comp_stmt);
  tree cmp1 = gimple_cond_rhs (comp_stmt);
  /* To handle special cases like floating point comparison, it is easier and
     less error-prone to build a tree and gimplify it on the fly though it is
     less efficient.
     Don't use fold_build2 here as that might create (bool)a instead of just
     "a != 0".  */
  tree cond = build2_loc (loc, comp_code, boolean_type_node,
			  cmp0, cmp1);

  if (dump_file && (dump_flags & TDF_FOLDING))
    {
      fprintf (dump_file, "\nphiopt match-simplify trying:\n\t");
      print_generic_expr (dump_file, cond);
      fprintf (dump_file, " ? ");
      print_generic_expr (dump_file, arg0);
      fprintf (dump_file, " : ");
      print_generic_expr (dump_file, arg1);
      fprintf (dump_file, "\n");
    }

  gimple_match_op op (gimple_match_cond::UNCOND,
		      COND_EXPR, type, cond, arg0, arg1);

  if (op.resimplify (&seq1, follow_all_ssa_edges))
    {
      bool allowed = !early_p || phiopt_early_allow (seq1, op);
      tree result = maybe_push_res_to_seq (&op, &seq1);
      if (dump_file && (dump_flags & TDF_FOLDING))
	{
	  fprintf (dump_file, "\nphiopt match-simplify back:\n");
	  if (seq1)
	    print_gimple_seq (dump_file, seq1, 0, TDF_VOPS|TDF_MEMSYMS);
	  fprintf (dump_file, "result: ");
	  if (result)
	    print_generic_expr (dump_file, result);
	  else
	    fprintf (dump_file, " (none)");
	  fprintf (dump_file, "\n");
	  if (!allowed)
	    fprintf (dump_file, "rejected because early\n");
	}
      /* Early we want only to allow some generated tree codes.  */
      if (allowed && result)
	{
	  if (loc != UNKNOWN_LOCATION)
	    annotate_all_with_location (seq1, loc);
	  gimple_seq_add_seq_without_update (seq, seq1);
	  return result;
	}
    }
  gimple_seq_discard (seq1);
  seq1 = NULL;

  /* Try the inverted comparison, that is !COMP ? ARG1 : ARG0.  */
  comp_code = invert_tree_comparison (comp_code, HONOR_NANS (cmp0));

  if (comp_code == ERROR_MARK)
    return NULL;

  cond = build2_loc (loc,
		     comp_code, boolean_type_node,
		     cmp0, cmp1);

  if (dump_file && (dump_flags & TDF_FOLDING))
    {
      fprintf (dump_file, "\nphiopt match-simplify trying:\n\t");
      print_generic_expr (dump_file, cond);
      fprintf (dump_file, " ? ");
      print_generic_expr (dump_file, arg1);
      fprintf (dump_file, " : ");
      print_generic_expr (dump_file, arg0);
      fprintf (dump_file, "\n");
    }

  gimple_match_op op1 (gimple_match_cond::UNCOND,
		       COND_EXPR, type, cond, arg1, arg0);

  if (op1.resimplify (&seq1, follow_all_ssa_edges))
    {
      bool allowed = !early_p || phiopt_early_allow (seq1, op1);
      tree result = maybe_push_res_to_seq (&op1, &seq1);
      if (dump_file && (dump_flags & TDF_FOLDING))
	{
	  fprintf (dump_file, "\nphiopt match-simplify back:\n");
	  if (seq1)
	    print_gimple_seq (dump_file, seq1, 0, TDF_VOPS|TDF_MEMSYMS);
	  fprintf (dump_file, "result: ");
	  if (result)
	    print_generic_expr (dump_file, result);
	  else
	    fprintf (dump_file, " (none)");
	  fprintf (dump_file, "\n");
	  if (!allowed)
	    fprintf (dump_file, "rejected because early\n");
	}
      /* Early we want only to allow some generated tree codes.  */
      if (allowed && result)
	{
	  if (loc != UNKNOWN_LOCATION)
	    annotate_all_with_location (seq1, loc);
	  gimple_seq_add_seq_without_update (seq, seq1);
	  return result;
	}
    }
  gimple_seq_discard (seq1);

  return NULL;
}
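
/* For exposition (added commentary; SSA names are invented): given

     if (a_1 < b_2) ...
     # r_3 = PHI <a_1(then), b_2(else)>

   the first attempt simplifies a_1 < b_2 ? a_1 : b_2, which matches as
   MIN_EXPR <a_1, b_2> for integral types.  If a pattern only matches the
   swapped arm order, the inverted comparison a_1 >= b_2 ? b_2 : a_1 is
   tried instead; the ERROR_MARK check matters because unordered floating
   point comparisons cannot always be inverted when NaNs are honored.  */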

/* empty_bb_or_one_feeding_into_p returns true if BB is an empty basic block,
   or if it has exactly one cheap preparation statement that feeds into the
   PHI statement; in the latter case it sets STMT to that statement.  */
static bool
empty_bb_or_one_feeding_into_p (basic_block bb,
				gimple *phi,
				gimple *&stmt)
{
  stmt = nullptr;
  gimple *stmt_to_move = nullptr;
  tree lhs;

  if (empty_block_p (bb))
    return true;

  if (!single_pred_p (bb))
    return false;

  /* The middle bb cannot have phi nodes as we don't
     move those assignments yet.  */
  if (!gimple_seq_empty_p (phi_nodes (bb)))
    return false;

  gimple_stmt_iterator gsi;

  gsi = gsi_start_nondebug_after_labels_bb (bb);
  while (!gsi_end_p (gsi))
    {
      gimple *s = gsi_stmt (gsi);
      gsi_next_nondebug (&gsi);
      /* Skip over Predict and nop statements.  */
      if (gimple_code (s) == GIMPLE_PREDICT
	  || gimple_code (s) == GIMPLE_NOP)
	continue;
      /* If there is more than one statement, return false.  */
      if (stmt_to_move)
	return false;
      stmt_to_move = s;
    }

  /* The only statement here was a Predict or a nop statement
     so return true.  */
  if (!stmt_to_move)
    return true;

  if (gimple_vuse (stmt_to_move))
    return false;

  if (gimple_could_trap_p (stmt_to_move)
      || gimple_has_side_effects (stmt_to_move))
    return false;

  ssa_op_iter it;
  tree use;
  FOR_EACH_SSA_TREE_OPERAND (use, stmt_to_move, it, SSA_OP_USE)
    if (ssa_name_maybe_undef_p (use))
      return false;

  /* Allow assignments and some builtin/internal calls.
     As const calls don't match any of the above, yet they could
     still have some side-effects - they could contain
     gimple_could_trap_p statements, like floating point
     exceptions or integer division by zero.  See PR70586.
     FIXME: perhaps gimple_has_side_effects or gimple_could_trap_p
     should handle this.
     Allow some known builtin/internal calls that are known not to
     trap: logical functions (e.g. bswap and bit counting).  */
  if (!is_gimple_assign (stmt_to_move))
    {
      if (!is_gimple_call (stmt_to_move))
	return false;
      combined_fn cfn = gimple_call_combined_fn (stmt_to_move);
      switch (cfn)
	{
	default:
	  return false;
	case CFN_BUILT_IN_BSWAP16:
	case CFN_BUILT_IN_BSWAP32:
	case CFN_BUILT_IN_BSWAP64:
	case CFN_BUILT_IN_BSWAP128:
	CASE_CFN_FFS:
	CASE_CFN_PARITY:
	CASE_CFN_POPCOUNT:
	CASE_CFN_CLZ:
	CASE_CFN_CTZ:
	case CFN_BUILT_IN_CLRSB:
	case CFN_BUILT_IN_CLRSBL:
	case CFN_BUILT_IN_CLRSBLL:
	  lhs = gimple_call_lhs (stmt_to_move);
	  break;
	}
    }
  else
    lhs = gimple_assign_lhs (stmt_to_move);

  gimple *use_stmt;
  use_operand_p use_p;

  /* Allow only a statement which feeds into the other stmt.  */
  if (!lhs || TREE_CODE (lhs) != SSA_NAME
      || !single_imm_use (lhs, &use_p, &use_stmt)
      || use_stmt != phi)
    return false;

  stmt = stmt_to_move;
  return true;
}
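
/* For exposition (added commentary; names are invented), a middle block
   this predicate accepts:

     <bb 3>:
       _5 = __builtin_popcount (x_1);   <- single cheap stmt, no vuse,
					   cannot trap
     <bb 4>:
       # r_2 = PHI <_5(3), 0(2)>	<- _5's only use is the PHI

   A block with two real statements, a statement with a memory use, or a
   statement whose result is also used elsewhere would be rejected.  */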

/* Move STMT to before GSI and insert its defining
   name into INSERTED_EXPRS bitmap.  */
static void
move_stmt (gimple *stmt, gimple_stmt_iterator *gsi, auto_bitmap &inserted_exprs)
{
  if (!stmt)
    return;
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "statement un-sinked:\n");
      print_gimple_stmt (dump_file, stmt, 0,
			 TDF_VOPS|TDF_MEMSYMS);
    }

  tree name = gimple_get_lhs (stmt);
  // Mark the name to be renamed if there is one.
  bitmap_set_bit (inserted_exprs, SSA_NAME_VERSION (name));
  gimple_stmt_iterator gsi1 = gsi_for_stmt (stmt);
  gsi_move_before (&gsi1, gsi);
  reset_flow_sensitive_info (name);
}

/* RAII style class to temporarily remove flow sensitive info
   from ssa names defined by a gimple statement.  */
class auto_flow_sensitive
{
public:
  auto_flow_sensitive (gimple *s);
  ~auto_flow_sensitive ();
private:
  auto_vec<std::pair<tree, flow_sensitive_info_storage>, 2> stack;
};

/* Constructor for auto_flow_sensitive.  Saves
   off the flow sensitive information of the ssa names
   defined by gimple statement S and
   resets them to be non-flow based.  */

auto_flow_sensitive::auto_flow_sensitive (gimple *s)
{
  if (!s)
    return;
  ssa_op_iter it;
  tree def;
  FOR_EACH_SSA_TREE_OPERAND (def, s, it, SSA_OP_DEF)
    {
      flow_sensitive_info_storage storage;
      storage.save_and_clear (def);
      stack.safe_push (std::make_pair (def, storage));
    }
}

/* Destructor, restores the flow sensitive information
   for the SSA names that had been saved off.  */

auto_flow_sensitive::~auto_flow_sensitive ()
{
  for (auto p : stack)
    p.second.restore (p.first);
}

/* The function match_simplify_replacement does the main work of doing the
   replacement using match and simplify.  Return true if the replacement is
   done.  Otherwise return false.
   BB is the basic block where the replacement is going to be done.  ARG0
   is argument 0 from PHI.  Likewise for ARG1.  */

static bool
match_simplify_replacement (basic_block cond_bb, basic_block middle_bb,
			    basic_block middle_bb_alt,
			    edge e0, edge e1, gphi *phi,
			    tree arg0, tree arg1, bool early_p,
			    bool threeway_p)
{
  gimple *stmt;
  gimple_stmt_iterator gsi;
  edge true_edge, false_edge;
  gimple_seq seq = NULL;
  tree result;
  gimple *stmt_to_move = NULL;
  gimple *stmt_to_move_alt = NULL;
  tree arg_true, arg_false;

  /* Special case A ? B : B as this will always simplify to B.  */
  if (operand_equal_for_phi_arg_p (arg0, arg1))
    return false;

  /* If the basic block only has a cheap preparation statement,
     allow it and move it once the transformation is done.  */
  if (!empty_bb_or_one_feeding_into_p (middle_bb, phi, stmt_to_move))
    return false;

  if (threeway_p
      && middle_bb != middle_bb_alt
      && !empty_bb_or_one_feeding_into_p (middle_bb_alt, phi,
					  stmt_to_move_alt))
    return false;

  /* At this point we know we have a GIMPLE_COND with two successors.
     One successor is BB, the other successor is an empty block which
     falls through into BB.

     There is a single PHI node at the join point (BB).

     So, given the condition COND, and the two PHI arguments, match and simplify
     can happen on (COND) ? arg0 : arg1.  */

  stmt = last_nondebug_stmt (cond_bb);

  /* We need to know which is the true edge and which is the false
     edge so that we know when to invert the condition below.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);

  /* Forward the edges over the middle basic block.  */
  if (true_edge->dest == middle_bb)
    true_edge = EDGE_SUCC (true_edge->dest, 0);
  if (false_edge->dest == middle_bb)
    false_edge = EDGE_SUCC (false_edge->dest, 0);

  /* When THREEWAY_P then e1 will point to the edge of the final transition
     from middle-bb to end.  */
  if (true_edge == e0)
    {
      if (!threeway_p)
	gcc_assert (false_edge == e1);
      arg_true = arg0;
      arg_false = arg1;
    }
  else
    {
      gcc_assert (false_edge == e0);
      if (!threeway_p)
	gcc_assert (true_edge == e1);
      arg_true = arg1;
      arg_false = arg0;
    }

  /* Do not make conditional undefs unconditional.  */
  if ((TREE_CODE (arg_true) == SSA_NAME
       && ssa_name_maybe_undef_p (arg_true))
      || (TREE_CODE (arg_false) == SSA_NAME
	  && ssa_name_maybe_undef_p (arg_false)))
    return false;

  tree type = TREE_TYPE (gimple_phi_result (phi));
  {
    auto_flow_sensitive s1(stmt_to_move);
    auto_flow_sensitive s_alt(stmt_to_move_alt);

    result = gimple_simplify_phiopt (early_p, type, stmt,
				     arg_true, arg_false,
				     &seq);
  }

  if (!result)
    return false;
  if (dump_file && (dump_flags & TDF_FOLDING))
    fprintf (dump_file, "accepted the phiopt match-simplify.\n");

  auto_bitmap exprs_maybe_dce;

  /* Mark the cond statement's lhs/rhs as maybe dce.  */
  if (TREE_CODE (gimple_cond_lhs (stmt)) == SSA_NAME
      && !SSA_NAME_IS_DEFAULT_DEF (gimple_cond_lhs (stmt)))
    bitmap_set_bit (exprs_maybe_dce,
		    SSA_NAME_VERSION (gimple_cond_lhs (stmt)));
  if (TREE_CODE (gimple_cond_rhs (stmt)) == SSA_NAME
      && !SSA_NAME_IS_DEFAULT_DEF (gimple_cond_rhs (stmt)))
    bitmap_set_bit (exprs_maybe_dce,
		    SSA_NAME_VERSION (gimple_cond_rhs (stmt)));

  gsi = gsi_last_bb (cond_bb);
  /* Insert the sequence generated from gimple_simplify_phiopt.  */
  if (seq)
    {
      // Mark the lhs of the new statements maybe for dce.
      gimple_stmt_iterator gsi1 = gsi_start (seq);
      for (; !gsi_end_p (gsi1); gsi_next (&gsi1))
	{
	  gimple *stmt = gsi_stmt (gsi1);
	  tree name = gimple_get_lhs (stmt);
	  if (name && TREE_CODE (name) == SSA_NAME)
	    bitmap_set_bit (exprs_maybe_dce, SSA_NAME_VERSION (name));
	}
      gsi_insert_seq_before (&gsi, seq, GSI_CONTINUE_LINKING);
    }

  /* If there was a statement to move, move it to right before
     the original conditional.  */
  move_stmt (stmt_to_move, &gsi, exprs_maybe_dce);
  move_stmt (stmt_to_move_alt, &gsi, exprs_maybe_dce);

  replace_phi_edge_with_variable (cond_bb, e1, phi, result, exprs_maybe_dce);

  /* Add statistic here even though replace_phi_edge_with_variable already
     does it as we want to be able to count when match-simplify happens vs
     the others.  */
  statistics_counter_event (cfun, "match-simplify PHI replacement", 1);

  /* Note that we optimized this PHI.  */
  return true;
}

/* Update *ARG which is defined in STMT so that it contains the
   computed value if that seems profitable.  Return true if the
   statement is made dead by that rewriting.  */

static bool
jump_function_from_stmt (tree *arg, gimple *stmt)
{
  enum tree_code code = gimple_assign_rhs_code (stmt);
  if (code == ADDR_EXPR)
    {
      /* For arg = &p->i transform it to p, if possible.  */
      tree rhs1 = gimple_assign_rhs1 (stmt);
      poly_int64 offset;
      tree tem = get_addr_base_and_unit_offset (TREE_OPERAND (rhs1, 0),
						&offset);
      if (tem
	  && TREE_CODE (tem) == MEM_REF
	  && known_eq (mem_ref_offset (tem) + offset, 0))
	{
	  *arg = TREE_OPERAND (tem, 0);
	  return true;
	}
    }
  /* TODO: Much like IPA-CP jump-functions we want to handle constant
     additions symbolically here, and we'd need to update the comparison
     code that compares the arg + cst tuples in our caller.  For now the
     code above exactly handles the VEC_BASE pattern from vec.h.  */
  return false;
}
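
/* For exposition (added note): for a statement arg = &p_1->m where
   member m sits at offset 0, get_addr_base_and_unit_offset yields a
   MEM_REF of p_1 with offset 0, so *ARG becomes p_1 and the ADDR_EXPR
   statement is dead afterwards.  A nonzero combined offset leaves *ARG
   untouched and returns false.  */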

/* RHS is a source argument in a BIT_AND_EXPR which feeds a conditional
   of the form SSA_NAME NE 0.

   If RHS is fed by a simple EQ_EXPR comparison of two values, see if
   the two input values of the EQ_EXPR match arg0 and arg1.

   If so update *code and return TRUE.  Otherwise return FALSE.  */

static bool
rhs_is_fed_for_value_replacement (const_tree arg0, const_tree arg1,
				  enum tree_code *code, const_tree rhs)
{
  /* Obviously if RHS is not an SSA_NAME, we can't look at the defining
     statement.  */
  if (TREE_CODE (rhs) == SSA_NAME)
    {
      gimple *def1 = SSA_NAME_DEF_STMT (rhs);

      /* Verify the defining statement has an EQ_EXPR on the RHS.  */
      if (is_gimple_assign (def1) && gimple_assign_rhs_code (def1) == EQ_EXPR)
	{
	  /* Finally verify the source operands of the EQ_EXPR are equal
	     to arg0 and arg1.  */
	  tree op0 = gimple_assign_rhs1 (def1);
	  tree op1 = gimple_assign_rhs2 (def1);
	  if ((operand_equal_for_phi_arg_p (arg0, op0)
	       && operand_equal_for_phi_arg_p (arg1, op1))
	      || (operand_equal_for_phi_arg_p (arg0, op1)
		  && operand_equal_for_phi_arg_p (arg1, op0)))
	    {
	      /* We will perform the optimization.  */
	      *code = gimple_assign_rhs_code (def1);
	      return true;
	    }
	}
    }
  return false;
}

/* Return TRUE if arg0/arg1 are equal to the rhs/lhs or lhs/rhs of COND.

   Also return TRUE if arg0/arg1 are equal to the source arguments of an
   EQ comparison feeding a BIT_AND_EXPR which feeds COND.

   Return FALSE otherwise.  */

static bool
operand_equal_for_value_replacement (const_tree arg0, const_tree arg1,
				     enum tree_code *code, gimple *cond)
{
  gimple *def;
  tree lhs = gimple_cond_lhs (cond);
  tree rhs = gimple_cond_rhs (cond);

  if ((operand_equal_for_phi_arg_p (arg0, lhs)
       && operand_equal_for_phi_arg_p (arg1, rhs))
      || (operand_equal_for_phi_arg_p (arg1, lhs)
	  && operand_equal_for_phi_arg_p (arg0, rhs)))
    return true;

  /* Now handle more complex case where we have an EQ comparison
     which feeds a BIT_AND_EXPR which feeds COND.

     First verify that COND is of the form SSA_NAME NE 0.  */
  if (*code != NE_EXPR || !integer_zerop (rhs)
      || TREE_CODE (lhs) != SSA_NAME)
    return false;

  /* Now ensure that SSA_NAME is set by a BIT_AND_EXPR.  */
  def = SSA_NAME_DEF_STMT (lhs);
  if (!is_gimple_assign (def) || gimple_assign_rhs_code (def) != BIT_AND_EXPR)
    return false;

  /* Now verify arg0/arg1 correspond to the source arguments of an
     EQ comparison feeding the BIT_AND_EXPR.  */

  tree tmp = gimple_assign_rhs1 (def);
  if (rhs_is_fed_for_value_replacement (arg0, arg1, code, tmp))
    return true;

  tmp = gimple_assign_rhs2 (def);
  if (rhs_is_fed_for_value_replacement (arg0, arg1, code, tmp))
    return true;

  return false;
}
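
/* For exposition (added commentary; SSA names are invented), the
   BIT_AND_EXPR form recognized above:

     _1 = a_2 == b_3;
     _4 = _1 & c_5;
     if (_4 != 0) ...

   With PHI arguments a_2 and b_3, the EQ_EXPR feeding the BIT_AND_EXPR
   proves the arguments equal on the true edge, so *CODE is updated to
   EQ_EXPR and value replacement can proceed.  */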

/* Returns true if ARG is a neutral element for operation CODE
   on the RIGHT side.  */

static bool
neutral_element_p (tree_code code, tree arg, bool right)
{
  switch (code)
    {
    case PLUS_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
      return integer_zerop (arg);

    case LROTATE_EXPR:
    case RROTATE_EXPR:
    case LSHIFT_EXPR:
    case RSHIFT_EXPR:
    case MINUS_EXPR:
    case POINTER_PLUS_EXPR:
      return right && integer_zerop (arg);

    case MULT_EXPR:
      return integer_onep (arg);

    case TRUNC_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case EXACT_DIV_EXPR:
      return right && integer_onep (arg);

    case BIT_AND_EXPR:
      return integer_all_onesp (arg);

    default:
      return false;
    }
}
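
/* For exposition (added note): neutral elements let x OP arg be
   replaced by x, e.g. x + 0, x | 0, x ^ 0, x * 1, x & -1, and on the
   right side only, x - 0, x << 0, x >> 0 and x / 1.  */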

/* Returns true if ARG is an absorbing element for operation CODE.  */

static bool
absorbing_element_p (tree_code code, tree arg, bool right, tree rval)
{
  switch (code)
    {
    case BIT_IOR_EXPR:
      return integer_all_onesp (arg);

    case MULT_EXPR:
    case BIT_AND_EXPR:
      return integer_zerop (arg);

    case LSHIFT_EXPR:
    case RSHIFT_EXPR:
    case LROTATE_EXPR:
    case RROTATE_EXPR:
      return !right && integer_zerop (arg);

    case TRUNC_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case EXACT_DIV_EXPR:
    case TRUNC_MOD_EXPR:
    case CEIL_MOD_EXPR:
    case FLOOR_MOD_EXPR:
    case ROUND_MOD_EXPR:
      return (!right
	      && integer_zerop (arg)
	      && tree_single_nonzero_warnv_p (rval, NULL));

    default:
      return false;
    }
}
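
/* For exposition (added note): absorbing elements make the result
   independent of the other operand, e.g. x * 0 == 0, x & 0 == 0,
   x | -1 == -1, and on the left side only, 0 << x == 0, 0 / x == 0 and
   0 % x == 0 (the division and modulo cases additionally require x to
   be known nonzero, hence RVAL).  */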

/* The function value_replacement does the main work of doing the value
   replacement.  Return non-zero if the replacement is done.  Otherwise return
   0.  If we remove the middle basic block, return 2.
   BB is the basic block where the replacement is going to be done.  ARG0
   is argument 0 from the PHI.  Likewise for ARG1.  */

static int
value_replacement (basic_block cond_bb, basic_block middle_bb,
		   edge e0, edge e1, gphi *phi, tree arg0, tree arg1)
{
  gimple_stmt_iterator gsi;
  edge true_edge, false_edge;
  enum tree_code code;
  bool empty_or_with_defined_p = true;

  /* If the type says honor signed zeros we cannot do this
     optimization.  */
  if (HONOR_SIGNED_ZEROS (arg1))
    return 0;

  /* If there is a statement in MIDDLE_BB that defines one of the PHI
     arguments, then adjust arg0 or arg1.  */
  gsi = gsi_start_nondebug_after_labels_bb (middle_bb);
  while (!gsi_end_p (gsi))
    {
      gimple *stmt = gsi_stmt (gsi);
      tree lhs;
      gsi_next_nondebug (&gsi);
      if (!is_gimple_assign (stmt))
	{
	  if (gimple_code (stmt) != GIMPLE_PREDICT
	      && gimple_code (stmt) != GIMPLE_NOP)
	    empty_or_with_defined_p = false;
	  continue;
	}
      /* Now try to adjust arg0 or arg1 according to the computation
	 in the statement.  */
      lhs = gimple_assign_lhs (stmt);
      if (!(lhs == arg0
	    && jump_function_from_stmt (&arg0, stmt))
	  || (lhs == arg1
	      && jump_function_from_stmt (&arg1, stmt)))
	empty_or_with_defined_p = false;
    }

  gcond *cond = as_a <gcond *> (*gsi_last_bb (cond_bb));
  code = gimple_cond_code (cond);

  /* This transformation is only valid for equality comparisons.  */
  if (code != NE_EXPR && code != EQ_EXPR)
    return 0;

  /* We need to know which is the true edge and which is the false
     edge so that we know whether we have abs or negative abs.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);

  /* At this point we know we have a COND_EXPR with two successors.
     One successor is BB, the other successor is an empty block which
     falls through into BB.

     The condition for the COND_EXPR is known to be NE_EXPR or EQ_EXPR.

     There is a single PHI node at the join point (BB) with two arguments.

     We now need to verify that the two arguments in the PHI node match
     the two arguments to the equality comparison.  */

  bool equal_p = operand_equal_for_value_replacement (arg0, arg1, &code, cond);
  bool maybe_equal_p = false;
  if (!equal_p
      && empty_or_with_defined_p
      && TREE_CODE (gimple_cond_rhs (cond)) == INTEGER_CST
      && (operand_equal_for_phi_arg_p (gimple_cond_lhs (cond), arg0)
	  ? TREE_CODE (arg1) == INTEGER_CST
	  : (operand_equal_for_phi_arg_p (gimple_cond_lhs (cond), arg1)
	     && TREE_CODE (arg0) == INTEGER_CST)))
    maybe_equal_p = true;
  if (equal_p || maybe_equal_p)
    {
      edge e;
      tree arg;

      /* For NE_EXPR, we want to build an assignment result = arg where
	 arg is the PHI argument associated with the true edge.  For
	 EQ_EXPR we want the PHI argument associated with the false edge.  */
      e = (code == NE_EXPR ? true_edge : false_edge);

      /* Unfortunately, E may not reach BB (it may instead have gone to
	 OTHER_BLOCK).  If that is the case, then we want the single outgoing
	 edge from OTHER_BLOCK which reaches BB and represents the desired
	 path from COND_BLOCK.  */
      if (e->dest == middle_bb)
	e = single_succ_edge (e->dest);

      /* Now we know the incoming edge to BB that has the argument for the
	 RHS of our new assignment statement.  */
      if (e0 == e)
	arg = arg0;
      else
	arg = arg1;

      /* If the middle basic block was empty or is defining the
	 PHI arguments and this is a single phi where the args are different
	 for the edges e0 and e1 then we can remove the middle basic block.  */
      if (empty_or_with_defined_p
	  && single_non_singleton_phi_for_edges (phi_nodes (gimple_bb (phi)),
						 e0, e1) == phi)
	{
	  use_operand_p use_p;
	  gimple *use_stmt;

	  /* Even if arg0/arg1 isn't equal to the second operand of cond, we
	     can optimize away the bb if we can prove it doesn't care whether
	     the phi result is arg0/arg1 or the second operand of cond.  Consider:
	     <bb 2> [local count: 118111600]:
	     if (i_2(D) == 4)
	       goto <bb 4>; [97.00%]
	     else
	       goto <bb 3>; [3.00%]

	     <bb 3> [local count: 3540129]:

	     <bb 4> [local count: 118111600]:
	     # i_6 = PHI <i_2(D)(3), 6(2)>
	     _3 = i_6 != 0;
	     Here, carg is 4, oarg is 6, crhs is 0, and because
	     (4 != 0) == (6 != 0), we don't care if i_6 is 4 or 6, both
	     have the same outcome.  So, we can optimize this to:
	     _3 = i_2(D) != 0;
	     If the single imm use of the phi result is >, >=, < or <=,
	     we can similarly check if both carg and oarg compare the
	     same against crhs using ccode.  */
	  if (maybe_equal_p
	      && TREE_CODE (arg) != INTEGER_CST
	      && single_imm_use (gimple_phi_result (phi), &use_p, &use_stmt))
	    {
	      enum tree_code ccode = ERROR_MARK;
	      tree clhs = NULL_TREE, crhs = NULL_TREE;
	      tree carg = gimple_cond_rhs (cond);
	      tree oarg = e0 == e ? arg1 : arg0;
	      if (is_gimple_assign (use_stmt)
		  && (TREE_CODE_CLASS (gimple_assign_rhs_code (use_stmt))
		      == tcc_comparison))
		{
		  ccode = gimple_assign_rhs_code (use_stmt);
		  clhs = gimple_assign_rhs1 (use_stmt);
		  crhs = gimple_assign_rhs2 (use_stmt);
		}
	      else if (gimple_code (use_stmt) == GIMPLE_COND)
		{
		  ccode = gimple_cond_code (use_stmt);
		  clhs = gimple_cond_lhs (use_stmt);
		  crhs = gimple_cond_rhs (use_stmt);
		}
	      if (ccode != ERROR_MARK
		  && clhs == gimple_phi_result (phi)
		  && TREE_CODE (crhs) == INTEGER_CST)
		switch (ccode)
		  {
		  case EQ_EXPR:
		  case NE_EXPR:
		    if (!tree_int_cst_equal (crhs, carg)
			&& !tree_int_cst_equal (crhs, oarg))
		      equal_p = true;
		    break;
		  case GT_EXPR:
		    if (tree_int_cst_lt (crhs, carg)
			== tree_int_cst_lt (crhs, oarg))
		      equal_p = true;
		    break;
		  case GE_EXPR:
		    if (tree_int_cst_le (crhs, carg)
			== tree_int_cst_le (crhs, oarg))
		      equal_p = true;
		    break;
		  case LT_EXPR:
		    if (tree_int_cst_lt (carg, crhs)
			== tree_int_cst_lt (oarg, crhs))
		      equal_p = true;
		    break;
		  case LE_EXPR:
		    if (tree_int_cst_le (carg, crhs)
			== tree_int_cst_le (oarg, crhs))
		      equal_p = true;
		    break;
		  default:
		    break;
		  }
	      if (equal_p)
		{
		  tree phires = gimple_phi_result (phi);
		  if (SSA_NAME_RANGE_INFO (phires))
		    {
		      /* After the optimization PHI result can have value
			 which it couldn't have previously.  */
		      int_range_max r;
		      if (get_global_range_query ()->range_of_expr (r, phires,
								    phi))
			{
			  wide_int warg = wi::to_wide (carg);
			  int_range<2> tmp (TREE_TYPE (carg), warg, warg);
			  r.union_ (tmp);
			  reset_flow_sensitive_info (phires);
			  set_range_info (phires, r);
			}
		      else
			reset_flow_sensitive_info (phires);
		    }
		}
	      if (equal_p && MAY_HAVE_DEBUG_BIND_STMTS)
		{
		  imm_use_iterator imm_iter;
		  tree phires = gimple_phi_result (phi);
		  tree temp = NULL_TREE;
		  bool reset_p = false;

		  /* Add # DEBUG D#1 => arg != carg ? arg : oarg.  */
		  FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, phires)
		    {
		      if (!is_gimple_debug (use_stmt))
			continue;
		      if (temp == NULL_TREE)
			{
			  if (!single_pred_p (middle_bb)
			      || EDGE_COUNT (gimple_bb (phi)->preds) != 2)
			    {
			      /* But only if middle_bb has a single
				 predecessor and phi bb has two, otherwise
				 we could use a SSA_NAME not usable in that
				 place or wrong-debug.  */
			      reset_p = true;
			      break;
			    }
			  gimple_stmt_iterator gsi
			    = gsi_after_labels (gimple_bb (phi));
			  tree type = TREE_TYPE (phires);
			  temp = build_debug_expr_decl (type);
			  tree t = build2 (NE_EXPR, boolean_type_node,
					   arg, carg);
			  t = build3 (COND_EXPR, type, t, arg, oarg);
			  gimple *g = gimple_build_debug_bind (temp, t, phi);
			  gsi_insert_before (&gsi, g, GSI_SAME_STMT);
			}
		      FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
			replace_exp (use_p, temp);
		      update_stmt (use_stmt);
		    }
		  if (reset_p)
		    reset_debug_uses (phi);
		}
	    }
	  if (equal_p)
	    {
	      replace_phi_edge_with_variable (cond_bb, e1, phi, arg);
	      /* Note that we optimized this PHI.  */
	      return 2;
	    }
	}
      else if (equal_p)
	{
	  if (!single_pred_p (middle_bb))
	    return 0;
	  statistics_counter_event (cfun, "Replace PHI with "
				    "variable/value_replacement", 1);

	  /* Replace the PHI arguments with arg.  */
	  SET_PHI_ARG_DEF (phi, e0->dest_idx, arg);
	  SET_PHI_ARG_DEF (phi, e1->dest_idx, arg);
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "PHI ");
	      print_generic_expr (dump_file, gimple_phi_result (phi));
	      fprintf (dump_file, " reduced for COND_EXPR in block %d to ",
		       cond_bb->index);
	      print_generic_expr (dump_file, arg);
	      fprintf (dump_file, ".\n");
	    }

	  return 1;
	}
    }

  if (!single_pred_p (middle_bb))
    return 0;

  /* Now optimize (x != 0) ? x + y : y to just x + y.  */
  gsi = gsi_last_nondebug_bb (middle_bb);
  if (gsi_end_p (gsi))
    return 0;

  gimple *assign = gsi_stmt (gsi);
  if (!is_gimple_assign (assign)
      || (!INTEGRAL_TYPE_P (TREE_TYPE (arg0))
	  && !POINTER_TYPE_P (TREE_TYPE (arg0))))
    return 0;

  if (gimple_assign_rhs_class (assign) != GIMPLE_BINARY_RHS)
    {
      /* If the last stmt of middle_bb is a conversion, handle it like
	 a preparation statement through constant evaluation with
	 checking for UB.  */
      enum tree_code sc = gimple_assign_rhs_code (assign);
      if (CONVERT_EXPR_CODE_P (sc))
	assign = NULL;
      else
	return 0;
    }

  /* Punt if there are (degenerate) PHIs in middle_bb, there should not be.  */
  if (!gimple_seq_empty_p (phi_nodes (middle_bb)))
    return 0;

  /* Allow up to 2 cheap preparation statements that prepare argument
     for assign, e.g.:
      if (y_4 != 0)
	goto <bb 3>;
      else
	goto <bb 4>;
     <bb 3>:
      _1 = (int) y_4;
      iftmp.0_6 = x_5(D) r<< _1;
     <bb 4>:
      # iftmp.0_2 = PHI <iftmp.0_6(3), x_5(D)(2)>
     or:
      if (y_3(D) == 0)
	goto <bb 4>;
      else
	goto <bb 3>;
     <bb 3>:
      y_4 = y_3(D) & 31;
      _1 = (int) y_4;
      _6 = x_5(D) r<< _1;
     <bb 4>:
      # _2 = PHI <x_5(D)(2), _6(3)>  */
  gimple *prep_stmt[2] = { NULL, NULL };
  int prep_cnt;
  for (prep_cnt = 0; ; prep_cnt++)
    {
      if (prep_cnt || assign)
	gsi_prev_nondebug (&gsi);
      if (gsi_end_p (gsi))
	break;

      gimple *g = gsi_stmt (gsi);
      if (gimple_code (g) == GIMPLE_LABEL)
	break;

      if (prep_cnt == 2 || !is_gimple_assign (g))
	return 0;

      tree lhs = gimple_assign_lhs (g);
      tree rhs1 = gimple_assign_rhs1 (g);
      use_operand_p use_p;
      gimple *use_stmt;
      if (TREE_CODE (lhs) != SSA_NAME
	  || TREE_CODE (rhs1) != SSA_NAME
	  || !INTEGRAL_TYPE_P (TREE_TYPE (lhs))
	  || !INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
	  || !single_imm_use (lhs, &use_p, &use_stmt)
	  || ((prep_cnt || assign)
	      && use_stmt != (prep_cnt ? prep_stmt[prep_cnt - 1] : assign)))
	return 0;
      switch (gimple_assign_rhs_code (g))
	{
	CASE_CONVERT:
	  break;
	case PLUS_EXPR:
	case BIT_AND_EXPR:
	case BIT_IOR_EXPR:
	case BIT_XOR_EXPR:
	  if (TREE_CODE (gimple_assign_rhs2 (g)) != INTEGER_CST)
	    return 0;
	  break;
	default:
	  return 0;
	}
      prep_stmt[prep_cnt] = g;
    }

  /* Only transform if it removes the condition.  */
  if (!single_non_singleton_phi_for_edges (phi_nodes (gimple_bb (phi)), e0, e1))
    return 0;

  /* Size-wise, this is always profitable.  */
  if (optimize_bb_for_speed_p (cond_bb)
      /* The special case is useless if it has a low probability.  */
      && profile_status_for_fn (cfun) != PROFILE_ABSENT
      && EDGE_PRED (middle_bb, 0)->probability < profile_probability::even ()
      /* If assign is cheap, there is no point avoiding it.  */
      && estimate_num_insns_seq (bb_seq (middle_bb), &eni_time_weights)
	 >= 3 * estimate_num_insns (cond, &eni_time_weights))
    return 0;

  tree cond_lhs = gimple_cond_lhs (cond);
  tree cond_rhs = gimple_cond_rhs (cond);

  /* Propagate the cond_rhs constant through preparation stmts,
     make sure UB isn't invoked while doing that.  */
  for (int i = prep_cnt - 1; i >= 0; --i)
    {
      gimple *g = prep_stmt[i];
      tree grhs1 = gimple_assign_rhs1 (g);
      if (!operand_equal_for_phi_arg_p (cond_lhs, grhs1))
	return 0;
      cond_lhs = gimple_assign_lhs (g);
      cond_rhs = fold_convert (TREE_TYPE (grhs1), cond_rhs);
      if (TREE_CODE (cond_rhs) != INTEGER_CST
	  || TREE_OVERFLOW (cond_rhs))
	return 0;
      if (gimple_assign_rhs_class (g) == GIMPLE_BINARY_RHS)
	{
	  cond_rhs = int_const_binop (gimple_assign_rhs_code (g), cond_rhs,
				      gimple_assign_rhs2 (g));
	  if (TREE_OVERFLOW (cond_rhs))
	    return 0;
	}
      cond_rhs = fold_convert (TREE_TYPE (cond_lhs), cond_rhs);
      if (TREE_CODE (cond_rhs) != INTEGER_CST
	  || TREE_OVERFLOW (cond_rhs))
	return 0;
    }

  tree lhs, rhs1, rhs2;
  enum tree_code code_def;
  if (assign)
    {
      lhs = gimple_assign_lhs (assign);
      rhs1 = gimple_assign_rhs1 (assign);
      rhs2 = gimple_assign_rhs2 (assign);
      code_def = gimple_assign_rhs_code (assign);
    }
  else
    {
      gcc_assert (prep_cnt > 0);
      lhs = cond_lhs;
      rhs1 = NULL_TREE;
      rhs2 = NULL_TREE;
      code_def = ERROR_MARK;
    }

  if (((code == NE_EXPR && e1 == false_edge)
       || (code == EQ_EXPR && e1 == true_edge))
      && arg0 == lhs
      && ((assign == NULL
	   && operand_equal_for_phi_arg_p (arg1, cond_rhs))
	  || (assign
	      && arg1 == rhs1
	      && operand_equal_for_phi_arg_p (rhs2, cond_lhs)
	      && neutral_element_p (code_def, cond_rhs, true))
	  || (assign
	      && arg1 == rhs2
	      && operand_equal_for_phi_arg_p (rhs1, cond_lhs)
	      && neutral_element_p (code_def, cond_rhs, false))
	  || (assign
	      && operand_equal_for_phi_arg_p (arg1, cond_rhs)
	      && ((operand_equal_for_phi_arg_p (rhs2, cond_lhs)
		   && absorbing_element_p (code_def, cond_rhs, true, rhs2))
		  || (operand_equal_for_phi_arg_p (rhs1, cond_lhs)
		      && absorbing_element_p (code_def,
					      cond_rhs, false, rhs2))))))
    {
      gsi = gsi_for_stmt (cond);
      /* Moving ASSIGN might change VR of lhs, e.g. when moving u_6
	 def-stmt in:
	 if (n_5 != 0)
	   goto <bb 3>;
	 else
	   goto <bb 4>;

	 <bb 3>:
	 # RANGE [0, 4294967294]
	 u_6 = n_5 + 4294967295;

	 <bb 4>:
	 # u_3 = PHI <u_6(3), 4294967295(2)>  */
      reset_flow_sensitive_info (lhs);
      gimple_stmt_iterator gsi_from;
      for (int i = prep_cnt - 1; i >= 0; --i)
	{
	  tree plhs = gimple_assign_lhs (prep_stmt[i]);
	  reset_flow_sensitive_info (plhs);
	  gsi_from = gsi_for_stmt (prep_stmt[i]);
	  gsi_move_before (&gsi_from, &gsi);
	}
      if (assign)
	{
	  gsi_from = gsi_for_stmt (assign);
	  gsi_move_before (&gsi_from, &gsi);
	}
      replace_phi_edge_with_variable (cond_bb, e1, phi, lhs);
      return 2;
    }

  return 0;
}
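
/* For exposition (added commentary; the names are invented), the
   "(x != 0) ? x + y : y" case handled above, as source code:

     int f (int x, int y)
     {
       if (x != 0)
	 return x + y;	/* middle bb: _1 = x + y  */
       return y;	/* same as x + y here, since 0 + y == y
			   (0 is the neutral element of +)  */
     }

   value_replacement moves _1 = x + y before the condition and replaces
   the PHI with _1, after which the condition and middle block die.  */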

/* If VAR is an SSA_NAME that points to a BIT_NOT_EXPR then return the TREE for
   the value being inverted.  */

static tree
strip_bit_not (tree var)
{
  if (TREE_CODE (var) != SSA_NAME)
    return NULL_TREE;

  gimple *assign = SSA_NAME_DEF_STMT (var);
  if (gimple_code (assign) != GIMPLE_ASSIGN)
    return NULL_TREE;

  if (gimple_assign_rhs_code (assign) != BIT_NOT_EXPR)
    return NULL_TREE;

  return gimple_assign_rhs1 (assign);
}

/* Invert a MIN to a MAX or a MAX to a MIN expression CODE.  */

enum tree_code
invert_minmax_code (enum tree_code code)
{
  switch (code) {
  case MIN_EXPR:
    return MAX_EXPR;
  case MAX_EXPR:
    return MIN_EXPR;
  default:
    gcc_unreachable ();
  }
}

/* The function minmax_replacement does the main work of doing the minmax
   replacement.  Return true if the replacement is done.  Otherwise return
   false.
   BB is the basic block where the replacement is going to be done.  ARG0
   is argument 0 from the PHI.  Likewise for ARG1.

   If THREEWAY_P then expect the BB to be laid out in diamond shape with each
   BB containing only a MIN or MAX expression.  */

static bool
minmax_replacement (basic_block cond_bb, basic_block middle_bb, basic_block alt_middle_bb,
		    edge e0, edge e1, gphi *phi, tree arg0, tree arg1, bool threeway_p)
{
  tree result;
  edge true_edge, false_edge;
  enum tree_code minmax, ass_code;
  tree smaller, larger, arg_true, arg_false;
  gimple_stmt_iterator gsi, gsi_from;

  tree type = TREE_TYPE (PHI_RESULT (phi));

  gcond *cond = as_a <gcond *> (*gsi_last_bb (cond_bb));
  enum tree_code cmp = gimple_cond_code (cond);
  tree rhs = gimple_cond_rhs (cond);

  /* Turn EQ/NE of extreme values to order comparisons.  */
  if ((cmp == NE_EXPR || cmp == EQ_EXPR)
      && TREE_CODE (rhs) == INTEGER_CST
      && INTEGRAL_TYPE_P (TREE_TYPE (rhs)))
    {
      if (wi::eq_p (wi::to_wide (rhs), wi::min_value (TREE_TYPE (rhs))))
	{
	  cmp = (cmp == EQ_EXPR) ? LT_EXPR : GE_EXPR;
	  rhs = wide_int_to_tree (TREE_TYPE (rhs),
				  wi::min_value (TREE_TYPE (rhs)) + 1);
	}
      else if (wi::eq_p (wi::to_wide (rhs), wi::max_value (TREE_TYPE (rhs))))
	{
	  cmp = (cmp == EQ_EXPR) ? GT_EXPR : LE_EXPR;
	  rhs = wide_int_to_tree (TREE_TYPE (rhs),
				  wi::max_value (TREE_TYPE (rhs)) - 1);
	}
    }

  /* This transformation is only valid for order comparisons.  Record which
     operand is smaller/larger if the result of the comparison is true.  */
  tree alt_smaller = NULL_TREE;
  tree alt_larger = NULL_TREE;
  if (cmp == LT_EXPR || cmp == LE_EXPR)
    {
      smaller = gimple_cond_lhs (cond);
      larger = rhs;
      /* If we have smaller < CST it is equivalent to smaller <= CST-1.
	 Likewise smaller <= CST is equivalent to smaller < CST+1.  */
      if (TREE_CODE (larger) == INTEGER_CST
	  && INTEGRAL_TYPE_P (TREE_TYPE (larger)))
	{
	  if (cmp == LT_EXPR)
	    {
	      wi::overflow_type overflow;
	      wide_int alt = wi::sub (wi::to_wide (larger), 1,
				      TYPE_SIGN (TREE_TYPE (larger)),
				      &overflow);
	      if (! overflow)
		alt_larger = wide_int_to_tree (TREE_TYPE (larger), alt);
	    }
	  else
	    {
	      wi::overflow_type overflow;
	      wide_int alt = wi::add (wi::to_wide (larger), 1,
				      TYPE_SIGN (TREE_TYPE (larger)),
				      &overflow);
	      if (! overflow)
		alt_larger = wide_int_to_tree (TREE_TYPE (larger), alt);
	    }
	}
    }
  else if (cmp == GT_EXPR || cmp == GE_EXPR)
    {
      smaller = rhs;
      larger = gimple_cond_lhs (cond);
      /* If we have larger > CST it is equivalent to larger >= CST+1.
	 Likewise larger >= CST is equivalent to larger > CST-1.  */
      if (TREE_CODE (smaller) == INTEGER_CST
	  && INTEGRAL_TYPE_P (TREE_TYPE (smaller)))
	{
	  wi::overflow_type overflow;
	  if (cmp == GT_EXPR)
	    {
	      wide_int alt = wi::add (wi::to_wide (smaller), 1,
				      TYPE_SIGN (TREE_TYPE (smaller)),
				      &overflow);
	      if (! overflow)
		alt_smaller = wide_int_to_tree (TREE_TYPE (smaller), alt);
	    }
	  else
	    {
	      wide_int alt = wi::sub (wi::to_wide (smaller), 1,
				      TYPE_SIGN (TREE_TYPE (smaller)),
				      &overflow);
	      if (! overflow)
		alt_smaller = wide_int_to_tree (TREE_TYPE (smaller), alt);
	    }
	}
    }
  else
    return false;

  /* Handle the special case of (signed_type)x < 0 being equivalent
     to x > MAX_VAL(signed_type) and (signed_type)x >= 0 equivalent
     to x <= MAX_VAL(signed_type).  */
  if ((cmp == GE_EXPR || cmp == LT_EXPR)
      && INTEGRAL_TYPE_P (type)
      && TYPE_UNSIGNED (type)
      && integer_zerop (rhs))
    {
      tree op = gimple_cond_lhs (cond);
      if (TREE_CODE (op) == SSA_NAME
	  && INTEGRAL_TYPE_P (TREE_TYPE (op))
	  && !TYPE_UNSIGNED (TREE_TYPE (op)))
	{
	  gimple *def_stmt = SSA_NAME_DEF_STMT (op);
	  if (gimple_assign_cast_p (def_stmt))
	    {
	      tree op1 = gimple_assign_rhs1 (def_stmt);
	      if (INTEGRAL_TYPE_P (TREE_TYPE (op1))
		  && TYPE_UNSIGNED (TREE_TYPE (op1))
		  && (TYPE_PRECISION (TREE_TYPE (op))
		      == TYPE_PRECISION (TREE_TYPE (op1)))
		  && useless_type_conversion_p (type, TREE_TYPE (op1)))
		{
		  wide_int w1 = wi::max_value (TREE_TYPE (op));
		  wide_int w2 = wi::add (w1, 1);
		  if (cmp == LT_EXPR)
		    {
		      larger = op1;
		      smaller = wide_int_to_tree (TREE_TYPE (op1), w1);
		      alt_smaller = wide_int_to_tree (TREE_TYPE (op1), w2);
		      alt_larger = NULL_TREE;
		    }
		  else
		    {
		      smaller = op1;
		      larger = wide_int_to_tree (TREE_TYPE (op1), w1);
		      alt_larger = wide_int_to_tree (TREE_TYPE (op1), w2);
		      alt_smaller = NULL_TREE;
		    }
		}
	    }
	}
    }

  /* We need to know which is the true edge and which is the false
     edge so that we know whether we have abs or negative abs.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);

  /* Forward the edges over the middle basic block.  */
  if (true_edge->dest == middle_bb)
    true_edge = EDGE_SUCC (true_edge->dest, 0);
  if (false_edge->dest == middle_bb)
    false_edge = EDGE_SUCC (false_edge->dest, 0);

  /* When THREEWAY_P then e1 will point to the edge of the final transition
     from middle-bb to end.  */
  if (true_edge == e0)
    {
      if (!threeway_p)
	gcc_assert (false_edge == e1);
      arg_true = arg0;
      arg_false = arg1;
    }
  else
    {
      gcc_assert (false_edge == e0);
      if (!threeway_p)
	gcc_assert (true_edge == e1);
      arg_true = arg1;
      arg_false = arg0;
    }

  if (empty_block_p (middle_bb))
    {
      if ((operand_equal_for_phi_arg_p (arg_true, smaller)
	   || (alt_smaller
	       && operand_equal_for_phi_arg_p (arg_true, alt_smaller)))
	  && (operand_equal_for_phi_arg_p (arg_false, larger)
	      || (alt_larger
		  && operand_equal_for_phi_arg_p (arg_true, alt_larger))))
	{
	  /* Case

	     if (smaller < larger)
	       rslt = smaller;
	     else
	       rslt = larger;  */
	  minmax = MIN_EXPR;
	}
      else if ((operand_equal_for_phi_arg_p (arg_false, smaller)
		|| (alt_smaller
		    && operand_equal_for_phi_arg_p (arg_false, alt_smaller)))
	       && (operand_equal_for_phi_arg_p (arg_true, larger)
		   || (alt_larger
		       && operand_equal_for_phi_arg_p (arg_true, alt_larger))))
	minmax = MAX_EXPR;
      else
	return false;
    }
1853 else if (HONOR_NANS (type) || HONOR_SIGNED_ZEROS (type))
1854 /* The optimization may be unsafe due to NaNs. */
1855 return false;
1856 else if (middle_bb != alt_middle_bb && threeway_p)
1858 /* Recognize the following case:
1860 if (smaller < larger)
1861 a = MIN (smaller, c);
1862 else
1863 b = MIN (larger, c);
1864 x = PHI <a, b>
1866 This is equivalent to
1868 a = MIN (smaller, c);
1869 x = MIN (larger, a); */
1871 gimple *assign = last_and_only_stmt (middle_bb);
1872 tree lhs, op0, op1, bound;
1873 tree alt_lhs, alt_op0, alt_op1;
1874 bool invert = false;
1876 /* When THREEWAY_P then e1 will point to the edge of the final transition
1877 from middle-bb to end. */
1878 if (true_edge == e0)
1879 gcc_assert (false_edge == EDGE_PRED (e1->src, 0));
1880 else
1881 gcc_assert (true_edge == EDGE_PRED (e1->src, 0));
1883 bool valid_minmax_p = false;
1884 gimple_stmt_iterator it1
1885 = gsi_start_nondebug_after_labels_bb (middle_bb);
1886 gimple_stmt_iterator it2
1887 = gsi_start_nondebug_after_labels_bb (alt_middle_bb);
1888 if (gsi_one_nondebug_before_end_p (it1)
1889 && gsi_one_nondebug_before_end_p (it2))
1891 gimple *stmt1 = gsi_stmt (it1);
1892 gimple *stmt2 = gsi_stmt (it2);
1893 if (is_gimple_assign (stmt1) && is_gimple_assign (stmt2))
1895 enum tree_code code1 = gimple_assign_rhs_code (stmt1);
1896 enum tree_code code2 = gimple_assign_rhs_code (stmt2);
1897 valid_minmax_p = (code1 == MIN_EXPR || code1 == MAX_EXPR)
1898 && (code2 == MIN_EXPR || code2 == MAX_EXPR);
1902 if (!valid_minmax_p)
1903 return false;
1905 if (!assign
1906 || gimple_code (assign) != GIMPLE_ASSIGN)
1907 return false;
1909 lhs = gimple_assign_lhs (assign);
1910 ass_code = gimple_assign_rhs_code (assign);
1911 if (ass_code != MAX_EXPR && ass_code != MIN_EXPR)
1912 return false;
1914 op0 = gimple_assign_rhs1 (assign);
1915 op1 = gimple_assign_rhs2 (assign);
1917 assign = last_and_only_stmt (alt_middle_bb);
1918 if (!assign
1919 || gimple_code (assign) != GIMPLE_ASSIGN)
1920 return false;
1922 alt_lhs = gimple_assign_lhs (assign);
1923 if (ass_code != gimple_assign_rhs_code (assign))
1924 return false;
1926 if (!operand_equal_for_phi_arg_p (lhs, arg_true)
1927 || !operand_equal_for_phi_arg_p (alt_lhs, arg_false))
1928 return false;
1930 alt_op0 = gimple_assign_rhs1 (assign);
1931 alt_op1 = gimple_assign_rhs2 (assign);
1933 if ((operand_equal_for_phi_arg_p (op0, smaller)
1934 || (alt_smaller
1935 && operand_equal_for_phi_arg_p (op0, alt_smaller)))
1936 && (operand_equal_for_phi_arg_p (alt_op0, larger)
1937 || (alt_larger
1938 && operand_equal_for_phi_arg_p (alt_op0, alt_larger))))
1940 /* We got here if the condition is true, i.e., SMALLER < LARGER. */
1941 if (!operand_equal_for_phi_arg_p (op1, alt_op1))
1942 return false;
1944 if ((arg0 = strip_bit_not (op0)) != NULL
1945 && (arg1 = strip_bit_not (alt_op0)) != NULL
1946 && (bound = strip_bit_not (op1)) != NULL)
1948 minmax = MAX_EXPR;
1949 ass_code = invert_minmax_code (ass_code);
1950 invert = true;
1952 else
1954 bound = op1;
1955 minmax = MIN_EXPR;
1956 arg0 = op0;
1957 arg1 = alt_op0;
1960 else if ((operand_equal_for_phi_arg_p (op0, larger)
1961 || (alt_larger
1962 && operand_equal_for_phi_arg_p (op0, alt_larger)))
1963 && (operand_equal_for_phi_arg_p (alt_op0, smaller)
1964 || (alt_smaller
1965 && operand_equal_for_phi_arg_p (alt_op0, alt_smaller))))
1967 /* We got here if the condition is true, i.e., SMALLER > LARGER. */
1968 if (!operand_equal_for_phi_arg_p (op1, alt_op1))
1969 return false;
1971 if ((arg0 = strip_bit_not (op0)) != NULL
1972 && (arg1 = strip_bit_not (alt_op0)) != NULL
1973 && (bound = strip_bit_not (op1)) != NULL)
1975 minmax = MIN_EXPR;
1976 ass_code = invert_minmax_code (ass_code);
1977 invert = true;
1979 else
1981 bound = op1;
1982 minmax = MAX_EXPR;
1983 arg0 = op0;
1984 arg1 = alt_op0;
1987 else
1988 return false;
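/* A sketch of what is emitted below for the threeway case:
     t = MINMAX (arg0, arg1);
     result = ASS_CODE (t, bound);
   with an additional result = ~result when the BIT_NOT forms were
   matched above (INVERT set).  */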
1990 /* Emit the statement to compute min/max. */
1991 location_t locus = gimple_location (last_nondebug_stmt (cond_bb));
1992 gimple_seq stmts = NULL;
1993 tree phi_result = PHI_RESULT (phi);
1994 result = gimple_build (&stmts, locus, minmax, TREE_TYPE (phi_result),
1995 arg0, arg1);
1996 result = gimple_build (&stmts, locus, ass_code, TREE_TYPE (phi_result),
1997 result, bound);
1998 if (invert)
1999 result = gimple_build (&stmts, locus, BIT_NOT_EXPR, TREE_TYPE (phi_result),
2000 result);
2002 gsi = gsi_last_bb (cond_bb);
2003 gsi_insert_seq_before (&gsi, stmts, GSI_NEW_STMT);
2005 replace_phi_edge_with_variable (cond_bb, e1, phi, result);
2007 return true;
2009 else
2011 /* Recognize the following case, assuming d <= u:
2013 if (a <= u)
2014 b = MAX (a, d);
2015 x = PHI <b, u>
2017 This is equivalent to
2019 b = MAX (a, d);
2020 x = MIN (b, u); */
2022 gimple *assign = last_and_only_stmt (middle_bb);
2023 tree lhs, op0, op1, bound;
2025 if (!single_pred_p (middle_bb))
2026 return false;
2028 if (!assign
2029 || gimple_code (assign) != GIMPLE_ASSIGN)
2030 return false;
2032 lhs = gimple_assign_lhs (assign);
2033 ass_code = gimple_assign_rhs_code (assign);
2034 if (ass_code != MAX_EXPR && ass_code != MIN_EXPR)
2035 return false;
2036 op0 = gimple_assign_rhs1 (assign);
2037 op1 = gimple_assign_rhs2 (assign);
2039 if (true_edge->src == middle_bb)
2041 /* We got here if the condition is true, i.e., SMALLER < LARGER. */
2042 if (!operand_equal_for_phi_arg_p (lhs, arg_true))
2043 return false;
2045 if (operand_equal_for_phi_arg_p (arg_false, larger)
2046 || (alt_larger
2047 && operand_equal_for_phi_arg_p (arg_false, alt_larger)))
2049 /* Case
2051 if (smaller < larger)
2053 r' = MAX_EXPR (smaller, bound)
2055 r = PHI <r', larger> --> to be turned to MIN_EXPR. */
2056 if (ass_code != MAX_EXPR)
2057 return false;
2059 minmax = MIN_EXPR;
2060 if (operand_equal_for_phi_arg_p (op0, smaller)
2061 || (alt_smaller
2062 && operand_equal_for_phi_arg_p (op0, alt_smaller)))
2063 bound = op1;
2064 else if (operand_equal_for_phi_arg_p (op1, smaller)
2065 || (alt_smaller
2066 && operand_equal_for_phi_arg_p (op1, alt_smaller)))
2067 bound = op0;
2068 else
2069 return false;
2071 /* We need BOUND <= LARGER. */
2072 if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
2073 bound, arg_false)))
2074 return false;
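/* E.g. x < 10 ? MAX_EXPR (x, 5) : 10 becomes
   MIN_EXPR (MAX_EXPR (x, 5), 10); this is only valid because the
   bound satisfies 5 <= 10.  (Illustrative example.)  */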
2076 else if (operand_equal_for_phi_arg_p (arg_false, smaller)
2077 || (alt_smaller
2078 && operand_equal_for_phi_arg_p (arg_false, alt_smaller)))
2080 /* Case
2082 if (smaller < larger)
2084 r' = MIN_EXPR (larger, bound)
2086 r = PHI <r', smaller> --> to be turned to MAX_EXPR. */
2087 if (ass_code != MIN_EXPR)
2088 return false;
2090 minmax = MAX_EXPR;
2091 if (operand_equal_for_phi_arg_p (op0, larger)
2092 || (alt_larger
2093 && operand_equal_for_phi_arg_p (op0, alt_larger)))
2094 bound = op1;
2095 else if (operand_equal_for_phi_arg_p (op1, larger)
2096 || (alt_larger
2097 && operand_equal_for_phi_arg_p (op1, alt_larger)))
2098 bound = op0;
2099 else
2100 return false;
2102 /* We need BOUND >= SMALLER. */
2103 if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
2104 bound, arg_false)))
2105 return false;
2107 else
2108 return false;
2110 else
2112 /* We got here if the condition is false, i.e., SMALLER > LARGER. */
2113 if (!operand_equal_for_phi_arg_p (lhs, arg_false))
2114 return false;
2116 if (operand_equal_for_phi_arg_p (arg_true, larger)
2117 || (alt_larger
2118 && operand_equal_for_phi_arg_p (arg_true, alt_larger)))
2120 /* Case
2122 if (smaller > larger)
2124 r' = MIN_EXPR (smaller, bound)
2126 r = PHI <r', larger> --> to be turned to MAX_EXPR. */
2127 if (ass_code != MIN_EXPR)
2128 return false;
2130 minmax = MAX_EXPR;
2131 if (operand_equal_for_phi_arg_p (op0, smaller)
2132 || (alt_smaller
2133 && operand_equal_for_phi_arg_p (op0, alt_smaller)))
2134 bound = op1;
2135 else if (operand_equal_for_phi_arg_p (op1, smaller)
2136 || (alt_smaller
2137 && operand_equal_for_phi_arg_p (op1, alt_smaller)))
2138 bound = op0;
2139 else
2140 return false;
2142 /* We need BOUND >= LARGER. */
2143 if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
2144 bound, arg_true)))
2145 return false;
2147 else if (operand_equal_for_phi_arg_p (arg_true, smaller)
2148 || (alt_smaller
2149 && operand_equal_for_phi_arg_p (arg_true, alt_smaller)))
2151 /* Case
2153 if (smaller > larger)
2155 r' = MAX_EXPR (larger, bound)
2157 r = PHI <r', smaller> --> to be turned to MIN_EXPR. */
2158 if (ass_code != MAX_EXPR)
2159 return false;
2161 minmax = MIN_EXPR;
2162 if (operand_equal_for_phi_arg_p (op0, larger)
     || (alt_larger
         && operand_equal_for_phi_arg_p (op0, alt_larger)))
2163 bound = op1;
2164 else if (operand_equal_for_phi_arg_p (op1, larger)
          || (alt_larger
              && operand_equal_for_phi_arg_p (op1, alt_larger)))
2165 bound = op0;
2166 else
2167 return false;
2169 /* We need BOUND <= SMALLER. */
2170 if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
2171 bound, arg_true)))
2172 return false;
2174 else
2175 return false;
2178 /* Move the statement from the middle block. */
2179 gsi = gsi_last_bb (cond_bb);
2180 gsi_from = gsi_last_nondebug_bb (middle_bb);
2181 reset_flow_sensitive_info (SINGLE_SSA_TREE_OPERAND (gsi_stmt (gsi_from),
2182 SSA_OP_DEF));
2183 gsi_move_before (&gsi_from, &gsi);
2186 /* Emit the statement to compute min/max. */
2187 gimple_seq stmts = NULL;
2188 tree phi_result = PHI_RESULT (phi);
2190 /* When we can't use a MIN/MAX_EXPR, still make sure the expression
2191 stays in a form that ISAs mapping it to IEEE x > y ? x : y
2192 semantics can recognize (that's not IEEE max semantics). */
2193 if (HONOR_NANS (type) || HONOR_SIGNED_ZEROS (type))
2195 result = gimple_build (&stmts, cmp, boolean_type_node,
2196 gimple_cond_lhs (cond), rhs);
2197 result = gimple_build (&stmts, COND_EXPR, TREE_TYPE (phi_result),
2198 result, arg_true, arg_false);
2200 else
2201 result = gimple_build (&stmts, minmax, TREE_TYPE (phi_result), arg0, arg1);
2203 gsi = gsi_last_bb (cond_bb);
2204 gsi_insert_seq_before (&gsi, stmts, GSI_NEW_STMT);
2206 replace_phi_edge_with_variable (cond_bb, e1, phi, result);
2208 return true;
2211 /* Attempt to optimize (x <=> y) cmp 0 and similar comparisons.
2212 For strong ordering <=> try to match something like:
2213 <bb 2> : // cond3_bb (== cond2_bb)
2214 if (x_4(D) != y_5(D))
2215 goto <bb 3>; [INV]
2216 else
2217 goto <bb 6>; [INV]
2219 <bb 3> : // cond_bb
2220 if (x_4(D) < y_5(D))
2221 goto <bb 6>; [INV]
2222 else
2223 goto <bb 4>; [INV]
2225 <bb 4> : // middle_bb
2227 <bb 6> : // phi_bb
2228 # iftmp.0_2 = PHI <1(4), 0(2), -1(3)>
2229 _1 = iftmp.0_2 == 0;
2231 and for partial ordering <=> something like:
2233 <bb 2> : // cond3_bb
2234 if (a_3(D) == b_5(D))
2235 goto <bb 6>; [50.00%]
2236 else
2237 goto <bb 3>; [50.00%]
2239 <bb 3> [local count: 536870913]: // cond2_bb
2240 if (a_3(D) < b_5(D))
2241 goto <bb 6>; [50.00%]
2242 else
2243 goto <bb 4>; [50.00%]
2245 <bb 4> [local count: 268435456]: // cond_bb
2246 if (a_3(D) > b_5(D))
2247 goto <bb 6>; [50.00%]
2248 else
2249 goto <bb 5>; [50.00%]
2251 <bb 5> [local count: 134217728]: // middle_bb
2253 <bb 6> [local count: 1073741824]: // phi_bb
2254 # SR.27_4 = PHI <0(2), -1(3), 1(4), 2(5)>
2255 _2 = SR.27_4 > 0; */
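/* At the source level both shapes typically come from code like
     auto c = x <=> y;
     ... c == 0 ...      // or c < 0, c >= 0, etc.
   and the goal is to replace the comparison of the PHI result with a
   direct comparison of x and y.  (Illustrative sketch; the exact
   conditions are checked below.)  */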
2257 static bool
2258 spaceship_replacement (basic_block cond_bb, basic_block middle_bb,
2259 edge e0, edge e1, gphi *phi,
2260 tree arg0, tree arg1)
2262 tree phires = PHI_RESULT (phi);
2263 if (!INTEGRAL_TYPE_P (TREE_TYPE (phires))
2264 || TYPE_UNSIGNED (TREE_TYPE (phires))
2265 || !tree_fits_shwi_p (arg0)
2266 || !tree_fits_shwi_p (arg1)
2267 || !IN_RANGE (tree_to_shwi (arg0), -1, 2)
2268 || !IN_RANGE (tree_to_shwi (arg1), -1, 2))
2269 return false;
2271 basic_block phi_bb = gimple_bb (phi);
2272 gcc_assert (phi_bb == e0->dest && phi_bb == e1->dest);
2273 if (!IN_RANGE (EDGE_COUNT (phi_bb->preds), 3, 4))
2274 return false;
2276 use_operand_p use_p;
2277 gimple *use_stmt;
2278 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (phires))
2279 return false;
2280 if (!single_imm_use (phires, &use_p, &use_stmt))
2281 return false;
2282 enum tree_code cmp;
2283 tree lhs, rhs;
2284 gimple *orig_use_stmt = use_stmt;
2285 tree orig_use_lhs = NULL_TREE;
2286 int prec = TYPE_PRECISION (TREE_TYPE (phires));
2287 bool is_cast = false;
2289 /* Deal with the case when match.pd has rewritten the (res & ~1) == 0
2290 into res <= 1 and has left a type-cast for signed types. */
2291 if (gimple_assign_cast_p (use_stmt))
2293 orig_use_lhs = gimple_assign_lhs (use_stmt);
2294 /* match.pd would have only done this for a signed type,
2295 so the conversion must be to an unsigned one. */
2296 tree ty1 = TREE_TYPE (gimple_assign_rhs1 (use_stmt));
2297 tree ty2 = TREE_TYPE (orig_use_lhs);
2299 if (!TYPE_UNSIGNED (ty2) || !INTEGRAL_TYPE_P (ty2))
2300 return false;
2301 if (TYPE_PRECISION (ty1) > TYPE_PRECISION (ty2))
2302 return false;
2303 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (orig_use_lhs))
2304 return false;
2305 if (!single_imm_use (orig_use_lhs, &use_p, &use_stmt))
2306 return false;
2308 is_cast = true;
2310 else if (is_gimple_assign (use_stmt)
2311 && gimple_assign_rhs_code (use_stmt) == BIT_AND_EXPR
2312 && TREE_CODE (gimple_assign_rhs2 (use_stmt)) == INTEGER_CST
2313 && (wi::to_wide (gimple_assign_rhs2 (use_stmt))
2314 == wi::shifted_mask (1, prec - 1, false, prec)))
2316 /* For partial_ordering result operator>= with unspec as second
2317 argument is (res & 1) == res, folded by match.pd into
2318 (res & ~1) == 0. */
2319 orig_use_lhs = gimple_assign_lhs (use_stmt);
2320 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (orig_use_lhs))
2321 return false;
2322 if (!single_imm_use (orig_use_lhs, &use_p, &use_stmt))
2323 return false;
2325 if (gimple_code (use_stmt) == GIMPLE_COND)
2327 cmp = gimple_cond_code (use_stmt);
2328 lhs = gimple_cond_lhs (use_stmt);
2329 rhs = gimple_cond_rhs (use_stmt);
2331 else if (is_gimple_assign (use_stmt))
2333 if (gimple_assign_rhs_class (use_stmt) == GIMPLE_BINARY_RHS)
2335 cmp = gimple_assign_rhs_code (use_stmt);
2336 lhs = gimple_assign_rhs1 (use_stmt);
2337 rhs = gimple_assign_rhs2 (use_stmt);
2339 else if (gimple_assign_rhs_code (use_stmt) == COND_EXPR)
2341 tree cond = gimple_assign_rhs1 (use_stmt);
2342 if (!COMPARISON_CLASS_P (cond))
2343 return false;
2344 cmp = TREE_CODE (cond);
2345 lhs = TREE_OPERAND (cond, 0);
2346 rhs = TREE_OPERAND (cond, 1);
2348 else
2349 return false;
2351 else
2352 return false;
2353 switch (cmp)
2355 case EQ_EXPR:
2356 case NE_EXPR:
2357 case LT_EXPR:
2358 case GT_EXPR:
2359 case LE_EXPR:
2360 case GE_EXPR:
2361 break;
2362 default:
2363 return false;
2365 if (lhs != (orig_use_lhs ? orig_use_lhs : phires)
2366 || !tree_fits_shwi_p (rhs)
2367 || !IN_RANGE (tree_to_shwi (rhs), -1, 1))
2368 return false;
2370 if (is_cast)
2372 if (TREE_CODE (rhs) != INTEGER_CST)
2373 return false;
2374 /* As with -ffast-math we assume a return value of 2 to be
2375 impossible; canonicalize (unsigned) res <= 1U or
2376 (unsigned) res < 2U into res >= 0, and (unsigned) res > 1U
2377 or (unsigned) res >= 2U into res < 0. */
2378 switch (cmp)
2380 case LE_EXPR:
2381 if (!integer_onep (rhs))
2382 return false;
2383 cmp = GE_EXPR;
2384 break;
2385 case LT_EXPR:
2386 if (wi::ne_p (wi::to_widest (rhs), 2))
2387 return false;
2388 cmp = GE_EXPR;
2389 break;
2390 case GT_EXPR:
2391 if (!integer_onep (rhs))
2392 return false;
2393 cmp = LT_EXPR;
2394 break;
2395 case GE_EXPR:
2396 if (wi::ne_p (wi::to_widest (rhs), 2))
2397 return false;
2398 cmp = LT_EXPR;
2399 break;
2400 default:
2401 return false;
2403 rhs = build_zero_cst (TREE_TYPE (phires));
2405 else if (orig_use_lhs)
2407 if ((cmp != EQ_EXPR && cmp != NE_EXPR) || !integer_zerop (rhs))
2408 return false;
2409 /* As with -ffast-math we assume a return value of 2 to be
2410 impossible; canonicalize (res & ~1) == 0 into
2411 res >= 0 and (res & ~1) != 0 into res < 0. */
2412 cmp = cmp == EQ_EXPR ? GE_EXPR : LT_EXPR;
2415 if (!empty_block_p (middle_bb))
2416 return false;
2418 gcond *cond1 = as_a <gcond *> (*gsi_last_bb (cond_bb));
2419 enum tree_code cmp1 = gimple_cond_code (cond1);
2420 switch (cmp1)
2422 case LT_EXPR:
2423 case LE_EXPR:
2424 case GT_EXPR:
2425 case GE_EXPR:
2426 break;
2427 default:
2428 return false;
2430 tree lhs1 = gimple_cond_lhs (cond1);
2431 tree rhs1 = gimple_cond_rhs (cond1);
2432 /* The optimization may be unsafe due to NaNs. */
2433 if (HONOR_NANS (TREE_TYPE (lhs1)))
2434 return false;
2435 if (TREE_CODE (lhs1) == SSA_NAME && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs1))
2436 return false;
2437 if (TREE_CODE (rhs1) == SSA_NAME && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs1))
2438 return false;
2440 if (!single_pred_p (cond_bb) || !cond_only_block_p (cond_bb))
2441 return false;
2443 basic_block cond2_bb = single_pred (cond_bb);
2444 if (EDGE_COUNT (cond2_bb->succs) != 2)
2445 return false;
2446 edge cond2_phi_edge;
2447 if (EDGE_SUCC (cond2_bb, 0)->dest == cond_bb)
2449 if (EDGE_SUCC (cond2_bb, 1)->dest != phi_bb)
2450 return false;
2451 cond2_phi_edge = EDGE_SUCC (cond2_bb, 1);
2453 else if (EDGE_SUCC (cond2_bb, 0)->dest != phi_bb)
2454 return false;
2455 else
2456 cond2_phi_edge = EDGE_SUCC (cond2_bb, 0);
2457 tree arg2 = gimple_phi_arg_def (phi, cond2_phi_edge->dest_idx);
2458 if (!tree_fits_shwi_p (arg2))
2459 return false;
2460 gcond *cond2 = safe_dyn_cast <gcond *> (*gsi_last_bb (cond2_bb));
2461 if (!cond2)
2462 return false;
2463 enum tree_code cmp2 = gimple_cond_code (cond2);
2464 tree lhs2 = gimple_cond_lhs (cond2);
2465 tree rhs2 = gimple_cond_rhs (cond2);
2466 if (lhs2 == lhs1)
2468 if (!operand_equal_p (rhs2, rhs1, 0))
2470 if ((cmp2 == EQ_EXPR || cmp2 == NE_EXPR)
2471 && TREE_CODE (rhs1) == INTEGER_CST
2472 && TREE_CODE (rhs2) == INTEGER_CST)
2474 /* For integers, we can have cond2 x == 5
2475 and cond1 x < 5, x <= 4, x <= 5, x < 6,
2476 x > 5, x >= 6, x >= 5 or x > 4. */
2477 if (tree_int_cst_lt (rhs1, rhs2))
2479 if (wi::ne_p (wi::to_wide (rhs1) + 1, wi::to_wide (rhs2)))
2480 return false;
2481 if (cmp1 == LE_EXPR)
2482 cmp1 = LT_EXPR;
2483 else if (cmp1 == GT_EXPR)
2484 cmp1 = GE_EXPR;
2485 else
2486 return false;
2488 else
2490 gcc_checking_assert (tree_int_cst_lt (rhs2, rhs1));
2491 if (wi::ne_p (wi::to_wide (rhs2) + 1, wi::to_wide (rhs1)))
2492 return false;
2493 if (cmp1 == LT_EXPR)
2494 cmp1 = LE_EXPR;
2495 else if (cmp1 == GE_EXPR)
2496 cmp1 = GT_EXPR;
2497 else
2498 return false;
2500 rhs1 = rhs2;
2502 else
2503 return false;
2506 else if (lhs2 == rhs1)
2508 if (rhs2 != lhs1)
2509 return false;
2511 else
2512 return false;
2514 tree arg3 = arg2;
2515 basic_block cond3_bb = cond2_bb;
2516 edge cond3_phi_edge = cond2_phi_edge;
2517 gcond *cond3 = cond2;
2518 enum tree_code cmp3 = cmp2;
2519 tree lhs3 = lhs2;
2520 tree rhs3 = rhs2;
2521 if (EDGE_COUNT (phi_bb->preds) == 4)
2523 if (absu_hwi (tree_to_shwi (arg2)) != 1)
2524 return false;
2525 if (e1->flags & EDGE_TRUE_VALUE)
2527 if (tree_to_shwi (arg0) != 2
2528 || absu_hwi (tree_to_shwi (arg1)) != 1
2529 || wi::to_widest (arg1) == wi::to_widest (arg2))
2530 return false;
2532 else if (tree_to_shwi (arg1) != 2
2533 || absu_hwi (tree_to_shwi (arg0)) != 1
2534 || wi::to_widest (arg0) == wi::to_widest (arg1))
2535 return false;
2536 switch (cmp2)
2538 case LT_EXPR:
2539 case LE_EXPR:
2540 case GT_EXPR:
2541 case GE_EXPR:
2542 break;
2543 default:
2544 return false;
2546 /* if (x < y) goto phi_bb; else fallthru;
2547 if (x > y) goto phi_bb; else fallthru;
2548 bbx:;
2549 phi_bb:;
2550 is ok, but if x and y are swapped in one of the comparisons,
2551 or the comparisons are the same and operands not swapped,
2552 or the true and false edges are swapped, it is not. */
2553 if ((lhs2 == lhs1)
2554 ^ (((cond2_phi_edge->flags
2555 & ((cmp2 == LT_EXPR || cmp2 == LE_EXPR)
2556 ? EDGE_TRUE_VALUE : EDGE_FALSE_VALUE)) != 0)
2557 != ((e1->flags
2558 & ((cmp1 == LT_EXPR || cmp1 == LE_EXPR)
2559 ? EDGE_TRUE_VALUE : EDGE_FALSE_VALUE)) != 0)))
2560 return false;
2561 if (!single_pred_p (cond2_bb) || !cond_only_block_p (cond2_bb))
2562 return false;
2563 cond3_bb = single_pred (cond2_bb);
2564 if (EDGE_COUNT (cond2_bb->succs) != 2)
2565 return false;
2566 if (EDGE_SUCC (cond3_bb, 0)->dest == cond2_bb)
2568 if (EDGE_SUCC (cond3_bb, 1)->dest != phi_bb)
2569 return false;
2570 cond3_phi_edge = EDGE_SUCC (cond3_bb, 1);
2572 else if (EDGE_SUCC (cond3_bb, 0)->dest != phi_bb)
2573 return false;
2574 else
2575 cond3_phi_edge = EDGE_SUCC (cond3_bb, 0);
2576 arg3 = gimple_phi_arg_def (phi, cond3_phi_edge->dest_idx);
2577 cond3 = safe_dyn_cast <gcond *> (*gsi_last_bb (cond3_bb));
2578 if (!cond3)
2579 return false;
2580 cmp3 = gimple_cond_code (cond3);
2581 lhs3 = gimple_cond_lhs (cond3);
2582 rhs3 = gimple_cond_rhs (cond3);
2583 if (lhs3 == lhs1)
2585 if (!operand_equal_p (rhs3, rhs1, 0))
2586 return false;
2588 else if (lhs3 == rhs1)
2590 if (rhs3 != lhs1)
2591 return false;
2593 else
2594 return false;
2596 else if (absu_hwi (tree_to_shwi (arg0)) != 1
2597 || absu_hwi (tree_to_shwi (arg1)) != 1
2598 || wi::to_widest (arg0) == wi::to_widest (arg1))
2599 return false;
2601 if (!integer_zerop (arg3) || (cmp3 != EQ_EXPR && cmp3 != NE_EXPR))
2602 return false;
2603 if ((cond3_phi_edge->flags & (cmp3 == EQ_EXPR
2604 ? EDGE_TRUE_VALUE : EDGE_FALSE_VALUE)) == 0)
2605 return false;
2607 /* lhs1 one_cmp rhs1 results in phires of 1. */
2608 enum tree_code one_cmp;
2609 if ((cmp1 == LT_EXPR || cmp1 == LE_EXPR)
2610 ^ (!integer_onep ((e1->flags & EDGE_TRUE_VALUE) ? arg1 : arg0)))
2611 one_cmp = LT_EXPR;
2612 else
2613 one_cmp = GT_EXPR;
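/* E.g. if lhs1 < rhs1 yields phires 1 (one_cmp == LT_EXPR), then
   phires == -1 corresponds to lhs1 > rhs1 and phires >= 0 to
   lhs1 <= rhs1; the switch below implements exactly this mapping.  */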
2615 enum tree_code res_cmp;
2616 switch (cmp)
2618 case EQ_EXPR:
2619 if (integer_zerop (rhs))
2620 res_cmp = EQ_EXPR;
2621 else if (integer_minus_onep (rhs))
2622 res_cmp = one_cmp == LT_EXPR ? GT_EXPR : LT_EXPR;
2623 else if (integer_onep (rhs))
2624 res_cmp = one_cmp;
2625 else
2626 return false;
2627 break;
2628 case NE_EXPR:
2629 if (integer_zerop (rhs))
2630 res_cmp = NE_EXPR;
2631 else if (integer_minus_onep (rhs))
2632 res_cmp = one_cmp == LT_EXPR ? LE_EXPR : GE_EXPR;
2633 else if (integer_onep (rhs))
2634 res_cmp = one_cmp == LT_EXPR ? GE_EXPR : LE_EXPR;
2635 else
2636 return false;
2637 break;
2638 case LT_EXPR:
2639 if (integer_onep (rhs))
2640 res_cmp = one_cmp == LT_EXPR ? GE_EXPR : LE_EXPR;
2641 else if (integer_zerop (rhs))
2642 res_cmp = one_cmp == LT_EXPR ? GT_EXPR : LT_EXPR;
2643 else
2644 return false;
2645 break;
2646 case LE_EXPR:
2647 if (integer_zerop (rhs))
2648 res_cmp = one_cmp == LT_EXPR ? GE_EXPR : LE_EXPR;
2649 else if (integer_minus_onep (rhs))
2650 res_cmp = one_cmp == LT_EXPR ? GT_EXPR : LT_EXPR;
2651 else
2652 return false;
2653 break;
2654 case GT_EXPR:
2655 if (integer_minus_onep (rhs))
2656 res_cmp = one_cmp == LT_EXPR ? LE_EXPR : GE_EXPR;
2657 else if (integer_zerop (rhs))
2658 res_cmp = one_cmp;
2659 else
2660 return false;
2661 break;
2662 case GE_EXPR:
2663 if (integer_zerop (rhs))
2664 res_cmp = one_cmp == LT_EXPR ? LE_EXPR : GE_EXPR;
2665 else if (integer_onep (rhs))
2666 res_cmp = one_cmp;
2667 else
2668 return false;
2669 break;
2670 default:
2671 gcc_unreachable ();
2674 if (gimple_code (use_stmt) == GIMPLE_COND)
2676 gcond *use_cond = as_a <gcond *> (use_stmt);
2677 gimple_cond_set_code (use_cond, res_cmp);
2678 gimple_cond_set_lhs (use_cond, lhs1);
2679 gimple_cond_set_rhs (use_cond, rhs1);
2681 else if (gimple_assign_rhs_class (use_stmt) == GIMPLE_BINARY_RHS)
2683 gimple_assign_set_rhs_code (use_stmt, res_cmp);
2684 gimple_assign_set_rhs1 (use_stmt, lhs1);
2685 gimple_assign_set_rhs2 (use_stmt, rhs1);
2687 else
2689 tree cond = build2 (res_cmp, TREE_TYPE (gimple_assign_rhs1 (use_stmt)),
2690 lhs1, rhs1);
2691 gimple_assign_set_rhs1 (use_stmt, cond);
2693 update_stmt (use_stmt);
2695 if (MAY_HAVE_DEBUG_BIND_STMTS)
2697 use_operand_p use_p;
2698 imm_use_iterator iter;
2699 bool has_debug_uses = false;
2700 bool has_cast_debug_uses = false;
2701 FOR_EACH_IMM_USE_FAST (use_p, iter, phires)
2703 gimple *use_stmt = USE_STMT (use_p);
2704 if (orig_use_lhs && use_stmt == orig_use_stmt)
2705 continue;
2706 gcc_assert (is_gimple_debug (use_stmt));
2707 has_debug_uses = true;
2708 break;
2710 if (orig_use_lhs)
2712 if (!has_debug_uses || is_cast)
2713 FOR_EACH_IMM_USE_FAST (use_p, iter, orig_use_lhs)
2715 gimple *use_stmt = USE_STMT (use_p);
2716 gcc_assert (is_gimple_debug (use_stmt));
2717 has_debug_uses = true;
2718 if (is_cast)
2719 has_cast_debug_uses = true;
2721 gimple_stmt_iterator gsi = gsi_for_stmt (orig_use_stmt);
2722 tree zero = build_zero_cst (TREE_TYPE (orig_use_lhs));
2723 gimple_assign_set_rhs_with_ops (&gsi, INTEGER_CST, zero);
2724 update_stmt (orig_use_stmt);
2727 if (has_debug_uses)
2729 /* If there are debug uses, emit something like:
2730 # DEBUG D#1 => i_2(D) > j_3(D) ? 1 : -1
2731 # DEBUG D#2 => i_2(D) == j_3(D) ? 0 : D#1
2732 where > stands for the comparison that yielded 1
2733 and replace debug uses of phi result with that D#2.
2734 Ignore the value of 2, because if NaNs aren't expected,
2735 all floating point numbers should be comparable. */
2736 gimple_stmt_iterator gsi = gsi_after_labels (gimple_bb (phi));
2737 tree type = TREE_TYPE (phires);
2738 tree temp1 = build_debug_expr_decl (type);
2739 tree t = build2 (one_cmp, boolean_type_node, lhs1, rhs2);
2740 t = build3 (COND_EXPR, type, t, build_one_cst (type),
2741 build_int_cst (type, -1));
2742 gimple *g = gimple_build_debug_bind (temp1, t, phi);
2743 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
2744 tree temp2 = build_debug_expr_decl (type);
2745 t = build2 (EQ_EXPR, boolean_type_node, lhs1, rhs2);
2746 t = build3 (COND_EXPR, type, t, build_zero_cst (type), temp1);
2747 g = gimple_build_debug_bind (temp2, t, phi);
2748 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
2749 replace_uses_by (phires, temp2);
2750 if (orig_use_lhs)
2752 if (has_cast_debug_uses)
2754 tree temp3 = make_node (DEBUG_EXPR_DECL);
2755 DECL_ARTIFICIAL (temp3) = 1;
2756 TREE_TYPE (temp3) = TREE_TYPE (orig_use_lhs);
2757 SET_DECL_MODE (temp3, TYPE_MODE (type));
2758 t = fold_convert (TREE_TYPE (temp3), temp2);
2759 g = gimple_build_debug_bind (temp3, t, phi);
2760 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
2761 replace_uses_by (orig_use_lhs, temp3);
2763 else
2764 replace_uses_by (orig_use_lhs, temp2);
2769 if (orig_use_lhs)
2771 gimple_stmt_iterator gsi = gsi_for_stmt (orig_use_stmt);
2772 gsi_remove (&gsi, true);
2775 gimple_stmt_iterator psi = gsi_for_stmt (phi);
2776 remove_phi_node (&psi, true);
2777 statistics_counter_event (cfun, "spaceship replacement", 1);
2779 return true;
2782 /* Optimize x ? __builtin_fun (x) : C, where C is __builtin_fun (0).
2783 Convert
2785 <bb 2>
2786 if (b_4(D) != 0)
2787 goto <bb 3>
2788 else
2789 goto <bb 4>
2791 <bb 3>
2792 _2 = (unsigned long) b_4(D);
2793 _9 = __builtin_popcountl (_2);
2795 _9 = __builtin_popcountl (b_4(D));
2797 <bb 4>
2798 c_12 = PHI <0(2), _9(3)>
2800 Into
2801 <bb 2>
2802 _2 = (unsigned long) b_4(D);
2803 _9 = __builtin_popcountl (_2);
2805 _9 = __builtin_popcountl (b_4(D));
2807 <bb 4>
2808 c_12 = PHI <_9(2)>
2810 Similarly for __builtin_clz or __builtin_ctz if
2811 C?Z_DEFINED_VALUE_AT_ZERO is 2, the optab is present, and
2812 instead of 0 above the value from that macro is used. */
2814 static bool
2815 cond_removal_in_builtin_zero_pattern (basic_block cond_bb,
2816 basic_block middle_bb,
2817 edge e1, edge e2, gphi *phi,
2818 tree arg0, tree arg1)
2820 gimple_stmt_iterator gsi, gsi_from;
2821 gimple *call;
2822 gimple *cast = NULL;
2823 tree lhs, arg;
2825 /* Check that
2826 _2 = (unsigned long) b_4(D);
2827 _9 = __builtin_popcountl (_2);
2829 _9 = __builtin_popcountl (b_4(D));
2830 are the only stmts in the middle_bb. */
2832 gsi = gsi_start_nondebug_after_labels_bb (middle_bb);
2833 if (gsi_end_p (gsi))
2834 return false;
2835 cast = gsi_stmt (gsi);
2836 gsi_next_nondebug (&gsi);
2837 if (!gsi_end_p (gsi))
2839 call = gsi_stmt (gsi);
2840 gsi_next_nondebug (&gsi);
2841 if (!gsi_end_p (gsi))
2842 return false;
2844 else
2846 call = cast;
2847 cast = NULL;
2850 /* Check that we have a popcount/clz/ctz builtin. */
2851 if (!is_gimple_call (call) || gimple_call_num_args (call) != 1)
2852 return false;
2854 arg = gimple_call_arg (call, 0);
2855 lhs = gimple_get_lhs (call);
2857 if (lhs == NULL_TREE)
2858 return false;
2860 combined_fn cfn = gimple_call_combined_fn (call);
2861 internal_fn ifn = IFN_LAST;
2862 int val = 0;
2863 switch (cfn)
2865 case CFN_BUILT_IN_BSWAP16:
2866 case CFN_BUILT_IN_BSWAP32:
2867 case CFN_BUILT_IN_BSWAP64:
2868 case CFN_BUILT_IN_BSWAP128:
2869 CASE_CFN_FFS:
2870 CASE_CFN_PARITY:
2871 CASE_CFN_POPCOUNT:
2872 break;
2873 CASE_CFN_CLZ:
2874 if (INTEGRAL_TYPE_P (TREE_TYPE (arg)))
2876 tree type = TREE_TYPE (arg);
2877 if (direct_internal_fn_supported_p (IFN_CLZ, type, OPTIMIZE_FOR_BOTH)
2878 && CLZ_DEFINED_VALUE_AT_ZERO (SCALAR_INT_TYPE_MODE (type),
2879 val) == 2)
2881 ifn = IFN_CLZ;
2882 break;
2885 return false;
2886 CASE_CFN_CTZ:
2887 if (INTEGRAL_TYPE_P (TREE_TYPE (arg)))
2889 tree type = TREE_TYPE (arg);
2890 if (direct_internal_fn_supported_p (IFN_CTZ, type, OPTIMIZE_FOR_BOTH)
2891 && CTZ_DEFINED_VALUE_AT_ZERO (SCALAR_INT_TYPE_MODE (type),
2892 val) == 2)
2894 ifn = IFN_CTZ;
2895 break;
2898 return false;
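/* For __builtin_clrsb* no target macro is needed: clrsb (0) is the
   number of redundant sign bits of zero, i.e. TYPE_PRECISION - 1.  */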
2899 case CFN_BUILT_IN_CLRSB:
2900 val = TYPE_PRECISION (integer_type_node) - 1;
2901 break;
2902 case CFN_BUILT_IN_CLRSBL:
2903 val = TYPE_PRECISION (long_integer_type_node) - 1;
2904 break;
2905 case CFN_BUILT_IN_CLRSBLL:
2906 val = TYPE_PRECISION (long_long_integer_type_node) - 1;
2907 break;
2908 default:
2909 return false;
2912 if (cast)
2914 /* We have a cast stmt feeding popcount/clz/ctz builtin. */
2915 /* Check that we have a cast prior to that. */
2916 if (gimple_code (cast) != GIMPLE_ASSIGN
2917 || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (cast)))
2918 return false;
2919 /* Result of the cast stmt is the argument to the builtin. */
2920 if (arg != gimple_assign_lhs (cast))
2921 return false;
2922 arg = gimple_assign_rhs1 (cast);
2925 gcond *cond = dyn_cast <gcond *> (*gsi_last_bb (cond_bb));
2927 /* Cond_bb has a check for b_4 [!=|==] 0 before calling the popcount/clz/ctz
2928 builtin. */
2929 if (!cond
2930 || (gimple_cond_code (cond) != NE_EXPR
2931 && gimple_cond_code (cond) != EQ_EXPR)
2932 || !integer_zerop (gimple_cond_rhs (cond))
2933 || arg != gimple_cond_lhs (cond))
2934 return false;
2936 /* Canonicalize. */
2937 if ((e2->flags & EDGE_TRUE_VALUE
2938 && gimple_cond_code (cond) == NE_EXPR)
2939 || (e1->flags & EDGE_TRUE_VALUE
2940 && gimple_cond_code (cond) == EQ_EXPR))
2942 std::swap (arg0, arg1);
2943 std::swap (e1, e2);
2946 /* Check PHI arguments. */
2947 if (lhs != arg0
2948 || TREE_CODE (arg1) != INTEGER_CST
2949 || wi::to_wide (arg1) != val)
2950 return false;
2952 /* And insert the popcount/clz/ctz builtin and cast stmt into cond_bb,
2953 just before its final condition. */
2954 gsi = gsi_last_bb (cond_bb);
2955 if (cast)
2957 gsi_from = gsi_for_stmt (cast);
2958 gsi_move_before (&gsi_from, &gsi);
2959 reset_flow_sensitive_info (gimple_get_lhs (cast));
2961 gsi_from = gsi_for_stmt (call);
2962 if (ifn == IFN_LAST || gimple_call_internal_p (call))
2963 gsi_move_before (&gsi_from, &gsi);
2964 else
2966 /* For __builtin_c[lt]z* force .C[LT]Z ifn, because only
2967 the latter is well defined at zero. */
2968 call = gimple_build_call_internal (ifn, 1, gimple_call_arg (call, 0));
2969 gimple_call_set_lhs (call, lhs);
2970 gsi_insert_before (&gsi, call, GSI_SAME_STMT);
2971 gsi_remove (&gsi_from, true);
2973 reset_flow_sensitive_info (lhs);
2975 /* Now update the PHI and remove unneeded bbs. */
2976 replace_phi_edge_with_variable (cond_bb, e2, phi, lhs);
2977 return true;
2980 /* Auxiliary functions to determine the set of memory accesses which
2981 can't trap because they are preceded by accesses to the same memory
2982 portion. We do that for MEM_REFs, so we only need to track
2983 the SSA_NAME of the pointer indirectly referenced. The algorithm
2984 simply is a walk over all instructions in dominator order. When
2985 we see a MEM_REF we determine if we've already seen the same
2986 ref anywhere up to the root of the dominator tree. If we have, the
2987 current access can't trap. If we don't see any dominating access,
2988 the current access might trap, but might also make later accesses
2989 non-trapping, so we remember it. We need to be careful with loads
2990 or stores, for instance a load might not trap, while a store would,
2991 so if we see a dominating read access this doesn't mean that a later
2992 write access would not trap. Hence we also need to differentiate the
2993 type of access(es) seen.
2995 ??? We currently are very conservative and assume that a load might
2996 trap even if a store doesn't (write-only memory). This probably is
2997 overly conservative.
2999 We currently support a special case: for !TREE_ADDRESSABLE automatic
3000 variables we can ignore whether something is a load or a store,
3001 because the local stack is always writable. */
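/* For instance (a sketch):
     *p = 0;
     if (cond)
       *p = 1;
   The first store dominates the second, so the conditional store is
   recorded as non-trapping, which later enables conditional store
   replacement (cselim).  */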
3003 /* A hash-table of references (MEM_REF/ARRAY_REF/COMPONENT_REF), and in which
3004 basic block an *_REF through it was seen, which would constitute a
3005 no-trap region for same accesses.
3007 Size is needed to support 2 MEM_REFs of different types, like
3008 MEM<double>(s_1) and MEM<long>(s_1), which would compare equal with
3009 OEP_ADDRESS_OF. */
3010 struct ref_to_bb
3012 tree exp;
3013 HOST_WIDE_INT size;
3014 unsigned int phase;
3015 basic_block bb;
3018 /* Hashtable helpers. */
3020 struct refs_hasher : free_ptr_hash<ref_to_bb>
3022 static inline hashval_t hash (const ref_to_bb *);
3023 static inline bool equal (const ref_to_bb *, const ref_to_bb *);
3026 /* Used for quick clearing of the hash-table when we see calls.
3027 Hash entries with phase < nt_call_phase are invalid. */
3028 static unsigned int nt_call_phase;
3030 /* The hash function. */
3032 inline hashval_t
3033 refs_hasher::hash (const ref_to_bb *n)
3035 inchash::hash hstate;
3036 inchash::add_expr (n->exp, hstate, OEP_ADDRESS_OF);
3037 hstate.add_hwi (n->size);
3038 return hstate.end ();
3041 /* The equality function of *P1 and *P2. */
3043 inline bool
3044 refs_hasher::equal (const ref_to_bb *n1, const ref_to_bb *n2)
3046 return operand_equal_p (n1->exp, n2->exp, OEP_ADDRESS_OF)
3047 && n1->size == n2->size;
3050 class nontrapping_dom_walker : public dom_walker
3052 public:
3053 nontrapping_dom_walker (cdi_direction direction, hash_set<tree> *ps)
3054 : dom_walker (direction), m_nontrapping (ps), m_seen_refs (128)
3057 edge before_dom_children (basic_block) final override;
3058 void after_dom_children (basic_block) final override;
3060 private:
3062 /* We see the expression EXP in basic block BB. If it's an interesting
3063 expression (a MEM_REF through an SSA_NAME) possibly insert the
3064 expression into the set NONTRAP or the hash table of seen expressions.
3065 STORE is true if this expression is on the LHS, otherwise it's on
3066 the RHS. */
3067 void add_or_mark_expr (basic_block, tree, bool);
3069 hash_set<tree> *m_nontrapping;
3071 /* The hash table for remembering what we've seen. */
3072 hash_table<refs_hasher> m_seen_refs;
3075 /* Called by walk_dominator_tree, when entering the block BB. */
3076 edge
3077 nontrapping_dom_walker::before_dom_children (basic_block bb)
3079 edge e;
3080 edge_iterator ei;
3081 gimple_stmt_iterator gsi;
3083 /* If we haven't seen all our predecessors, clear the hash-table. */
3084 FOR_EACH_EDGE (e, ei, bb->preds)
3085 if ((((size_t)e->src->aux) & 2) == 0)
3087 nt_call_phase++;
3088 break;
3091 /* Mark this BB as being on the path to dominator root and as visited. */
3092 bb->aux = (void*)(1 | 2);
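/* The bb->aux encoding used by this walk: bit 0 means the BB is on
   the path from the current block to the dominator root, bit 1 means
   it was visited; after_dom_children clears bit 0 again.  */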
3094 /* And walk the statements in order. */
3095 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3097 gimple *stmt = gsi_stmt (gsi);
3099 if ((gimple_code (stmt) == GIMPLE_ASM && gimple_vdef (stmt))
3100 || (is_gimple_call (stmt)
3101 && (!nonfreeing_call_p (stmt) || !nonbarrier_call_p (stmt))))
3102 nt_call_phase++;
3103 else if (gimple_assign_single_p (stmt) && !gimple_has_volatile_ops (stmt))
3105 add_or_mark_expr (bb, gimple_assign_lhs (stmt), true);
3106 add_or_mark_expr (bb, gimple_assign_rhs1 (stmt), false);
3109 return NULL;
3112 /* Called by walk_dominator_tree, when basic block BB is exited. */
3113 void
3114 nontrapping_dom_walker::after_dom_children (basic_block bb)
3116 /* This BB isn't on the path to dominator root anymore. */
3117 bb->aux = (void*)2;
3120 /* We see the expression EXP in basic block BB. If it's an interesting
3121 expression of:
3122 1) MEM_REF
3123 2) ARRAY_REF
3124 3) COMPONENT_REF
3125 possibly insert the expression into the set NONTRAP or the hash table
3126 of seen expressions. STORE is true if this expression is on the LHS,
3127 otherwise it's on the RHS. */
3128 void
3129 nontrapping_dom_walker::add_or_mark_expr (basic_block bb, tree exp, bool store)
3131 HOST_WIDE_INT size;
3133 if ((TREE_CODE (exp) == MEM_REF || TREE_CODE (exp) == ARRAY_REF
3134 || TREE_CODE (exp) == COMPONENT_REF)
3135 && (size = int_size_in_bytes (TREE_TYPE (exp))) > 0)
3137 struct ref_to_bb map;
3138 ref_to_bb **slot;
3139 struct ref_to_bb *r2bb;
3140 basic_block found_bb = 0;
3142 if (!store)
3144 tree base = get_base_address (exp);
3145 /* Only record a LOAD of a local variable whose address is not taken, as
3146 the local stack is always writable. This allows cselim on a STORE
3147 with a dominating LOAD. */
3148 if (!auto_var_p (base) || TREE_ADDRESSABLE (base))
3149 return;
3152 /* Try to find the last seen *_REF, which can trap. */
3153 map.exp = exp;
3154 map.size = size;
3155 slot = m_seen_refs.find_slot (&map, INSERT);
3156 r2bb = *slot;
3157 if (r2bb && r2bb->phase >= nt_call_phase)
3158 found_bb = r2bb->bb;
3160 /* If we've found a trapping *_REF, _and_ it dominates EXP
3161 (it's in a basic block on the path from us to the dominator root)
3162 then EXP can't trap. */
3163 if (found_bb && (((size_t)found_bb->aux) & 1) == 1)
3165 m_nontrapping->add (exp);
3167 else
3169 /* EXP might trap, so insert it into the hash table. */
3170 if (r2bb)
3172 r2bb->phase = nt_call_phase;
3173 r2bb->bb = bb;
3175 else
3177 r2bb = XNEW (struct ref_to_bb);
3178 r2bb->phase = nt_call_phase;
3179 r2bb->bb = bb;
3180 r2bb->exp = exp;
3181 r2bb->size = size;
3182 *slot = r2bb;
3188 /* This is the entry point of gathering non-trapping memory accesses.
3189 It will do a dominator walk over the whole function, and it will
3190 make use of the bb->aux pointers. It returns a set of trees
3191 (the *_REFs themselves) which can't trap. */
3192 static hash_set<tree> *
3193 get_non_trapping (void)
3195 nt_call_phase = 0;
3196 hash_set<tree> *nontrap = new hash_set<tree>;
3198 nontrapping_dom_walker (CDI_DOMINATORS, nontrap)
3199 .walk (cfun->cfg->x_entry_block_ptr);
3201 clear_aux_for_blocks ();
3202 return nontrap;
3205 /* Do the main work of conditional store replacement. We already know
3206 that the recognized pattern looks like so:
3208 split:
3209 if (cond) goto MIDDLE_BB; else goto JOIN_BB (edge E1)
3210 MIDDLE_BB:
3211 something
3212 fallthrough (edge E0)
3213 JOIN_BB:
3214 some more
3216 We check that MIDDLE_BB contains only one store, that that store
3217 doesn't trap (not via NOTRAP, but via checking if an access to the same
3218 memory location dominates us, or the store is to a local addressable
3219 object) and that the store has a "simple" RHS. */
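/* Roughly, the transformation below turns
     if (cond)
       *p = v;
   into
     tmp = cond ? v : *p;
     *p = tmp;
   by removing the store, loading the old value on the no-store edge,
   and merging both values with a PHI in JOIN_BB.  (A sketch.)  */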
3221 static bool
3222 cond_store_replacement (basic_block middle_bb, basic_block join_bb,
3223 edge e0, edge e1, hash_set<tree> *nontrap)
3225 gimple *assign = last_and_only_stmt (middle_bb);
3226 tree lhs, rhs, name, name2;
3227 gphi *newphi;
3228 gassign *new_stmt;
3229 gimple_stmt_iterator gsi;
3230 location_t locus;
3232 /* Check that middle_bb contains only one store. */
3233 if (!assign
3234 || !gimple_assign_single_p (assign)
3235 || gimple_has_volatile_ops (assign))
3236 return false;
3238 /* And no PHI nodes so all uses in the single stmt are also
3239 available where we insert to. */
3240 if (!gimple_seq_empty_p (phi_nodes (middle_bb)))
3241 return false;
3243 locus = gimple_location (assign);
3244 lhs = gimple_assign_lhs (assign);
3245 rhs = gimple_assign_rhs1 (assign);
3246 if ((!REFERENCE_CLASS_P (lhs)
3247 && !DECL_P (lhs))
3248 || !is_gimple_reg_type (TREE_TYPE (lhs)))
3249 return false;
3251 /* Prove that we can move the store down. We could also check
3252 TREE_THIS_NOTRAP here, but in that case we also could move stores,
3253 whose value is not available readily, which we want to avoid. */
3254 if (!nontrap->contains (lhs))
3256 /* If LHS is an access to a local variable whose address is not taken
3257 (or when we allow store data races) and it is known not to trap,
3258 we can always safely move the store down. */
3259 tree base = get_base_address (lhs);
3260 if (!auto_var_p (base)
3261 || (TREE_ADDRESSABLE (base) && !flag_store_data_races)
3262 || tree_could_trap_p (lhs))
3263 return false;
3266 /* Now we've checked the constraints, so do the transformation:
3267 1) Remove the single store. */
3268 gsi = gsi_for_stmt (assign);
3269 unlink_stmt_vdef (assign);
3270 gsi_remove (&gsi, true);
3271 release_defs (assign);
3273 /* Make both store and load use alias-set zero as we have to
3274 deal with the case of the store being a conditional change
3275 of the dynamic type. */
3276 lhs = unshare_expr (lhs);
3277 tree *basep = &lhs;
3278 while (handled_component_p (*basep))
3279 basep = &TREE_OPERAND (*basep, 0);
3280 if (TREE_CODE (*basep) == MEM_REF
3281 || TREE_CODE (*basep) == TARGET_MEM_REF)
3282 TREE_OPERAND (*basep, 1)
3283 = fold_convert (ptr_type_node, TREE_OPERAND (*basep, 1));
3284 else
3285 *basep = build2 (MEM_REF, TREE_TYPE (*basep),
3286 build_fold_addr_expr (*basep),
3287 build_zero_cst (ptr_type_node));
3289 /* 2) Insert a load from the memory of the store to the temporary
3290 on the edge which did not contain the store. */
3291 name = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
3292 new_stmt = gimple_build_assign (name, lhs);
3293 gimple_set_location (new_stmt, locus);
3294 lhs = unshare_expr (lhs);
3296 /* Set the no-warning bit on the rhs of the load to avoid uninit
3297 warnings. */
3298 tree rhs1 = gimple_assign_rhs1 (new_stmt);
3299 suppress_warning (rhs1, OPT_Wuninitialized);
3301 gsi_insert_on_edge (e1, new_stmt);
3303 /* 3) Create a PHI node at the join block, with one argument
3304 holding the old RHS, and the other holding the temporary
3305 where we stored the old memory contents. */
3306 name2 = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
3307 newphi = create_phi_node (name2, join_bb);
3308 add_phi_arg (newphi, rhs, e0, locus);
3309 add_phi_arg (newphi, name, e1, locus);
3311 new_stmt = gimple_build_assign (lhs, PHI_RESULT (newphi));
3313 /* 4) Insert that PHI node. */
3314 gsi = gsi_after_labels (join_bb);
3315 if (gsi_end_p (gsi))
3317 gsi = gsi_last_bb (join_bb);
3318 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
3320 else
3321 gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
3323 if (dump_file && (dump_flags & TDF_DETAILS))
3325 fprintf (dump_file, "\nConditional store replacement happened!");
3326 fprintf (dump_file, "\nReplaced the store with a load.");
3327 fprintf (dump_file, "\nInserted a new PHI statement in join block:\n");
3328 print_gimple_stmt (dump_file, new_stmt, 0, TDF_VOPS|TDF_MEMSYMS);
3330 statistics_counter_event (cfun, "conditional store replacement", 1);
3332 return true;
3335 /* Do the main work of conditional store replacement. */
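/* A sketch of the shape handled here:
     if (cond) *p = a; else *p = b;
   becomes
     tmp = PHI <a, b>;   // in JOIN_BB
     *p = tmp;
   assuming both branches store to the same LHS.  */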
3337 static bool
3338 cond_if_else_store_replacement_1 (basic_block then_bb, basic_block else_bb,
3339 basic_block join_bb, gimple *then_assign,
3340 gimple *else_assign)
3342 tree lhs_base, lhs, then_rhs, else_rhs, name;
3343 location_t then_locus, else_locus;
3344 gimple_stmt_iterator gsi;
3345 gphi *newphi;
3346 gassign *new_stmt;
3348 if (then_assign == NULL
3349 || !gimple_assign_single_p (then_assign)
3350 || gimple_clobber_p (then_assign)
3351 || gimple_has_volatile_ops (then_assign)
3352 || else_assign == NULL
3353 || !gimple_assign_single_p (else_assign)
3354 || gimple_clobber_p (else_assign)
3355 || gimple_has_volatile_ops (else_assign))
3356 return false;
3358 lhs = gimple_assign_lhs (then_assign);
3359 if (!is_gimple_reg_type (TREE_TYPE (lhs))
3360 || !operand_equal_p (lhs, gimple_assign_lhs (else_assign), 0))
3361 return false;
3363 lhs_base = get_base_address (lhs);
3364 if (lhs_base == NULL_TREE
3365 || (!DECL_P (lhs_base) && TREE_CODE (lhs_base) != MEM_REF))
3366 return false;
3368 then_rhs = gimple_assign_rhs1 (then_assign);
3369 else_rhs = gimple_assign_rhs1 (else_assign);
3370 then_locus = gimple_location (then_assign);
3371 else_locus = gimple_location (else_assign);
3373 /* Now we've checked the constraints, so do the transformation:
3374 1) Remove the stores. */
3375 gsi = gsi_for_stmt (then_assign);
3376 unlink_stmt_vdef (then_assign);
3377 gsi_remove (&gsi, true);
3378 release_defs (then_assign);
3380 gsi = gsi_for_stmt (else_assign);
3381 unlink_stmt_vdef (else_assign);
3382 gsi_remove (&gsi, true);
3383 release_defs (else_assign);
3385 /* 2) Create a PHI node at the join block, with one argument
3386 holding the old RHS, and the other holding the temporary
3387 where we stored the old memory contents. */
3388 name = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
3389 newphi = create_phi_node (name, join_bb);
3390 add_phi_arg (newphi, then_rhs, EDGE_SUCC (then_bb, 0), then_locus);
3391 add_phi_arg (newphi, else_rhs, EDGE_SUCC (else_bb, 0), else_locus);
3393 new_stmt = gimple_build_assign (lhs, PHI_RESULT (newphi));
3395 /* 3) Insert that PHI node. */
3396 gsi = gsi_after_labels (join_bb);
3397 if (gsi_end_p (gsi))
3399 gsi = gsi_last_bb (join_bb);
3400 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
3402 else
3403 gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
3405 statistics_counter_event (cfun, "if-then-else store replacement", 1);
3407 return true;
3410 /* Return the single store in BB with VDEF or NULL if there are
3411 other stores in the BB or loads following the store. */
3413 static gimple *
3414 single_trailing_store_in_bb (basic_block bb, tree vdef)
3416 if (SSA_NAME_IS_DEFAULT_DEF (vdef))
3417 return NULL;
3418 gimple *store = SSA_NAME_DEF_STMT (vdef);
3419 if (gimple_bb (store) != bb
3420 || gimple_code (store) == GIMPLE_PHI)
3421 return NULL;
3423 /* Verify there is no other store in this BB. */
3424 if (!SSA_NAME_IS_DEFAULT_DEF (gimple_vuse (store))
3425 && gimple_bb (SSA_NAME_DEF_STMT (gimple_vuse (store))) == bb
3426 && gimple_code (SSA_NAME_DEF_STMT (gimple_vuse (store))) != GIMPLE_PHI)
3427 return NULL;
3429 /* Verify there is no load or store after the store. */
3430 use_operand_p use_p;
3431 imm_use_iterator imm_iter;
3432 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, gimple_vdef (store))
3433 if (USE_STMT (use_p) != store
3434 && gimple_bb (USE_STMT (use_p)) == bb)
3435 return NULL;
3437 return store;
3440 /* Conditional store replacement. We already know
3441 that the recognized pattern looks like so:
3443 split:
3444 if (cond) goto THEN_BB; else goto ELSE_BB (edge E1)
3445 THEN_BB:
3447 X = Y;
3449 goto JOIN_BB;
3450 ELSE_BB:
3452 X = Z;
3454 fallthrough (edge E0)
3455 JOIN_BB:
3456 some more
3458 We check that it is safe to sink the store to JOIN_BB by verifying that
3459 there are no read-after-write or write-after-write dependencies in
3460 THEN_BB and ELSE_BB. */
3462 static bool
3463 cond_if_else_store_replacement (basic_block then_bb, basic_block else_bb,
3464 basic_block join_bb)
3466 vec<data_reference_p> then_datarefs, else_datarefs;
3467 vec<ddr_p> then_ddrs, else_ddrs;
3468 gimple *then_store, *else_store;
3469 bool found, ok = false, res;
3470 struct data_dependence_relation *ddr;
3471 data_reference_p then_dr, else_dr;
3472 int i, j;
3473 tree then_lhs, else_lhs;
3474 basic_block blocks[3];
3476 /* Handle the case with a single store in THEN_BB and ELSE_BB. That
3477 is cheap enough to always handle as it allows us to elide dependence
3478 checking. */
3479 gphi *vphi = NULL;
3480 for (gphi_iterator si = gsi_start_phis (join_bb); !gsi_end_p (si);
3481 gsi_next (&si))
3482 if (virtual_operand_p (gimple_phi_result (si.phi ())))
3484 vphi = si.phi ();
3485 break;
3487 if (!vphi)
3488 return false;
3489 tree then_vdef = PHI_ARG_DEF_FROM_EDGE (vphi, single_succ_edge (then_bb));
3490 tree else_vdef = PHI_ARG_DEF_FROM_EDGE (vphi, single_succ_edge (else_bb));
3491 gimple *then_assign = single_trailing_store_in_bb (then_bb, then_vdef);
3492 if (then_assign)
3494 gimple *else_assign = single_trailing_store_in_bb (else_bb, else_vdef);
3495 if (else_assign)
3496 return cond_if_else_store_replacement_1 (then_bb, else_bb, join_bb,
3497 then_assign, else_assign);
3500 /* If either vectorization or if-conversion is disabled then do
3501 not sink any stores. */
3502 if (param_max_stores_to_sink == 0
3503 || (!flag_tree_loop_vectorize && !flag_tree_slp_vectorize)
3504 || !flag_tree_loop_if_convert)
3505 return false;
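/* E.g. running with --param max-stores-to-sink=0 disables this
   dependence-checked sinking entirely; the single-trailing-store
   case above is still handled.  (Illustrative note.)  */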
3507 /* Find data references. */
3508 then_datarefs.create (1);
3509 else_datarefs.create (1);
3510 if ((find_data_references_in_bb (NULL, then_bb, &then_datarefs)
3511 == chrec_dont_know)
3512 || !then_datarefs.length ()
3513 || (find_data_references_in_bb (NULL, else_bb, &else_datarefs)
3514 == chrec_dont_know)
3515 || !else_datarefs.length ())
3517 free_data_refs (then_datarefs);
3518 free_data_refs (else_datarefs);
3519 return false;
3522 /* Find pairs of stores with equal LHS. */
3523 auto_vec<gimple *, 1> then_stores, else_stores;
3524 FOR_EACH_VEC_ELT (then_datarefs, i, then_dr)
3526 if (DR_IS_READ (then_dr))
3527 continue;
3529 then_store = DR_STMT (then_dr);
3530 then_lhs = gimple_get_lhs (then_store);
3531 if (then_lhs == NULL_TREE)
3532 continue;
3533 found = false;
3535 FOR_EACH_VEC_ELT (else_datarefs, j, else_dr)
3537 if (DR_IS_READ (else_dr))
3538 continue;
3540 else_store = DR_STMT (else_dr);
3541 else_lhs = gimple_get_lhs (else_store);
3542 if (else_lhs == NULL_TREE)
3543 continue;
3545 if (operand_equal_p (then_lhs, else_lhs, 0))
3547 found = true;
3548 break;
3552 if (!found)
3553 continue;
3555 then_stores.safe_push (then_store);
3556 else_stores.safe_push (else_store);
3559 /* No pairs of stores found, or more than we are allowed to sink. */
3560 if (!then_stores.length ()
3561 || then_stores.length () > (unsigned) param_max_stores_to_sink)
3563 free_data_refs (then_datarefs);
3564 free_data_refs (else_datarefs);
3565 return false;
3568 /* Compute and check data dependencies in both basic blocks. */
3569 then_ddrs.create (1);
3570 else_ddrs.create (1);
3571 if (!compute_all_dependences (then_datarefs, &then_ddrs,
3572 vNULL, false)
3573 || !compute_all_dependences (else_datarefs, &else_ddrs,
3574 vNULL, false))
3576 free_dependence_relations (then_ddrs);
3577 free_dependence_relations (else_ddrs);
3578 free_data_refs (then_datarefs);
3579 free_data_refs (else_datarefs);
3580 return false;
3582 blocks[0] = then_bb;
3583 blocks[1] = else_bb;
3584 blocks[2] = join_bb;
3585 renumber_gimple_stmt_uids_in_blocks (blocks, 3);
3587 /* Check that there are no read-after-write or write-after-write dependencies
3588 in THEN_BB. */
3589 FOR_EACH_VEC_ELT (then_ddrs, i, ddr)
3591 struct data_reference *dra = DDR_A (ddr);
3592 struct data_reference *drb = DDR_B (ddr);
3594 if (DDR_ARE_DEPENDENT (ddr) != chrec_known
3595 && ((DR_IS_READ (dra) && DR_IS_WRITE (drb)
3596 && gimple_uid (DR_STMT (dra)) > gimple_uid (DR_STMT (drb)))
3597 || (DR_IS_READ (drb) && DR_IS_WRITE (dra)
3598 && gimple_uid (DR_STMT (drb)) > gimple_uid (DR_STMT (dra)))
3599 || (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))))
3601 free_dependence_relations (then_ddrs);
3602 free_dependence_relations (else_ddrs);
3603 free_data_refs (then_datarefs);
3604 free_data_refs (else_datarefs);
3605 return false;
3609 /* Check that there are no read-after-write or write-after-write dependencies
3610 in ELSE_BB. */
3611 FOR_EACH_VEC_ELT (else_ddrs, i, ddr)
3613 struct data_reference *dra = DDR_A (ddr);
3614 struct data_reference *drb = DDR_B (ddr);
3616 if (DDR_ARE_DEPENDENT (ddr) != chrec_known
3617 && ((DR_IS_READ (dra) && DR_IS_WRITE (drb)
3618 && gimple_uid (DR_STMT (dra)) > gimple_uid (DR_STMT (drb)))
3619 || (DR_IS_READ (drb) && DR_IS_WRITE (dra)
3620 && gimple_uid (DR_STMT (drb)) > gimple_uid (DR_STMT (dra)))
3621 || (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))))
3623 free_dependence_relations (then_ddrs);
3624 free_dependence_relations (else_ddrs);
3625 free_data_refs (then_datarefs);
3626 free_data_refs (else_datarefs);
3627 return false;
3631 /* Sink stores with same LHS. */
3632 FOR_EACH_VEC_ELT (then_stores, i, then_store)
3634 else_store = else_stores[i];
3635 res = cond_if_else_store_replacement_1 (then_bb, else_bb, join_bb,
3636 then_store, else_store);
3637 ok = ok || res;
3640 free_dependence_relations (then_ddrs);
3641 free_dependence_relations (else_ddrs);
3642 free_data_refs (then_datarefs);
3643 free_data_refs (else_datarefs);
3645 return ok;
3648 /* Return TRUE if STMT has a VUSE whose corresponding VDEF is in BB. */
3650 static bool
3651 local_mem_dependence (gimple *stmt, basic_block bb)
3653 tree vuse = gimple_vuse (stmt);
3654 gimple *def;
3656 if (!vuse)
3657 return false;
3659 def = SSA_NAME_DEF_STMT (vuse);
3660 return (def && gimple_bb (def) == bb);
3663 /* Given a "diamond" control-flow pattern where BB0 tests a condition,
3664 BB1 and BB2 are "then" and "else" blocks dependent on this test,
3665 and BB3 rejoins control flow following BB1 and BB2, look for
3666 opportunities to hoist loads as follows. If BB3 contains a PHI of
3667 two loads, one each occurring in BB1 and BB2, and the loads are
3668 provably of adjacent fields in the same structure, then move both
3669 loads into BB0. Of course this can only be done if there are no
3670 dependencies preventing such motion.
3672 One of the hoisted loads will always be speculative, so the
3673 transformation is currently conservative:
3675 - The fields must be strictly adjacent.
3676 - The two fields must occupy a single memory block that is
3677 guaranteed to not cross a page boundary.
3679 The last is difficult to prove, as such memory blocks should be
3680 aligned on the minimum of the stack alignment boundary and the
3681 alignment guaranteed by heap allocation interfaces. Thus we rely
3682 on a parameter for the alignment value.
3684 Provided a good value is used for the last case, the first
3685 restriction could possibly be relaxed. */
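/* A sketch of the shape being targeted, assuming a and b are
   adjacent fields of the same struct:
     x = cond ? p->a : p->b;
   After the transformation both loads are issued in BB0 and only the
   selection remains conditional.  */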
3687 static void
3688 hoist_adjacent_loads (basic_block bb0, basic_block bb1,
3689 basic_block bb2, basic_block bb3)
3691 int param_align = param_l1_cache_line_size;
3692 unsigned param_align_bits = (unsigned) (param_align * BITS_PER_UNIT);
3693 gphi_iterator gsi;
3695 /* Walk the phis in bb3 looking for an opportunity. We are looking
3696 for phis of two SSA names, one each of which is defined in bb1 and
3697 bb2. */
3698 for (gsi = gsi_start_phis (bb3); !gsi_end_p (gsi); gsi_next (&gsi))
3700 gphi *phi_stmt = gsi.phi ();
3701 gimple *def1, *def2;
3702 tree arg1, arg2, ref1, ref2, field1, field2;
3703 tree tree_offset1, tree_offset2, tree_size2, next;
3704 int offset1, offset2, size2;
3705 unsigned align1;
3706 gimple_stmt_iterator gsi2;
3707 basic_block bb_for_def1, bb_for_def2;
3709 if (gimple_phi_num_args (phi_stmt) != 2
3710 || virtual_operand_p (gimple_phi_result (phi_stmt)))
3711 continue;
3713 arg1 = gimple_phi_arg_def (phi_stmt, 0);
3714 arg2 = gimple_phi_arg_def (phi_stmt, 1);
3716 if (TREE_CODE (arg1) != SSA_NAME
3717 || TREE_CODE (arg2) != SSA_NAME
3718 || SSA_NAME_IS_DEFAULT_DEF (arg1)
3719 || SSA_NAME_IS_DEFAULT_DEF (arg2))
3720 continue;
3722 def1 = SSA_NAME_DEF_STMT (arg1);
3723 def2 = SSA_NAME_DEF_STMT (arg2);
3725 if ((gimple_bb (def1) != bb1 || gimple_bb (def2) != bb2)
3726 && (gimple_bb (def2) != bb1 || gimple_bb (def1) != bb2))
3727 continue;
3729 /* Check the mode of the arguments to be sure a conditional move
3730 can be generated for it. */
3731 if (optab_handler (movcc_optab, TYPE_MODE (TREE_TYPE (arg1)))
3732 == CODE_FOR_nothing)
3733 continue;
3735 /* Both statements must be assignments whose RHS is a COMPONENT_REF. */
3736 if (!gimple_assign_single_p (def1)
3737 || !gimple_assign_single_p (def2)
3738 || gimple_has_volatile_ops (def1)
3739 || gimple_has_volatile_ops (def2))
3740 continue;
3742 ref1 = gimple_assign_rhs1 (def1);
3743 ref2 = gimple_assign_rhs1 (def2);
3745 if (TREE_CODE (ref1) != COMPONENT_REF
3746 || TREE_CODE (ref2) != COMPONENT_REF)
3747 continue;
3749 /* The zeroth operand of the two component references must be
3750 identical. It is not sufficient to compare get_base_address of
3751 the two references, because this could allow for different
3752 elements of the same array in the two trees. It is not safe to
3753 assume that the existence of one array element implies the
3754 existence of a different one. */
3755 if (!operand_equal_p (TREE_OPERAND (ref1, 0), TREE_OPERAND (ref2, 0), 0))
3756 continue;
3758 field1 = TREE_OPERAND (ref1, 1);
3759 field2 = TREE_OPERAND (ref2, 1);
3761 /* Check for field adjacency, and ensure field1 comes first. */
3762 for (next = DECL_CHAIN (field1);
3763 next && TREE_CODE (next) != FIELD_DECL;
3764 next = DECL_CHAIN (next))
3767 if (next != field2)
3769 for (next = DECL_CHAIN (field2);
3770 next && TREE_CODE (next) != FIELD_DECL;
3771 next = DECL_CHAIN (next))
3774 if (next != field1)
3775 continue;
3777 std::swap (field1, field2);
3778 std::swap (def1, def2);
3781 bb_for_def1 = gimple_bb (def1);
3782 bb_for_def2 = gimple_bb (def2);
3784 /* Check for proper alignment of the first field. */
3785 tree_offset1 = bit_position (field1);
3786 tree_offset2 = bit_position (field2);
3787 tree_size2 = DECL_SIZE (field2);
3789 if (!tree_fits_uhwi_p (tree_offset1)
3790 || !tree_fits_uhwi_p (tree_offset2)
3791 || !tree_fits_uhwi_p (tree_size2))
3792 continue;
3794 offset1 = tree_to_uhwi (tree_offset1);
3795 offset2 = tree_to_uhwi (tree_offset2);
3796 size2 = tree_to_uhwi (tree_size2);
3797 align1 = DECL_ALIGN (field1) % param_align_bits;
3799 if (offset1 % BITS_PER_UNIT != 0)
3800 continue;
3802 /* For profitability, the two field references should fit within
3803 a single cache line. */
3804 if (align1 + offset2 - offset1 + size2 > param_align_bits)
3805 continue;
3807 /* The two expressions cannot be dependent upon vdefs defined
3808 in bb1/bb2. */
3809 if (local_mem_dependence (def1, bb_for_def1)
3810 || local_mem_dependence (def2, bb_for_def2))
3811 continue;
3813 /* The conditions are satisfied; hoist the loads from bb1 and bb2 into
3814 bb0. We hoist the first one first so that a cache miss is handled
3815 efficiently regardless of hardware cache-fill policy. */
3816 gsi2 = gsi_for_stmt (def1);
3817 gsi_move_to_bb_end (&gsi2, bb0);
3818 gsi2 = gsi_for_stmt (def2);
3819 gsi_move_to_bb_end (&gsi2, bb0);
3820 statistics_counter_event (cfun, "hoisted loads", 1);
3822 if (dump_file && (dump_flags & TDF_DETAILS))
3823 {
3824 fprintf (dump_file,
3825 "\nHoisting adjacent loads from %d and %d into %d: \n",
3826 bb_for_def1->index, bb_for_def2->index, bb0->index);
3827 print_gimple_stmt (dump_file, def1, 0, TDF_VOPS|TDF_MEMSYMS);
3828 print_gimple_stmt (dump_file, def2, 0, TDF_VOPS|TDF_MEMSYMS);
3829 }
3830 }
3831 }
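/* A usage sketch (not from this file): the transformation can be
   observed by compiling a candidate function with
     gcc -O2 -fhoist-adjacent-loads -fdump-tree-phiopt-details t.c
   and looking for the "Hoisting adjacent loads" message above in the
   resulting .phiopt dump.  */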
3833 /* Determine whether we should attempt to hoist adjacent loads out of
3834 diamond patterns in pass_phiopt. Always hoist loads if
3835 -fhoist-adjacent-loads is specified and the target machine has
3836 both a conditional move instruction and a defined cache line size. */
3838 static bool
3839 gate_hoist_loads (void)
3840 {
3841 return (flag_hoist_adjacent_loads == 1
3842 && param_l1_cache_line_size
3843 && HAVE_conditional_move);
3844 }
3846 /* This pass tries to replace an if-then-else block with an
3847 assignment. We have several kinds of transformations.
3848 Some of these transformations are also performed by the ifcvt
3849 RTL optimizer.
3851 PHI-OPT using Match-and-simplify infrastructure
3852 -----------------------------------------------
3854 The PHI-OPT pass will try to use match-and-simplify infrastructure
3855 (gimple_simplify) to do transformations. This is implemented in
3856 match_simplify_replacement.
3858 The way it works is it replaces:
3859 bb0:
3860 if (cond) goto bb2; else goto bb1;
3861 bb1:
3862 bb2:
3863 x = PHI <a (bb1), b (bb0), ...>;
3865 with a single statement, if `cond ? b : a` can be simplified:
3867 bb0:
3868 x1 = cond ? b : a;
3869 bb2:
3870 x = PHI <a (bb1), x1 (bb0), ...>;
3871 bb1 might be removed as it becomes unreachable when doing the replacement.
3872 Note that bb1 need not be considered a forwarding basic block from bb0.
3874 The pass also tries `(!cond) ? a : b` (iff `!cond` itself simplifies);
3875 this avoids an explosion of patterns in match.pd.
3876 Note that bb1 does not need to be completely empty; it can contain
3877 one statement which is known not to trap.
3879 It can also handle the case where we have two forwarding bbs (diamond):
3880 bb0:
3881 if (cond) goto bb2; else goto bb1;
3882 bb1: goto bb3;
3883 bb2: goto bb3;
3884 bb3:
3885 x = PHI <a (bb1), b (bb2), ...>;
3886 That is replaced with a single statement if `cond ? b : a`
3887 simplifies.
3888 Again, bb1 and bb2 do not have to be completely empty, but
3889 each can contain one statement which is known not to trap.
3890 In this case, however, bb1/bb2 can only be forwarding basic blocks.
3892 This fully replaces the old "Conditional Replacement" and
3893 "ABS Replacement" transformations, as they are now
3894 implemented in match.pd.
3895 Some parts of the "MIN/MAX Replacement" are re-implemented in match.pd.
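As an illustrative sketch (hypothetical user code, not from this file),
the classic ABS idiom is handled this way:

  int f (int a)
  {
    int x;
    if (a < 0)
      x = -a;
    else
      x = a;
    return x;
  }

Here the PHI for x simplifies to ABS_EXPR <a>.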
3897 Value Replacement
3898 -----------------
3900 This transformation, implemented in value_replacement, replaces
3902 bb0:
3903 if (a != b) goto bb2; else goto bb1;
3904 bb1:
3905 bb2:
3906 x = PHI <a (bb1), b (bb0), ...>;
3908 with
3910 bb0:
3911 bb2:
3912 x = PHI <b (bb0), ...>;
3914 This opportunity can sometimes occur as a result of other
3915 optimizations.
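For example (hypothetical user code, not from this file):

  int f (int a, int b)
  {
    int x;
    if (a != b)
      x = b;
    else
      x = a;
    return x;
  }

On the else path a == b holds, so x is b on both paths and the
PHI collapses to b.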
3918 Another case caught by value replacement looks like this:
3920 bb0:
3921 t1 = a == CONST;
3922 t2 = b > c;
3923 t3 = t1 & t2;
3924 if (t3 != 0) goto bb1; else goto bb2;
3925 bb1:
3926 bb2:
3927 x = PHI (CONST, a)
3929 Gets replaced with:
3930 bb0:
3931 bb2:
3932 t1 = a == CONST;
3933 t2 = b > c;
3934 t3 = t1 & t2;
3935 x = a;
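A source-level sketch of this case (hypothetical user code; whether
the non-short-circuit AND form above is produced depends on earlier
passes):

  int f (int a, int b, int c)
  {
    int x;
    if (a == 42 && b > c)
      x = 42;
    else
      x = a;
    return x;
  }

On the path that assigns 42, a == 42 already holds, so both paths
yield a and x can be replaced by a.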
3937 MIN/MAX Replacement
3938 -------------------
3940 This transformation, implemented in minmax_replacement, replaces
3942 bb0:
3943 if (a <= b) goto bb2; else goto bb1;
3944 bb1:
3945 bb2:
3946 x = PHI <b (bb1), a (bb0), ...>;
3948 with
3950 bb0:
3951 x' = MIN_EXPR (a, b)
3952 bb2:
3953 x = PHI <x' (bb0), ...>;
3955 A similar transformation is done for MAX_EXPR.
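For example (hypothetical user code, not from this file):

  int f (int a, int b)
  {
    return (a <= b) ? a : b;
  }

is compiled as if it were return MIN_EXPR <a, b>, which can become
a branchless minimum instruction on targets that provide one.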
3958 This pass also performs two further transformations of a slightly
3959 different flavor.
3961 Factor operations in COND_EXPR
3962 ------------------------------
3964 This transformation factors the unary operations out of COND_EXPR with
3965 factor_out_conditional_operation.
3967 For example:
3968 if (a <= CST) goto <bb 3>; else goto <bb 4>;
3969 <bb 3>:
3970 tmp = (int) a;
3971 <bb 4>:
3972 tmp = PHI <tmp, CST>
3974 Into:
3975 if (a <= CST) goto <bb 3>; else goto <bb 4>;
3976 <bb 3>:
3977 <bb 4>:
3978 a = PHI <a, CST>
3979 tmp = (int) a;
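A sketch of why the factoring helps (hypothetical user code):

  int f (short a)
  {
    return (a < 3) ? (int) a : 3;
  }

Once the (int) conversion is factored out of the two arms, the
remaining short-typed PHI matches the MIN_EXPR pattern above,
giving (int) MIN_EXPR <a, 3>.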
3981 Adjacent Load Hoisting
3982 ----------------------
3984 This transformation replaces
3986 bb0:
3987 if (...) goto bb2; else goto bb1;
3988 bb1:
3989 x1 = (<expr>).field1;
3990 goto bb3;
3991 bb2:
3992 x2 = (<expr>).field2;
3993 bb3:
3994 # x = PHI <x1, x2>;
3996 with
3998 bb0:
3999 x1 = (<expr>).field1;
4000 x2 = (<expr>).field2;
4001 if (...) goto bb2; else goto bb1;
4002 bb1:
4003 goto bb3;
4004 bb2:
4005 bb3:
4006 # x = PHI <x1, x2>;
4008 The purpose of this transformation is to enable generation of conditional
4009 move instructions such as Intel CMOVE or PowerPC ISEL. Because one of
4010 the loads is speculative, the transformation is restricted to very
4011 specific cases to avoid introducing a page fault. We are looking for
4012 the common idiom:
4014 if (...)
4015 x = y->left;
4016 else
4017 x = y->right;
4019 where left and right are typically adjacent pointers in a tree structure. */
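/* A fuller sketch of that idiom (hypothetical user code, not from
   this file):

     struct node { struct node *left; struct node *right; long key; };

     struct node *
     pick (struct node *y, int cond)
     {
       struct node *x;
       if (cond)
         x = y->left;
       else
         x = y->right;
       return x;
     }

   left and right are adjacent FIELD_DECLs with the same base y, so
   both loads can be hoisted above the branch and the selection done
   with a conditional move.  */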
4021 namespace {
4023 const pass_data pass_data_phiopt =
4024 {
4025 GIMPLE_PASS, /* type */
4026 "phiopt", /* name */
4027 OPTGROUP_NONE, /* optinfo_flags */
4028 TV_TREE_PHIOPT, /* tv_id */
4029 ( PROP_cfg | PROP_ssa ), /* properties_required */
4030 0, /* properties_provided */
4031 0, /* properties_destroyed */
4032 0, /* todo_flags_start */
4033 0, /* todo_flags_finish */
4034 };
4036 class pass_phiopt : public gimple_opt_pass
4037 {
4038 public:
4039 pass_phiopt (gcc::context *ctxt)
4040 : gimple_opt_pass (pass_data_phiopt, ctxt), early_p (false)
4041 {}
4043 /* opt_pass methods: */
4044 opt_pass * clone () final override { return new pass_phiopt (m_ctxt); }
4045 void set_pass_param (unsigned n, bool param) final override
4046 {
4047 gcc_assert (n == 0);
4048 early_p = param;
4049 }
4050 bool gate (function *) final override { return flag_ssa_phiopt; }
4051 unsigned int execute (function *) final override;
4053 private:
4054 bool early_p;
4055 }; // class pass_phiopt
4057 } // anon namespace
4059 gimple_opt_pass *
4060 make_pass_phiopt (gcc::context *ctxt)
4061 {
4062 return new pass_phiopt (ctxt);
4063 }
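/* A sketch of how the two instances of this pass are expected to be
   created (the authoritative list is passes.def): the pass is
   registered twice and set_pass_param supplies early_p, e.g.

     NEXT_PASS (pass_phiopt, true);   <- early instance
     NEXT_PASS (pass_phiopt, false);  <- late instance

   The early instance skips the more expensive transformations such
   as value replacement and adjacent load hoisting.  */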
4065 unsigned int
4066 pass_phiopt::execute (function *)
4067 {
4068 bool do_hoist_loads = !early_p ? gate_hoist_loads () : false;
4069 basic_block bb;
4070 basic_block *bb_order;
4071 unsigned n, i;
4072 bool cfgchanged = false;
4074 calculate_dominance_info (CDI_DOMINATORS);
4075 mark_ssa_maybe_undefs ();
4077 /* Search every basic block for COND_EXPRs we may be able to optimize.
4079 We walk the blocks in an order that guarantees that a block with
4080 a single predecessor is processed before its predecessor.
4081 This ensures that we collapse inner ifs before visiting the
4082 outer ones, and also that we do not try to visit a removed
4083 block. */
4084 bb_order = single_pred_before_succ_order ();
4085 n = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS;
4087 for (i = 0; i < n; i++)
4088 {
4089 gphi *phi;
4090 basic_block bb1, bb2;
4091 edge e1, e2;
4092 tree arg0, arg1;
4093 bool diamond_p = false;
4095 bb = bb_order[i];
4097 /* Check to see if the last statement is a GIMPLE_COND. */
4098 gcond *cond_stmt = safe_dyn_cast <gcond *> (*gsi_last_bb (bb));
4099 if (!cond_stmt)
4100 continue;
4102 e1 = EDGE_SUCC (bb, 0);
4103 bb1 = e1->dest;
4104 e2 = EDGE_SUCC (bb, 1);
4105 bb2 = e2->dest;
4107 /* We cannot do the optimization on abnormal edges. */
4108 if ((e1->flags & EDGE_ABNORMAL) != 0
4109 || (e2->flags & EDGE_ABNORMAL) != 0)
4110 continue;
4112 /* Punt if either bb1 or bb2 has no successors. */
4113 if (EDGE_COUNT (bb1->succs) == 0
4114 || EDGE_COUNT (bb2->succs) == 0)
4115 continue;
4117 /* Find the bb which is the fall through to the other. */
4118 if (EDGE_SUCC (bb1, 0)->dest == bb2)
4119 ;
4120 else if (EDGE_SUCC (bb2, 0)->dest == bb1)
4121 {
4122 std::swap (bb1, bb2);
4123 std::swap (e1, e2);
4124 }
4125 else if (EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest
4126 && single_succ_p (bb2))
4127 {
4128 diamond_p = true;
4129 e2 = EDGE_SUCC (bb2, 0);
4130 /* Make sure bb2 is just a fall through. */
4131 if ((e2->flags & EDGE_FALLTHRU) == 0)
4132 continue;
4133 }
4134 else
4135 continue;
4137 e1 = EDGE_SUCC (bb1, 0);
4139 /* Make sure that bb1 is just a fall through. */
4140 if (!single_succ_p (bb1)
4141 || (e1->flags & EDGE_FALLTHRU) == 0)
4142 continue;
4144 if (diamond_p)
4145 {
4146 basic_block bb3 = e1->dest;
4148 if (!single_pred_p (bb1)
4149 || !single_pred_p (bb2))
4150 continue;
4152 if (do_hoist_loads
4153 && !FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (cond_stmt)))
4154 && EDGE_COUNT (bb->succs) == 2
4155 && EDGE_COUNT (bb3->preds) == 2
4156 /* If one edge or the other is dominant, a conditional move
4157 is likely to perform worse than the well-predicted branch. */
4158 && !predictable_edge_p (EDGE_SUCC (bb, 0))
4159 && !predictable_edge_p (EDGE_SUCC (bb, 1)))
4160 hoist_adjacent_loads (bb, bb1, bb2, bb3);
4161 }
4163 gimple_stmt_iterator gsi;
4164 bool candorest = true;
4166 /* Check that we're looking for nested phis. */
4167 basic_block merge = diamond_p ? EDGE_SUCC (bb2, 0)->dest : bb2;
4168 gimple_seq phis = phi_nodes (merge);
4170 /* Value replacement can work with more than one PHI
4171 so try that first. */
4172 if (!early_p && !diamond_p)
4173 for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
4174 {
4175 phi = as_a <gphi *> (gsi_stmt (gsi));
4176 arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
4177 arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
4178 if (value_replacement (bb, bb1, e1, e2, phi, arg0, arg1) == 2)
4179 {
4180 candorest = false;
4181 cfgchanged = true;
4182 break;
4183 }
4184 }
4186 if (!candorest)
4187 continue;
4189 phi = single_non_singleton_phi_for_edges (phis, e1, e2);
4190 if (!phi)
4191 continue;
4193 arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
4194 arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
4196 /* Something is wrong if we cannot find the arguments in the PHI
4197 node. */
4198 gcc_assert (arg0 != NULL_TREE && arg1 != NULL_TREE);
4200 if (single_pred_p (bb1)
4201 && EDGE_COUNT (merge->preds) == 2)
4202 {
4203 gphi *newphi = phi;
4204 while (newphi)
4205 {
4206 phi = newphi;
4207 /* factor_out_conditional_operation may create a new PHI in
4208 BB2 and eliminate an existing PHI in BB2. Recompute values
4209 that may be affected by that change. */
4210 arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
4211 arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
4212 gcc_assert (arg0 != NULL_TREE && arg1 != NULL_TREE);
4213 newphi = factor_out_conditional_operation (e1, e2, phi,
4214 arg0, arg1,
4215 cond_stmt);
4216 }
4217 }
4219 /* Do the replacement of conditional if it can be done. */
4220 if (match_simplify_replacement (bb, bb1, bb2, e1, e2, phi,
4221 arg0, arg1, early_p, diamond_p))
4222 cfgchanged = true;
4223 else if (!early_p
4224 && !diamond_p
4225 && single_pred_p (bb1)
4226 && cond_removal_in_builtin_zero_pattern (bb, bb1, e1, e2,
4227 phi, arg0, arg1))
4228 cfgchanged = true;
4229 else if (minmax_replacement (bb, bb1, bb2, e1, e2, phi, arg0, arg1,
4230 diamond_p))
4231 cfgchanged = true;
4232 else if (single_pred_p (bb1)
4233 && !diamond_p
4234 && spaceship_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
4235 cfgchanged = true;
4236 }
4238 free (bb_order);
4240 if (cfgchanged)
4241 return TODO_cleanup_cfg;
4242 return 0;
4243 }
4245 /* This pass tries to transform conditional stores into unconditional
4246 ones, enabling further simplifications with the simpler then and else
4247 blocks. In particular it replaces this:
4249 bb0:
4250 if (cond) goto bb2; else goto bb1;
4251 bb1:
4252 *p = RHS;
4253 bb2:
4255 with
4257 bb0:
4258 if (cond) goto bb1; else goto bb2;
4259 bb1:
4260 condtmp' = *p;
4261 bb2:
4262 condtmp = PHI <RHS, condtmp'>
4263 *p = condtmp;
4265 This transformation can only be done under several constraints,
4266 documented below. It also replaces:
4268 bb0:
4269 if (cond) goto bb2; else goto bb1;
4270 bb1:
4271 *p = RHS1;
4272 goto bb3;
4273 bb2:
4274 *p = RHS2;
4275 bb3:
4277 with
4279 bb0:
4280 if (cond) goto bb3; else goto bb1;
4281 bb1:
4282 bb3:
4283 condtmp = PHI <RHS1, RHS2>
4284 *p = condtmp; */
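/* A minimal sketch of the non-trapping constraint (hypothetical user
   code, not from this file):

     void f (int *p, int cond)
     {
       *p = 0;
       if (cond)
         *p = 5;
     }

   The unconditional store *p = 0 proves that *p cannot trap, so the
   conditional store can be rewritten into the load/PHI/store form
   shown above.  */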
4286 namespace {
4288 const pass_data pass_data_cselim =
4289 {
4290 GIMPLE_PASS, /* type */
4291 "cselim", /* name */
4292 OPTGROUP_NONE, /* optinfo_flags */
4293 TV_TREE_PHIOPT, /* tv_id */
4294 ( PROP_cfg | PROP_ssa ), /* properties_required */
4295 0, /* properties_provided */
4296 0, /* properties_destroyed */
4297 0, /* todo_flags_start */
4298 0, /* todo_flags_finish */
4299 };
4301 class pass_cselim : public gimple_opt_pass
4302 {
4303 public:
4304 pass_cselim (gcc::context *ctxt)
4305 : gimple_opt_pass (pass_data_cselim, ctxt)
4306 {}
4308 /* opt_pass methods: */
4309 bool gate (function *) final override { return flag_tree_cselim; }
4310 unsigned int execute (function *) final override;
4312 }; // class pass_cselim
4314 } // anon namespace
4316 gimple_opt_pass *
4317 make_pass_cselim (gcc::context *ctxt)
4318 {
4319 return new pass_cselim (ctxt);
4320 }
4322 unsigned int
4323 pass_cselim::execute (function *)
4324 {
4325 basic_block bb;
4326 basic_block *bb_order;
4327 unsigned n, i;
4328 bool cfgchanged = false;
4329 hash_set<tree> *nontrap = 0;
4330 unsigned todo = 0;
4332 /* ??? We are not interested in loop-related info, but the following
4333 will create it and would ICE if we didn't init loops with pre-headers.
4334 An interfacing issue of find_data_references_in_bb. */
4335 loop_optimizer_init (LOOPS_NORMAL);
4336 scev_initialize ();
4338 calculate_dominance_info (CDI_DOMINATORS);
4340 /* Calculate the set of non-trapping memory accesses. */
4341 nontrap = get_non_trapping ();
4343 /* Search every basic block for COND_EXPRs we may be able to optimize.
4345 We walk the blocks in an order that guarantees that a block with
4346 a single predecessor is processed before its predecessor.
4347 This ensures that we collapse inner ifs before visiting the
4348 outer ones, and also that we do not try to visit a removed
4349 block. */
4350 bb_order = single_pred_before_succ_order ();
4351 n = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS;
4353 for (i = 0; i < n; i++)
4354 {
4355 basic_block bb1, bb2;
4356 edge e1, e2;
4357 bool diamond_p = false;
4359 bb = bb_order[i];
4361 /* Check to see if the last statement is a GIMPLE_COND. */
4362 gcond *cond_stmt = safe_dyn_cast <gcond *> (*gsi_last_bb (bb));
4363 if (!cond_stmt)
4364 continue;
4366 e1 = EDGE_SUCC (bb, 0);
4367 bb1 = e1->dest;
4368 e2 = EDGE_SUCC (bb, 1);
4369 bb2 = e2->dest;
4371 /* We cannot do the optimization on abnormal edges. */
4372 if ((e1->flags & EDGE_ABNORMAL) != 0
4373 || (e2->flags & EDGE_ABNORMAL) != 0)
4374 continue;
4376 /* Punt if either bb1 or bb2 has no successors. */
4377 if (EDGE_COUNT (bb1->succs) == 0
4378 || EDGE_COUNT (bb2->succs) == 0)
4379 continue;
4381 /* Find the bb which is the fall through to the other. */
4382 if (EDGE_SUCC (bb1, 0)->dest == bb2)
4383 ;
4384 else if (EDGE_SUCC (bb2, 0)->dest == bb1)
4385 {
4386 std::swap (bb1, bb2);
4387 std::swap (e1, e2);
4388 }
4389 else if (EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest
4390 && single_succ_p (bb2))
4391 {
4392 diamond_p = true;
4393 e2 = EDGE_SUCC (bb2, 0);
4394 /* Make sure bb2 is just a fall through. */
4395 if ((e2->flags & EDGE_FALLTHRU) == 0)
4396 continue;
4397 }
4398 else
4399 continue;
4401 e1 = EDGE_SUCC (bb1, 0);
4403 /* Make sure that bb1 is just a fall through. */
4404 if (!single_succ_p (bb1)
4405 || (e1->flags & EDGE_FALLTHRU) == 0)
4406 continue;
4408 if (diamond_p)
4409 {
4410 basic_block bb3 = e1->dest;
4412 /* Only handle sinking of stores from exactly two bbs.
4413 The middle bbs do not always need to come from the
4414 if itself, since we are sinking rather than
4415 hoisting. */
4416 if (EDGE_COUNT (bb3->preds) != 2)
4417 continue;
4418 if (cond_if_else_store_replacement (bb1, bb2, bb3))
4419 cfgchanged = true;
4420 continue;
4421 }
4423 /* Also make sure that bb1 only has one predecessor and that it
4424 is bb. */
4425 if (!single_pred_p (bb1)
4426 || single_pred (bb1) != bb)
4427 continue;
4429 /* bb1 is the middle block, bb2 the join block, bb the split block,
4430 e1 the fallthrough edge from bb1 to bb2. We can't do the
4431 optimization if the join block has more than two predecessors. */
4432 if (EDGE_COUNT (bb2->preds) > 2)
4433 continue;
4434 if (cond_store_replacement (bb1, bb2, e1, e2, nontrap))
4435 cfgchanged = true;
4436 }
4438 free (bb_order);
4440 delete nontrap;
4441 /* If the CFG has changed, we should clean it up now. */
4442 if (cfgchanged)
4443 {
4444 /* In cond-store replacement we have added some loads on edges
4445 and new VOPS (as we moved the store, and created a load). */
4446 gsi_commit_edge_inserts ();
4447 todo = TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
4448 }
4449 scev_finalize ();
4450 loop_optimizer_finalize ();
4451 return todo;
4452 }