gcc/tree-ssa-phiopt.cc
1 /* Optimization of PHI nodes by converting them into straightline code.
2 Copyright (C) 2004-2023 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 3, or (at your option) any
9 later version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "backend.h"
24 #include "insn-codes.h"
25 #include "rtl.h"
26 #include "tree.h"
27 #include "gimple.h"
28 #include "cfghooks.h"
29 #include "tree-pass.h"
30 #include "ssa.h"
31 #include "tree-ssa.h"
32 #include "optabs-tree.h"
33 #include "insn-config.h"
34 #include "gimple-pretty-print.h"
35 #include "fold-const.h"
36 #include "stor-layout.h"
37 #include "cfganal.h"
38 #include "gimplify.h"
39 #include "gimple-iterator.h"
40 #include "gimplify-me.h"
41 #include "tree-cfg.h"
42 #include "tree-dfa.h"
43 #include "domwalk.h"
44 #include "cfgloop.h"
45 #include "tree-data-ref.h"
46 #include "tree-scalar-evolution.h"
47 #include "tree-inline.h"
48 #include "case-cfn-macros.h"
49 #include "tree-eh.h"
50 #include "gimple-fold.h"
51 #include "internal-fn.h"
52 #include "gimple-range.h"
53 #include "gimple-match.h"
54 #include "dbgcnt.h"
55 #include "tree-ssa-propagate.h"
56 #include "tree-ssa-dce.h"
58 /* Return the single PHI in the SEQ of PHIs whose arguments for edges E0 and
   E1 differ (or the only PHI in SEQ if there is just one).  Never return a
   virtual PHI; return NULL if there is no such PHI or more than one. */
60 static gphi *
61 single_non_singleton_phi_for_edges (gimple_seq seq, edge e0, edge e1)
63 gimple_stmt_iterator i;
64 gphi *phi = NULL;
65 if (gimple_seq_singleton_p (seq))
67 phi = as_a <gphi *> (gsi_stmt (gsi_start (seq)));
68 /* Never return virtual phis. */
69 if (virtual_operand_p (gimple_phi_result (phi)))
70 return NULL;
71 return phi;
73 for (i = gsi_start (seq); !gsi_end_p (i); gsi_next (&i))
75 gphi *p = as_a <gphi *> (gsi_stmt (i));
76 /* If the PHI arguments are equal then we can skip this PHI. */
77 if (operand_equal_for_phi_arg_p (gimple_phi_arg_def (p, e0->dest_idx),
78 gimple_phi_arg_def (p, e1->dest_idx)))
79 continue;
81 /* Punt on virtual phis with different arguments from the edges. */
82 if (virtual_operand_p (gimple_phi_result (p)))
83 return NULL;
 85 /* If we already found a PHI whose arguments for the two edges differ,
 86 then there is more than one such PHI and it is not a singleton; return NULL. */
87 if (phi)
88 return NULL;
90 phi = p;
92 return phi;
95 /* Replace PHI node element whose edge is E in block BB with variable NEW.
96 Remove the edge from COND_BLOCK which does not lead to BB (COND_BLOCK
97 is known to have two edges, one of which must reach BB). */
99 static void
100 replace_phi_edge_with_variable (basic_block cond_block,
101 edge e, gphi *phi, tree new_tree,
102 bitmap dce_ssa_names = nullptr)
104 basic_block bb = gimple_bb (phi);
105 gimple_stmt_iterator gsi;
106 tree phi_result = PHI_RESULT (phi);
107 bool deleteboth = false;
 109 /* Duplicate the range info if it is the only thing setting the target PHI.
 110 This is needed as later on the new_tree will be replacing
 111 the assignment of the PHI.
112 For an example:
113 bb1:
114 _4 = min<a_1, 255>
115 goto bb2
117 # RANGE [-INF, 255]
118 a_3 = PHI<_4(1)>
119 bb3:
121 use(a_3)
 122 Then _4 gets propagated into the use of a_3 and the range info is lost.
123 This can't be done for more than 2 incoming edges as the propagation
124 won't happen.
125 The new_tree needs to be defined in the same basic block as the conditional. */
126 if (TREE_CODE (new_tree) == SSA_NAME
127 && EDGE_COUNT (gimple_bb (phi)->preds) == 2
128 && INTEGRAL_TYPE_P (TREE_TYPE (phi_result))
129 && !SSA_NAME_RANGE_INFO (new_tree)
130 && SSA_NAME_RANGE_INFO (phi_result)
131 && gimple_bb (SSA_NAME_DEF_STMT (new_tree)) == cond_block
132 && dbg_cnt (phiopt_edge_range))
133 duplicate_ssa_name_range_info (new_tree, phi_result);
135 /* Change the PHI argument to new. */
136 SET_USE (PHI_ARG_DEF_PTR (phi, e->dest_idx), new_tree);
138 /* Remove the empty basic block. */
139 edge edge_to_remove = NULL, keep_edge = NULL;
140 if (EDGE_SUCC (cond_block, 0)->dest == bb)
142 edge_to_remove = EDGE_SUCC (cond_block, 1);
143 keep_edge = EDGE_SUCC (cond_block, 0);
145 else if (EDGE_SUCC (cond_block, 1)->dest == bb)
147 edge_to_remove = EDGE_SUCC (cond_block, 0);
148 keep_edge = EDGE_SUCC (cond_block, 1);
150 else if ((keep_edge = find_edge (cond_block, e->src)))
152 basic_block bb1 = EDGE_SUCC (cond_block, 0)->dest;
153 basic_block bb2 = EDGE_SUCC (cond_block, 1)->dest;
154 if (single_pred_p (bb1) && single_pred_p (bb2)
155 && single_succ_p (bb1) && single_succ_p (bb2)
156 && empty_block_p (bb1) && empty_block_p (bb2))
157 deleteboth = true;
159 else
160 gcc_unreachable ();
162 if (edge_to_remove && EDGE_COUNT (edge_to_remove->dest->preds) == 1)
164 e->flags |= EDGE_FALLTHRU;
165 e->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
166 e->probability = profile_probability::always ();
167 delete_basic_block (edge_to_remove->dest);
169 /* Eliminate the COND_EXPR at the end of COND_BLOCK. */
170 gsi = gsi_last_bb (cond_block);
171 gsi_remove (&gsi, true);
173 else if (deleteboth)
175 basic_block bb1 = EDGE_SUCC (cond_block, 0)->dest;
176 basic_block bb2 = EDGE_SUCC (cond_block, 1)->dest;
178 edge newedge = redirect_edge_and_branch (keep_edge, bb);
180 /* The new edge should be the same. */
181 gcc_assert (newedge == keep_edge);
183 keep_edge->flags |= EDGE_FALLTHRU;
184 keep_edge->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
185 keep_edge->probability = profile_probability::always ();
187 /* Copy the edge's phi entry from the old one. */
188 copy_phi_arg_into_existing_phi (e, keep_edge);
 190 /* Delete the two old empty basic blocks.  */
191 delete_basic_block (bb1);
192 delete_basic_block (bb2);
194 /* Eliminate the COND_EXPR at the end of COND_BLOCK. */
195 gsi = gsi_last_bb (cond_block);
196 gsi_remove (&gsi, true);
198 else
200 /* If there are other edges into the middle block make
201 CFG cleanup deal with the edge removal to avoid
202 updating dominators here in a non-trivial way. */
203 gcond *cond = as_a <gcond *> (*gsi_last_bb (cond_block));
204 if (keep_edge->flags & EDGE_FALSE_VALUE)
205 gimple_cond_make_false (cond);
206 else if (keep_edge->flags & EDGE_TRUE_VALUE)
207 gimple_cond_make_true (cond);
210 if (dce_ssa_names)
211 simple_dce_from_worklist (dce_ssa_names);
213 statistics_counter_event (cfun, "Replace PHI with variable", 1);
215 if (dump_file && (dump_flags & TDF_DETAILS))
216 fprintf (dump_file,
217 "COND_EXPR in block %d and PHI in block %d converted to straightline code.\n",
218 cond_block->index,
219 bb->index);
 222 /* PR66726: Factor operations out of COND_EXPR.  If the arguments of the PHI
 223 stmt are conversions (CONVERT_STMT), factor the conversion out and apply it
 224 to the result of the PHI stmt instead.  COND_STMT is the controlling predicate.
 225 Return the newly-created PHI, if any. */
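/* As a rough sketch (the SSA names and types below are invented), for a PHI
   whose arguments are both conversions of the same kind:

       <bb 3>:  a_5 = (int) c_2;
       <bb 4>:  b_6 = (int) d_3;
       <bb 5>:  # r_7 = PHI <a_5(3), b_6(4)>

   the conversion is factored out so that it is applied once to the PHI
   result instead of on each incoming edge:

       <bb 5>:  # _8 = PHI <c_2(3), d_3(4)>
                r_7 = (int) _8;  */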
227 static gphi *
228 factor_out_conditional_operation (edge e0, edge e1, gphi *phi,
229 tree arg0, tree arg1, gimple *cond_stmt)
231 gimple *arg0_def_stmt = NULL, *arg1_def_stmt = NULL, *new_stmt;
232 tree new_arg0 = NULL_TREE, new_arg1 = NULL_TREE;
233 tree temp, result;
234 gphi *newphi;
235 gimple_stmt_iterator gsi, gsi_for_def;
236 location_t locus = gimple_location (phi);
237 enum tree_code op_code;
239 /* Handle only PHI statements with two arguments. TODO: If all
240 other arguments to PHI are INTEGER_CST or if their defining
 241 statements have the same unary operation, we can handle more
242 than two arguments too. */
243 if (gimple_phi_num_args (phi) != 2)
244 return NULL;
246 /* First canonicalize to simplify tests. */
247 if (TREE_CODE (arg0) != SSA_NAME)
249 std::swap (arg0, arg1);
250 std::swap (e0, e1);
253 if (TREE_CODE (arg0) != SSA_NAME
254 || (TREE_CODE (arg1) != SSA_NAME
255 && TREE_CODE (arg1) != INTEGER_CST))
256 return NULL;
258 /* Check if arg0 is an SSA_NAME and the stmt which defines arg0 is
 259 a unary operation. */
260 arg0_def_stmt = SSA_NAME_DEF_STMT (arg0);
261 if (!is_gimple_assign (arg0_def_stmt)
262 || (gimple_assign_rhs_class (arg0_def_stmt) != GIMPLE_UNARY_RHS
263 && gimple_assign_rhs_code (arg0_def_stmt) != VIEW_CONVERT_EXPR))
264 return NULL;
266 /* Use the RHS as new_arg0. */
267 op_code = gimple_assign_rhs_code (arg0_def_stmt);
268 new_arg0 = gimple_assign_rhs1 (arg0_def_stmt);
269 if (op_code == VIEW_CONVERT_EXPR)
271 new_arg0 = TREE_OPERAND (new_arg0, 0);
272 if (!is_gimple_reg_type (TREE_TYPE (new_arg0)))
273 return NULL;
275 if (TREE_CODE (new_arg0) == SSA_NAME
276 && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_arg0))
277 return NULL;
279 if (TREE_CODE (arg1) == SSA_NAME)
281 /* Check if arg1 is an SSA_NAME and the stmt which defines arg1
 282 is a unary operation. */
283 arg1_def_stmt = SSA_NAME_DEF_STMT (arg1);
284 if (!is_gimple_assign (arg1_def_stmt)
285 || gimple_assign_rhs_code (arg1_def_stmt) != op_code)
286 return NULL;
 288 /* At least one of arg0_def_stmt and arg1_def_stmt should be conditionally executed. */
289 if (dominated_by_p (CDI_DOMINATORS, gimple_bb (phi), gimple_bb (arg0_def_stmt))
290 && dominated_by_p (CDI_DOMINATORS,
291 gimple_bb (phi), gimple_bb (arg1_def_stmt)))
292 return NULL;
294 /* Use the RHS as new_arg1. */
295 new_arg1 = gimple_assign_rhs1 (arg1_def_stmt);
296 if (op_code == VIEW_CONVERT_EXPR)
297 new_arg1 = TREE_OPERAND (new_arg1, 0);
298 if (TREE_CODE (new_arg1) == SSA_NAME
299 && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_arg1))
300 return NULL;
302 else
304 /* TODO: handle more than just casts here. */
305 if (!gimple_assign_cast_p (arg0_def_stmt))
306 return NULL;
 308 /* arg0_def_stmt should be conditionally executed. */
309 if (dominated_by_p (CDI_DOMINATORS, gimple_bb (phi), gimple_bb (arg0_def_stmt)))
310 return NULL;
311 /* If arg1 is an INTEGER_CST, fold it to new type. */
312 if (INTEGRAL_TYPE_P (TREE_TYPE (new_arg0))
313 && int_fits_type_p (arg1, TREE_TYPE (new_arg0)))
315 if (gimple_assign_cast_p (arg0_def_stmt))
317 /* For the INTEGER_CST case, we are just moving the
318 conversion from one place to another, which can often
319 hurt as the conversion moves further away from the
320 statement that computes the value. So, perform this
321 only if new_arg0 is an operand of COND_STMT, or
322 if arg0_def_stmt is the only non-debug stmt in
323 its basic block, because then it is possible this
324 could enable further optimizations (minmax replacement
325 etc.). See PR71016. */
326 if (new_arg0 != gimple_cond_lhs (cond_stmt)
327 && new_arg0 != gimple_cond_rhs (cond_stmt)
328 && gimple_bb (arg0_def_stmt) == e0->src)
330 gsi = gsi_for_stmt (arg0_def_stmt);
331 gsi_prev_nondebug (&gsi);
332 if (!gsi_end_p (gsi))
334 if (gassign *assign
335 = dyn_cast <gassign *> (gsi_stmt (gsi)))
337 tree lhs = gimple_assign_lhs (assign);
338 enum tree_code ass_code
339 = gimple_assign_rhs_code (assign);
340 if (ass_code != MAX_EXPR && ass_code != MIN_EXPR)
341 return NULL;
342 if (lhs != gimple_assign_rhs1 (arg0_def_stmt))
343 return NULL;
344 gsi_prev_nondebug (&gsi);
345 if (!gsi_end_p (gsi))
346 return NULL;
348 else
349 return NULL;
351 gsi = gsi_for_stmt (arg0_def_stmt);
352 gsi_next_nondebug (&gsi);
353 if (!gsi_end_p (gsi))
354 return NULL;
356 new_arg1 = fold_convert (TREE_TYPE (new_arg0), arg1);
358 else
359 return NULL;
361 else
362 return NULL;
365 /* If arg0/arg1 have > 1 use, then this transformation actually increases
366 the number of expressions evaluated at runtime. */
367 if (!has_single_use (arg0)
368 || (arg1_def_stmt && !has_single_use (arg1)))
369 return NULL;
 371 /* If the types of new_arg0 and new_arg1 are different, bail out. */
372 if (!types_compatible_p (TREE_TYPE (new_arg0), TREE_TYPE (new_arg1)))
373 return NULL;
375 /* Create a new PHI stmt. */
376 result = PHI_RESULT (phi);
377 temp = make_ssa_name (TREE_TYPE (new_arg0), NULL);
378 newphi = create_phi_node (temp, gimple_bb (phi));
380 if (dump_file && (dump_flags & TDF_DETAILS))
382 fprintf (dump_file, "PHI ");
383 print_generic_expr (dump_file, gimple_phi_result (phi));
384 fprintf (dump_file,
385 " changed to factor operation out from COND_EXPR.\n");
386 fprintf (dump_file, "New stmt with OPERATION that defines ");
387 print_generic_expr (dump_file, result);
388 fprintf (dump_file, ".\n");
 391 /* Remove the old operation(s) that have a single use. */
392 gsi_for_def = gsi_for_stmt (arg0_def_stmt);
393 gsi_remove (&gsi_for_def, true);
394 release_defs (arg0_def_stmt);
396 if (arg1_def_stmt)
398 gsi_for_def = gsi_for_stmt (arg1_def_stmt);
399 gsi_remove (&gsi_for_def, true);
400 release_defs (arg1_def_stmt);
403 add_phi_arg (newphi, new_arg0, e0, locus);
404 add_phi_arg (newphi, new_arg1, e1, locus);
406 /* Create the operation stmt and insert it. */
407 if (op_code == VIEW_CONVERT_EXPR)
409 temp = fold_build1 (VIEW_CONVERT_EXPR, TREE_TYPE (result), temp);
410 new_stmt = gimple_build_assign (result, temp);
412 else
413 new_stmt = gimple_build_assign (result, op_code, temp);
414 gsi = gsi_after_labels (gimple_bb (phi));
415 gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
417 /* Remove the original PHI stmt. */
418 gsi = gsi_for_stmt (phi);
419 gsi_remove (&gsi, true);
421 statistics_counter_event (cfun, "factored out operation", 1);
423 return newphi;
 427 /* Return TRUE if the SEQ/OP pair should be allowed during early phiopt.
 428 Currently this is to allow MIN/MAX, ABS/NEGATE and constants. */
429 static bool
430 phiopt_early_allow (gimple_seq &seq, gimple_match_op &op)
432 /* Don't allow functions. */
433 if (!op.code.is_tree_code ())
434 return false;
435 tree_code code = (tree_code)op.code;
 437 /* For a non-empty sequence, only allow one statement; except when the
 438 result is a MIN/MAX, where at most two statements are allowed,
 439 each of them a MIN/MAX. */
440 if (!gimple_seq_empty_p (seq))
442 if (code == MIN_EXPR || code == MAX_EXPR)
444 if (!gimple_seq_singleton_p (seq))
445 return false;
447 gimple *stmt = gimple_seq_first_stmt (seq);
448 /* Only allow assignments. */
449 if (!is_gimple_assign (stmt))
450 return false;
451 code = gimple_assign_rhs_code (stmt);
452 return code == MIN_EXPR || code == MAX_EXPR;
 454 /* Check to make sure op was already an SSA_NAME. */
455 if (code != SSA_NAME)
456 return false;
457 if (!gimple_seq_singleton_p (seq))
458 return false;
459 gimple *stmt = gimple_seq_first_stmt (seq);
460 /* Only allow assignments. */
461 if (!is_gimple_assign (stmt))
462 return false;
463 if (gimple_assign_lhs (stmt) != op.ops[0])
464 return false;
465 code = gimple_assign_rhs_code (stmt);
468 switch (code)
470 case MIN_EXPR:
471 case MAX_EXPR:
472 case ABS_EXPR:
473 case ABSU_EXPR:
474 case NEGATE_EXPR:
475 case SSA_NAME:
476 return true;
477 case INTEGER_CST:
478 case REAL_CST:
479 case VECTOR_CST:
480 case FIXED_CST:
481 return true;
482 default:
483 return false;
 487 /* gimple_simplify_phiopt is like gimple_simplify but designed for PHIOPT.
 488 Return NULL if nothing can be simplified; otherwise return the simplified
 489 value, with any newly created statements appended to SEQ.  If EARLY_P is
 490 set, also reject tree codes that are not allowed during early phiopt.
491 Takes the comparison from COMP_STMT and two args, ARG0 and ARG1 and tries
492 to simplify CMP ? ARG0 : ARG1.
493 Also try to simplify (!CMP) ? ARG1 : ARG0 if the non-inverse failed. */
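/* For instance (invented names), given COMP_STMT "if (a_1 < b_2)" with
   ARG0 = a_1 and ARG1 = b_2, the match-and-simplify machinery can reduce
   "a_1 < b_2 ? a_1 : b_2" to MIN_EXPR <a_1, b_2>, whose defining statement
   is appended to SEQ and whose result is returned.  */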
494 static tree
495 gimple_simplify_phiopt (bool early_p, tree type, gimple *comp_stmt,
496 tree arg0, tree arg1,
497 gimple_seq *seq)
499 gimple_seq seq1 = NULL;
500 enum tree_code comp_code = gimple_cond_code (comp_stmt);
501 location_t loc = gimple_location (comp_stmt);
502 tree cmp0 = gimple_cond_lhs (comp_stmt);
503 tree cmp1 = gimple_cond_rhs (comp_stmt);
504 /* To handle special cases like floating point comparison, it is easier and
505 less error-prone to build a tree and gimplify it on the fly though it is
506 less efficient.
507 Don't use fold_build2 here as that might create (bool)a instead of just
508 "a != 0". */
509 tree cond = build2_loc (loc, comp_code, boolean_type_node,
510 cmp0, cmp1);
512 if (dump_file && (dump_flags & TDF_FOLDING))
514 fprintf (dump_file, "\nphiopt match-simplify trying:\n\t");
515 print_generic_expr (dump_file, cond);
516 fprintf (dump_file, " ? ");
517 print_generic_expr (dump_file, arg0);
518 fprintf (dump_file, " : ");
519 print_generic_expr (dump_file, arg1);
520 fprintf (dump_file, "\n");
523 gimple_match_op op (gimple_match_cond::UNCOND,
524 COND_EXPR, type, cond, arg0, arg1);
526 if (op.resimplify (&seq1, follow_all_ssa_edges))
528 bool allowed = !early_p || phiopt_early_allow (seq1, op);
529 tree result = maybe_push_res_to_seq (&op, &seq1);
530 if (dump_file && (dump_flags & TDF_FOLDING))
532 fprintf (dump_file, "\nphiopt match-simplify back:\n");
533 if (seq1)
534 print_gimple_seq (dump_file, seq1, 0, TDF_VOPS|TDF_MEMSYMS);
535 fprintf (dump_file, "result: ");
536 if (result)
537 print_generic_expr (dump_file, result);
538 else
539 fprintf (dump_file, " (none)");
540 fprintf (dump_file, "\n");
541 if (!allowed)
542 fprintf (dump_file, "rejected because early\n");
 544 /* During early phiopt we only want to allow some generated tree codes. */
545 if (allowed && result)
547 if (loc != UNKNOWN_LOCATION)
548 annotate_all_with_location (seq1, loc);
549 gimple_seq_add_seq_without_update (seq, seq1);
550 return result;
553 gimple_seq_discard (seq1);
554 seq1 = NULL;
556 /* Try the inverted comparison, that is !COMP ? ARG1 : ARG0. */
557 comp_code = invert_tree_comparison (comp_code, HONOR_NANS (cmp0));
559 if (comp_code == ERROR_MARK)
560 return NULL;
562 cond = build2_loc (loc,
563 comp_code, boolean_type_node,
564 cmp0, cmp1);
566 if (dump_file && (dump_flags & TDF_FOLDING))
568 fprintf (dump_file, "\nphiopt match-simplify trying:\n\t");
569 print_generic_expr (dump_file, cond);
570 fprintf (dump_file, " ? ");
571 print_generic_expr (dump_file, arg1);
572 fprintf (dump_file, " : ");
573 print_generic_expr (dump_file, arg0);
574 fprintf (dump_file, "\n");
577 gimple_match_op op1 (gimple_match_cond::UNCOND,
578 COND_EXPR, type, cond, arg1, arg0);
580 if (op1.resimplify (&seq1, follow_all_ssa_edges))
582 bool allowed = !early_p || phiopt_early_allow (seq1, op1);
583 tree result = maybe_push_res_to_seq (&op1, &seq1);
584 if (dump_file && (dump_flags & TDF_FOLDING))
586 fprintf (dump_file, "\nphiopt match-simplify back:\n");
587 if (seq1)
588 print_gimple_seq (dump_file, seq1, 0, TDF_VOPS|TDF_MEMSYMS);
589 fprintf (dump_file, "result: ");
590 if (result)
591 print_generic_expr (dump_file, result);
592 else
593 fprintf (dump_file, " (none)");
594 fprintf (dump_file, "\n");
595 if (!allowed)
596 fprintf (dump_file, "rejected because early\n");
 598 /* During early phiopt we only want to allow some generated tree codes. */
599 if (allowed && result)
601 if (loc != UNKNOWN_LOCATION)
602 annotate_all_with_location (seq1, loc);
603 gimple_seq_add_seq_without_update (seq, seq1);
604 return result;
607 gimple_seq_discard (seq1);
609 return NULL;
 612 /* empty_bb_or_one_feeding_into_p returns true if BB is an empty basic block
 613 or if it has one cheap preparation statement that feeds into the PHI
 614 statement; in that case STMT is set to that statement. */
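/* E.g. a middle block containing only "_5 = (int) x_3;" (invented names),
   where _5 is used solely by PHI, qualifies as having one cheap preparation
   statement; the caller can then hoist that statement above the condition.  */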
615 static bool
616 empty_bb_or_one_feeding_into_p (basic_block bb,
617 gimple *phi,
618 gimple *&stmt)
620 stmt = nullptr;
621 gimple *stmt_to_move = nullptr;
622 tree lhs;
624 if (empty_block_p (bb))
625 return true;
627 if (!single_pred_p (bb))
628 return false;
630 /* The middle bb cannot have phi nodes as we don't
631 move those assignments yet. */
632 if (!gimple_seq_empty_p (phi_nodes (bb)))
633 return false;
635 gimple_stmt_iterator gsi;
637 gsi = gsi_start_nondebug_after_labels_bb (bb);
638 while (!gsi_end_p (gsi))
640 gimple *s = gsi_stmt (gsi);
641 gsi_next_nondebug (&gsi);
642 /* Skip over Predict and nop statements. */
643 if (gimple_code (s) == GIMPLE_PREDICT
644 || gimple_code (s) == GIMPLE_NOP)
645 continue;
 646 /* If there is more than one statement, return false. */
647 if (stmt_to_move)
648 return false;
649 stmt_to_move = s;
652 /* The only statement here was a Predict or a nop statement
653 so return true. */
654 if (!stmt_to_move)
655 return true;
657 if (gimple_vuse (stmt_to_move))
658 return false;
660 if (gimple_could_trap_p (stmt_to_move)
661 || gimple_has_side_effects (stmt_to_move))
662 return false;
664 ssa_op_iter it;
665 tree use;
666 FOR_EACH_SSA_TREE_OPERAND (use, stmt_to_move, it, SSA_OP_USE)
667 if (ssa_name_maybe_undef_p (use))
668 return false;
 670 /* Allow assignments, but only allow a few builtin/internal calls.
 671 Even const calls, which don't match any of the checks above, could
 672 still have some side-effects - they could contain
 673 gimple_could_trap_p statements, like floating point
 674 exceptions or integer division by zero. See PR70586.
 675 FIXME: perhaps gimple_has_side_effects or gimple_could_trap_p
 676 should handle this.
 677 Allow a few builtin/internal calls that are known not to
 678 trap: logical functions (e.g. bswap and bit counting). */
679 if (!is_gimple_assign (stmt_to_move))
681 if (!is_gimple_call (stmt_to_move))
682 return false;
683 combined_fn cfn = gimple_call_combined_fn (stmt_to_move);
684 switch (cfn)
686 default:
687 return false;
688 case CFN_BUILT_IN_BSWAP16:
689 case CFN_BUILT_IN_BSWAP32:
690 case CFN_BUILT_IN_BSWAP64:
691 case CFN_BUILT_IN_BSWAP128:
692 CASE_CFN_FFS:
693 CASE_CFN_PARITY:
694 CASE_CFN_POPCOUNT:
695 CASE_CFN_CLZ:
696 CASE_CFN_CTZ:
697 case CFN_BUILT_IN_CLRSB:
698 case CFN_BUILT_IN_CLRSBL:
699 case CFN_BUILT_IN_CLRSBLL:
700 lhs = gimple_call_lhs (stmt_to_move);
701 break;
704 else
705 lhs = gimple_assign_lhs (stmt_to_move);
707 gimple *use_stmt;
708 use_operand_p use_p;
 710 /* Only allow a statement whose result feeds into the PHI. */
711 if (!lhs || TREE_CODE (lhs) != SSA_NAME
712 || !single_imm_use (lhs, &use_p, &use_stmt)
713 || use_stmt != phi)
714 return false;
716 stmt = stmt_to_move;
717 return true;
720 /* Move STMT to before GSI and insert its defining
721 name into INSERTED_EXPRS bitmap. */
722 static void
723 move_stmt (gimple *stmt, gimple_stmt_iterator *gsi, auto_bitmap &inserted_exprs)
725 if (!stmt)
726 return;
727 if (dump_file && (dump_flags & TDF_DETAILS))
729 fprintf (dump_file, "statement un-sinked:\n");
730 print_gimple_stmt (dump_file, stmt, 0,
731 TDF_VOPS|TDF_MEMSYMS);
734 tree name = gimple_get_lhs (stmt);
735 // Mark the name to be renamed if there is one.
736 bitmap_set_bit (inserted_exprs, SSA_NAME_VERSION (name));
737 gimple_stmt_iterator gsi1 = gsi_for_stmt (stmt);
738 gsi_move_before (&gsi1, gsi);
739 reset_flow_sensitive_info (name);
 742 /* RAII-style class to temporarily remove the flow-sensitive info
 743 from ssa names defined by a gimple statement. */
744 class auto_flow_sensitive
746 public:
747 auto_flow_sensitive (gimple *s);
748 ~auto_flow_sensitive ();
749 private:
750 auto_vec<std::pair<tree, flow_sensitive_info_storage>, 2> stack;
 753 /* Constructor for auto_flow_sensitive. Saves
 754 off the flow-sensitive information of the ssa names
 755 defined by gimple statement S and
 756 resets it to the non-flow-based default. */
758 auto_flow_sensitive::auto_flow_sensitive (gimple *s)
760 if (!s)
761 return;
762 ssa_op_iter it;
763 tree def;
764 FOR_EACH_SSA_TREE_OPERAND (def, s, it, SSA_OP_DEF)
766 flow_sensitive_info_storage storage;
767 storage.save_and_clear (def);
768 stack.safe_push (std::make_pair (def, storage));
 772 /* Destructor; restores the flow-sensitive information
 773 for the SSA names that had been saved off. */
775 auto_flow_sensitive::~auto_flow_sensitive ()
777 for (auto p : stack)
778 p.second.restore (p.first);
781 /* The function match_simplify_replacement does the main work of doing the
782 replacement using match and simplify. Return true if the replacement is done.
783 Otherwise return false.
 784 BB is the basic block where the replacement is going to be done. ARG0
785 is argument 0 from PHI. Likewise for ARG1. */
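/* As an illustration (invented names), for

       if (a_1 < 0)
         b_2 = -a_1;
       # r_3 = PHI <b_2 (then), a_1 (else)>

   match-and-simplify can fold "a_1 < 0 ? -a_1 : a_1" into ABS_EXPR <a_1>;
   the new computation is emitted in cond_bb and the PHI is replaced by its
   result, with the now-dead negation cleaned up afterwards.  */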
787 static bool
788 match_simplify_replacement (basic_block cond_bb, basic_block middle_bb,
789 basic_block middle_bb_alt,
790 edge e0, edge e1, gphi *phi,
791 tree arg0, tree arg1, bool early_p,
792 bool threeway_p)
794 gimple *stmt;
795 gimple_stmt_iterator gsi;
796 edge true_edge, false_edge;
797 gimple_seq seq = NULL;
798 tree result;
799 gimple *stmt_to_move = NULL;
800 gimple *stmt_to_move_alt = NULL;
801 tree arg_true, arg_false;
803 /* Special case A ? B : B as this will always simplify to B. */
804 if (operand_equal_for_phi_arg_p (arg0, arg1))
805 return false;
807 /* If the basic block only has a cheap preparation statement,
808 allow it and move it once the transformation is done. */
809 if (!empty_bb_or_one_feeding_into_p (middle_bb, phi, stmt_to_move))
810 return false;
812 if (threeway_p
813 && middle_bb != middle_bb_alt
814 && !empty_bb_or_one_feeding_into_p (middle_bb_alt, phi,
815 stmt_to_move_alt))
816 return false;
818 /* At this point we know we have a GIMPLE_COND with two successors.
819 One successor is BB, the other successor is an empty block which
820 falls through into BB.
822 There is a single PHI node at the join point (BB).
824 So, given the condition COND, and the two PHI arguments, match and simplify
825 can happen on (COND) ? arg0 : arg1. */
827 stmt = last_nondebug_stmt (cond_bb);
829 /* We need to know which is the true edge and which is the false
830 edge so that we know when to invert the condition below. */
831 extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
833 /* Forward the edges over the middle basic block. */
834 if (true_edge->dest == middle_bb)
835 true_edge = EDGE_SUCC (true_edge->dest, 0);
836 if (false_edge->dest == middle_bb)
837 false_edge = EDGE_SUCC (false_edge->dest, 0);
839 /* When THREEWAY_P then e1 will point to the edge of the final transition
840 from middle-bb to end. */
841 if (true_edge == e0)
843 if (!threeway_p)
844 gcc_assert (false_edge == e1);
845 arg_true = arg0;
846 arg_false = arg1;
848 else
850 gcc_assert (false_edge == e0);
851 if (!threeway_p)
852 gcc_assert (true_edge == e1);
853 arg_true = arg1;
854 arg_false = arg0;
857 /* Do not make conditional undefs unconditional. */
858 if ((TREE_CODE (arg_true) == SSA_NAME
859 && ssa_name_maybe_undef_p (arg_true))
860 || (TREE_CODE (arg_false) == SSA_NAME
861 && ssa_name_maybe_undef_p (arg_false)))
862 return false;
864 tree type = TREE_TYPE (gimple_phi_result (phi));
866 auto_flow_sensitive s1(stmt_to_move);
867 auto_flow_sensitive s_alt(stmt_to_move_alt);
869 result = gimple_simplify_phiopt (early_p, type, stmt,
870 arg_true, arg_false,
871 &seq);
874 if (!result)
875 return false;
876 if (dump_file && (dump_flags & TDF_FOLDING))
877 fprintf (dump_file, "accepted the phiopt match-simplify.\n");
879 auto_bitmap exprs_maybe_dce;
 881 /* Mark the cond statement's lhs/rhs as maybe-DCE candidates. */
882 if (TREE_CODE (gimple_cond_lhs (stmt)) == SSA_NAME
883 && !SSA_NAME_IS_DEFAULT_DEF (gimple_cond_lhs (stmt)))
884 bitmap_set_bit (exprs_maybe_dce,
885 SSA_NAME_VERSION (gimple_cond_lhs (stmt)));
886 if (TREE_CODE (gimple_cond_rhs (stmt)) == SSA_NAME
887 && !SSA_NAME_IS_DEFAULT_DEF (gimple_cond_rhs (stmt)))
888 bitmap_set_bit (exprs_maybe_dce,
889 SSA_NAME_VERSION (gimple_cond_rhs (stmt)));
891 gsi = gsi_last_bb (cond_bb);
892 /* Insert the sequence generated from gimple_simplify_phiopt. */
893 if (seq)
895 // Mark the lhs of the new statements maybe for dce
896 gimple_stmt_iterator gsi1 = gsi_start (seq);
897 for (; !gsi_end_p (gsi1); gsi_next (&gsi1))
899 gimple *stmt = gsi_stmt (gsi1);
900 tree name = gimple_get_lhs (stmt);
901 if (name && TREE_CODE (name) == SSA_NAME)
902 bitmap_set_bit (exprs_maybe_dce, SSA_NAME_VERSION (name));
904 gsi_insert_seq_before (&gsi, seq, GSI_CONTINUE_LINKING);
 907 /* If there was a statement to move, move it right before
 908 the original conditional. */
909 move_stmt (stmt_to_move, &gsi, exprs_maybe_dce);
910 move_stmt (stmt_to_move_alt, &gsi, exprs_maybe_dce);
912 replace_phi_edge_with_variable (cond_bb, e1, phi, result, exprs_maybe_dce);
 914 /* Add the statistic here even though replace_phi_edge_with_variable already
 915 does it, as we want to be able to count when match-simplify happens vs.
 916 the other transformations. */
917 statistics_counter_event (cfun, "match-simplify PHI replacement", 1);
919 /* Note that we optimized this PHI. */
920 return true;
923 /* Update *ARG which is defined in STMT so that it contains the
924 computed value if that seems profitable. Return true if the
925 statement is made dead by that rewriting. */
927 static bool
928 jump_function_from_stmt (tree *arg, gimple *stmt)
930 enum tree_code code = gimple_assign_rhs_code (stmt);
931 if (code == ADDR_EXPR)
933 /* For arg = &p->i transform it to p, if possible. */
934 tree rhs1 = gimple_assign_rhs1 (stmt);
935 poly_int64 offset;
936 tree tem = get_addr_base_and_unit_offset (TREE_OPERAND (rhs1, 0),
937 &offset);
938 if (tem
939 && TREE_CODE (tem) == MEM_REF
940 && known_eq (mem_ref_offset (tem) + offset, 0))
942 *arg = TREE_OPERAND (tem, 0);
943 return true;
946 /* TODO: Much like IPA-CP jump-functions we want to handle constant
947 additions symbolically here, and we'd need to update the comparison
948 code that compares the arg + cst tuples in our caller. For now the
949 code above exactly handles the VEC_BASE pattern from vec.h. */
950 return false;
953 /* RHS is a source argument in a BIT_AND_EXPR which feeds a conditional
954 of the form SSA_NAME NE 0.
956 If RHS is fed by a simple EQ_EXPR comparison of two values, see if
957 the two input values of the EQ_EXPR match arg0 and arg1.
959 If so update *code and return TRUE. Otherwise return FALSE. */
961 static bool
962 rhs_is_fed_for_value_replacement (const_tree arg0, const_tree arg1,
963 enum tree_code *code, const_tree rhs)
965 /* Obviously if RHS is not an SSA_NAME, we can't look at the defining
966 statement. */
967 if (TREE_CODE (rhs) == SSA_NAME)
969 gimple *def1 = SSA_NAME_DEF_STMT (rhs);
971 /* Verify the defining statement has an EQ_EXPR on the RHS. */
972 if (is_gimple_assign (def1) && gimple_assign_rhs_code (def1) == EQ_EXPR)
974 /* Finally verify the source operands of the EQ_EXPR are equal
975 to arg0 and arg1. */
976 tree op0 = gimple_assign_rhs1 (def1);
977 tree op1 = gimple_assign_rhs2 (def1);
978 if ((operand_equal_for_phi_arg_p (arg0, op0)
979 && operand_equal_for_phi_arg_p (arg1, op1))
980 || (operand_equal_for_phi_arg_p (arg0, op1)
981 && operand_equal_for_phi_arg_p (arg1, op0)))
983 /* We will perform the optimization. */
984 *code = gimple_assign_rhs_code (def1);
985 return true;
989 return false;
992 /* Return TRUE if arg0/arg1 are equal to the rhs/lhs or lhs/rhs of COND.
 994 Also return TRUE if arg0/arg1 are equal to the source arguments of
995 an EQ comparison feeding a BIT_AND_EXPR which feeds COND.
997 Return FALSE otherwise. */
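/* For the second case, a sketch with invented names:

       _1 = a_2 == b_3;
       _4 = _1 & c_5;
       if (_4 != 0)

   with ARG0/ARG1 being a_2/b_3, the function returns TRUE and *CODE is
   updated to EQ_EXPR.  */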
999 static bool
1000 operand_equal_for_value_replacement (const_tree arg0, const_tree arg1,
1001 enum tree_code *code, gimple *cond)
1003 gimple *def;
1004 tree lhs = gimple_cond_lhs (cond);
1005 tree rhs = gimple_cond_rhs (cond);
1007 if ((operand_equal_for_phi_arg_p (arg0, lhs)
1008 && operand_equal_for_phi_arg_p (arg1, rhs))
1009 || (operand_equal_for_phi_arg_p (arg1, lhs)
1010 && operand_equal_for_phi_arg_p (arg0, rhs)))
1011 return true;
1013 /* Now handle more complex case where we have an EQ comparison
1014 which feeds a BIT_AND_EXPR which feeds COND.
1016 First verify that COND is of the form SSA_NAME NE 0. */
1017 if (*code != NE_EXPR || !integer_zerop (rhs)
1018 || TREE_CODE (lhs) != SSA_NAME)
1019 return false;
1021 /* Now ensure that SSA_NAME is set by a BIT_AND_EXPR. */
1022 def = SSA_NAME_DEF_STMT (lhs);
1023 if (!is_gimple_assign (def) || gimple_assign_rhs_code (def) != BIT_AND_EXPR)
1024 return false;
1026 /* Now verify arg0/arg1 correspond to the source arguments of an
1027 EQ comparison feeding the BIT_AND_EXPR. */
1029 tree tmp = gimple_assign_rhs1 (def);
1030 if (rhs_is_fed_for_value_replacement (arg0, arg1, code, tmp))
1031 return true;
1033 tmp = gimple_assign_rhs2 (def);
1034 if (rhs_is_fed_for_value_replacement (arg0, arg1, code, tmp))
1035 return true;
1037 return false;
1040 /* Returns true if ARG is a neutral element for operation CODE
1041 on the RIGHT side. */
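/* For example, x + 0, x | 0, x ^ 0, x * 1 and x & -1 are all x, so those
   constants are neutral on either side; 0 is neutral only on the RIGHT for
   MINUS_EXPR, POINTER_PLUS_EXPR and the shift/rotate codes, and 1 only on
   the RIGHT for the division codes.  */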
1043 static bool
1044 neutral_element_p (tree_code code, tree arg, bool right)
1046 switch (code)
1048 case PLUS_EXPR:
1049 case BIT_IOR_EXPR:
1050 case BIT_XOR_EXPR:
1051 return integer_zerop (arg);
1053 case LROTATE_EXPR:
1054 case RROTATE_EXPR:
1055 case LSHIFT_EXPR:
1056 case RSHIFT_EXPR:
1057 case MINUS_EXPR:
1058 case POINTER_PLUS_EXPR:
1059 return right && integer_zerop (arg);
1061 case MULT_EXPR:
1062 return integer_onep (arg);
1064 case TRUNC_DIV_EXPR:
1065 case CEIL_DIV_EXPR:
1066 case FLOOR_DIV_EXPR:
1067 case ROUND_DIV_EXPR:
1068 case EXACT_DIV_EXPR:
1069 return right && integer_onep (arg);
1071 case BIT_AND_EXPR:
1072 return integer_all_onesp (arg);
1074 default:
1075 return false;
1079 /* Returns true if ARG is an absorbing element for operation CODE. */
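/* For example, x | -1 is always -1, and x * 0 and x & 0 are always 0, so
   those constants absorb on either side; 0 absorbs only on the left for
   shifts and rotates (0 << x is 0), and for the division and modulo codes
   only when the other operand is known to be non-zero (0 / x is 0).  */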
1081 static bool
1082 absorbing_element_p (tree_code code, tree arg, bool right, tree rval)
1084 switch (code)
1086 case BIT_IOR_EXPR:
1087 return integer_all_onesp (arg);
1089 case MULT_EXPR:
1090 case BIT_AND_EXPR:
1091 return integer_zerop (arg);
1093 case LSHIFT_EXPR:
1094 case RSHIFT_EXPR:
1095 case LROTATE_EXPR:
1096 case RROTATE_EXPR:
1097 return !right && integer_zerop (arg);
1099 case TRUNC_DIV_EXPR:
1100 case CEIL_DIV_EXPR:
1101 case FLOOR_DIV_EXPR:
1102 case ROUND_DIV_EXPR:
1103 case EXACT_DIV_EXPR:
1104 case TRUNC_MOD_EXPR:
1105 case CEIL_MOD_EXPR:
1106 case FLOOR_MOD_EXPR:
1107 case ROUND_MOD_EXPR:
1108 return (!right
1109 && integer_zerop (arg)
1110 && tree_single_nonzero_warnv_p (rval, NULL));
1112 default:
1113 return false;
1117 /* The function value_replacement does the main work of doing the value
1118 replacement. Return non-zero if the replacement is done. Otherwise return
1119 0. If we remove the middle basic block, return 2.
 1120 BB is the basic block where the replacement is going to be done. ARG0
1121 is argument 0 from the PHI. Likewise for ARG1. */
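/* A minimal example (invented names): for

       if (x_1 == y_2)
         ...
       # r_3 = PHI <x_1 (true edge), y_2 (false edge)>

   r_3 is y_2 on the false edge and equals y_2 on the true edge as well,
   so the PHI can simply be replaced by y_2.  */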
1123 static int
1124 value_replacement (basic_block cond_bb, basic_block middle_bb,
1125 edge e0, edge e1, gphi *phi, tree arg0, tree arg1)
1127 gimple_stmt_iterator gsi;
1128 edge true_edge, false_edge;
1129 enum tree_code code;
1130 bool empty_or_with_defined_p = true;
1132 /* If the type says honor signed zeros we cannot do this
1133 optimization. */
1134 if (HONOR_SIGNED_ZEROS (arg1))
1135 return 0;
1137 /* If there is a statement in MIDDLE_BB that defines one of the PHI
1138 arguments, then adjust arg0 or arg1. */
1139 gsi = gsi_start_nondebug_after_labels_bb (middle_bb);
1140 while (!gsi_end_p (gsi))
1142 gimple *stmt = gsi_stmt (gsi);
1143 tree lhs;
1144 gsi_next_nondebug (&gsi);
1145 if (!is_gimple_assign (stmt))
1147 if (gimple_code (stmt) != GIMPLE_PREDICT
1148 && gimple_code (stmt) != GIMPLE_NOP)
1149 empty_or_with_defined_p = false;
1150 continue;
1152 /* Now try to adjust arg0 or arg1 according to the computation
1153 in the statement. */
1154 lhs = gimple_assign_lhs (stmt);
1155 if (!(lhs == arg0
1156 && jump_function_from_stmt (&arg0, stmt))
1157 || (lhs == arg1
1158 && jump_function_from_stmt (&arg1, stmt)))
1159 empty_or_with_defined_p = false;
1162 gcond *cond = as_a <gcond *> (*gsi_last_bb (cond_bb));
1163 code = gimple_cond_code (cond);
1165 /* This transformation is only valid for equality comparisons. */
1166 if (code != NE_EXPR && code != EQ_EXPR)
1167 return 0;
1169 /* We need to know which is the true edge and which is the false
 1170 edge so that we know if we have abs or negative abs. */
1171 extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
1173 /* At this point we know we have a COND_EXPR with two successors.
1174 One successor is BB, the other successor is an empty block which
1175 falls through into BB.
1177 The condition for the COND_EXPR is known to be NE_EXPR or EQ_EXPR.
1179 There is a single PHI node at the join point (BB) with two arguments.
1181 We now need to verify that the two arguments in the PHI node match
1182 the two arguments to the equality comparison. */
1184 bool equal_p = operand_equal_for_value_replacement (arg0, arg1, &code, cond);
1185 bool maybe_equal_p = false;
1186 if (!equal_p
1187 && empty_or_with_defined_p
1188 && TREE_CODE (gimple_cond_rhs (cond)) == INTEGER_CST
1189 && (operand_equal_for_phi_arg_p (gimple_cond_lhs (cond), arg0)
1190 ? TREE_CODE (arg1) == INTEGER_CST
1191 : (operand_equal_for_phi_arg_p (gimple_cond_lhs (cond), arg1)
1192 && TREE_CODE (arg0) == INTEGER_CST)))
1193 maybe_equal_p = true;
1194 if (equal_p || maybe_equal_p)
1196 edge e;
1197 tree arg;
1199 /* For NE_EXPR, we want to build an assignment result = arg where
1200 arg is the PHI argument associated with the true edge. For
1201 EQ_EXPR we want the PHI argument associated with the false edge. */
1202 e = (code == NE_EXPR ? true_edge : false_edge);
1204 /* Unfortunately, E may not reach BB (it may instead have gone to
1205 OTHER_BLOCK). If that is the case, then we want the single outgoing
1206 edge from OTHER_BLOCK which reaches BB and represents the desired
1207 path from COND_BLOCK. */
1208 if (e->dest == middle_bb)
1209 e = single_succ_edge (e->dest);
1211 /* Now we know the incoming edge to BB that has the argument for the
1212 RHS of our new assignment statement. */
1213 if (e0 == e)
1214 arg = arg0;
1215 else
1216 arg = arg1;
1218 /* If the middle basic block was empty or is defining the
1219 PHI arguments and this is a single phi where the args are different
1220 for the edges e0 and e1 then we can remove the middle basic block. */
1221 if (empty_or_with_defined_p
1222 && single_non_singleton_phi_for_edges (phi_nodes (gimple_bb (phi)),
1223 e0, e1) == phi)
1225 use_operand_p use_p;
1226 gimple *use_stmt;
1228 /* Even if arg0/arg1 isn't equal to second operand of cond, we
1229 can optimize away the bb if we can prove it doesn't care whether
1230 phi result is arg0/arg1 or second operand of cond. Consider:
1231 <bb 2> [local count: 118111600]:
1232 if (i_2(D) == 4)
1233 goto <bb 4>; [97.00%]
1234 else
1235 goto <bb 3>; [3.00%]
1237 <bb 3> [local count: 3540129]:
1239 <bb 4> [local count: 118111600]:
1240 # i_6 = PHI <i_2(D)(3), 6(2)>
1241 _3 = i_6 != 0;
1242 Here, carg is 4, oarg is 6, crhs is 0, and because
1243 (4 != 0) == (6 != 0), we don't care if i_6 is 4 or 6, both
 1244 have the same outcome. So, we can optimize this to:
1245 _3 = i_2(D) != 0;
 1246 If the single imm use of the phi result is a >, >=, < or <= comparison,
 1247 we can similarly check whether both carg and oarg compare the same against
 1248 crhs using ccode. */
1249 if (maybe_equal_p
1250 && TREE_CODE (arg) != INTEGER_CST
1251 && single_imm_use (gimple_phi_result (phi), &use_p, &use_stmt))
1253 enum tree_code ccode = ERROR_MARK;
1254 tree clhs = NULL_TREE, crhs = NULL_TREE;
1255 tree carg = gimple_cond_rhs (cond);
1256 tree oarg = e0 == e ? arg1 : arg0;
1257 if (is_gimple_assign (use_stmt)
1258 && (TREE_CODE_CLASS (gimple_assign_rhs_code (use_stmt))
1259 == tcc_comparison))
1261 ccode = gimple_assign_rhs_code (use_stmt);
1262 clhs = gimple_assign_rhs1 (use_stmt);
1263 crhs = gimple_assign_rhs2 (use_stmt);
1265 else if (gimple_code (use_stmt) == GIMPLE_COND)
1267 ccode = gimple_cond_code (use_stmt);
1268 clhs = gimple_cond_lhs (use_stmt);
1269 crhs = gimple_cond_rhs (use_stmt);
1271 if (ccode != ERROR_MARK
1272 && clhs == gimple_phi_result (phi)
1273 && TREE_CODE (crhs) == INTEGER_CST)
1274 switch (ccode)
1276 case EQ_EXPR:
1277 case NE_EXPR:
1278 if (!tree_int_cst_equal (crhs, carg)
1279 && !tree_int_cst_equal (crhs, oarg))
1280 equal_p = true;
1281 break;
1282 case GT_EXPR:
1283 if (tree_int_cst_lt (crhs, carg)
1284 == tree_int_cst_lt (crhs, oarg))
1285 equal_p = true;
1286 break;
1287 case GE_EXPR:
1288 if (tree_int_cst_le (crhs, carg)
1289 == tree_int_cst_le (crhs, oarg))
1290 equal_p = true;
1291 break;
1292 case LT_EXPR:
1293 if (tree_int_cst_lt (carg, crhs)
1294 == tree_int_cst_lt (oarg, crhs))
1295 equal_p = true;
1296 break;
1297 case LE_EXPR:
1298 if (tree_int_cst_le (carg, crhs)
1299 == tree_int_cst_le (oarg, crhs))
1300 equal_p = true;
1301 break;
1302 default:
1303 break;
1305 if (equal_p)
1307 tree phires = gimple_phi_result (phi);
1308 if (SSA_NAME_RANGE_INFO (phires))
 1310 /* After the optimization the PHI result can have a value
 1311 which it couldn't have had previously. */
1312 int_range_max r;
1313 if (get_global_range_query ()->range_of_expr (r, phires,
1314 phi))
1316 wide_int warg = wi::to_wide (carg);
1317 int_range<2> tmp (TREE_TYPE (carg), warg, warg);
1318 r.union_ (tmp);
1319 reset_flow_sensitive_info (phires);
1320 set_range_info (phires, r);
1322 else
1323 reset_flow_sensitive_info (phires);
1326 if (equal_p && MAY_HAVE_DEBUG_BIND_STMTS)
1328 imm_use_iterator imm_iter;
1329 tree phires = gimple_phi_result (phi);
1330 tree temp = NULL_TREE;
1331 bool reset_p = false;
1333 /* Add # DEBUG D#1 => arg != carg ? arg : oarg. */
1334 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, phires)
1336 if (!is_gimple_debug (use_stmt))
1337 continue;
1338 if (temp == NULL_TREE)
1340 if (!single_pred_p (middle_bb)
1341 || EDGE_COUNT (gimple_bb (phi)->preds) != 2)
 1343 /* But only if middle_bb has a single
 1344 predecessor and the phi bb has two, otherwise
 1345 we could use an SSA_NAME not usable in that
 1346 place or emit wrong debug info. */
1347 reset_p = true;
1348 break;
1350 gimple_stmt_iterator gsi
1351 = gsi_after_labels (gimple_bb (phi));
1352 tree type = TREE_TYPE (phires);
1353 temp = build_debug_expr_decl (type);
1354 tree t = build2 (NE_EXPR, boolean_type_node,
1355 arg, carg);
1356 t = build3 (COND_EXPR, type, t, arg, oarg);
1357 gimple *g = gimple_build_debug_bind (temp, t, phi);
1358 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
1360 FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
1361 replace_exp (use_p, temp);
1362 update_stmt (use_stmt);
1364 if (reset_p)
1365 reset_debug_uses (phi);
1368 if (equal_p)
1370 replace_phi_edge_with_variable (cond_bb, e1, phi, arg);
1371 /* Note that we optimized this PHI. */
1372 return 2;
1375 else if (equal_p)
1377 if (!single_pred_p (middle_bb))
1378 return 0;
1379 statistics_counter_event (cfun, "Replace PHI with "
1380 "variable/value_replacement", 1);
1382 /* Replace the PHI arguments with arg. */
1383 SET_PHI_ARG_DEF (phi, e0->dest_idx, arg);
1384 SET_PHI_ARG_DEF (phi, e1->dest_idx, arg);
1385 if (dump_file && (dump_flags & TDF_DETAILS))
1387 fprintf (dump_file, "PHI ");
1388 print_generic_expr (dump_file, gimple_phi_result (phi));
1389 fprintf (dump_file, " reduced for COND_EXPR in block %d to ",
1390 cond_bb->index);
1391 print_generic_expr (dump_file, arg);
1392 fprintf (dump_file, ".\n");
1394 return 1;
1398 if (!single_pred_p (middle_bb))
1399 return 0;
1401 /* Now optimize (x != 0) ? x + y : y to just x + y. */
1402 gsi = gsi_last_nondebug_bb (middle_bb);
1403 if (gsi_end_p (gsi))
1404 return 0;
1406 gimple *assign = gsi_stmt (gsi);
1407 if (!is_gimple_assign (assign)
1408 || (!INTEGRAL_TYPE_P (TREE_TYPE (arg0))
1409 && !POINTER_TYPE_P (TREE_TYPE (arg0))))
1410 return 0;
1412 if (gimple_assign_rhs_class (assign) != GIMPLE_BINARY_RHS)
 1414 /* If the last stmt of middle_bb is a conversion, handle it like
 1415 a preparation statement: constant-evaluate it while
 1416 checking for UB. */
1417 enum tree_code sc = gimple_assign_rhs_code (assign);
1418 if (CONVERT_EXPR_CODE_P (sc))
1419 assign = NULL;
1420 else
1421 return 0;
1424 /* Punt if there are (degenerate) PHIs in middle_bb, there should not be. */
1425 if (!gimple_seq_empty_p (phi_nodes (middle_bb)))
1426 return 0;
 1428 /* Allow up to 2 cheap preparation statements that prepare the argument
1429 for assign, e.g.:
1430 if (y_4 != 0)
1431 goto <bb 3>;
1432 else
1433 goto <bb 4>;
1434 <bb 3>:
1435 _1 = (int) y_4;
1436 iftmp.0_6 = x_5(D) r<< _1;
1437 <bb 4>:
1438 # iftmp.0_2 = PHI <iftmp.0_6(3), x_5(D)(2)>
1440 if (y_3(D) == 0)
1441 goto <bb 4>;
1442 else
1443 goto <bb 3>;
1444 <bb 3>:
1445 y_4 = y_3(D) & 31;
1446 _1 = (int) y_4;
1447 _6 = x_5(D) r<< _1;
1448 <bb 4>:
1449 # _2 = PHI <x_5(D)(2), _6(3)> */
1450 gimple *prep_stmt[2] = { NULL, NULL };
1451 int prep_cnt;
1452 for (prep_cnt = 0; ; prep_cnt++)
1454 if (prep_cnt || assign)
1455 gsi_prev_nondebug (&gsi);
1456 if (gsi_end_p (gsi))
1457 break;
1459 gimple *g = gsi_stmt (gsi);
1460 if (gimple_code (g) == GIMPLE_LABEL)
1461 break;
1463 if (prep_cnt == 2 || !is_gimple_assign (g))
1464 return 0;
1466 tree lhs = gimple_assign_lhs (g);
1467 tree rhs1 = gimple_assign_rhs1 (g);
1468 use_operand_p use_p;
1469 gimple *use_stmt;
1470 if (TREE_CODE (lhs) != SSA_NAME
1471 || TREE_CODE (rhs1) != SSA_NAME
1472 || !INTEGRAL_TYPE_P (TREE_TYPE (lhs))
1473 || !INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
1474 || !single_imm_use (lhs, &use_p, &use_stmt)
1475 || ((prep_cnt || assign)
1476 && use_stmt != (prep_cnt ? prep_stmt[prep_cnt - 1] : assign)))
1477 return 0;
1478 switch (gimple_assign_rhs_code (g))
1480 CASE_CONVERT:
1481 break;
1482 case PLUS_EXPR:
1483 case BIT_AND_EXPR:
1484 case BIT_IOR_EXPR:
1485 case BIT_XOR_EXPR:
1486 if (TREE_CODE (gimple_assign_rhs2 (g)) != INTEGER_CST)
1487 return 0;
1488 break;
1489 default:
1490 return 0;
1492 prep_stmt[prep_cnt] = g;
1495 /* Only transform if it removes the condition. */
1496 if (!single_non_singleton_phi_for_edges (phi_nodes (gimple_bb (phi)), e0, e1))
1497 return 0;
1499 /* Size-wise, this is always profitable. */
1500 if (optimize_bb_for_speed_p (cond_bb)
1501 /* The special case is useless if it has a low probability. */
1502 && profile_status_for_fn (cfun) != PROFILE_ABSENT
1503 && EDGE_PRED (middle_bb, 0)->probability < profile_probability::even ()
1504 /* If assign is cheap, there is no point avoiding it. */
1505 && estimate_num_insns_seq (bb_seq (middle_bb), &eni_time_weights)
1506 >= 3 * estimate_num_insns (cond, &eni_time_weights))
1507 return 0;
1509 tree cond_lhs = gimple_cond_lhs (cond);
1510 tree cond_rhs = gimple_cond_rhs (cond);
1512 /* Propagate the cond_rhs constant through preparation stmts,
1513 make sure UB isn't invoked while doing that. */
1514 for (int i = prep_cnt - 1; i >= 0; --i)
1516 gimple *g = prep_stmt[i];
1517 tree grhs1 = gimple_assign_rhs1 (g);
1518 if (!operand_equal_for_phi_arg_p (cond_lhs, grhs1))
1519 return 0;
1520 cond_lhs = gimple_assign_lhs (g);
1521 cond_rhs = fold_convert (TREE_TYPE (grhs1), cond_rhs);
1522 if (TREE_CODE (cond_rhs) != INTEGER_CST
1523 || TREE_OVERFLOW (cond_rhs))
1524 return 0;
1525 if (gimple_assign_rhs_class (g) == GIMPLE_BINARY_RHS)
1527 cond_rhs = int_const_binop (gimple_assign_rhs_code (g), cond_rhs,
1528 gimple_assign_rhs2 (g));
1529 if (TREE_OVERFLOW (cond_rhs))
1530 return 0;
1532 cond_rhs = fold_convert (TREE_TYPE (cond_lhs), cond_rhs);
1533 if (TREE_CODE (cond_rhs) != INTEGER_CST
1534 || TREE_OVERFLOW (cond_rhs))
1535 return 0;
1538 tree lhs, rhs1, rhs2;
1539 enum tree_code code_def;
1540 if (assign)
1542 lhs = gimple_assign_lhs (assign);
1543 rhs1 = gimple_assign_rhs1 (assign);
1544 rhs2 = gimple_assign_rhs2 (assign);
1545 code_def = gimple_assign_rhs_code (assign);
1547 else
1549 gcc_assert (prep_cnt > 0);
1550 lhs = cond_lhs;
1551 rhs1 = NULL_TREE;
1552 rhs2 = NULL_TREE;
1553 code_def = ERROR_MARK;
1556 if (((code == NE_EXPR && e1 == false_edge)
1557 || (code == EQ_EXPR && e1 == true_edge))
1558 && arg0 == lhs
1559 && ((assign == NULL
1560 && operand_equal_for_phi_arg_p (arg1, cond_rhs))
1561 || (assign
1562 && arg1 == rhs1
1563 && operand_equal_for_phi_arg_p (rhs2, cond_lhs)
1564 && neutral_element_p (code_def, cond_rhs, true))
1565 || (assign
1566 && arg1 == rhs2
1567 && operand_equal_for_phi_arg_p (rhs1, cond_lhs)
1568 && neutral_element_p (code_def, cond_rhs, false))
1569 || (assign
1570 && operand_equal_for_phi_arg_p (arg1, cond_rhs)
1571 && ((operand_equal_for_phi_arg_p (rhs2, cond_lhs)
1572 && absorbing_element_p (code_def, cond_rhs, true, rhs2))
1573 || (operand_equal_for_phi_arg_p (rhs1, cond_lhs)
1574 && absorbing_element_p (code_def,
1575 cond_rhs, false, rhs2))))))
1577 gsi = gsi_for_stmt (cond);
1578 /* Moving ASSIGN might change VR of lhs, e.g. when moving u_6
1579 def-stmt in:
1580 if (n_5 != 0)
1581 goto <bb 3>;
1582 else
1583 goto <bb 4>;
1585 <bb 3>:
1586 # RANGE [0, 4294967294]
1587 u_6 = n_5 + 4294967295;
1589 <bb 4>:
1590 # u_3 = PHI <u_6(3), 4294967295(2)> */
1591 reset_flow_sensitive_info (lhs);
1592 gimple_stmt_iterator gsi_from;
1593 for (int i = prep_cnt - 1; i >= 0; --i)
1595 tree plhs = gimple_assign_lhs (prep_stmt[i]);
1596 reset_flow_sensitive_info (plhs);
1597 gsi_from = gsi_for_stmt (prep_stmt[i]);
1598 gsi_move_before (&gsi_from, &gsi);
1600 if (assign)
1602 gsi_from = gsi_for_stmt (assign);
1603 gsi_move_before (&gsi_from, &gsi);
1605 replace_phi_edge_with_variable (cond_bb, e1, phi, lhs);
1606 return 2;
1609 return 0;
1612 /* If VAR is an SSA_NAME that points to a BIT_NOT_EXPR then return the TREE for
1613 the value being inverted. */
1615 static tree
1616 strip_bit_not (tree var)
1618 if (TREE_CODE (var) != SSA_NAME)
1619 return NULL_TREE;
1621 gimple *assign = SSA_NAME_DEF_STMT (var);
1622 if (gimple_code (assign) != GIMPLE_ASSIGN)
1623 return NULL_TREE;
1625 if (gimple_assign_rhs_code (assign) != BIT_NOT_EXPR)
1626 return NULL_TREE;
1628 return gimple_assign_rhs1 (assign);
1631 /* Invert a MIN to a MAX or a MAX to a MIN expression CODE. */
1633 enum tree_code
1634 invert_minmax_code (enum tree_code code)
1636 switch (code) {
1637 case MIN_EXPR:
1638 return MAX_EXPR;
1639 case MAX_EXPR:
1640 return MIN_EXPR;
1641 default:
1642 gcc_unreachable ();
1646 /* The function minmax_replacement does the main work of doing the minmax
1647 replacement. Return true if the replacement is done. Otherwise return
1648 false.
 1649 BB is the basic block where the replacement is going to be done. ARG0
1650 is argument 0 from the PHI. Likewise for ARG1.
1652 If THREEWAY_P then expect the BB to be laid out in diamond shape with each
1653 BB containing only a MIN or MAX expression. */
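/* For the simple (non-diamond) shape, e.g. (invented names)

       if (a_1 < b_2)
         goto <bb 3>;
       # r_4 = PHI <a_1 (true edge), b_2 (false edge)>

   the PHI becomes r_4 = MIN_EXPR <a_1, b_2>; the diamond form handles the
   case where each arm computes a MIN/MAX with a common bound.  */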
1655 static bool
1656 minmax_replacement (basic_block cond_bb, basic_block middle_bb, basic_block alt_middle_bb,
1657 edge e0, edge e1, gphi *phi, tree arg0, tree arg1, bool threeway_p)
1659 tree result;
1660 edge true_edge, false_edge;
1661 enum tree_code minmax, ass_code;
1662 tree smaller, larger, arg_true, arg_false;
1663 gimple_stmt_iterator gsi, gsi_from;
1665 tree type = TREE_TYPE (PHI_RESULT (phi));
1667 gcond *cond = as_a <gcond *> (*gsi_last_bb (cond_bb));
1668 enum tree_code cmp = gimple_cond_code (cond);
1669 tree rhs = gimple_cond_rhs (cond);
1671 /* Turn EQ/NE of extreme values to order comparisons. */
1672 if ((cmp == NE_EXPR || cmp == EQ_EXPR)
1673 && TREE_CODE (rhs) == INTEGER_CST
1674 && INTEGRAL_TYPE_P (TREE_TYPE (rhs)))
1676 if (wi::eq_p (wi::to_wide (rhs), wi::min_value (TREE_TYPE (rhs))))
1678 cmp = (cmp == EQ_EXPR) ? LT_EXPR : GE_EXPR;
1679 rhs = wide_int_to_tree (TREE_TYPE (rhs),
1680 wi::min_value (TREE_TYPE (rhs)) + 1);
1682 else if (wi::eq_p (wi::to_wide (rhs), wi::max_value (TREE_TYPE (rhs))))
1684 cmp = (cmp == EQ_EXPR) ? GT_EXPR : LE_EXPR;
1685 rhs = wide_int_to_tree (TREE_TYPE (rhs),
1686 wi::max_value (TREE_TYPE (rhs)) - 1);
1690 /* This transformation is only valid for order comparisons. Record which
1691 operand is smaller/larger if the result of the comparison is true. */
1692 tree alt_smaller = NULL_TREE;
1693 tree alt_larger = NULL_TREE;
1694 if (cmp == LT_EXPR || cmp == LE_EXPR)
1696 smaller = gimple_cond_lhs (cond);
1697 larger = rhs;
1698 /* If we have smaller < CST it is equivalent to smaller <= CST-1.
1699 Likewise smaller <= CST is equivalent to smaller < CST+1. */
1700 if (TREE_CODE (larger) == INTEGER_CST
1701 && INTEGRAL_TYPE_P (TREE_TYPE (larger)))
1703 if (cmp == LT_EXPR)
1705 wi::overflow_type overflow;
1706 wide_int alt = wi::sub (wi::to_wide (larger), 1,
1707 TYPE_SIGN (TREE_TYPE (larger)),
1708 &overflow);
1709 if (! overflow)
1710 alt_larger = wide_int_to_tree (TREE_TYPE (larger), alt);
1712 else
1714 wi::overflow_type overflow;
1715 wide_int alt = wi::add (wi::to_wide (larger), 1,
1716 TYPE_SIGN (TREE_TYPE (larger)),
1717 &overflow);
1718 if (! overflow)
1719 alt_larger = wide_int_to_tree (TREE_TYPE (larger), alt);
1723 else if (cmp == GT_EXPR || cmp == GE_EXPR)
1725 smaller = rhs;
1726 larger = gimple_cond_lhs (cond);
1727 /* If we have larger > CST it is equivalent to larger >= CST+1.
1728 Likewise larger >= CST is equivalent to larger > CST-1. */
1729 if (TREE_CODE (smaller) == INTEGER_CST
1730 && INTEGRAL_TYPE_P (TREE_TYPE (smaller)))
1732 wi::overflow_type overflow;
1733 if (cmp == GT_EXPR)
1735 wide_int alt = wi::add (wi::to_wide (smaller), 1,
1736 TYPE_SIGN (TREE_TYPE (smaller)),
1737 &overflow);
1738 if (! overflow)
1739 alt_smaller = wide_int_to_tree (TREE_TYPE (smaller), alt);
1741 else
1743 wide_int alt = wi::sub (wi::to_wide (smaller), 1,
1744 TYPE_SIGN (TREE_TYPE (smaller)),
1745 &overflow);
1746 if (! overflow)
1747 alt_smaller = wide_int_to_tree (TREE_TYPE (smaller), alt);
1751 else
1752 return false;
1754 /* Handle the special case of (signed_type)x < 0 being equivalent
1755 to x > MAX_VAL(signed_type) and (signed_type)x >= 0 equivalent
1756 to x <= MAX_VAL(signed_type). */
1757 if ((cmp == GE_EXPR || cmp == LT_EXPR)
1758 && INTEGRAL_TYPE_P (type)
1759 && TYPE_UNSIGNED (type)
1760 && integer_zerop (rhs))
1762 tree op = gimple_cond_lhs (cond);
1763 if (TREE_CODE (op) == SSA_NAME
1764 && INTEGRAL_TYPE_P (TREE_TYPE (op))
1765 && !TYPE_UNSIGNED (TREE_TYPE (op)))
1767 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
1768 if (gimple_assign_cast_p (def_stmt))
1770 tree op1 = gimple_assign_rhs1 (def_stmt);
1771 if (INTEGRAL_TYPE_P (TREE_TYPE (op1))
1772 && TYPE_UNSIGNED (TREE_TYPE (op1))
1773 && (TYPE_PRECISION (TREE_TYPE (op))
1774 == TYPE_PRECISION (TREE_TYPE (op1)))
1775 && useless_type_conversion_p (type, TREE_TYPE (op1)))
1777 wide_int w1 = wi::max_value (TREE_TYPE (op));
1778 wide_int w2 = wi::add (w1, 1);
1779 if (cmp == LT_EXPR)
1781 larger = op1;
1782 smaller = wide_int_to_tree (TREE_TYPE (op1), w1);
1783 alt_smaller = wide_int_to_tree (TREE_TYPE (op1), w2);
1784 alt_larger = NULL_TREE;
1786 else
1788 smaller = op1;
1789 larger = wide_int_to_tree (TREE_TYPE (op1), w1);
1790 alt_larger = wide_int_to_tree (TREE_TYPE (op1), w2);
1791 alt_smaller = NULL_TREE;
1798 /* We need to know which is the true edge and which is the false
 1799 edge so that we know if we have abs or negative abs. */
1800 extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
1802 /* Forward the edges over the middle basic block. */
1803 if (true_edge->dest == middle_bb)
1804 true_edge = EDGE_SUCC (true_edge->dest, 0);
1805 if (false_edge->dest == middle_bb)
1806 false_edge = EDGE_SUCC (false_edge->dest, 0);
1808 /* When THREEWAY_P then e1 will point to the edge of the final transition
1809 from middle-bb to end. */
1810 if (true_edge == e0)
1812 if (!threeway_p)
1813 gcc_assert (false_edge == e1);
1814 arg_true = arg0;
1815 arg_false = arg1;
1817 else
1819 gcc_assert (false_edge == e0);
1820 if (!threeway_p)
1821 gcc_assert (true_edge == e1);
1822 arg_true = arg1;
1823 arg_false = arg0;
1826 if (empty_block_p (middle_bb)
1827 && (!threeway_p
1828 || empty_block_p (alt_middle_bb)))
1830 if ((operand_equal_for_phi_arg_p (arg_true, smaller)
1831 || (alt_smaller
1832 && operand_equal_for_phi_arg_p (arg_true, alt_smaller)))
1833 && (operand_equal_for_phi_arg_p (arg_false, larger)
1834 || (alt_larger
 1835 && operand_equal_for_phi_arg_p (arg_false, alt_larger))))
1837 /* Case
1839 if (smaller < larger)
1840 rslt = smaller;
1841 else
1842 rslt = larger; */
1843 minmax = MIN_EXPR;
1845 else if ((operand_equal_for_phi_arg_p (arg_false, smaller)
1846 || (alt_smaller
1847 && operand_equal_for_phi_arg_p (arg_false, alt_smaller)))
1848 && (operand_equal_for_phi_arg_p (arg_true, larger)
1849 || (alt_larger
1850 && operand_equal_for_phi_arg_p (arg_true, alt_larger))))
1851 minmax = MAX_EXPR;
1852 else
1853 return false;
1855 else if (HONOR_NANS (type) || HONOR_SIGNED_ZEROS (type))
1856 /* The optimization may be unsafe due to NaNs. */
1857 return false;
1858 else if (middle_bb != alt_middle_bb && threeway_p)
1860 /* Recognize the following case:
1862 if (smaller < larger)
1863 a = MIN (smaller, c);
1864 else
1865 b = MIN (larger, c);
1866 x = PHI <a, b>
1868 This is equivalent to
1870 a = MIN (smaller, c);
1871 x = MIN (larger, a); */
1873 gimple *assign = last_and_only_stmt (middle_bb);
1874 tree lhs, op0, op1, bound;
1875 tree alt_lhs, alt_op0, alt_op1;
1876 bool invert = false;
1878 /* When THREEWAY_P then e1 will point to the edge of the final transition
1879 from middle-bb to end. */
1880 if (true_edge == e0)
1881 gcc_assert (false_edge == EDGE_PRED (e1->src, 0));
1882 else
1883 gcc_assert (true_edge == EDGE_PRED (e1->src, 0));
1885 bool valid_minmax_p = false;
1886 gimple_stmt_iterator it1
1887 = gsi_start_nondebug_after_labels_bb (middle_bb);
1888 gimple_stmt_iterator it2
1889 = gsi_start_nondebug_after_labels_bb (alt_middle_bb);
1890 if (gsi_one_nondebug_before_end_p (it1)
1891 && gsi_one_nondebug_before_end_p (it2))
1893 gimple *stmt1 = gsi_stmt (it1);
1894 gimple *stmt2 = gsi_stmt (it2);
1895 if (is_gimple_assign (stmt1) && is_gimple_assign (stmt2))
1897 enum tree_code code1 = gimple_assign_rhs_code (stmt1);
1898 enum tree_code code2 = gimple_assign_rhs_code (stmt2);
1899 valid_minmax_p = (code1 == MIN_EXPR || code1 == MAX_EXPR)
1900 && (code2 == MIN_EXPR || code2 == MAX_EXPR);
1904 if (!valid_minmax_p)
1905 return false;
1907 if (!assign
1908 || gimple_code (assign) != GIMPLE_ASSIGN)
1909 return false;
1911 lhs = gimple_assign_lhs (assign);
1912 ass_code = gimple_assign_rhs_code (assign);
1913 if (ass_code != MAX_EXPR && ass_code != MIN_EXPR)
1914 return false;
1916 op0 = gimple_assign_rhs1 (assign);
1917 op1 = gimple_assign_rhs2 (assign);
1919 assign = last_and_only_stmt (alt_middle_bb);
1920 if (!assign
1921 || gimple_code (assign) != GIMPLE_ASSIGN)
1922 return false;
1924 alt_lhs = gimple_assign_lhs (assign);
1925 if (ass_code != gimple_assign_rhs_code (assign))
1926 return false;
1928 if (!operand_equal_for_phi_arg_p (lhs, arg_true)
1929 || !operand_equal_for_phi_arg_p (alt_lhs, arg_false))
1930 return false;
1932 alt_op0 = gimple_assign_rhs1 (assign);
1933 alt_op1 = gimple_assign_rhs2 (assign);
1935 if ((operand_equal_for_phi_arg_p (op0, smaller)
1936 || (alt_smaller
1937 && operand_equal_for_phi_arg_p (op0, alt_smaller)))
1938 && (operand_equal_for_phi_arg_p (alt_op0, larger)
1939 || (alt_larger
1940 && operand_equal_for_phi_arg_p (alt_op0, alt_larger))))
1942 /* We got here if the condition is true, i.e., SMALLER < LARGER. */
1943 if (!operand_equal_for_phi_arg_p (op1, alt_op1))
1944 return false;
1946 if ((arg0 = strip_bit_not (op0)) != NULL
1947 && (arg1 = strip_bit_not (alt_op0)) != NULL
1948 && (bound = strip_bit_not (op1)) != NULL)
1950 minmax = MAX_EXPR;
1951 ass_code = invert_minmax_code (ass_code);
1952 invert = true;
1954 else
1956 bound = op1;
1957 minmax = MIN_EXPR;
1958 arg0 = op0;
1959 arg1 = alt_op0;
1962 else if ((operand_equal_for_phi_arg_p (op0, larger)
1963 || (alt_larger
1964 && operand_equal_for_phi_arg_p (op0, alt_larger)))
1965 && (operand_equal_for_phi_arg_p (alt_op0, smaller)
1966 || (alt_smaller
1967 && operand_equal_for_phi_arg_p (alt_op0, alt_smaller))))
1969 /* We got here if the condition is true, i.e., SMALLER > LARGER. */
1970 if (!operand_equal_for_phi_arg_p (op1, alt_op1))
1971 return false;
1973 if ((arg0 = strip_bit_not (op0)) != NULL
1974 && (arg1 = strip_bit_not (alt_op0)) != NULL
1975 && (bound = strip_bit_not (op1)) != NULL)
1977 minmax = MIN_EXPR;
1978 ass_code = invert_minmax_code (ass_code);
1979 invert = true;
1981 else
1983 bound = op1;
1984 minmax = MAX_EXPR;
1985 arg0 = op0;
1986 arg1 = alt_op0;
1989 else
1990 return false;
1992 /* Emit the statement to compute min/max. */
1993 location_t locus = gimple_location (last_nondebug_stmt (cond_bb));
1994 gimple_seq stmts = NULL;
1995 tree phi_result = PHI_RESULT (phi);
1996 result = gimple_build (&stmts, locus, minmax, TREE_TYPE (phi_result),
1997 arg0, arg1);
1998 result = gimple_build (&stmts, locus, ass_code, TREE_TYPE (phi_result),
1999 result, bound);
2000 if (invert)
2001 result = gimple_build (&stmts, locus, BIT_NOT_EXPR, TREE_TYPE (phi_result),
2002 result);
2004 gsi = gsi_last_bb (cond_bb);
2005 gsi_insert_seq_before (&gsi, stmts, GSI_NEW_STMT);
2007 replace_phi_edge_with_variable (cond_bb, e1, phi, result);
2009 return true;
2011 else if (!threeway_p
2012 || empty_block_p (alt_middle_bb))
2014 /* Recognize the following case, assuming d <= u:
2016 if (a <= u)
2017 b = MAX (a, d);
2018 x = PHI <b, u>
2020 This is equivalent to
2022 b = MAX (a, d);
2023 x = MIN (b, u); */
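/* As an illustrative source-level example (not taken from this file),
   a clamp such as

     int clamp (int a, int lo, int hi)
     {
       if (a <= hi)
         return a < lo ? lo : a;
       return hi;
     }

   matches this shape when lo <= hi: the then-arm is MAX (a, lo) and the
   PHI with hi becomes MIN (MAX (a, lo), hi).  */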
2025 gimple *assign = last_and_only_stmt (middle_bb);
2026 tree lhs, op0, op1, bound;
2028 if (!single_pred_p (middle_bb))
2029 return false;
2031 if (!assign
2032 || gimple_code (assign) != GIMPLE_ASSIGN)
2033 return false;
2035 lhs = gimple_assign_lhs (assign);
2036 ass_code = gimple_assign_rhs_code (assign);
2037 if (ass_code != MAX_EXPR && ass_code != MIN_EXPR)
2038 return false;
2039 op0 = gimple_assign_rhs1 (assign);
2040 op1 = gimple_assign_rhs2 (assign);
2042 if (true_edge->src == middle_bb)
2044 /* We got here if the condition is true, i.e., SMALLER < LARGER. */
2045 if (!operand_equal_for_phi_arg_p (lhs, arg_true))
2046 return false;
2048 if (operand_equal_for_phi_arg_p (arg_false, larger)
2049 || (alt_larger
2050 && operand_equal_for_phi_arg_p (arg_false, alt_larger)))
2052 /* Case
2054 if (smaller < larger)
2056 r' = MAX_EXPR (smaller, bound)
2058 r = PHI <r', larger> --> to be turned to MIN_EXPR. */
2059 if (ass_code != MAX_EXPR)
2060 return false;
2062 minmax = MIN_EXPR;
2063 if (operand_equal_for_phi_arg_p (op0, smaller)
2064 || (alt_smaller
2065 && operand_equal_for_phi_arg_p (op0, alt_smaller)))
2066 bound = op1;
2067 else if (operand_equal_for_phi_arg_p (op1, smaller)
2068 || (alt_smaller
2069 && operand_equal_for_phi_arg_p (op1, alt_smaller)))
2070 bound = op0;
2071 else
2072 return false;
2074 /* We need BOUND <= LARGER. */
2075 if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
2076 bound, arg_false)))
2077 return false;
2079 else if (operand_equal_for_phi_arg_p (arg_false, smaller)
2080 || (alt_smaller
2081 && operand_equal_for_phi_arg_p (arg_false, alt_smaller)))
2083 /* Case
2085 if (smaller < larger)
2087 r' = MIN_EXPR (larger, bound)
2089 r = PHI <r', smaller> --> to be turned to MAX_EXPR. */
2090 if (ass_code != MIN_EXPR)
2091 return false;
2093 minmax = MAX_EXPR;
2094 if (operand_equal_for_phi_arg_p (op0, larger)
2095 || (alt_larger
2096 && operand_equal_for_phi_arg_p (op0, alt_larger)))
2097 bound = op1;
2098 else if (operand_equal_for_phi_arg_p (op1, larger)
2099 || (alt_larger
2100 && operand_equal_for_phi_arg_p (op1, alt_larger)))
2101 bound = op0;
2102 else
2103 return false;
2105 /* We need BOUND >= SMALLER. */
2106 if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
2107 bound, arg_false)))
2108 return false;
2110 else
2111 return false;
2113 else
2115 /* We got here if the condition is false, i.e., SMALLER > LARGER. */
2116 if (!operand_equal_for_phi_arg_p (lhs, arg_false))
2117 return false;
2119 if (operand_equal_for_phi_arg_p (arg_true, larger)
2120 || (alt_larger
2121 && operand_equal_for_phi_arg_p (arg_true, alt_larger)))
2123 /* Case
2125 if (smaller > larger)
2127 r' = MIN_EXPR (smaller, bound)
2129 r = PHI <r', larger> --> to be turned to MAX_EXPR. */
2130 if (ass_code != MIN_EXPR)
2131 return false;
2133 minmax = MAX_EXPR;
2134 if (operand_equal_for_phi_arg_p (op0, smaller)
2135 || (alt_smaller
2136 && operand_equal_for_phi_arg_p (op0, alt_smaller)))
2137 bound = op1;
2138 else if (operand_equal_for_phi_arg_p (op1, smaller)
2139 || (alt_smaller
2140 && operand_equal_for_phi_arg_p (op1, alt_smaller)))
2141 bound = op0;
2142 else
2143 return false;
2145 /* We need BOUND >= LARGER. */
2146 if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
2147 bound, arg_true)))
2148 return false;
2150 else if (operand_equal_for_phi_arg_p (arg_true, smaller)
2151 || (alt_smaller
2152 && operand_equal_for_phi_arg_p (arg_true, alt_smaller)))
2154 /* Case
2156 if (smaller > larger)
2158 r' = MAX_EXPR (larger, bound)
2160 r = PHI <r', smaller> --> to be turned to MIN_EXPR. */
2161 if (ass_code != MAX_EXPR)
2162 return false;
2164 minmax = MIN_EXPR;
2165 if (operand_equal_for_phi_arg_p (op0, larger))
2166 bound = op1;
2167 else if (operand_equal_for_phi_arg_p (op1, larger))
2168 bound = op0;
2169 else
2170 return false;
2172 /* We need BOUND <= SMALLER. */
2173 if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
2174 bound, arg_true)))
2175 return false;
2177 else
2178 return false;
2181 /* Move the statement from the middle block. */
2182 gsi = gsi_last_bb (cond_bb);
2183 gsi_from = gsi_last_nondebug_bb (middle_bb);
2184 reset_flow_sensitive_info (SINGLE_SSA_TREE_OPERAND (gsi_stmt (gsi_from),
2185 SSA_OP_DEF));
2186 gsi_move_before (&gsi_from, &gsi);
2188 else
2189 return false;
2191 /* Emit the statement to compute min/max. */
2192 gimple_seq stmts = NULL;
2193 tree phi_result = PHI_RESULT (phi);
2195 /* When we can't use a MIN/MAX_EXPR still make sure the expression
2196 stays in a form to be recognized by ISA that map to IEEE x > y ? x : y
2197 semantics (that's not IEEE max semantics). */
2198 if (HONOR_NANS (type) || HONOR_SIGNED_ZEROS (type))
2200 result = gimple_build (&stmts, cmp, boolean_type_node,
2201 gimple_cond_lhs (cond), rhs);
2202 result = gimple_build (&stmts, COND_EXPR, TREE_TYPE (phi_result),
2203 result, arg_true, arg_false);
2205 else
2206 result = gimple_build (&stmts, minmax, TREE_TYPE (phi_result), arg0, arg1);
2208 gsi = gsi_last_bb (cond_bb);
2209 gsi_insert_seq_before (&gsi, stmts, GSI_NEW_STMT);
2211 replace_phi_edge_with_variable (cond_bb, e1, phi, result);
2213 return true;
2216 /* Attempt to optimize (x <=> y) cmp 0 and similar comparisons.
2217 For strong ordering <=> try to match something like:
2218 <bb 2> : // cond3_bb (== cond2_bb)
2219 if (x_4(D) != y_5(D))
2220 goto <bb 3>; [INV]
2221 else
2222 goto <bb 6>; [INV]
2224 <bb 3> : // cond_bb
2225 if (x_4(D) < y_5(D))
2226 goto <bb 6>; [INV]
2227 else
2228 goto <bb 4>; [INV]
2230 <bb 4> : // middle_bb
2232 <bb 6> : // phi_bb
2233 # iftmp.0_2 = PHI <1(4), 0(2), -1(3)>
2234 _1 = iftmp.0_2 == 0;
2236 and for partial ordering <=> something like:
2238 <bb 2> : // cond3_bb
2239 if (a_3(D) == b_5(D))
2240 goto <bb 6>; [50.00%]
2241 else
2242 goto <bb 3>; [50.00%]
2244 <bb 3> [local count: 536870913]: // cond2_bb
2245 if (a_3(D) < b_5(D))
2246 goto <bb 6>; [50.00%]
2247 else
2248 goto <bb 4>; [50.00%]
2250 <bb 4> [local count: 268435456]: // cond_bb
2251 if (a_3(D) > b_5(D))
2252 goto <bb 6>; [50.00%]
2253 else
2254 goto <bb 5>; [50.00%]
2256 <bb 5> [local count: 134217728]: // middle_bb
2258 <bb 6> [local count: 1073741824]: // phi_bb
2259 # SR.27_4 = PHI <0(2), -1(3), 1(4), 2(5)>
2260 _2 = SR.27_4 > 0; */
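/* As an illustrative C++ source example (not taken from this file),

     #include <compare>

     bool eq (int x, int y)
     {
       return (x <=> y) == 0;
     }

   produces the strong ordering shape above, and the comparison of the
   PHI result against 0 is rewritten into a direct x == y test.  */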
2262 static bool
2263 spaceship_replacement (basic_block cond_bb, basic_block middle_bb,
2264 edge e0, edge e1, gphi *phi,
2265 tree arg0, tree arg1)
2267 tree phires = PHI_RESULT (phi);
2268 if (!INTEGRAL_TYPE_P (TREE_TYPE (phires))
2269 || TYPE_UNSIGNED (TREE_TYPE (phires))
2270 || !tree_fits_shwi_p (arg0)
2271 || !tree_fits_shwi_p (arg1)
2272 || !IN_RANGE (tree_to_shwi (arg0), -1, 2)
2273 || !IN_RANGE (tree_to_shwi (arg1), -1, 2))
2274 return false;
2276 basic_block phi_bb = gimple_bb (phi);
2277 gcc_assert (phi_bb == e0->dest && phi_bb == e1->dest);
2278 if (!IN_RANGE (EDGE_COUNT (phi_bb->preds), 3, 4))
2279 return false;
2281 use_operand_p use_p;
2282 gimple *use_stmt;
2283 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (phires))
2284 return false;
2285 if (!single_imm_use (phires, &use_p, &use_stmt))
2286 return false;
2287 enum tree_code cmp;
2288 tree lhs, rhs;
2289 gimple *orig_use_stmt = use_stmt;
2290 tree orig_use_lhs = NULL_TREE;
2291 int prec = TYPE_PRECISION (TREE_TYPE (phires));
2292 bool is_cast = false;
2294 /* Deal with the case when match.pd has rewritten the (res & ~1) == 0
2295 into res <= 1 and has left a type-cast for signed types. */
2296 if (gimple_assign_cast_p (use_stmt))
2298 orig_use_lhs = gimple_assign_lhs (use_stmt);
2299 /* match.pd would have only done this for a signed type,
2300 so the conversion must be to an unsigned one. */
2301 tree ty1 = TREE_TYPE (gimple_assign_rhs1 (use_stmt));
2302 tree ty2 = TREE_TYPE (orig_use_lhs);
2304 if (!TYPE_UNSIGNED (ty2) || !INTEGRAL_TYPE_P (ty2))
2305 return false;
2306 if (TYPE_PRECISION (ty1) > TYPE_PRECISION (ty2))
2307 return false;
2308 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (orig_use_lhs))
2309 return false;
2310 if (!single_imm_use (orig_use_lhs, &use_p, &use_stmt))
2311 return false;
2313 is_cast = true;
2315 else if (is_gimple_assign (use_stmt)
2316 && gimple_assign_rhs_code (use_stmt) == BIT_AND_EXPR
2317 && TREE_CODE (gimple_assign_rhs2 (use_stmt)) == INTEGER_CST
2318 && (wi::to_wide (gimple_assign_rhs2 (use_stmt))
2319 == wi::shifted_mask (1, prec - 1, false, prec)))
2321 /* For partial_ordering result operator>= with unspec as second
2322 argument is (res & 1) == res, folded by match.pd into
2323 (res & ~1) == 0. */
2324 orig_use_lhs = gimple_assign_lhs (use_stmt);
2325 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (orig_use_lhs))
2326 return false;
2327 if (!single_imm_use (orig_use_lhs, &use_p, &use_stmt))
2328 return false;
2330 if (gimple_code (use_stmt) == GIMPLE_COND)
2332 cmp = gimple_cond_code (use_stmt);
2333 lhs = gimple_cond_lhs (use_stmt);
2334 rhs = gimple_cond_rhs (use_stmt);
2336 else if (is_gimple_assign (use_stmt))
2338 if (gimple_assign_rhs_class (use_stmt) == GIMPLE_BINARY_RHS)
2340 cmp = gimple_assign_rhs_code (use_stmt);
2341 lhs = gimple_assign_rhs1 (use_stmt);
2342 rhs = gimple_assign_rhs2 (use_stmt);
2344 else if (gimple_assign_rhs_code (use_stmt) == COND_EXPR)
2346 tree cond = gimple_assign_rhs1 (use_stmt);
2347 if (!COMPARISON_CLASS_P (cond))
2348 return false;
2349 cmp = TREE_CODE (cond);
2350 lhs = TREE_OPERAND (cond, 0);
2351 rhs = TREE_OPERAND (cond, 1);
2353 else
2354 return false;
2356 else
2357 return false;
2358 switch (cmp)
2360 case EQ_EXPR:
2361 case NE_EXPR:
2362 case LT_EXPR:
2363 case GT_EXPR:
2364 case LE_EXPR:
2365 case GE_EXPR:
2366 break;
2367 default:
2368 return false;
2370 if (lhs != (orig_use_lhs ? orig_use_lhs : phires)
2371 || !tree_fits_shwi_p (rhs)
2372 || !IN_RANGE (tree_to_shwi (rhs), -1, 1))
2373 return false;
2375 if (is_cast)
2377 if (TREE_CODE (rhs) != INTEGER_CST)
2378 return false;
2379 /* As with -ffast-math we assume a return value of 2 to be
2380 impossible; canonicalize (unsigned) res <= 1U or
2381 (unsigned) res < 2U into res >= 0, and (unsigned) res > 1U
2382 or (unsigned) res >= 2U into res < 0. */
2383 switch (cmp)
2385 case LE_EXPR:
2386 if (!integer_onep (rhs))
2387 return false;
2388 cmp = GE_EXPR;
2389 break;
2390 case LT_EXPR:
2391 if (wi::ne_p (wi::to_widest (rhs), 2))
2392 return false;
2393 cmp = GE_EXPR;
2394 break;
2395 case GT_EXPR:
2396 if (!integer_onep (rhs))
2397 return false;
2398 cmp = LT_EXPR;
2399 break;
2400 case GE_EXPR:
2401 if (wi::ne_p (wi::to_widest (rhs), 2))
2402 return false;
2403 cmp = LT_EXPR;
2404 break;
2405 default:
2406 return false;
2408 rhs = build_zero_cst (TREE_TYPE (phires));
2410 else if (orig_use_lhs)
2412 if ((cmp != EQ_EXPR && cmp != NE_EXPR) || !integer_zerop (rhs))
2413 return false;
2414 /* As with -ffast-math we assume a return value of 2 to be
2415 impossible; canonicalize (res & ~1) == 0 into
2416 res >= 0 and (res & ~1) != 0 into res < 0. */
2417 cmp = cmp == EQ_EXPR ? GE_EXPR : LT_EXPR;
2420 if (!empty_block_p (middle_bb))
2421 return false;
2423 gcond *cond1 = as_a <gcond *> (*gsi_last_bb (cond_bb));
2424 enum tree_code cmp1 = gimple_cond_code (cond1);
2425 switch (cmp1)
2427 case LT_EXPR:
2428 case LE_EXPR:
2429 case GT_EXPR:
2430 case GE_EXPR:
2431 break;
2432 default:
2433 return false;
2435 tree lhs1 = gimple_cond_lhs (cond1);
2436 tree rhs1 = gimple_cond_rhs (cond1);
2437 /* The optimization may be unsafe due to NaNs. */
2438 if (HONOR_NANS (TREE_TYPE (lhs1)))
2439 return false;
2440 if (TREE_CODE (lhs1) == SSA_NAME && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs1))
2441 return false;
2442 if (TREE_CODE (rhs1) == SSA_NAME && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs1))
2443 return false;
2445 if (!single_pred_p (cond_bb) || !cond_only_block_p (cond_bb))
2446 return false;
2448 basic_block cond2_bb = single_pred (cond_bb);
2449 if (EDGE_COUNT (cond2_bb->succs) != 2)
2450 return false;
2451 edge cond2_phi_edge;
2452 if (EDGE_SUCC (cond2_bb, 0)->dest == cond_bb)
2454 if (EDGE_SUCC (cond2_bb, 1)->dest != phi_bb)
2455 return false;
2456 cond2_phi_edge = EDGE_SUCC (cond2_bb, 1);
2458 else if (EDGE_SUCC (cond2_bb, 0)->dest != phi_bb)
2459 return false;
2460 else
2461 cond2_phi_edge = EDGE_SUCC (cond2_bb, 0);
2462 tree arg2 = gimple_phi_arg_def (phi, cond2_phi_edge->dest_idx);
2463 if (!tree_fits_shwi_p (arg2))
2464 return false;
2465 gcond *cond2 = safe_dyn_cast <gcond *> (*gsi_last_bb (cond2_bb));
2466 if (!cond2)
2467 return false;
2468 enum tree_code cmp2 = gimple_cond_code (cond2);
2469 tree lhs2 = gimple_cond_lhs (cond2);
2470 tree rhs2 = gimple_cond_rhs (cond2);
2471 if (lhs2 == lhs1)
2473 if (!operand_equal_p (rhs2, rhs1, 0))
2475 if ((cmp2 == EQ_EXPR || cmp2 == NE_EXPR)
2476 && TREE_CODE (rhs1) == INTEGER_CST
2477 && TREE_CODE (rhs2) == INTEGER_CST)
2479 /* For integers, we can have cond2 x == 5
2480 and cond1 x < 5, x <= 4, x <= 5, x < 6,
2481 x > 5, x >= 6, x >= 5 or x > 4. */
2482 if (tree_int_cst_lt (rhs1, rhs2))
2484 if (wi::ne_p (wi::to_wide (rhs1) + 1, wi::to_wide (rhs2)))
2485 return false;
2486 if (cmp1 == LE_EXPR)
2487 cmp1 = LT_EXPR;
2488 else if (cmp1 == GT_EXPR)
2489 cmp1 = GE_EXPR;
2490 else
2491 return false;
2493 else
2495 gcc_checking_assert (tree_int_cst_lt (rhs2, rhs1));
2496 if (wi::ne_p (wi::to_wide (rhs2) + 1, wi::to_wide (rhs1)))
2497 return false;
2498 if (cmp1 == LT_EXPR)
2499 cmp1 = LE_EXPR;
2500 else if (cmp1 == GE_EXPR)
2501 cmp1 = GT_EXPR;
2502 else
2503 return false;
2505 rhs1 = rhs2;
2507 else
2508 return false;
2511 else if (lhs2 == rhs1)
2513 if (rhs2 != lhs1)
2514 return false;
2516 else
2517 return false;
2519 tree arg3 = arg2;
2520 basic_block cond3_bb = cond2_bb;
2521 edge cond3_phi_edge = cond2_phi_edge;
2522 gcond *cond3 = cond2;
2523 enum tree_code cmp3 = cmp2;
2524 tree lhs3 = lhs2;
2525 tree rhs3 = rhs2;
2526 if (EDGE_COUNT (phi_bb->preds) == 4)
2528 if (absu_hwi (tree_to_shwi (arg2)) != 1)
2529 return false;
2530 if (e1->flags & EDGE_TRUE_VALUE)
2532 if (tree_to_shwi (arg0) != 2
2533 || absu_hwi (tree_to_shwi (arg1)) != 1
2534 || wi::to_widest (arg1) == wi::to_widest (arg2))
2535 return false;
2537 else if (tree_to_shwi (arg1) != 2
2538 || absu_hwi (tree_to_shwi (arg0)) != 1
2539 || wi::to_widest (arg0) == wi::to_widest (arg2))
2540 return false;
2541 switch (cmp2)
2543 case LT_EXPR:
2544 case LE_EXPR:
2545 case GT_EXPR:
2546 case GE_EXPR:
2547 break;
2548 default:
2549 return false;
2551 /* if (x < y) goto phi_bb; else fallthru;
2552 if (x > y) goto phi_bb; else fallthru;
2553 bbx:;
2554 phi_bb:;
2555 is ok, but if x and y are swapped in one of the comparisons,
2556 or the comparisons are the same and operands not swapped,
2557 or the true and false edges are swapped, it is not. */
2558 if ((lhs2 == lhs1)
2559 ^ (((cond2_phi_edge->flags
2560 & ((cmp2 == LT_EXPR || cmp2 == LE_EXPR)
2561 ? EDGE_TRUE_VALUE : EDGE_FALSE_VALUE)) != 0)
2562 != ((e1->flags
2563 & ((cmp1 == LT_EXPR || cmp1 == LE_EXPR)
2564 ? EDGE_TRUE_VALUE : EDGE_FALSE_VALUE)) != 0)))
2565 return false;
2566 if (!single_pred_p (cond2_bb) || !cond_only_block_p (cond2_bb))
2567 return false;
2568 cond3_bb = single_pred (cond2_bb);
2569 if (EDGE_COUNT (cond2_bb->succs) != 2)
2570 return false;
2571 if (EDGE_SUCC (cond3_bb, 0)->dest == cond2_bb)
2573 if (EDGE_SUCC (cond3_bb, 1)->dest != phi_bb)
2574 return false;
2575 cond3_phi_edge = EDGE_SUCC (cond3_bb, 1);
2577 else if (EDGE_SUCC (cond3_bb, 0)->dest != phi_bb)
2578 return false;
2579 else
2580 cond3_phi_edge = EDGE_SUCC (cond3_bb, 0);
2581 arg3 = gimple_phi_arg_def (phi, cond3_phi_edge->dest_idx);
2582 cond3 = safe_dyn_cast <gcond *> (*gsi_last_bb (cond3_bb));
2583 if (!cond3)
2584 return false;
2585 cmp3 = gimple_cond_code (cond3);
2586 lhs3 = gimple_cond_lhs (cond3);
2587 rhs3 = gimple_cond_rhs (cond3);
2588 if (lhs3 == lhs1)
2590 if (!operand_equal_p (rhs3, rhs1, 0))
2591 return false;
2593 else if (lhs3 == rhs1)
2595 if (rhs3 != lhs1)
2596 return false;
2598 else
2599 return false;
2601 else if (absu_hwi (tree_to_shwi (arg0)) != 1
2602 || absu_hwi (tree_to_shwi (arg1)) != 1
2603 || wi::to_widest (arg0) == wi::to_widest (arg1))
2604 return false;
2606 if (!integer_zerop (arg3) || (cmp3 != EQ_EXPR && cmp3 != NE_EXPR))
2607 return false;
2608 if ((cond3_phi_edge->flags & (cmp3 == EQ_EXPR
2609 ? EDGE_TRUE_VALUE : EDGE_FALSE_VALUE)) == 0)
2610 return false;
2612 /* lhs1 one_cmp rhs1 results in phires of 1. */
2613 enum tree_code one_cmp;
2614 if ((cmp1 == LT_EXPR || cmp1 == LE_EXPR)
2615 ^ (!integer_onep ((e1->flags & EDGE_TRUE_VALUE) ? arg1 : arg0)))
2616 one_cmp = LT_EXPR;
2617 else
2618 one_cmp = GT_EXPR;
2620 enum tree_code res_cmp;
2621 switch (cmp)
2623 case EQ_EXPR:
2624 if (integer_zerop (rhs))
2625 res_cmp = EQ_EXPR;
2626 else if (integer_minus_onep (rhs))
2627 res_cmp = one_cmp == LT_EXPR ? GT_EXPR : LT_EXPR;
2628 else if (integer_onep (rhs))
2629 res_cmp = one_cmp;
2630 else
2631 return false;
2632 break;
2633 case NE_EXPR:
2634 if (integer_zerop (rhs))
2635 res_cmp = NE_EXPR;
2636 else if (integer_minus_onep (rhs))
2637 res_cmp = one_cmp == LT_EXPR ? LE_EXPR : GE_EXPR;
2638 else if (integer_onep (rhs))
2639 res_cmp = one_cmp == LT_EXPR ? GE_EXPR : LE_EXPR;
2640 else
2641 return false;
2642 break;
2643 case LT_EXPR:
2644 if (integer_onep (rhs))
2645 res_cmp = one_cmp == LT_EXPR ? GE_EXPR : LE_EXPR;
2646 else if (integer_zerop (rhs))
2647 res_cmp = one_cmp == LT_EXPR ? GT_EXPR : LT_EXPR;
2648 else
2649 return false;
2650 break;
2651 case LE_EXPR:
2652 if (integer_zerop (rhs))
2653 res_cmp = one_cmp == LT_EXPR ? GE_EXPR : LE_EXPR;
2654 else if (integer_minus_onep (rhs))
2655 res_cmp = one_cmp == LT_EXPR ? GT_EXPR : LT_EXPR;
2656 else
2657 return false;
2658 break;
2659 case GT_EXPR:
2660 if (integer_minus_onep (rhs))
2661 res_cmp = one_cmp == LT_EXPR ? LE_EXPR : GE_EXPR;
2662 else if (integer_zerop (rhs))
2663 res_cmp = one_cmp;
2664 else
2665 return false;
2666 break;
2667 case GE_EXPR:
2668 if (integer_zerop (rhs))
2669 res_cmp = one_cmp == LT_EXPR ? LE_EXPR : GE_EXPR;
2670 else if (integer_onep (rhs))
2671 res_cmp = one_cmp;
2672 else
2673 return false;
2674 break;
2675 default:
2676 gcc_unreachable ();
2679 if (gimple_code (use_stmt) == GIMPLE_COND)
2681 gcond *use_cond = as_a <gcond *> (use_stmt);
2682 gimple_cond_set_code (use_cond, res_cmp);
2683 gimple_cond_set_lhs (use_cond, lhs1);
2684 gimple_cond_set_rhs (use_cond, rhs1);
2686 else if (gimple_assign_rhs_class (use_stmt) == GIMPLE_BINARY_RHS)
2688 gimple_assign_set_rhs_code (use_stmt, res_cmp);
2689 gimple_assign_set_rhs1 (use_stmt, lhs1);
2690 gimple_assign_set_rhs2 (use_stmt, rhs1);
2692 else
2694 tree cond = build2 (res_cmp, TREE_TYPE (gimple_assign_rhs1 (use_stmt)),
2695 lhs1, rhs1);
2696 gimple_assign_set_rhs1 (use_stmt, cond);
2698 update_stmt (use_stmt);
2700 if (MAY_HAVE_DEBUG_BIND_STMTS)
2702 use_operand_p use_p;
2703 imm_use_iterator iter;
2704 bool has_debug_uses = false;
2705 bool has_cast_debug_uses = false;
2706 FOR_EACH_IMM_USE_FAST (use_p, iter, phires)
2708 gimple *use_stmt = USE_STMT (use_p);
2709 if (orig_use_lhs && use_stmt == orig_use_stmt)
2710 continue;
2711 gcc_assert (is_gimple_debug (use_stmt));
2712 has_debug_uses = true;
2713 break;
2715 if (orig_use_lhs)
2717 if (!has_debug_uses || is_cast)
2718 FOR_EACH_IMM_USE_FAST (use_p, iter, orig_use_lhs)
2720 gimple *use_stmt = USE_STMT (use_p);
2721 gcc_assert (is_gimple_debug (use_stmt));
2722 has_debug_uses = true;
2723 if (is_cast)
2724 has_cast_debug_uses = true;
2726 gimple_stmt_iterator gsi = gsi_for_stmt (orig_use_stmt);
2727 tree zero = build_zero_cst (TREE_TYPE (orig_use_lhs));
2728 gimple_assign_set_rhs_with_ops (&gsi, INTEGER_CST, zero);
2729 update_stmt (orig_use_stmt);
2732 if (has_debug_uses)
2734 /* If there are debug uses, emit something like:
2735 # DEBUG D#1 => i_2(D) > j_3(D) ? 1 : -1
2736 # DEBUG D#2 => i_2(D) == j_3(D) ? 0 : D#1
2737 where > stands for the comparison that yielded 1
2738 and replace debug uses of phi result with that D#2.
2739 Ignore the value of 2, because if NaNs aren't expected,
2740 all floating point numbers should be comparable. */
2741 gimple_stmt_iterator gsi = gsi_after_labels (gimple_bb (phi));
2742 tree type = TREE_TYPE (phires);
2743 tree temp1 = build_debug_expr_decl (type);
2744 tree t = build2 (one_cmp, boolean_type_node, lhs1, rhs2);
2745 t = build3 (COND_EXPR, type, t, build_one_cst (type),
2746 build_int_cst (type, -1));
2747 gimple *g = gimple_build_debug_bind (temp1, t, phi);
2748 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
2749 tree temp2 = build_debug_expr_decl (type);
2750 t = build2 (EQ_EXPR, boolean_type_node, lhs1, rhs2);
2751 t = build3 (COND_EXPR, type, t, build_zero_cst (type), temp1);
2752 g = gimple_build_debug_bind (temp2, t, phi);
2753 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
2754 replace_uses_by (phires, temp2);
2755 if (orig_use_lhs)
2757 if (has_cast_debug_uses)
2759 tree temp3 = make_node (DEBUG_EXPR_DECL);
2760 DECL_ARTIFICIAL (temp3) = 1;
2761 TREE_TYPE (temp3) = TREE_TYPE (orig_use_lhs);
2762 SET_DECL_MODE (temp3, TYPE_MODE (type));
2763 t = fold_convert (TREE_TYPE (temp3), temp2);
2764 g = gimple_build_debug_bind (temp3, t, phi);
2765 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
2766 replace_uses_by (orig_use_lhs, temp3);
2768 else
2769 replace_uses_by (orig_use_lhs, temp2);
2774 if (orig_use_lhs)
2776 gimple_stmt_iterator gsi = gsi_for_stmt (orig_use_stmt);
2777 gsi_remove (&gsi, true);
2780 gimple_stmt_iterator psi = gsi_for_stmt (phi);
2781 remove_phi_node (&psi, true);
2782 statistics_counter_event (cfun, "spaceship replacement", 1);
2784 return true;
2787 /* Optimize x ? __builtin_fun (x) : C, where C is __builtin_fun (0).
2788 Convert
2790 <bb 2>
2791 if (b_4(D) != 0)
2792 goto <bb 3>
2793 else
2794 goto <bb 4>
2796 <bb 3>
2797 _2 = (unsigned long) b_4(D);
2798 _9 = __builtin_popcountl (_2);
2800 _9 = __builtin_popcountl (b_4(D));
2802 <bb 4>
2803 c_12 = PHI <0(2), _9(3)>
2805 Into
2806 <bb 2>
2807 _2 = (unsigned long) b_4(D);
2808 _9 = __builtin_popcountl (_2);
2810 _9 = __builtin_popcountl (b_4(D));
2812 <bb 4>
2813 c_12 = PHI <_9(2)>
2815 Similarly for __builtin_clz or __builtin_ctz if
2816 C?Z_DEFINED_VALUE_AT_ZERO is 2, optab is present and
2817 instead of 0 above it uses the value from that macro. */
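/* As an illustrative source example (not taken from this file),

     int f (unsigned long b)
     {
       return b != 0 ? __builtin_popcountl (b) : 0;
     }

   matches this pattern and the call becomes unconditional, because
   __builtin_popcountl (0) is 0 anyway.  */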
2819 static bool
2820 cond_removal_in_builtin_zero_pattern (basic_block cond_bb,
2821 basic_block middle_bb,
2822 edge e1, edge e2, gphi *phi,
2823 tree arg0, tree arg1)
2825 gimple_stmt_iterator gsi, gsi_from;
2826 gimple *call;
2827 gimple *cast = NULL;
2828 tree lhs, arg;
2830 /* Check that
2831 _2 = (unsigned long) b_4(D);
2832 _9 = __builtin_popcountl (_2);
2834 _9 = __builtin_popcountl (b_4(D));
2835 are the only stmts in the middle_bb. */
2837 gsi = gsi_start_nondebug_after_labels_bb (middle_bb);
2838 if (gsi_end_p (gsi))
2839 return false;
2840 cast = gsi_stmt (gsi);
2841 gsi_next_nondebug (&gsi);
2842 if (!gsi_end_p (gsi))
2844 call = gsi_stmt (gsi);
2845 gsi_next_nondebug (&gsi);
2846 if (!gsi_end_p (gsi))
2847 return false;
2849 else
2851 call = cast;
2852 cast = NULL;
2855 /* Check that we have a popcount/clz/ctz builtin. */
2856 if (!is_gimple_call (call) || gimple_call_num_args (call) != 1)
2857 return false;
2859 arg = gimple_call_arg (call, 0);
2860 lhs = gimple_get_lhs (call);
2862 if (lhs == NULL_TREE)
2863 return false;
2865 combined_fn cfn = gimple_call_combined_fn (call);
2866 internal_fn ifn = IFN_LAST;
2867 int val = 0;
2868 switch (cfn)
2870 case CFN_BUILT_IN_BSWAP16:
2871 case CFN_BUILT_IN_BSWAP32:
2872 case CFN_BUILT_IN_BSWAP64:
2873 case CFN_BUILT_IN_BSWAP128:
2874 CASE_CFN_FFS:
2875 CASE_CFN_PARITY:
2876 CASE_CFN_POPCOUNT:
2877 break;
2878 CASE_CFN_CLZ:
2879 if (INTEGRAL_TYPE_P (TREE_TYPE (arg)))
2881 tree type = TREE_TYPE (arg);
2882 if (direct_internal_fn_supported_p (IFN_CLZ, type, OPTIMIZE_FOR_BOTH)
2883 && CLZ_DEFINED_VALUE_AT_ZERO (SCALAR_INT_TYPE_MODE (type),
2884 val) == 2)
2886 ifn = IFN_CLZ;
2887 break;
2890 return false;
2891 CASE_CFN_CTZ:
2892 if (INTEGRAL_TYPE_P (TREE_TYPE (arg)))
2894 tree type = TREE_TYPE (arg);
2895 if (direct_internal_fn_supported_p (IFN_CTZ, type, OPTIMIZE_FOR_BOTH)
2896 && CTZ_DEFINED_VALUE_AT_ZERO (SCALAR_INT_TYPE_MODE (type),
2897 val) == 2)
2899 ifn = IFN_CTZ;
2900 break;
2903 return false;
2904 case CFN_BUILT_IN_CLRSB:
2905 val = TYPE_PRECISION (integer_type_node) - 1;
2906 break;
2907 case CFN_BUILT_IN_CLRSBL:
2908 val = TYPE_PRECISION (long_integer_type_node) - 1;
2909 break;
2910 case CFN_BUILT_IN_CLRSBLL:
2911 val = TYPE_PRECISION (long_long_integer_type_node) - 1;
2912 break;
2913 default:
2914 return false;
2917 if (cast)
2919 /* We have a cast stmt feeding popcount/clz/ctz builtin. */
2920 /* Check that we have a cast prior to that. */
2921 if (gimple_code (cast) != GIMPLE_ASSIGN
2922 || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (cast)))
2923 return false;
2924 /* Result of the cast stmt is the argument to the builtin. */
2925 if (arg != gimple_assign_lhs (cast))
2926 return false;
2927 arg = gimple_assign_rhs1 (cast);
2930 gcond *cond = dyn_cast <gcond *> (*gsi_last_bb (cond_bb));
2932 /* Cond_bb has a check for b_4 [!=|==] 0 before calling the popcount/clz/ctz
2933 builtin. */
2934 if (!cond
2935 || (gimple_cond_code (cond) != NE_EXPR
2936 && gimple_cond_code (cond) != EQ_EXPR)
2937 || !integer_zerop (gimple_cond_rhs (cond))
2938 || arg != gimple_cond_lhs (cond))
2939 return false;
2941 /* Canonicalize. */
2942 if ((e2->flags & EDGE_TRUE_VALUE
2943 && gimple_cond_code (cond) == NE_EXPR)
2944 || (e1->flags & EDGE_TRUE_VALUE
2945 && gimple_cond_code (cond) == EQ_EXPR))
2947 std::swap (arg0, arg1);
2948 std::swap (e1, e2);
2951 /* Check PHI arguments. */
2952 if (lhs != arg0
2953 || TREE_CODE (arg1) != INTEGER_CST
2954 || wi::to_wide (arg1) != val)
2955 return false;
2957 /* And insert the popcount/clz/ctz builtin and cast stmt before the
2958 cond_bb. */
2959 gsi = gsi_last_bb (cond_bb);
2960 if (cast)
2962 gsi_from = gsi_for_stmt (cast);
2963 gsi_move_before (&gsi_from, &gsi);
2964 reset_flow_sensitive_info (gimple_get_lhs (cast));
2966 gsi_from = gsi_for_stmt (call);
2967 if (ifn == IFN_LAST || gimple_call_internal_p (call))
2968 gsi_move_before (&gsi_from, &gsi);
2969 else
2971 /* For __builtin_c[lt]z* force .C[LT]Z ifn, because only
2972 the latter is well defined at zero. */
2973 call = gimple_build_call_internal (ifn, 1, gimple_call_arg (call, 0));
2974 gimple_call_set_lhs (call, lhs);
2975 gsi_insert_before (&gsi, call, GSI_SAME_STMT);
2976 gsi_remove (&gsi_from, true);
2978 reset_flow_sensitive_info (lhs);
2980 /* Now update the PHI and remove unneeded bbs. */
2981 replace_phi_edge_with_variable (cond_bb, e2, phi, lhs);
2982 return true;
2985 /* Auxiliary functions to determine the set of memory accesses which
2986 can't trap because they are preceded by accesses to the same memory
2987 portion. We do that for MEM_REFs, so we only need to track
2988 the SSA_NAME of the pointer indirectly referenced. The algorithm
2989 is simply a walk over all instructions in dominator order. When
2990 we see a MEM_REF we determine whether we've already seen the same
2991 ref anywhere up to the root of the dominator tree. If we have, the
2992 current access can't trap. If we don't see any dominating access,
2993 the current access might trap, but it might also make later accesses
2994 non-trapping, so we remember it. We need to be careful with loads
2995 or stores, for instance a load might not trap, while a store would,
2996 so if we see a dominating read access this doesn't mean that a later
2997 write access would not trap. Hence we also need to differentiate the
2998 type of access(es) seen.
3000 ??? We currently are very conservative and assume that a load might
3001 trap even if a store doesn't (write-only memory). This probably is
3002 overly conservative.
3004 We currently support a special case: for !TREE_ADDRESSABLE automatic
3005 variables we can ignore whether something is a load or a store, because the
3006 local stack is always writable. */
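/* For instance (an illustrative example, not taken from this file), in

     *p = 0;
     if (flag)
       *p = 1;

   the conditional store cannot trap because the unconditional store to
   the same location dominates it; the first store itself is merely
   remembered, since no earlier access dominates it.  */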
3008 /* A hash-table of references (MEM_REF/ARRAY_REF/COMPONENT_REF), and in which
3009 basic block an *_REF through it was seen, which would constitute a
3010 no-trap region for same accesses.
3012 Size is needed to support 2 MEM_REFs of different types, like
3013 MEM<double>(s_1) and MEM<long>(s_1), which would compare equal with
3014 OEP_ADDRESS_OF. */
3015 struct ref_to_bb
3017 tree exp;
3018 HOST_WIDE_INT size;
3019 unsigned int phase;
3020 basic_block bb;
3023 /* Hashtable helpers. */
3025 struct refs_hasher : free_ptr_hash<ref_to_bb>
3027 static inline hashval_t hash (const ref_to_bb *);
3028 static inline bool equal (const ref_to_bb *, const ref_to_bb *);
3031 /* Used for quick clearing of the hash-table when we see calls.
3032 Hash entries with phase < nt_call_phase are invalid. */
3033 static unsigned int nt_call_phase;
3035 /* The hash function. */
3037 inline hashval_t
3038 refs_hasher::hash (const ref_to_bb *n)
3040 inchash::hash hstate;
3041 inchash::add_expr (n->exp, hstate, OEP_ADDRESS_OF);
3042 hstate.add_hwi (n->size);
3043 return hstate.end ();
3046 /* The equality function of *P1 and *P2. */
3048 inline bool
3049 refs_hasher::equal (const ref_to_bb *n1, const ref_to_bb *n2)
3051 return operand_equal_p (n1->exp, n2->exp, OEP_ADDRESS_OF)
3052 && n1->size == n2->size;
3055 class nontrapping_dom_walker : public dom_walker
3057 public:
3058 nontrapping_dom_walker (cdi_direction direction, hash_set<tree> *ps)
3059 : dom_walker (direction), m_nontrapping (ps), m_seen_refs (128)
3062 edge before_dom_children (basic_block) final override;
3063 void after_dom_children (basic_block) final override;
3065 private:
3067 /* We see the expression EXP in basic block BB. If it's an interesting
3068 expression (a MEM_REF through an SSA_NAME) possibly insert the
3069 expression into the set NONTRAP or the hash table of seen expressions.
3070 STORE is true if this expression is on the LHS, otherwise it's on
3071 the RHS. */
3072 void add_or_mark_expr (basic_block, tree, bool);
3074 hash_set<tree> *m_nontrapping;
3076 /* The hash table for remembering what we've seen. */
3077 hash_table<refs_hasher> m_seen_refs;
3080 /* Called by walk_dominator_tree, when entering the block BB. */
3081 edge
3082 nontrapping_dom_walker::before_dom_children (basic_block bb)
3084 edge e;
3085 edge_iterator ei;
3086 gimple_stmt_iterator gsi;
3088 /* If we haven't seen all our predecessors, clear the hash-table. */
3089 FOR_EACH_EDGE (e, ei, bb->preds)
3090 if ((((size_t)e->src->aux) & 2) == 0)
3092 nt_call_phase++;
3093 break;
3096 /* Mark this BB as being on the path to dominator root and as visited. */
3097 bb->aux = (void*)(1 | 2);
3099 /* And walk the statements in order. */
3100 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3102 gimple *stmt = gsi_stmt (gsi);
3104 if ((gimple_code (stmt) == GIMPLE_ASM && gimple_vdef (stmt))
3105 || (is_gimple_call (stmt)
3106 && (!nonfreeing_call_p (stmt) || !nonbarrier_call_p (stmt))))
3107 nt_call_phase++;
3108 else if (gimple_assign_single_p (stmt) && !gimple_has_volatile_ops (stmt))
3110 add_or_mark_expr (bb, gimple_assign_lhs (stmt), true);
3111 add_or_mark_expr (bb, gimple_assign_rhs1 (stmt), false);
3114 return NULL;
3117 /* Called by walk_dominator_tree, when basic block BB is exited. */
3118 void
3119 nontrapping_dom_walker::after_dom_children (basic_block bb)
3121 /* This BB isn't on the path to dominator root anymore. */
3122 bb->aux = (void*)2;
3125 /* We see the expression EXP in basic block BB. If it's an interesting
3126 expression of:
3127 1) MEM_REF
3128 2) ARRAY_REF
3129 3) COMPONENT_REF
3130 possibly insert the expression into the set NONTRAP or the hash table
3131 of seen expressions. STORE is true if this expression is on the LHS,
3132 otherwise it's on the RHS. */
3133 void
3134 nontrapping_dom_walker::add_or_mark_expr (basic_block bb, tree exp, bool store)
3136 HOST_WIDE_INT size;
3138 if ((TREE_CODE (exp) == MEM_REF || TREE_CODE (exp) == ARRAY_REF
3139 || TREE_CODE (exp) == COMPONENT_REF)
3140 && (size = int_size_in_bytes (TREE_TYPE (exp))) > 0)
3142 struct ref_to_bb map;
3143 ref_to_bb **slot;
3144 struct ref_to_bb *r2bb;
3145 basic_block found_bb = 0;
3147 if (!store)
3149 tree base = get_base_address (exp);
3150 /* Only record a LOAD of a local variable whose address is not taken, as
3151 the local stack is always writable. This allows cselim on a STORE
3152 with a dominating LOAD. */
3153 if (!auto_var_p (base) || TREE_ADDRESSABLE (base))
3154 return;
3157 /* Try to find the last seen *_REF, which can trap. */
3158 map.exp = exp;
3159 map.size = size;
3160 slot = m_seen_refs.find_slot (&map, INSERT);
3161 r2bb = *slot;
3162 if (r2bb && r2bb->phase >= nt_call_phase)
3163 found_bb = r2bb->bb;
3165 /* If we've found a trapping *_REF, _and_ it dominates EXP
3166 (it's in a basic block on the path from us to the dominator root)
3167 then we can't trap. */
3168 if (found_bb && (((size_t)found_bb->aux) & 1) == 1)
3170 m_nontrapping->add (exp);
3172 else
3174 /* EXP might trap, so insert it into the hash table. */
3175 if (r2bb)
3177 r2bb->phase = nt_call_phase;
3178 r2bb->bb = bb;
3180 else
3182 r2bb = XNEW (struct ref_to_bb);
3183 r2bb->phase = nt_call_phase;
3184 r2bb->bb = bb;
3185 r2bb->exp = exp;
3186 r2bb->size = size;
3187 *slot = r2bb;
3193 /* This is the entry point for gathering non-trapping memory accesses.
3194 It will do a dominator walk over the whole function, and it will
3195 make use of the bb->aux pointers. It returns a set of trees
3196 (the MEM_REFs themselves) which can't trap. */
3197 static hash_set<tree> *
3198 get_non_trapping (void)
3200 nt_call_phase = 0;
3201 hash_set<tree> *nontrap = new hash_set<tree>;
3203 nontrapping_dom_walker (CDI_DOMINATORS, nontrap)
3204 .walk (cfun->cfg->x_entry_block_ptr);
3206 clear_aux_for_blocks ();
3207 return nontrap;
3210 /* Do the main work of conditional store replacement. We already know
3211 that the recognized pattern looks like so:
3213 split:
3214 if (cond) goto MIDDLE_BB; else goto JOIN_BB (edge E1)
3215 MIDDLE_BB:
3216 something
3217 fallthrough (edge E0)
3218 JOIN_BB:
3219 some more
3221 We check that MIDDLE_BB contains only one store, that that store
3222 doesn't trap (not via NOTRAP, but via checking if an access to the same
3223 memory location dominates us, or the store is to a local addressable
3224 object) and that the store has a "simple" RHS. */
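/* As an illustrative example (not taken from this file), in

     void f (int *p, int cond, int v)
     {
       *p = 0;
       if (cond)
         *p = v;
     }

   the conditional store is known not to trap thanks to the dominating
   store, so it is sunk to JOIN_BB: a load of the old value is inserted
   on the edge that bypasses MIDDLE_BB and a PHI selects between that
   value and V before a single unconditional store.  */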
3226 static bool
3227 cond_store_replacement (basic_block middle_bb, basic_block join_bb,
3228 edge e0, edge e1, hash_set<tree> *nontrap)
3230 gimple *assign = last_and_only_stmt (middle_bb);
3231 tree lhs, rhs, name, name2;
3232 gphi *newphi;
3233 gassign *new_stmt;
3234 gimple_stmt_iterator gsi;
3235 location_t locus;
3237 /* Check if middle_bb contains only one store. */
3238 if (!assign
3239 || !gimple_assign_single_p (assign)
3240 || gimple_has_volatile_ops (assign))
3241 return false;
3243 /* And no PHI nodes so all uses in the single stmt are also
3244 available where we insert to. */
3245 if (!gimple_seq_empty_p (phi_nodes (middle_bb)))
3246 return false;
3248 locus = gimple_location (assign);
3249 lhs = gimple_assign_lhs (assign);
3250 rhs = gimple_assign_rhs1 (assign);
3251 if ((!REFERENCE_CLASS_P (lhs)
3252 && !DECL_P (lhs))
3253 || !is_gimple_reg_type (TREE_TYPE (lhs)))
3254 return false;
3256 /* Prove that we can move the store down. We could also check
3257 TREE_THIS_NOTRAP here, but in that case we could also move stores
3258 whose value is not readily available, which we want to avoid. */
3259 if (!nontrap->contains (lhs))
3261 /* If LHS is an access to a local variable whose address is not taken
3262 (or when we allow data races) and known not to trap, we can
3263 always safely move down the store. */
3264 tree base = get_base_address (lhs);
3265 if (!auto_var_p (base)
3266 || (TREE_ADDRESSABLE (base) && !flag_store_data_races)
3267 || tree_could_trap_p (lhs))
3268 return false;
3271 /* Now we've checked the constraints, so do the transformation:
3272 1) Remove the single store. */
3273 gsi = gsi_for_stmt (assign);
3274 unlink_stmt_vdef (assign);
3275 gsi_remove (&gsi, true);
3276 release_defs (assign);
3278 /* Make both store and load use alias-set zero as we have to
3279 deal with the case of the store being a conditional change
3280 of the dynamic type. */
3281 lhs = unshare_expr (lhs);
3282 tree *basep = &lhs;
3283 while (handled_component_p (*basep))
3284 basep = &TREE_OPERAND (*basep, 0);
3285 if (TREE_CODE (*basep) == MEM_REF
3286 || TREE_CODE (*basep) == TARGET_MEM_REF)
3287 TREE_OPERAND (*basep, 1)
3288 = fold_convert (ptr_type_node, TREE_OPERAND (*basep, 1));
3289 else
3290 *basep = build2 (MEM_REF, TREE_TYPE (*basep),
3291 build_fold_addr_expr (*basep),
3292 build_zero_cst (ptr_type_node));
3294 /* 2) Insert a load from the memory of the store to the temporary
3295 on the edge which did not contain the store. */
3296 name = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
3297 new_stmt = gimple_build_assign (name, lhs);
3298 gimple_set_location (new_stmt, locus);
3299 lhs = unshare_expr (lhs);
3301 /* Set the no-warning bit on the rhs of the load to avoid uninit
3302 warnings. */
3303 tree rhs1 = gimple_assign_rhs1 (new_stmt);
3304 suppress_warning (rhs1, OPT_Wuninitialized);
3306 gsi_insert_on_edge (e1, new_stmt);
3308 /* 3) Create a PHI node at the join block, with one argument
3309 holding the old RHS, and the other holding the temporary
3310 where we stored the old memory contents. */
3311 name2 = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
3312 newphi = create_phi_node (name2, join_bb);
3313 add_phi_arg (newphi, rhs, e0, locus);
3314 add_phi_arg (newphi, name, e1, locus);
3316 new_stmt = gimple_build_assign (lhs, PHI_RESULT (newphi));
3318 /* 4) Insert that PHI node. */
3319 gsi = gsi_after_labels (join_bb);
3320 if (gsi_end_p (gsi))
3322 gsi = gsi_last_bb (join_bb);
3323 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
3325 else
3326 gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
3328 if (dump_file && (dump_flags & TDF_DETAILS))
3330 fprintf (dump_file, "\nConditional store replacement happened!");
3331 fprintf (dump_file, "\nReplaced the store with a load.");
3332 fprintf (dump_file, "\nInserted a new PHI statement in join block:\n");
3333 print_gimple_stmt (dump_file, new_stmt, 0, TDF_VOPS|TDF_MEMSYMS);
3335 statistics_counter_event (cfun, "conditional store replacement", 1);
3337 return true;
3340 /* Do the main work of conditional store replacement. */
3342 static bool
3343 cond_if_else_store_replacement_1 (basic_block then_bb, basic_block else_bb,
3344 basic_block join_bb, gimple *then_assign,
3345 gimple *else_assign)
3347 tree lhs_base, lhs, then_rhs, else_rhs, name;
3348 location_t then_locus, else_locus;
3349 gimple_stmt_iterator gsi;
3350 gphi *newphi;
3351 gassign *new_stmt;
3353 if (then_assign == NULL
3354 || !gimple_assign_single_p (then_assign)
3355 || gimple_clobber_p (then_assign)
3356 || gimple_has_volatile_ops (then_assign)
3357 || else_assign == NULL
3358 || !gimple_assign_single_p (else_assign)
3359 || gimple_clobber_p (else_assign)
3360 || gimple_has_volatile_ops (else_assign))
3361 return false;
3363 lhs = gimple_assign_lhs (then_assign);
3364 if (!is_gimple_reg_type (TREE_TYPE (lhs))
3365 || !operand_equal_p (lhs, gimple_assign_lhs (else_assign), 0))
3366 return false;
3368 lhs_base = get_base_address (lhs);
3369 if (lhs_base == NULL_TREE
3370 || (!DECL_P (lhs_base) && TREE_CODE (lhs_base) != MEM_REF))
3371 return false;
3373 then_rhs = gimple_assign_rhs1 (then_assign);
3374 else_rhs = gimple_assign_rhs1 (else_assign);
3375 then_locus = gimple_location (then_assign);
3376 else_locus = gimple_location (else_assign);
3378 /* Now we've checked the constraints, so do the transformation:
3379 1) Remove the stores. */
3380 gsi = gsi_for_stmt (then_assign);
3381 unlink_stmt_vdef (then_assign);
3382 gsi_remove (&gsi, true);
3383 release_defs (then_assign);
3385 gsi = gsi_for_stmt (else_assign);
3386 unlink_stmt_vdef (else_assign);
3387 gsi_remove (&gsi, true);
3388 release_defs (else_assign);
3390 /* 2) Create a PHI node at the join block, with one argument
3391 holding the old RHS, and the other holding the temporary
3392 where we stored the old memory contents. */
3393 name = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
3394 newphi = create_phi_node (name, join_bb);
3395 add_phi_arg (newphi, then_rhs, EDGE_SUCC (then_bb, 0), then_locus);
3396 add_phi_arg (newphi, else_rhs, EDGE_SUCC (else_bb, 0), else_locus);
3398 new_stmt = gimple_build_assign (lhs, PHI_RESULT (newphi));
3400 /* 3) Insert that PHI node. */
3401 gsi = gsi_after_labels (join_bb);
3402 if (gsi_end_p (gsi))
3404 gsi = gsi_last_bb (join_bb);
3405 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
3407 else
3408 gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
3410 statistics_counter_event (cfun, "if-then-else store replacement", 1);
3412 return true;
3415 /* Return the single store in BB with VDEF or NULL if there are
3416 other stores in the BB or loads following the store. */
3418 static gimple *
3419 single_trailing_store_in_bb (basic_block bb, tree vdef)
3421 if (SSA_NAME_IS_DEFAULT_DEF (vdef))
3422 return NULL;
3423 gimple *store = SSA_NAME_DEF_STMT (vdef);
3424 if (gimple_bb (store) != bb
3425 || gimple_code (store) == GIMPLE_PHI)
3426 return NULL;
3428 /* Verify there is no other store in this BB. */
3429 if (!SSA_NAME_IS_DEFAULT_DEF (gimple_vuse (store))
3430 && gimple_bb (SSA_NAME_DEF_STMT (gimple_vuse (store))) == bb
3431 && gimple_code (SSA_NAME_DEF_STMT (gimple_vuse (store))) != GIMPLE_PHI)
3432 return NULL;
3434 /* Verify there is no load or store after the store. */
3435 use_operand_p use_p;
3436 imm_use_iterator imm_iter;
3437 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, gimple_vdef (store))
3438 if (USE_STMT (use_p) != store
3439 && gimple_bb (USE_STMT (use_p)) == bb)
3440 return NULL;
3442 return store;
3445 /* Conditional store replacement. We already know
3446 that the recognized pattern looks like so:
3448 split:
3449 if (cond) goto THEN_BB; else goto ELSE_BB (edge E1)
3450 THEN_BB:
3452 X = Y;
3454 goto JOIN_BB;
3455 ELSE_BB:
3457 X = Z;
3459 fallthrough (edge E0)
3460 JOIN_BB:
3461 some more
3463 We check that it is safe to sink the store to JOIN_BB by verifying that
3464 there are no read-after-write or write-after-write dependencies in
3465 THEN_BB and ELSE_BB. */
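/* As an illustrative example (not taken from this file), in

     void f (int *p, int cond, int y, int z)
     {
       if (cond)
         *p = y;
       else
         *p = z;
     }

   both branches store to the same location, so the two stores are
   replaced by a PHI of y and z in JOIN_BB followed by one unconditional
   store of the PHI result.  */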
3467 static bool
3468 cond_if_else_store_replacement (basic_block then_bb, basic_block else_bb,
3469 basic_block join_bb)
3471 vec<data_reference_p> then_datarefs, else_datarefs;
3472 vec<ddr_p> then_ddrs, else_ddrs;
3473 gimple *then_store, *else_store;
3474 bool found, ok = false, res;
3475 struct data_dependence_relation *ddr;
3476 data_reference_p then_dr, else_dr;
3477 int i, j;
3478 tree then_lhs, else_lhs;
3479 basic_block blocks[3];
3481 /* Handle the case with a single store in THEN_BB and ELSE_BB. That is
3482 cheap enough to always handle as it allows us to elide dependence
3483 checking. */
3484 gphi *vphi = NULL;
3485 for (gphi_iterator si = gsi_start_phis (join_bb); !gsi_end_p (si);
3486 gsi_next (&si))
3487 if (virtual_operand_p (gimple_phi_result (si.phi ())))
3489 vphi = si.phi ();
3490 break;
3492 if (!vphi)
3493 return false;
3494 tree then_vdef = PHI_ARG_DEF_FROM_EDGE (vphi, single_succ_edge (then_bb));
3495 tree else_vdef = PHI_ARG_DEF_FROM_EDGE (vphi, single_succ_edge (else_bb));
3496 gimple *then_assign = single_trailing_store_in_bb (then_bb, then_vdef);
3497 if (then_assign)
3499 gimple *else_assign = single_trailing_store_in_bb (else_bb, else_vdef);
3500 if (else_assign)
3501 return cond_if_else_store_replacement_1 (then_bb, else_bb, join_bb,
3502 then_assign, else_assign);
3505 /* If either vectorization or if-conversion is disabled then do
3506 not sink any stores. */
3507 if (param_max_stores_to_sink == 0
3508 || (!flag_tree_loop_vectorize && !flag_tree_slp_vectorize)
3509 || !flag_tree_loop_if_convert)
3510 return false;
3512 /* Find data references. */
3513 then_datarefs.create (1);
3514 else_datarefs.create (1);
3515 if ((find_data_references_in_bb (NULL, then_bb, &then_datarefs)
3516 == chrec_dont_know)
3517 || !then_datarefs.length ()
3518 || (find_data_references_in_bb (NULL, else_bb, &else_datarefs)
3519 == chrec_dont_know)
3520 || !else_datarefs.length ())
3522 free_data_refs (then_datarefs);
3523 free_data_refs (else_datarefs);
3524 return false;
3527 /* Find pairs of stores with equal LHS. */
3528 auto_vec<gimple *, 1> then_stores, else_stores;
3529 FOR_EACH_VEC_ELT (then_datarefs, i, then_dr)
3531 if (DR_IS_READ (then_dr))
3532 continue;
3534 then_store = DR_STMT (then_dr);
3535 then_lhs = gimple_get_lhs (then_store);
3536 if (then_lhs == NULL_TREE)
3537 continue;
3538 found = false;
3540 FOR_EACH_VEC_ELT (else_datarefs, j, else_dr)
3542 if (DR_IS_READ (else_dr))
3543 continue;
3545 else_store = DR_STMT (else_dr);
3546 else_lhs = gimple_get_lhs (else_store);
3547 if (else_lhs == NULL_TREE)
3548 continue;
3550 if (operand_equal_p (then_lhs, else_lhs, 0))
3552 found = true;
3553 break;
3557 if (!found)
3558 continue;
3560 then_stores.safe_push (then_store);
3561 else_stores.safe_push (else_store);
3564 /* No pairs of stores found. */
3565 if (!then_stores.length ()
3566 || then_stores.length () > (unsigned) param_max_stores_to_sink)
3568 free_data_refs (then_datarefs);
3569 free_data_refs (else_datarefs);
3570 return false;
3573 /* Compute and check data dependencies in both basic blocks. */
3574 then_ddrs.create (1);
3575 else_ddrs.create (1);
3576 if (!compute_all_dependences (then_datarefs, &then_ddrs,
3577 vNULL, false)
3578 || !compute_all_dependences (else_datarefs, &else_ddrs,
3579 vNULL, false))
3581 free_dependence_relations (then_ddrs);
3582 free_dependence_relations (else_ddrs);
3583 free_data_refs (then_datarefs);
3584 free_data_refs (else_datarefs);
3585 return false;
3587 blocks[0] = then_bb;
3588 blocks[1] = else_bb;
3589 blocks[2] = join_bb;
3590 renumber_gimple_stmt_uids_in_blocks (blocks, 3);
3592 /* Check that there are no read-after-write or write-after-write dependencies
3593 in THEN_BB. */
3594 FOR_EACH_VEC_ELT (then_ddrs, i, ddr)
3596 struct data_reference *dra = DDR_A (ddr);
3597 struct data_reference *drb = DDR_B (ddr);
3599 if (DDR_ARE_DEPENDENT (ddr) != chrec_known
3600 && ((DR_IS_READ (dra) && DR_IS_WRITE (drb)
3601 && gimple_uid (DR_STMT (dra)) > gimple_uid (DR_STMT (drb)))
3602 || (DR_IS_READ (drb) && DR_IS_WRITE (dra)
3603 && gimple_uid (DR_STMT (drb)) > gimple_uid (DR_STMT (dra)))
3604 || (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))))
3606 free_dependence_relations (then_ddrs);
3607 free_dependence_relations (else_ddrs);
3608 free_data_refs (then_datarefs);
3609 free_data_refs (else_datarefs);
3610 return false;
3614 /* Check that there are no read-after-write or write-after-write dependencies
3615 in ELSE_BB. */
3616 FOR_EACH_VEC_ELT (else_ddrs, i, ddr)
3618 struct data_reference *dra = DDR_A (ddr);
3619 struct data_reference *drb = DDR_B (ddr);
3621 if (DDR_ARE_DEPENDENT (ddr) != chrec_known
3622 && ((DR_IS_READ (dra) && DR_IS_WRITE (drb)
3623 && gimple_uid (DR_STMT (dra)) > gimple_uid (DR_STMT (drb)))
3624 || (DR_IS_READ (drb) && DR_IS_WRITE (dra)
3625 && gimple_uid (DR_STMT (drb)) > gimple_uid (DR_STMT (dra)))
3626 || (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))))
3628 free_dependence_relations (then_ddrs);
3629 free_dependence_relations (else_ddrs);
3630 free_data_refs (then_datarefs);
3631 free_data_refs (else_datarefs);
3632 return false;
3636 /* Sink stores with same LHS. */
3637 FOR_EACH_VEC_ELT (then_stores, i, then_store)
3639 else_store = else_stores[i];
3640 res = cond_if_else_store_replacement_1 (then_bb, else_bb, join_bb,
3641 then_store, else_store);
3642 ok = ok || res;
3645 free_dependence_relations (then_ddrs);
3646 free_dependence_relations (else_ddrs);
3647 free_data_refs (then_datarefs);
3648 free_data_refs (else_datarefs);
3650 return ok;
3653 /* Return TRUE if STMT has a VUSE whose corresponding VDEF is in BB. */
3655 static bool
3656 local_mem_dependence (gimple *stmt, basic_block bb)
3658 tree vuse = gimple_vuse (stmt);
3659 gimple *def;
3661 if (!vuse)
3662 return false;
3664 def = SSA_NAME_DEF_STMT (vuse);
3665 return (def && gimple_bb (def) == bb);
3668 /* Given a "diamond" control-flow pattern where BB0 tests a condition,
3669 BB1 and BB2 are "then" and "else" blocks dependent on this test,
3670 and BB3 rejoins control flow following BB1 and BB2, look for
3671 opportunities to hoist loads as follows. If BB3 contains a PHI of
3672 two loads, one each occurring in BB1 and BB2, and the loads are
3673 provably of adjacent fields in the same structure, then move both
3674 loads into BB0. Of course this can only be done if there are no
3675 dependencies preventing such motion.
3677 One of the hoisted loads will always be speculative, so the
3678 transformation is currently conservative:
3680 - The fields must be strictly adjacent.
3681 - The two fields must occupy a single memory block that is
3682 guaranteed to not cross a page boundary.
3684 The last is difficult to prove, as such memory blocks should be
3685 aligned on the minimum of the stack alignment boundary and the
3686 alignment guaranteed by heap allocation interfaces. Thus we rely
3687 on a parameter for the alignment value.
3689 Provided a good value is used for the last case, the first
3690 restriction could possibly be relaxed. */
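/* As an illustrative example (not taken from this file), in

     struct s { int a; int b; };

     int f (struct s *p, int cond)
     {
       return cond ? p->a : p->b;
     }

   a and b are adjacent fields, so both loads may be hoisted above the
   branch (one of them speculatively) and the selection done with a
   conditional move, provided both fields fit within one cache line.  */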
3692 static void
3693 hoist_adjacent_loads (basic_block bb0, basic_block bb1,
3694 basic_block bb2, basic_block bb3)
3696 int param_align = param_l1_cache_line_size;
3697 unsigned param_align_bits = (unsigned) (param_align * BITS_PER_UNIT);
3698 gphi_iterator gsi;
3700 /* Walk the phis in bb3 looking for an opportunity. We are looking
3701 for phis of two SSA names, one each of which is defined in bb1 and
3702 bb2. */
3703 for (gsi = gsi_start_phis (bb3); !gsi_end_p (gsi); gsi_next (&gsi))
3705 gphi *phi_stmt = gsi.phi ();
3706 gimple *def1, *def2;
3707 tree arg1, arg2, ref1, ref2, field1, field2;
3708 tree tree_offset1, tree_offset2, tree_size2, next;
3709 int offset1, offset2, size2;
3710 unsigned align1;
3711 gimple_stmt_iterator gsi2;
3712 basic_block bb_for_def1, bb_for_def2;
3714 if (gimple_phi_num_args (phi_stmt) != 2
3715 || virtual_operand_p (gimple_phi_result (phi_stmt)))
3716 continue;
3718 arg1 = gimple_phi_arg_def (phi_stmt, 0);
3719 arg2 = gimple_phi_arg_def (phi_stmt, 1);
3721 if (TREE_CODE (arg1) != SSA_NAME
3722 || TREE_CODE (arg2) != SSA_NAME
3723 || SSA_NAME_IS_DEFAULT_DEF (arg1)
3724 || SSA_NAME_IS_DEFAULT_DEF (arg2))
3725 continue;
3727 def1 = SSA_NAME_DEF_STMT (arg1);
3728 def2 = SSA_NAME_DEF_STMT (arg2);
3730 if ((gimple_bb (def1) != bb1 || gimple_bb (def2) != bb2)
3731 && (gimple_bb (def2) != bb1 || gimple_bb (def1) != bb2))
3732 continue;
3734 /* Check the mode of the arguments to be sure a conditional move
3735 can be generated for it. */
3736 if (optab_handler (movcc_optab, TYPE_MODE (TREE_TYPE (arg1)))
3737 == CODE_FOR_nothing)
3738 continue;
3740 /* Both statements must be assignments whose RHS is a COMPONENT_REF. */
3741 if (!gimple_assign_single_p (def1)
3742 || !gimple_assign_single_p (def2)
3743 || gimple_has_volatile_ops (def1)
3744 || gimple_has_volatile_ops (def2))
3745 continue;
3747 ref1 = gimple_assign_rhs1 (def1);
3748 ref2 = gimple_assign_rhs1 (def2);
3750 if (TREE_CODE (ref1) != COMPONENT_REF
3751 || TREE_CODE (ref2) != COMPONENT_REF)
3752 continue;
3754 /* The zeroth operand of the two component references must be
3755 identical. It is not sufficient to compare get_base_address of
3756 the two references, because this could allow for different
3757 elements of the same array in the two trees. It is not safe to
3758 assume that the existence of one array element implies the
3759 existence of a different one. */
3760 if (!operand_equal_p (TREE_OPERAND (ref1, 0), TREE_OPERAND (ref2, 0), 0))
3761 continue;
3763 field1 = TREE_OPERAND (ref1, 1);
3764 field2 = TREE_OPERAND (ref2, 1);
3766 /* Check for field adjacency, and ensure field1 comes first. */
3767 for (next = DECL_CHAIN (field1);
3768 next && TREE_CODE (next) != FIELD_DECL;
3769 next = DECL_CHAIN (next))
3772 if (next != field2)
3774 for (next = DECL_CHAIN (field2);
3775 next && TREE_CODE (next) != FIELD_DECL;
3776 next = DECL_CHAIN (next))
3779 if (next != field1)
3780 continue;
3782 std::swap (field1, field2);
3783 std::swap (def1, def2);
3786 bb_for_def1 = gimple_bb (def1);
3787 bb_for_def2 = gimple_bb (def2);
3789 /* Check for proper alignment of the first field. */
3790 tree_offset1 = bit_position (field1);
3791 tree_offset2 = bit_position (field2);
3792 tree_size2 = DECL_SIZE (field2);
3794 if (!tree_fits_uhwi_p (tree_offset1)
3795 || !tree_fits_uhwi_p (tree_offset2)
3796 || !tree_fits_uhwi_p (tree_size2))
3797 continue;
3799 offset1 = tree_to_uhwi (tree_offset1);
3800 offset2 = tree_to_uhwi (tree_offset2);
3801 size2 = tree_to_uhwi (tree_size2);
3802 align1 = DECL_ALIGN (field1) % param_align_bits;
3804 if (offset1 % BITS_PER_UNIT != 0)
3805 continue;
3807 /* For profitability, the two field references should fit within
3808 a single cache line. */
3809 if (align1 + offset2 - offset1 + size2 > param_align_bits)
3810 continue;
3812 /* The two expressions cannot be dependent upon vdefs defined
3813 in bb1/bb2. */
3814 if (local_mem_dependence (def1, bb_for_def1)
3815 || local_mem_dependence (def2, bb_for_def2))
3816 continue;
3818 /* The conditions are satisfied; hoist the loads from bb1 and bb2 into
3819 bb0. We hoist the first one first so that a cache miss is handled
3820 efficiently regardless of hardware cache-fill policy. */
3821 gsi2 = gsi_for_stmt (def1);
3822 gsi_move_to_bb_end (&gsi2, bb0);
3823 gsi2 = gsi_for_stmt (def2);
3824 gsi_move_to_bb_end (&gsi2, bb0);
3825 statistics_counter_event (cfun, "hoisted loads", 1);
3827 if (dump_file && (dump_flags & TDF_DETAILS))
3829 fprintf (dump_file,
3830 "\nHoisting adjacent loads from %d and %d into %d: \n",
3831 bb_for_def1->index, bb_for_def2->index, bb0->index);
3832 print_gimple_stmt (dump_file, def1, 0, TDF_VOPS|TDF_MEMSYMS);
3833 print_gimple_stmt (dump_file, def2, 0, TDF_VOPS|TDF_MEMSYMS);
3838 /* Determine whether we should attempt to hoist adjacent loads out of
3839 diamond patterns in pass_phiopt. Always hoist loads if
3840 -fhoist-adjacent-loads is specified and the target machine has
3841 both a conditional move instruction and a defined cache line size. */
3843 static bool
3844 gate_hoist_loads (void)
3846 return (flag_hoist_adjacent_loads == 1
3847 && param_l1_cache_line_size
3848 && HAVE_conditional_move);
3851 /* This pass tries to replace an if-then-else block with an
3852 assignment. We have different kinds of transformations.
3853 Some of these transformations are also performed by the ifcvt
3854 RTL optimizer.
3856 PHI-OPT using Match-and-simplify infrastructure
3857 -----------------------------------------------
3859 The PHI-OPT pass will try to use match-and-simplify infrastructure
3860 (gimple_simplify) to do transformations. This is implemented in
3861 match_simplify_replacement.
3863 The way it works is it replaces:
3864 bb0:
3865 if (cond) goto bb2; else goto bb1;
3866 bb1:
3867 bb2:
3868 x = PHI <a (bb1), b (bb0), ...>;
3870 with a statement if it gets simplified from `cond ? b : a`.
3872 bb0:
3873 x1 = cond ? b : a;
3874 bb2:
3875 x = PHI <a (bb1), x1 (bb0), ...>;
3876 bb1 might be removed as it becomes unreachable when doing the replacement;
3877 note that bb1 does not have to be just a forwarding basic block from bb0.
3879 The pass will also try to see if `(!cond) ? a : b` simplifies (but only if
3880 `!cond` itself simplifies); this avoids an explosion of patterns in match.pd.
3881 Note bb1 does not need to be completely empty, it can contain
3882 one statement which is known not to trap.
3884 It also can handle the case where we have two forwarding bbs (diamond):
3885 bb0:
3886 if (cond) goto bb2; else goto bb1;
3887 bb1: goto bb3;
3888 bb2: goto bb3;
3889 bb3:
3890 x = PHI <a (bb1), b (bb2), ...>;
3891 And that is replaced with a statement if it is simplified
3892 from `cond ? b : a`.
3893 Again bb1 and bb2 do not have to be completely empty, but
3894 each can contain one statement which is known not to trap.
3895 But in this case bb1/bb2 can only be forwarding basic blocks.
3897 This fully replaces the old "Conditional Replacement" and
3898 "ABS Replacement" transformations, as they are now
3899 implemented in match.pd.
3900 Some parts of the "MIN/MAX Replacement" are re-implemented in match.pd.
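
   As a hypothetical C-level sketch of the kind of code this path handles
   (variable names are illustrative only, not taken from the pass):

     if (a < 0)
       x = -a;
     else
       x = a;

   Once the PHI is rewritten as `a < 0 ? -a : a` and handed to
   gimple_simplify, match.pd can turn it into an ABS_EXPR (subject to the
   usual signed-overflow rules).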
3902 Value Replacement
3903 -----------------
3905 This transformation, implemented in value_replacement, replaces
3907 bb0:
3908 if (a != b) goto bb2; else goto bb1;
3909 bb1:
3910 bb2:
3911 x = PHI <a (bb1), b (bb0), ...>;
3913 with
3915 bb0:
3916 bb2:
3917 x = PHI <b (bb0), ...>;
3919 This opportunity can sometimes occur as a result of other
3920 optimizations.
3923 Another case caught by value replacement looks like this:
3925 bb0:
3926 t1 = a == CONST;
3927 t2 = b > c;
3928 t3 = t1 & t2;
3929 if (t3 != 0) goto bb1; else goto bb2;
3930 bb1:
3931 bb2:
3932 x = PHI <CONST, a>
3934 Gets replaced with:
3935 bb0:
3936 bb2:
3937 t1 = a == CONST;
3938 t2 = b > c;
3939 t3 = t1 & t2;
3940 x = a;
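
   A hypothetical C-level sketch of the basic case (names are
   illustrative only):

     if (a != b)
       x = b;
     else
       x = a;

   On the else path a == b, so x is b on both paths and
   value_replacement can use b directly.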
3942 MIN/MAX Replacement
3943 -------------------
3945 This transformation, implemented in minmax_replacement, replaces
3947 bb0:
3948 if (a <= b) goto bb2; else goto bb1;
3949 bb1:
3950 bb2:
3951 x = PHI <b (bb1), a (bb0), ...>;
3953 with
3955 bb0:
3956 x' = MIN_EXPR (a, b)
3957 bb2:
3958 x = PHI <x' (bb0), ...>;
3960 A similar transformation is done for MAX_EXPR.
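
   A hypothetical C-level sketch (names are illustrative only):

     x = (a <= b) ? a : b;   which becomes MIN_EXPR (a, b)
     y = (a >= b) ? a : b;   which becomes MAX_EXPR (a, b)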
3963 This pass also performs another transformation of a slightly different
3964 flavor.
3966 Factor operations in COND_EXPR
3967 ------------------------------
3969 This transformation factors the unary operations out of COND_EXPR with
3970 factor_out_conditional_operation.
3972 For example:
3973 if (a <= CST) goto <bb 3>; else goto <bb 4>;
3974 <bb 3>:
3975 tmp = (int) a;
3976 <bb 4>:
3977 tmp = PHI <tmp, CST>
3979 Into:
3980 if (a <= CST) goto <bb 3>; else goto <bb 4>;
3981 <bb 3>:
3982 <bb 4>:
3983 a = PHI <a, CST>
3984 tmp = (int) a;
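
   A hypothetical C-level sketch (names are illustrative only):

     tmp = (a <= CST) ? (int) a : CST;

   Factoring the conversion out of the PHI means it is applied once to the
   PHI result rather than on one arm, which can then expose further
   simplifications such as the MIN/MAX replacement above.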
3986 Adjacent Load Hoisting
3987 ----------------------
3989 This transformation replaces
3991 bb0:
3992 if (...) goto bb2; else goto bb1;
3993 bb1:
3994 x1 = (<expr>).field1;
3995 goto bb3;
3996 bb2:
3997 x2 = (<expr>).field2;
3998 bb3:
3999 # x = PHI <x1, x2>;
4001 with
4003 bb0:
4004 x1 = (<expr>).field1;
4005 x2 = (<expr>).field2;
4006 if (...) goto bb2; else goto bb1;
4007 bb1:
4008 goto bb3;
4009 bb2:
4010 bb3:
4011 # x = PHI <x1, x2>;
4013 The purpose of this transformation is to enable generation of conditional
4014 move instructions such as Intel CMOVE or PowerPC ISEL. Because one of
4015 the loads is speculative, the transformation is restricted to very
4016 specific cases to avoid introducing a page fault. We are looking for
4017 the common idiom:
4019 if (...)
4020 x = y->left;
4021 else
4022 x = y->right;
4024 where left and right are typically adjacent pointers in a tree structure. */
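
/* A hypothetical C-level sketch of the adjacent-load case above (names
   are illustrative only): after hoisting, both fields are loaded
   unconditionally and only the select remains conditional, e.g.

     tl = y->left;
     tr = y->right;
     x = cond ? tl : tr;

   which the backend can implement with a conditional move.  */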
4026 namespace {
4028 const pass_data pass_data_phiopt =
4030 GIMPLE_PASS, /* type */
4031 "phiopt", /* name */
4032 OPTGROUP_NONE, /* optinfo_flags */
4033 TV_TREE_PHIOPT, /* tv_id */
4034 ( PROP_cfg | PROP_ssa ), /* properties_required */
4035 0, /* properties_provided */
4036 0, /* properties_destroyed */
4037 0, /* todo_flags_start */
4038 0, /* todo_flags_finish */
4041 class pass_phiopt : public gimple_opt_pass
4043 public:
4044 pass_phiopt (gcc::context *ctxt)
4045 : gimple_opt_pass (pass_data_phiopt, ctxt), early_p (false)
4048 /* opt_pass methods: */
4049 opt_pass * clone () final override { return new pass_phiopt (m_ctxt); }
4050 void set_pass_param (unsigned n, bool param) final override
4052 gcc_assert (n == 0);
4053 early_p = param;
4055 bool gate (function *) final override { return flag_ssa_phiopt; }
4056 unsigned int execute (function *) final override;
4058 private:
4059 bool early_p;
4060 }; // class pass_phiopt
4062 } // anon namespace
4064 gimple_opt_pass *
4065 make_pass_phiopt (gcc::context *ctxt)
4067 return new pass_phiopt (ctxt);
4070 unsigned int
4071 pass_phiopt::execute (function *)
4073 bool do_hoist_loads = !early_p ? gate_hoist_loads () : false;
4074 basic_block bb;
4075 basic_block *bb_order;
4076 unsigned n, i;
4077 bool cfgchanged = false;
4079 calculate_dominance_info (CDI_DOMINATORS);
4080 mark_ssa_maybe_undefs ();
4082 /* Search every basic block for a COND_EXPR we may be able to optimize.
4084 We walk the blocks in an order that guarantees that a block with
4085 a single predecessor is processed before its predecessor.
4086 This ensures that we collapse inner ifs before visiting the
4087 outer ones, and also that we do not try to visit a removed
4088 block. */
4089 bb_order = single_pred_before_succ_order ();
4090 n = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS;
4092 for (i = 0; i < n; i++)
4094 gphi *phi;
4095 basic_block bb1, bb2;
4096 edge e1, e2;
4097 tree arg0, arg1;
4098 bool diamond_p = false;
4100 bb = bb_order[i];
4102 /* Check to see if the last statement is a GIMPLE_COND. */
4103 gcond *cond_stmt = safe_dyn_cast <gcond *> (*gsi_last_bb (bb));
4104 if (!cond_stmt)
4105 continue;
4107 e1 = EDGE_SUCC (bb, 0);
4108 bb1 = e1->dest;
4109 e2 = EDGE_SUCC (bb, 1);
4110 bb2 = e2->dest;
4112 /* We cannot do the optimization on abnormal edges. */
4113 if ((e1->flags & EDGE_ABNORMAL) != 0
4114 || (e2->flags & EDGE_ABNORMAL) != 0)
4115 continue;
4117 /* If either bb1 or bb2 has no successors, give up. */
4118 if (EDGE_COUNT (bb1->succs) == 0
4119 || EDGE_COUNT (bb2->succs) == 0)
4120 continue;
4122 /* Find the bb which is the fall through to the other. */
4123 if (EDGE_SUCC (bb1, 0)->dest == bb2)
4125 else if (EDGE_SUCC (bb2, 0)->dest == bb1)
4127 std::swap (bb1, bb2);
4128 std::swap (e1, e2);
4130 else if (EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest
4131 && single_succ_p (bb2))
4133 diamond_p = true;
4134 e2 = EDGE_SUCC (bb2, 0);
4135 /* Make sure bb2 is just a fall through. */
4136 if ((e2->flags & EDGE_FALLTHRU) == 0)
4137 continue;
4139 else
4140 continue;
4142 e1 = EDGE_SUCC (bb1, 0);
4144 /* Make sure that bb1 is just a fall through. */
4145 if (!single_succ_p (bb1)
4146 || (e1->flags & EDGE_FALLTHRU) == 0)
4147 continue;
4149 if (diamond_p)
4151 basic_block bb3 = e1->dest;
4153 if (!single_pred_p (bb1)
4154 || !single_pred_p (bb2))
4155 continue;
4157 if (do_hoist_loads
4158 && !FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (cond_stmt)))
4159 && EDGE_COUNT (bb->succs) == 2
4160 && EDGE_COUNT (bb3->preds) == 2
4161 /* If one edge or the other is dominant, a conditional move
4162 is likely to perform worse than the well-predicted branch. */
4163 && !predictable_edge_p (EDGE_SUCC (bb, 0))
4164 && !predictable_edge_p (EDGE_SUCC (bb, 1)))
4165 hoist_adjacent_loads (bb, bb1, bb2, bb3);
4168 gimple_stmt_iterator gsi;
4169 bool candorest = true;
4171 /* Find the merge block with the PHIs we are interested in. */
4172 basic_block merge = diamond_p ? EDGE_SUCC (bb2, 0)->dest : bb2;
4173 gimple_seq phis = phi_nodes (merge);
4175 /* Value replacement can work with more than one PHI
4176 so try that first. */
4177 if (!early_p && !diamond_p)
4178 for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
4180 phi = as_a <gphi *> (gsi_stmt (gsi));
4181 arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
4182 arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
4183 if (value_replacement (bb, bb1, e1, e2, phi, arg0, arg1) == 2)
4185 candorest = false;
4186 cfgchanged = true;
4187 break;
4191 if (!candorest)
4192 continue;
4194 phi = single_non_singleton_phi_for_edges (phis, e1, e2);
4195 if (!phi)
4196 continue;
4198 arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
4199 arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
4201 /* Something is wrong if we cannot find the arguments in the PHI
4202 node. */
4203 gcc_assert (arg0 != NULL_TREE && arg1 != NULL_TREE);
4205 if (single_pred_p (bb1)
4206 && EDGE_COUNT (merge->preds) == 2)
4208 gphi *newphi = phi;
4209 while (newphi)
4211 phi = newphi;
4212 /* factor_out_conditional_operation may create a new PHI in
4213 BB2 and eliminate an existing PHI in BB2. Recompute values
4214 that may be affected by that change. */
4215 arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
4216 arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
4217 gcc_assert (arg0 != NULL_TREE && arg1 != NULL_TREE);
4218 newphi = factor_out_conditional_operation (e1, e2, phi,
4219 arg0, arg1,
4220 cond_stmt);
4224 /* Do the replacement of conditional if it can be done. */
4225 if (match_simplify_replacement (bb, bb1, bb2, e1, e2, phi,
4226 arg0, arg1, early_p, diamond_p))
4227 cfgchanged = true;
4228 else if (!early_p
4229 && !diamond_p
4230 && single_pred_p (bb1)
4231 && cond_removal_in_builtin_zero_pattern (bb, bb1, e1, e2,
4232 phi, arg0, arg1))
4233 cfgchanged = true;
4234 else if (minmax_replacement (bb, bb1, bb2, e1, e2, phi, arg0, arg1,
4235 diamond_p))
4236 cfgchanged = true;
4237 else if (single_pred_p (bb1)
4238 && !diamond_p
4239 && spaceship_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
4240 cfgchanged = true;
4243 free (bb_order);
4245 if (cfgchanged)
4246 return TODO_cleanup_cfg;
4247 return 0;
4250 /* This pass tries to transform conditional stores into unconditional
4251 ones, enabling further simplifications with the simpler then and else
4252 blocks. In particular it replaces this:
4254 bb0:
4255 if (cond) goto bb2; else goto bb1;
4256 bb1:
4257 *p = RHS;
4258 bb2:
4260 with
4262 bb0:
4263 if (cond) goto bb1; else goto bb2;
4264 bb1:
4265 condtmp' = *p;
4266 bb2:
4267 condtmp = PHI <RHS, condtmp'>
4268 *p = condtmp;
4270 This transformation can only be done under several constraints,
4271 documented below. It also replaces:
4273 bb0:
4274 if (cond) goto bb2; else goto bb1;
4275 bb1:
4276 *p = RHS1;
4277 goto bb3;
4278 bb2:
4279 *p = RHS2;
4280 bb3:
4282 with
4284 bb0:
4285 if (cond) goto bb3; else goto bb1;
4286 bb1:
4287 bb3:
4288 condtmp = PHI <RHS1, RHS2>
4289 *p = condtmp; */
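
/* A hypothetical C-level sketch of the single-store case above (names
   are illustrative only); *p must be known not to trap, e.g. because it
   is in the set computed by get_non_trapping:

     if (cond)
       *p = v;

   becomes, roughly:

     tmp = cond ? v : *p;
     *p = tmp;

   The store is now unconditional, which leaves the then/else blocks
   simpler for later passes.  */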
4291 namespace {
4293 const pass_data pass_data_cselim =
4295 GIMPLE_PASS, /* type */
4296 "cselim", /* name */
4297 OPTGROUP_NONE, /* optinfo_flags */
4298 TV_TREE_PHIOPT, /* tv_id */
4299 ( PROP_cfg | PROP_ssa ), /* properties_required */
4300 0, /* properties_provided */
4301 0, /* properties_destroyed */
4302 0, /* todo_flags_start */
4303 0, /* todo_flags_finish */
4306 class pass_cselim : public gimple_opt_pass
4308 public:
4309 pass_cselim (gcc::context *ctxt)
4310 : gimple_opt_pass (pass_data_cselim, ctxt)
4313 /* opt_pass methods: */
4314 bool gate (function *) final override { return flag_tree_cselim; }
4315 unsigned int execute (function *) final override;
4317 }; // class pass_cselim
4319 } // anon namespace
4321 gimple_opt_pass *
4322 make_pass_cselim (gcc::context *ctxt)
4324 return new pass_cselim (ctxt);
4327 unsigned int
4328 pass_cselim::execute (function *)
4330 basic_block bb;
4331 basic_block *bb_order;
4332 unsigned n, i;
4333 bool cfgchanged = false;
4334 hash_set<tree> *nontrap = 0;
4335 unsigned todo = 0;
4337 /* ??? We are not interested in loop-related info, but the following
4338 will create it; without initializing loops with pre-headers it would
4339 ICE. An interfacing issue of find_data_references_in_bb. */
4340 loop_optimizer_init (LOOPS_NORMAL);
4341 scev_initialize ();
4343 calculate_dominance_info (CDI_DOMINATORS);
4345 /* Calculate the set of non-trapping memory accesses. */
4346 nontrap = get_non_trapping ();
4348 /* Search every basic block for a COND_EXPR we may be able to optimize.
4350 We walk the blocks in an order that guarantees that a block with
4351 a single predecessor is processed before its predecessor.
4352 This ensures that we collapse inner ifs before visiting the
4353 outer ones, and also that we do not try to visit a removed
4354 block. */
4355 bb_order = single_pred_before_succ_order ();
4356 n = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS;
4358 for (i = 0; i < n; i++)
4360 basic_block bb1, bb2;
4361 edge e1, e2;
4362 bool diamond_p = false;
4364 bb = bb_order[i];
4366 /* Check to see if the last statement is a GIMPLE_COND. */
4367 gcond *cond_stmt = safe_dyn_cast <gcond *> (*gsi_last_bb (bb));
4368 if (!cond_stmt)
4369 continue;
4371 e1 = EDGE_SUCC (bb, 0);
4372 bb1 = e1->dest;
4373 e2 = EDGE_SUCC (bb, 1);
4374 bb2 = e2->dest;
4376 /* We cannot do the optimization on abnormal edges. */
4377 if ((e1->flags & EDGE_ABNORMAL) != 0
4378 || (e2->flags & EDGE_ABNORMAL) != 0)
4379 continue;
4381 /* If either bb1 or bb2 has no successors, give up. */
4382 if (EDGE_COUNT (bb1->succs) == 0
4383 || EDGE_COUNT (bb2->succs) == 0)
4384 continue;
4386 /* Find the bb which is the fall through to the other. */
4387 if (EDGE_SUCC (bb1, 0)->dest == bb2)
4389 else if (EDGE_SUCC (bb2, 0)->dest == bb1)
4391 std::swap (bb1, bb2);
4392 std::swap (e1, e2);
4394 else if (EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest
4395 && single_succ_p (bb2))
4397 diamond_p = true;
4398 e2 = EDGE_SUCC (bb2, 0);
4399 /* Make sure bb2 is just a fall through. */
4400 if ((e2->flags & EDGE_FALLTHRU) == 0)
4401 continue;
4403 else
4404 continue;
4406 e1 = EDGE_SUCC (bb1, 0);
4408 /* Make sure that bb1 is just a fall through. */
4409 if (!single_succ_p (bb1)
4410 || (e1->flags & EDGE_FALLTHRU) == 0)
4411 continue;
4413 if (diamond_p)
4415 basic_block bb3 = e1->dest;
4417 /* Only handle sinking of stores from exactly two bbs.
4418 The middle bbs do not always need to be reached
4419 from the if, since we are sinking rather than
4420 hoisting. */
4421 if (EDGE_COUNT (bb3->preds) != 2)
4422 continue;
4423 if (cond_if_else_store_replacement (bb1, bb2, bb3))
4424 cfgchanged = true;
4425 continue;
4428 /* Also make sure that bb1 has only one predecessor and that it
4429 is bb. */
4430 if (!single_pred_p (bb1)
4431 || single_pred (bb1) != bb)
4432 continue;
4434 /* bb1 is the middle block, bb2 the join block, bb the split block,
4435 e1 the fallthrough edge from bb1 to bb2. We can't do the
4436 optimization if the join block has more than two predecessors. */
4437 if (EDGE_COUNT (bb2->preds) > 2)
4438 continue;
4439 if (cond_store_replacement (bb1, bb2, e1, e2, nontrap))
4440 cfgchanged = true;
4443 free (bb_order);
4445 delete nontrap;
4446 /* If the CFG has changed, we should cleanup the CFG. */
4447 if (cfgchanged)
4449 /* In cond-store replacement we have added some loads on edges
4450 and new VOPS (as we moved the store, and created a load). */
4451 gsi_commit_edge_inserts ();
4452 todo = TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
4454 scev_finalize ();
4455 loop_optimizer_finalize ();
4456 return todo;