/* Optimization of PHI nodes by converting them into straightline code.
   Copyright (C) 2004-2024 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "insn-codes.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "tree-pass.h"
#include "ssa.h"
#include "tree-ssa.h"
#include "optabs-tree.h"
#include "insn-config.h"
#include "gimple-pretty-print.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "cfganal.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "tree-cfg.h"
#include "tree-dfa.h"
#include "domwalk.h"
#include "cfgloop.h"
#include "tree-data-ref.h"
#include "tree-scalar-evolution.h"
#include "tree-inline.h"
#include "case-cfn-macros.h"
#include "tree-eh.h"
#include "gimple-fold.h"
#include "internal-fn.h"
#include "gimple-range.h"
#include "gimple-match.h"
#include "dbgcnt.h"
#include "tree-ssa-propagate.h"
#include "tree-ssa-dce.h"
/* Return the singleton PHI in the SEQ of PHIs for edges E0 and E1.  */

static gphi *
single_non_singleton_phi_for_edges (gimple_seq seq, edge e0, edge e1)
{
  gimple_stmt_iterator i;
  gphi *phi = NULL;
  for (i = gsi_start (seq); !gsi_end_p (i); gsi_next (&i))
    {
      gphi *p = as_a <gphi *> (gsi_stmt (i));
      /* If the PHI arguments are equal then we can skip this PHI.  */
      if (operand_equal_for_phi_arg_p (gimple_phi_arg_def (p, e0->dest_idx),
				       gimple_phi_arg_def (p, e1->dest_idx)))
	continue;

      /* Punt on virtual phis with different arguments from the edges.  */
      if (virtual_operand_p (gimple_phi_result (p)))
	return NULL;

      /* If we have already found a PHI whose two edge arguments differ,
	 then this is not a singleton; return NULL.  */
      if (phi)
	return NULL;

      phi = p;
    }
  return phi;
}

/* Replace PHI node element whose edge is E in block BB with variable NEW.
   Remove the edge from COND_BLOCK which does not lead to BB (COND_BLOCK
   is known to have two edges, one of which must reach BB).  */

static void
replace_phi_edge_with_variable (basic_block cond_block,
				edge e, gphi *phi, tree new_tree,
				bitmap dce_ssa_names = nullptr)
{
  basic_block bb = gimple_bb (phi);
  gimple_stmt_iterator gsi;
  tree phi_result = PHI_RESULT (phi);
  bool deleteboth = false;

  /* Duplicate range info if it is the only thing setting the target PHI.
     This is needed because later on new_tree will replace the assignment
     of the PHI.
     For example:
       bb1:
	 _4 = min<a_1, 255>
	 goto bb2
       bb2:
	 # RANGE [-INF, 255]
	 a_3 = PHI<_4(1)>
       bb3:
	 use(a_3)
     Then _4 gets propagated into the use of a_3, losing the range info.
     This can't be done for more than 2 incoming edges as the propagation
     won't happen.
     The new_tree needs to be defined in the same basic block as the
     conditional.  */
  if (TREE_CODE (new_tree) == SSA_NAME
      && EDGE_COUNT (gimple_bb (phi)->preds) == 2
      && INTEGRAL_TYPE_P (TREE_TYPE (phi_result))
      && !SSA_NAME_RANGE_INFO (new_tree)
      && SSA_NAME_RANGE_INFO (phi_result)
      && gimple_bb (SSA_NAME_DEF_STMT (new_tree)) == cond_block
      && dbg_cnt (phiopt_edge_range))
    duplicate_ssa_name_range_info (new_tree, phi_result);

  /* Change the PHI argument to new.  */
  SET_USE (PHI_ARG_DEF_PTR (phi, e->dest_idx), new_tree);

  /* Remove the empty basic block.  */
  edge edge_to_remove = NULL, keep_edge = NULL;
  if (EDGE_SUCC (cond_block, 0)->dest == bb)
    {
      edge_to_remove = EDGE_SUCC (cond_block, 1);
      keep_edge = EDGE_SUCC (cond_block, 0);
    }
  else if (EDGE_SUCC (cond_block, 1)->dest == bb)
    {
      edge_to_remove = EDGE_SUCC (cond_block, 0);
      keep_edge = EDGE_SUCC (cond_block, 1);
    }
  else if ((keep_edge = find_edge (cond_block, e->src)))
    {
      basic_block bb1 = EDGE_SUCC (cond_block, 0)->dest;
      basic_block bb2 = EDGE_SUCC (cond_block, 1)->dest;
      if (single_pred_p (bb1) && single_pred_p (bb2)
	  && single_succ_p (bb1) && single_succ_p (bb2)
	  && empty_block_p (bb1) && empty_block_p (bb2))
	deleteboth = true;
    }
  else
    gcc_unreachable ();

  if (edge_to_remove && EDGE_COUNT (edge_to_remove->dest->preds) == 1)
    {
      e->flags |= EDGE_FALLTHRU;
      e->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
      e->probability = profile_probability::always ();
      delete_basic_block (edge_to_remove->dest);

      /* Eliminate the COND_EXPR at the end of COND_BLOCK.  */
      gsi = gsi_last_bb (cond_block);
      gsi_remove (&gsi, true);
    }
  else if (deleteboth)
    {
      basic_block bb1 = EDGE_SUCC (cond_block, 0)->dest;
      basic_block bb2 = EDGE_SUCC (cond_block, 1)->dest;

      edge newedge = redirect_edge_and_branch (keep_edge, bb);

      /* The new edge should be the same.  */
      gcc_assert (newedge == keep_edge);

      keep_edge->flags |= EDGE_FALLTHRU;
      keep_edge->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
      keep_edge->probability = profile_probability::always ();

      /* Copy the edge's phi entry from the old one.  */
      copy_phi_arg_into_existing_phi (e, keep_edge);

      /* Delete the two old empty basic blocks.  */
      delete_basic_block (bb1);
      delete_basic_block (bb2);

      /* Eliminate the COND_EXPR at the end of COND_BLOCK.  */
      gsi = gsi_last_bb (cond_block);
      gsi_remove (&gsi, true);
    }
  else
    {
      /* If there are other edges into the middle block make
	 CFG cleanup deal with the edge removal to avoid
	 updating dominators here in a non-trivial way.  */
      gcond *cond = as_a <gcond *> (*gsi_last_bb (cond_block));
      if (keep_edge->flags & EDGE_FALSE_VALUE)
	gimple_cond_make_false (cond);
      else if (keep_edge->flags & EDGE_TRUE_VALUE)
	gimple_cond_make_true (cond);
    }

  if (dce_ssa_names)
    simple_dce_from_worklist (dce_ssa_names);

  statistics_counter_event (cfun, "Replace PHI with variable", 1);

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file,
	     "COND_EXPR in block %d and PHI in block %d converted to straightline code.\n",
	     cond_block->index,
	     bb->index);
}

/* PR66726: Factor operations out of COND_EXPR.  If the arguments of the PHI
   stmt are CONVERT_STMT, factor out the conversion and perform the conversion
   to the result of PHI stmt.  COND_STMT is the controlling predicate.
   Return the newly-created PHI, if any.  */
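
/* Illustrative sketch (not from the original source): given

     <bb 3>: _4 = (int) a_1;
     <bb 4>: _5 = (int) b_2;
     <bb 5>: # t_6 = PHI <_4(3), _5(4)>

   the conversions are factored out of the PHI arguments into a single
   statement applied to a new PHI of the unconverted values:

     <bb 5>: # _7 = PHI <a_1(3), b_2(4)>
	     t_6 = (int) _7;  */
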
static gphi *
factor_out_conditional_operation (edge e0, edge e1, gphi *phi,
				  tree arg0, tree arg1, gimple *cond_stmt)
{
  gimple *arg0_def_stmt = NULL, *arg1_def_stmt = NULL, *new_stmt;
  tree new_arg0 = NULL_TREE, new_arg1 = NULL_TREE;
  tree temp, result;
  gphi *newphi;
  gimple_stmt_iterator gsi, gsi_for_def;
  location_t locus = gimple_location (phi);
  enum tree_code op_code;

  /* Handle only PHI statements with two arguments.  TODO: If all
     other arguments to PHI are INTEGER_CST or if their defining
     statement have the same unary operation, we can handle more
     than two arguments too.  */
  if (gimple_phi_num_args (phi) != 2)
    return NULL;

  /* First canonicalize to simplify tests.  */
  if (TREE_CODE (arg0) != SSA_NAME)
    {
      std::swap (arg0, arg1);
      std::swap (e0, e1);
    }

  if (TREE_CODE (arg0) != SSA_NAME
      || (TREE_CODE (arg1) != SSA_NAME
	  && TREE_CODE (arg1) != INTEGER_CST))
    return NULL;

  /* Check if arg0 is an SSA_NAME and the stmt which defines arg0 is
     a unary operation.  */
  arg0_def_stmt = SSA_NAME_DEF_STMT (arg0);
  if (!is_gimple_assign (arg0_def_stmt)
      || (gimple_assign_rhs_class (arg0_def_stmt) != GIMPLE_UNARY_RHS
	  && gimple_assign_rhs_code (arg0_def_stmt) != VIEW_CONVERT_EXPR))
    return NULL;

  /* Use the RHS as new_arg0.  */
  op_code = gimple_assign_rhs_code (arg0_def_stmt);
  new_arg0 = gimple_assign_rhs1 (arg0_def_stmt);
  if (op_code == VIEW_CONVERT_EXPR)
    {
      new_arg0 = TREE_OPERAND (new_arg0, 0);
      if (!is_gimple_reg_type (TREE_TYPE (new_arg0)))
	return NULL;
    }
  if (TREE_CODE (new_arg0) == SSA_NAME
      && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_arg0))
    return NULL;

  if (TREE_CODE (arg1) == SSA_NAME)
    {
      /* Check if arg1 is an SSA_NAME and the stmt which defines arg1
	 is a unary operation.  */
      arg1_def_stmt = SSA_NAME_DEF_STMT (arg1);
      if (!is_gimple_assign (arg1_def_stmt)
	  || gimple_assign_rhs_code (arg1_def_stmt) != op_code)
	return NULL;

      /* Either arg1_def_stmt or arg0_def_stmt should be conditional.  */
      if (dominated_by_p (CDI_DOMINATORS, gimple_bb (phi), gimple_bb (arg0_def_stmt))
	  && dominated_by_p (CDI_DOMINATORS,
			     gimple_bb (phi), gimple_bb (arg1_def_stmt)))
	return NULL;

      /* Use the RHS as new_arg1.  */
      new_arg1 = gimple_assign_rhs1 (arg1_def_stmt);
      if (op_code == VIEW_CONVERT_EXPR)
	new_arg1 = TREE_OPERAND (new_arg1, 0);
      if (TREE_CODE (new_arg1) == SSA_NAME
	  && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_arg1))
	return NULL;
    }
  else
    {
      /* TODO: handle more than just casts here.  */
      if (!gimple_assign_cast_p (arg0_def_stmt))
	return NULL;

      /* arg0_def_stmt should be conditional.  */
      if (dominated_by_p (CDI_DOMINATORS, gimple_bb (phi), gimple_bb (arg0_def_stmt)))
	return NULL;
      /* If arg1 is an INTEGER_CST, fold it to new type.  */
      if (INTEGRAL_TYPE_P (TREE_TYPE (new_arg0))
	  && (int_fits_type_p (arg1, TREE_TYPE (new_arg0))
	      || (TYPE_PRECISION (TREE_TYPE (new_arg0))
		  == TYPE_PRECISION (TREE_TYPE (arg1)))))
	{
	  if (gimple_assign_cast_p (arg0_def_stmt))
	    {
	      /* For the INTEGER_CST case, we are just moving the
		 conversion from one place to another, which can often
		 hurt as the conversion moves further away from the
		 statement that computes the value.  So, perform this
		 only if new_arg0 is an operand of COND_STMT, or
		 if arg0_def_stmt is the only non-debug stmt in
		 its basic block, because then it is possible this
		 could enable further optimizations (minmax replacement
		 etc.).  See PR71016.
		 Note no-op conversions don't have this issue as
		 it will not generate any zero/sign extend in that case.  */
	      if ((TYPE_PRECISION (TREE_TYPE (new_arg0))
		   != TYPE_PRECISION (TREE_TYPE (arg1)))
		  && new_arg0 != gimple_cond_lhs (cond_stmt)
		  && new_arg0 != gimple_cond_rhs (cond_stmt)
		  && gimple_bb (arg0_def_stmt) == e0->src)
		{
		  gsi = gsi_for_stmt (arg0_def_stmt);
		  gsi_prev_nondebug (&gsi);
		  if (!gsi_end_p (gsi))
		    {
		      if (gassign *assign
			    = dyn_cast <gassign *> (gsi_stmt (gsi)))
			{
			  tree lhs = gimple_assign_lhs (assign);
			  enum tree_code ass_code
			    = gimple_assign_rhs_code (assign);
			  if (ass_code != MAX_EXPR && ass_code != MIN_EXPR)
			    return NULL;
			  if (lhs != gimple_assign_rhs1 (arg0_def_stmt))
			    return NULL;
			  gsi_prev_nondebug (&gsi);
			  if (!gsi_end_p (gsi))
			    return NULL;
			}
		      else
			return NULL;
		    }
		  gsi = gsi_for_stmt (arg0_def_stmt);
		  gsi_next_nondebug (&gsi);
		  if (!gsi_end_p (gsi))
		    return NULL;
		}
	      new_arg1 = fold_convert (TREE_TYPE (new_arg0), arg1);

	      /* Drop the overflow that fold_convert might add.  */
	      if (TREE_OVERFLOW (new_arg1))
		new_arg1 = drop_tree_overflow (new_arg1);
	    }
	  else
	    return NULL;
	}
      else
	return NULL;
    }

  /* If arg0/arg1 have > 1 use, then this transformation actually increases
     the number of expressions evaluated at runtime.  */
  if (!has_single_use (arg0)
      || (arg1_def_stmt && !has_single_use (arg1)))
    return NULL;

  /* If the types of new_arg0 and new_arg1 are different, bail out.  */
  if (!types_compatible_p (TREE_TYPE (new_arg0), TREE_TYPE (new_arg1)))
    return NULL;

  /* Create a new PHI stmt.  */
  result = PHI_RESULT (phi);
  temp = make_ssa_name (TREE_TYPE (new_arg0), NULL);
  newphi = create_phi_node (temp, gimple_bb (phi));

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "PHI ");
      print_generic_expr (dump_file, gimple_phi_result (phi));
      fprintf (dump_file,
	       " changed to factor operation out from COND_EXPR.\n");
      fprintf (dump_file, "New stmt with OPERATION that defines ");
      print_generic_expr (dump_file, result);
      fprintf (dump_file, ".\n");
    }

  /* Remove the old operation(s) that have a single use.  */
  gsi_for_def = gsi_for_stmt (arg0_def_stmt);
  gsi_remove (&gsi_for_def, true);
  release_defs (arg0_def_stmt);

  if (arg1_def_stmt)
    {
      gsi_for_def = gsi_for_stmt (arg1_def_stmt);
      gsi_remove (&gsi_for_def, true);
      release_defs (arg1_def_stmt);
    }

  add_phi_arg (newphi, new_arg0, e0, locus);
  add_phi_arg (newphi, new_arg1, e1, locus);

  /* Create the operation stmt and insert it.  */
  if (op_code == VIEW_CONVERT_EXPR)
    {
      temp = fold_build1 (VIEW_CONVERT_EXPR, TREE_TYPE (result), temp);
      new_stmt = gimple_build_assign (result, temp);
    }
  else
    new_stmt = gimple_build_assign (result, op_code, temp);
  gsi = gsi_after_labels (gimple_bb (phi));
  gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);

  /* Remove the original PHI stmt.  */
  gsi = gsi_for_stmt (phi);
  gsi_remove (&gsi, true);

  statistics_counter_event (cfun, "factored out operation", 1);

  return newphi;
}

/* Return TRUE if SEQ/OP pair should be allowed during early phiopt.
   Currently this is to allow MIN/MAX and ABS/NEGATE and constants.  */
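
/* For example (illustrative, not from the original source): a
   simplification whose result is MIN/MAX, ABS/ABSU, NEGATE, an SSA_NAME
   or a constant is accepted here; a preparation sequence is only allowed
   when it is a single assignment, or, for a MIN/MAX result, a single
   MIN/MAX statement feeding it.  Anything else is deferred to the late
   phiopt passes.  */
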
static bool
phiopt_early_allow (gimple_seq &seq, gimple_match_op &op)
{
  /* Don't allow functions.  */
  if (!op.code.is_tree_code ())
    return false;
  tree_code code = (tree_code) op.code;

  /* For a non-empty sequence, only allow one statement; except for
     MIN/MAX, where we allow at most 2 statements, each computing
     MIN/MAX.  */
  if (!gimple_seq_empty_p (seq))
    {
      if (code == MIN_EXPR || code == MAX_EXPR)
	{
	  if (!gimple_seq_singleton_p (seq))
	    return false;

	  gimple *stmt = gimple_seq_first_stmt (seq);
	  /* Only allow assignments.  */
	  if (!is_gimple_assign (stmt))
	    return false;
	  code = gimple_assign_rhs_code (stmt);
	  return code == MIN_EXPR || code == MAX_EXPR;
	}
      /* Check to make sure op was already an SSA_NAME.  */
      if (code != SSA_NAME)
	return false;
      if (!gimple_seq_singleton_p (seq))
	return false;
      gimple *stmt = gimple_seq_first_stmt (seq);
      /* Only allow assignments.  */
      if (!is_gimple_assign (stmt))
	return false;
      if (gimple_assign_lhs (stmt) != op.ops[0])
	return false;
      code = gimple_assign_rhs_code (stmt);
    }

  switch (code)
    {
    case MIN_EXPR:
    case MAX_EXPR:
    case ABS_EXPR:
    case ABSU_EXPR:
    case NEGATE_EXPR:
    case SSA_NAME:
      return true;
    case INTEGER_CST:
    case REAL_CST:
    case VECTOR_CST:
    case FIXED_CST:
      return true;
    default:
      return false;
    }
}

/* gimple_simplify_phiopt is like gimple_simplify but designed for PHIOPT.
   Return NULL if nothing can be simplified; otherwise return the simplified
   value, with its defining statements pushed to SEQ.  Also reject tree codes
   that are not allowed during early phiopt if EARLY_P is set.
   Takes the comparison from COMP_STMT and two args, ARG0 and ARG1, and tries
   to simplify CMP ? ARG0 : ARG1.
   Also try to simplify (!CMP) ? ARG1 : ARG0 if the non-inverted form
   failed.  */
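
/* Illustrative sketch (not from the original source): given the GIMPLE

     if (a_1 < b_2) ...
     # r_3 = PHI <a_1, b_2>

   this is queried as "a_1 < b_2 ? a_1 : b_2", which match-and-simplify
   folds to MIN_EXPR <a_1, b_2>; the MIN statement ends up in SEQ and its
   result is returned to the caller.  */
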
static tree
gimple_simplify_phiopt (bool early_p, tree type, gimple *comp_stmt,
			tree arg0, tree arg1,
			gimple_seq *seq)
{
  gimple_seq seq1 = NULL;
  enum tree_code comp_code = gimple_cond_code (comp_stmt);
  location_t loc = gimple_location (comp_stmt);
  tree cmp0 = gimple_cond_lhs (comp_stmt);
  tree cmp1 = gimple_cond_rhs (comp_stmt);
  /* To handle special cases like floating point comparison, it is easier and
     less error-prone to build a tree and gimplify it on the fly though it is
     less efficient.
     Don't use fold_build2 here as that might create (bool)a instead of just
     "a != 0".  */
  tree cond = build2_loc (loc, comp_code, boolean_type_node,
			  cmp0, cmp1);

  if (dump_file && (dump_flags & TDF_FOLDING))
    {
      fprintf (dump_file, "\nphiopt match-simplify trying:\n\t");
      print_generic_expr (dump_file, cond);
      fprintf (dump_file, " ? ");
      print_generic_expr (dump_file, arg0);
      fprintf (dump_file, " : ");
      print_generic_expr (dump_file, arg1);
      fprintf (dump_file, "\n");
    }

  gimple_match_op op (gimple_match_cond::UNCOND,
		      COND_EXPR, type, cond, arg0, arg1);

  if (op.resimplify (&seq1, follow_all_ssa_edges))
    {
      bool allowed = !early_p || phiopt_early_allow (seq1, op);
      tree result = maybe_push_res_to_seq (&op, &seq1);
      if (dump_file && (dump_flags & TDF_FOLDING))
	{
	  fprintf (dump_file, "\nphiopt match-simplify back:\n");
	  if (seq1)
	    print_gimple_seq (dump_file, seq1, 0, TDF_VOPS|TDF_MEMSYMS);
	  fprintf (dump_file, "result: ");
	  if (result)
	    print_generic_expr (dump_file, result);
	  else
	    fprintf (dump_file, " (none)");
	  fprintf (dump_file, "\n");
	  if (!allowed)
	    fprintf (dump_file, "rejected because early\n");
	}
      /* During early phiopt we only want to allow some generated tree
	 codes.  */
      if (allowed && result)
	{
	  if (loc != UNKNOWN_LOCATION)
	    annotate_all_with_location (seq1, loc);
	  gimple_seq_add_seq_without_update (seq, seq1);
	  return result;
	}
    }
  gimple_seq_discard (seq1);
  seq1 = NULL;

  /* Try the inverted comparison, that is !COMP ? ARG1 : ARG0.  */
  comp_code = invert_tree_comparison (comp_code, HONOR_NANS (cmp0));

  if (comp_code == ERROR_MARK)
    return NULL;

  cond = build2_loc (loc,
		     comp_code, boolean_type_node,
		     cmp0, cmp1);

  if (dump_file && (dump_flags & TDF_FOLDING))
    {
      fprintf (dump_file, "\nphiopt match-simplify trying:\n\t");
      print_generic_expr (dump_file, cond);
      fprintf (dump_file, " ? ");
      print_generic_expr (dump_file, arg1);
      fprintf (dump_file, " : ");
      print_generic_expr (dump_file, arg0);
      fprintf (dump_file, "\n");
    }

  gimple_match_op op1 (gimple_match_cond::UNCOND,
		       COND_EXPR, type, cond, arg1, arg0);

  if (op1.resimplify (&seq1, follow_all_ssa_edges))
    {
      bool allowed = !early_p || phiopt_early_allow (seq1, op1);
      tree result = maybe_push_res_to_seq (&op1, &seq1);
      if (dump_file && (dump_flags & TDF_FOLDING))
	{
	  fprintf (dump_file, "\nphiopt match-simplify back:\n");
	  if (seq1)
	    print_gimple_seq (dump_file, seq1, 0, TDF_VOPS|TDF_MEMSYMS);
	  fprintf (dump_file, "result: ");
	  if (result)
	    print_generic_expr (dump_file, result);
	  else
	    fprintf (dump_file, " (none)");
	  fprintf (dump_file, "\n");
	  if (!allowed)
	    fprintf (dump_file, "rejected because early\n");
	}
      /* During early phiopt we only want to allow some generated tree
	 codes.  */
      if (allowed && result)
	{
	  if (loc != UNKNOWN_LOCATION)
	    annotate_all_with_location (seq1, loc);
	  gimple_seq_add_seq_without_update (seq, seq1);
	  return result;
	}
    }
  gimple_seq_discard (seq1);

  return NULL;
}

/* empty_bb_or_one_feeding_into_p returns true if BB is an empty basic block,
   or if it contains only one cheap preparation statement that feeds into the
   PHI statement; in the latter case it sets STMT to that statement.  */
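
/* For instance (illustrative, not from the original source), the middle bb

     <bb 3>:
     _5 = -a_2;

   qualifies when _5 is used only by the PHI at the join point; the negate
   is then the single preparation statement returned in STMT so the caller
   can move it out before removing the block.  */
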
static bool
empty_bb_or_one_feeding_into_p (basic_block bb,
				gimple *phi,
				gimple *&stmt)
{
  stmt = nullptr;
  gimple *stmt_to_move = nullptr;
  tree lhs;

  if (empty_block_p (bb))
    return true;

  if (!single_pred_p (bb))
    return false;

  /* The middle bb cannot have phi nodes as we don't
     move those assignments yet.  */
  if (!gimple_seq_empty_p (phi_nodes (bb)))
    return false;

  gimple_stmt_iterator gsi;

  gsi = gsi_start_nondebug_after_labels_bb (bb);
  while (!gsi_end_p (gsi))
    {
      gimple *s = gsi_stmt (gsi);
      gsi_next_nondebug (&gsi);
      /* Skip over Predict and nop statements.  */
      if (gimple_code (s) == GIMPLE_PREDICT
	  || gimple_code (s) == GIMPLE_NOP)
	continue;
      /* If there is more than one statement return false.  */
      if (stmt_to_move)
	return false;
      stmt_to_move = s;
    }

  /* The only statement here was a Predict or a nop statement
     so return true.  */
  if (!stmt_to_move)
    return true;

  if (gimple_vuse (stmt_to_move))
    return false;

  if (gimple_could_trap_p (stmt_to_move)
      || gimple_has_side_effects (stmt_to_move))
    return false;

  ssa_op_iter it;
  tree use;
  FOR_EACH_SSA_TREE_OPERAND (use, stmt_to_move, it, SSA_OP_USE)
    if (ssa_name_maybe_undef_p (use))
      return false;

  /* Only allow assignments, but also allow some builtin/internal calls.
     As const calls don't match any of the above, yet they could
     still have some side-effects - they could contain
     gimple_could_trap_p statements, like floating point
     exceptions or integer division by zero.  See PR70586.
     FIXME: perhaps gimple_has_side_effects or gimple_could_trap_p
     should handle this.
     Allow some known builtin/internal calls that are known not to
     trap: logical functions (e.g. bswap and bit counting).  */
  if (!is_gimple_assign (stmt_to_move))
    {
      if (!is_gimple_call (stmt_to_move))
	return false;
      combined_fn cfn = gimple_call_combined_fn (stmt_to_move);
      switch (cfn)
	{
	default:
	  return false;
	case CFN_BUILT_IN_BSWAP16:
	case CFN_BUILT_IN_BSWAP32:
	case CFN_BUILT_IN_BSWAP64:
	case CFN_BUILT_IN_BSWAP128:
	CASE_CFN_FFS:
	CASE_CFN_PARITY:
	CASE_CFN_POPCOUNT:
	CASE_CFN_CLZ:
	CASE_CFN_CTZ:
	case CFN_BUILT_IN_CLRSB:
	case CFN_BUILT_IN_CLRSBL:
	case CFN_BUILT_IN_CLRSBLL:
	  lhs = gimple_call_lhs (stmt_to_move);
	  break;
	}
    }
  else
    lhs = gimple_assign_lhs (stmt_to_move);

  gimple *use_stmt;
  use_operand_p use_p;

  /* Allow only a statement which feeds into the other stmt.  */
  if (!lhs || TREE_CODE (lhs) != SSA_NAME
      || !single_imm_use (lhs, &use_p, &use_stmt)
      || use_stmt != phi)
    return false;

  stmt = stmt_to_move;
  return true;
}

/* Move STMT to before GSI and insert its defining
   name into INSERTED_EXPRS bitmap.  */
static void
move_stmt (gimple *stmt, gimple_stmt_iterator *gsi, auto_bitmap &inserted_exprs)
{
  if (!stmt)
    return;
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "statement un-sinked:\n");
      print_gimple_stmt (dump_file, stmt, 0,
			 TDF_VOPS|TDF_MEMSYMS);
    }

  tree name = gimple_get_lhs (stmt);
  // Mark the name to be renamed if there is one.
  bitmap_set_bit (inserted_exprs, SSA_NAME_VERSION (name));
  gimple_stmt_iterator gsi1 = gsi_for_stmt (stmt);
  gsi_move_before (&gsi1, gsi);
  reset_flow_sensitive_info (name);
}

/* RAII style class to temporarily remove flow sensitive info
   from ssa names defined by a gimple statement.  */
class auto_flow_sensitive
{
public:
  auto_flow_sensitive (gimple *s);
  ~auto_flow_sensitive ();
private:
  auto_vec<std::pair<tree, flow_sensitive_info_storage>, 2> stack;
};

/* Constructor for auto_flow_sensitive.  Saves
   off the ssa names' flow sensitive information
   that was defined by gimple statement S and
   resets it to the non-flow based ones.  */

auto_flow_sensitive::auto_flow_sensitive (gimple *s)
{
  if (!s)
    return;
  ssa_op_iter it;
  tree def;
  FOR_EACH_SSA_TREE_OPERAND (def, s, it, SSA_OP_DEF)
    {
      flow_sensitive_info_storage storage;
      storage.save_and_clear (def);
      stack.safe_push (std::make_pair (def, storage));
    }
}

/* Destructor, restores the flow sensitive information
   for the SSA names that had been saved off.  */

auto_flow_sensitive::~auto_flow_sensitive ()
{
  for (auto p : stack)
    p.second.restore (p.first);
}

/* The function match_simplify_replacement does the main work of doing the
   replacement using match and simplify.  Return true if the replacement was
   done, otherwise return false.
   BB is the basic block where the replacement is going to be done.  ARG0
   is argument 0 from the PHI.  Likewise for ARG1.  */
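
/* A typical case (illustrative sketch, not from the original source):

     if (a_1 < 0) goto bb3; else goto bb4;
     <bb 3>: neg_2 = -a_1;
     <bb 4>: # r_3 = PHI <neg_2(3), a_1(2)>

   is matched as "a_1 < 0 ? -a_1 : a_1"; match-and-simplify folds this to
   ABS_EXPR <a_1>, the negate is moved before the conditional, and the
   PHI edge is replaced with the ABS result.  */
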
static bool
match_simplify_replacement (basic_block cond_bb, basic_block middle_bb,
			    basic_block middle_bb_alt,
			    edge e0, edge e1, gphi *phi,
			    tree arg0, tree arg1, bool early_p,
			    bool threeway_p)
{
  gimple *stmt;
  gimple_stmt_iterator gsi;
  edge true_edge, false_edge;
  gimple_seq seq = NULL;
  tree result;
  gimple *stmt_to_move = NULL;
  gimple *stmt_to_move_alt = NULL;
  tree arg_true, arg_false;

  /* Special case A ? B : B as this will always simplify to B.  */
  if (operand_equal_for_phi_arg_p (arg0, arg1))
    return false;

  /* If the basic block only has a cheap preparation statement,
     allow it and move it once the transformation is done.  */
  if (!empty_bb_or_one_feeding_into_p (middle_bb, phi, stmt_to_move))
    return false;

  if (threeway_p
      && middle_bb != middle_bb_alt
      && !empty_bb_or_one_feeding_into_p (middle_bb_alt, phi,
					  stmt_to_move_alt))
    return false;

  /* At this point we know we have a GIMPLE_COND with two successors.
     One successor is BB, the other successor is an empty block which
     falls through into BB.

     There is a single PHI node at the join point (BB).

     So, given the condition COND, and the two PHI arguments, match and
     simplify can happen on (COND) ? arg0 : arg1.  */

  stmt = last_nondebug_stmt (cond_bb);

  /* We need to know which is the true edge and which is the false
     edge so that we know when to invert the condition below.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);

  /* Forward the edges over the middle basic block.  */
  if (true_edge->dest == middle_bb)
    true_edge = EDGE_SUCC (true_edge->dest, 0);
  if (false_edge->dest == middle_bb)
    false_edge = EDGE_SUCC (false_edge->dest, 0);

  /* When THREEWAY_P then e1 will point to the edge of the final transition
     from middle-bb to end.  */
  if (true_edge == e0)
    {
      if (!threeway_p)
	gcc_assert (false_edge == e1);
      arg_true = arg0;
      arg_false = arg1;
    }
  else
    {
      gcc_assert (false_edge == e0);
      if (!threeway_p)
	gcc_assert (true_edge == e1);
      arg_true = arg1;
      arg_false = arg0;
    }

  /* Do not make conditional undefs unconditional.  */
  if ((TREE_CODE (arg_true) == SSA_NAME
       && ssa_name_maybe_undef_p (arg_true))
      || (TREE_CODE (arg_false) == SSA_NAME
	  && ssa_name_maybe_undef_p (arg_false)))
    return false;

  tree type = TREE_TYPE (gimple_phi_result (phi));
  {
    auto_flow_sensitive s1(stmt_to_move);
    auto_flow_sensitive s_alt(stmt_to_move_alt);

    result = gimple_simplify_phiopt (early_p, type, stmt,
				     arg_true, arg_false,
				     &seq);
  }

  if (!result)
    return false;
  if (dump_file && (dump_flags & TDF_FOLDING))
    fprintf (dump_file, "accepted the phiopt match-simplify.\n");

  auto_bitmap exprs_maybe_dce;

  /* Mark the cond statements' lhs/rhs as maybe dce.  */
  if (TREE_CODE (gimple_cond_lhs (stmt)) == SSA_NAME
      && !SSA_NAME_IS_DEFAULT_DEF (gimple_cond_lhs (stmt)))
    bitmap_set_bit (exprs_maybe_dce,
		    SSA_NAME_VERSION (gimple_cond_lhs (stmt)));
  if (TREE_CODE (gimple_cond_rhs (stmt)) == SSA_NAME
      && !SSA_NAME_IS_DEFAULT_DEF (gimple_cond_rhs (stmt)))
    bitmap_set_bit (exprs_maybe_dce,
		    SSA_NAME_VERSION (gimple_cond_rhs (stmt)));

  gsi = gsi_last_bb (cond_bb);
  /* Insert the sequence generated from gimple_simplify_phiopt.  */
  if (seq)
    {
      // Mark the lhs of the new statements maybe for dce.
      gimple_stmt_iterator gsi1 = gsi_start (seq);
      for (; !gsi_end_p (gsi1); gsi_next (&gsi1))
	{
	  gimple *stmt = gsi_stmt (gsi1);
	  tree name = gimple_get_lhs (stmt);
	  if (name && TREE_CODE (name) == SSA_NAME)
	    bitmap_set_bit (exprs_maybe_dce, SSA_NAME_VERSION (name));
	}
      gsi_insert_seq_before (&gsi, seq, GSI_CONTINUE_LINKING);
    }

  /* If there was a statement to move, move it to right before
     the original conditional.  */
  move_stmt (stmt_to_move, &gsi, exprs_maybe_dce);
  move_stmt (stmt_to_move_alt, &gsi, exprs_maybe_dce);

  replace_phi_edge_with_variable (cond_bb, e1, phi, result, exprs_maybe_dce);

  /* Add Statistic here even though replace_phi_edge_with_variable already
     does it as we want to be able to count when match-simplify happens vs
     the others.  */
  statistics_counter_event (cfun, "match-simplify PHI replacement", 1);

  /* Note that we optimized this PHI.  */
  return true;
}

/* Update *ARG which is defined in STMT so that it contains the
   computed value if that seems profitable.  Return true if the
   statement is made dead by that rewriting.  */
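
/* E.g. (illustrative, assuming the member is at offset 0) for

     arg_2 = &v_1->m_data;

   *ARG becomes v_1, and the ADDR_EXPR statement is no longer needed
   once the caller's comparison is adjusted accordingly.  */
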
static bool
jump_function_from_stmt (tree *arg, gimple *stmt)
{
  enum tree_code code = gimple_assign_rhs_code (stmt);
  if (code == ADDR_EXPR)
    {
      /* For arg = &p->i transform it to p, if possible.  */
      tree rhs1 = gimple_assign_rhs1 (stmt);
      poly_int64 offset;
      tree tem = get_addr_base_and_unit_offset (TREE_OPERAND (rhs1, 0),
						&offset);
      if (tem
	  && TREE_CODE (tem) == MEM_REF
	  && known_eq (mem_ref_offset (tem) + offset, 0))
	{
	  *arg = TREE_OPERAND (tem, 0);
	  return true;
	}
    }
  /* TODO: Much like IPA-CP jump-functions we want to handle constant
     additions symbolically here, and we'd need to update the comparison
     code that compares the arg + cst tuples in our caller.  For now the
     code above exactly handles the VEC_BASE pattern from vec.h.  */
  return false;
}

/* RHS is a source argument in a BIT_AND_EXPR which feeds a conditional
   of the form SSA_NAME NE 0.

   If RHS is fed by a simple EQ_EXPR comparison of two values, see if
   the two input values of the EQ_EXPR match arg0 and arg1.

   If so update *code and return TRUE.  Otherwise return FALSE.  */
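
/* Illustrative sketch (not from the original source):

     _1 = a_2 == b_3;
     _4 = _1 & c_5;
     if (_4 != 0) ...

   Here RHS is _1, whose definition is the EQ_EXPR a_2 == b_3; when
   arg0/arg1 match a_2/b_3 (in either order), *CODE is set to EQ_EXPR.  */
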
static bool
rhs_is_fed_for_value_replacement (const_tree arg0, const_tree arg1,
				  enum tree_code *code, const_tree rhs)
{
  /* Obviously if RHS is not an SSA_NAME, we can't look at the defining
     statement.  */
  if (TREE_CODE (rhs) == SSA_NAME)
    {
      gimple *def1 = SSA_NAME_DEF_STMT (rhs);

      /* Verify the defining statement has an EQ_EXPR on the RHS.  */
      if (is_gimple_assign (def1) && gimple_assign_rhs_code (def1) == EQ_EXPR)
	{
	  /* Finally verify the source operands of the EQ_EXPR are equal
	     to arg0 and arg1.  */
	  tree op0 = gimple_assign_rhs1 (def1);
	  tree op1 = gimple_assign_rhs2 (def1);
	  if ((operand_equal_for_phi_arg_p (arg0, op0)
	       && operand_equal_for_phi_arg_p (arg1, op1))
	      || (operand_equal_for_phi_arg_p (arg0, op1)
		  && operand_equal_for_phi_arg_p (arg1, op0)))
	    {
	      /* We will perform the optimization.  */
	      *code = gimple_assign_rhs_code (def1);
	      return true;
	    }
	}
    }
  return false;
}

/* Return TRUE if arg0/arg1 are equal to the rhs/lhs or lhs/rhs of COND.

   Also return TRUE if arg0/arg1 are equal to the source arguments of
   an EQ comparison feeding a BIT_AND_EXPR which feeds COND.

   Return FALSE otherwise.  */
static bool
operand_equal_for_value_replacement (const_tree arg0, const_tree arg1,
				     enum tree_code *code, gimple *cond)
{
  gimple *def;
  tree lhs = gimple_cond_lhs (cond);
  tree rhs = gimple_cond_rhs (cond);

  if ((operand_equal_for_phi_arg_p (arg0, lhs)
       && operand_equal_for_phi_arg_p (arg1, rhs))
      || (operand_equal_for_phi_arg_p (arg1, lhs)
	  && operand_equal_for_phi_arg_p (arg0, rhs)))
    return true;

  /* Now handle more complex case where we have an EQ comparison
     which feeds a BIT_AND_EXPR which feeds COND.

     First verify that COND is of the form SSA_NAME NE 0.  */
  if (*code != NE_EXPR || !integer_zerop (rhs)
      || TREE_CODE (lhs) != SSA_NAME)
    return false;

  /* Now ensure that SSA_NAME is set by a BIT_AND_EXPR.  */
  def = SSA_NAME_DEF_STMT (lhs);
  if (!is_gimple_assign (def) || gimple_assign_rhs_code (def) != BIT_AND_EXPR)
    return false;

  /* Now verify arg0/arg1 correspond to the source arguments of an
     EQ comparison feeding the BIT_AND_EXPR.  */

  tree tmp = gimple_assign_rhs1 (def);
  if (rhs_is_fed_for_value_replacement (arg0, arg1, code, tmp))
    return true;

  tmp = gimple_assign_rhs2 (def);
  if (rhs_is_fed_for_value_replacement (arg0, arg1, code, tmp))
    return true;

  return false;
}

/* Returns true if ARG is a neutral element for operation CODE
   on the RIGHT side.  */
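
/* E.g. x + 0, x | 0, x ^ 0, x * 1 and x & -1 all yield x, while 0 - x or
   1 / x do not; hence the RIGHT-operand restriction for the
   non-commutative codes in the switch below (illustrative summary).  */
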
static bool
neutral_element_p (tree_code code, tree arg, bool right)
{
  switch (code)
    {
    case PLUS_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
      return integer_zerop (arg);

    case LROTATE_EXPR:
    case RROTATE_EXPR:
    case LSHIFT_EXPR:
    case RSHIFT_EXPR:
    case MINUS_EXPR:
    case POINTER_PLUS_EXPR:
      return right && integer_zerop (arg);

    case MULT_EXPR:
      return integer_onep (arg);

    case TRUNC_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case EXACT_DIV_EXPR:
      return right && integer_onep (arg);

    case BIT_AND_EXPR:
      return integer_all_onesp (arg);

    default:
      return false;
    }
}

/* Returns true if ARG is an absorbing element for operation CODE.  */
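
/* E.g. x * 0 and x & 0 are 0 for either operand position, 0 << x only
   when 0 is the left operand, and 0 / x or 0 % x only when additionally
   x is known to be nonzero (illustrative summary of the cases below).  */
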
static bool
absorbing_element_p (tree_code code, tree arg, bool right, tree rval)
{
  switch (code)
    {
    case BIT_IOR_EXPR:
      return integer_all_onesp (arg);

    case MULT_EXPR:
    case BIT_AND_EXPR:
      return integer_zerop (arg);

    case LSHIFT_EXPR:
    case RSHIFT_EXPR:
    case LROTATE_EXPR:
    case RROTATE_EXPR:
      return !right && integer_zerop (arg);

    case TRUNC_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case EXACT_DIV_EXPR:
    case TRUNC_MOD_EXPR:
    case CEIL_MOD_EXPR:
    case FLOOR_MOD_EXPR:
    case ROUND_MOD_EXPR:
      return (!right
	      && integer_zerop (arg)
	      && tree_single_nonzero_warnv_p (rval, NULL));

    default:
      return false;
    }
}

/* The function value_replacement does the main work of doing the value
   replacement.  Return non-zero if the replacement is done, otherwise
   return 0.  If we remove the middle basic block, return 2.
   BB is the basic block where the replacement is going to be done.  ARG0
   is argument 0 from the PHI.  Likewise for ARG1.  */
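
/* The canonical case (illustrative sketch, not from the original source):

     if (x_1 != 0) goto bb4; else goto bb3;
     <bb 3>:
     <bb 4>: # r_2 = PHI <x_1, 0>

   Since r_2 is x_1 on the true edge and equals x_1 (== 0) on the false
   edge, r_2 is always x_1 and the condition can be removed.  */
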
static int
value_replacement (basic_block cond_bb, basic_block middle_bb,
		   edge e0, edge e1, gphi *phi, tree arg0, tree arg1)
{
  gimple_stmt_iterator gsi;
  edge true_edge, false_edge;
  enum tree_code code;
  bool empty_or_with_defined_p = true;

  /* Virtual operands don't need to be handled.  */
  if (virtual_operand_p (arg1))
    return 0;

  /* Special case A ? B : B as this will always simplify to B.  */
  if (operand_equal_for_phi_arg_p (arg0, arg1))
    return 0;

  gcond *cond = as_a <gcond *> (*gsi_last_bb (cond_bb));
  code = gimple_cond_code (cond);

  /* This transformation is only valid for equality comparisons.  */
  if (code != NE_EXPR && code != EQ_EXPR)
    return 0;

  /* Do not make conditional undefs unconditional.  */
  if ((TREE_CODE (arg0) == SSA_NAME
       && ssa_name_maybe_undef_p (arg0))
      || (TREE_CODE (arg1) == SSA_NAME
	  && ssa_name_maybe_undef_p (arg1)))
    return 0;

  /* If the type says honor signed zeros we cannot do this
     optimization.  */
  if (HONOR_SIGNED_ZEROS (arg1))
    return 0;

  /* If there is a statement in MIDDLE_BB that defines one of the PHI
     arguments, then adjust arg0 or arg1.  */
  gsi = gsi_start_nondebug_after_labels_bb (middle_bb);
  while (!gsi_end_p (gsi))
    {
      gimple *stmt = gsi_stmt (gsi);
      tree lhs;
      gsi_next_nondebug (&gsi);
      if (!is_gimple_assign (stmt))
	{
	  if (gimple_code (stmt) != GIMPLE_PREDICT
	      && gimple_code (stmt) != GIMPLE_NOP)
	    empty_or_with_defined_p = false;
	  continue;
	}
      /* Now try to adjust arg0 or arg1 according to the computation
	 in the statement.  */
      lhs = gimple_assign_lhs (stmt);
      if (!(lhs == arg0
	    && jump_function_from_stmt (&arg0, stmt))
	  || (lhs == arg1
	      && jump_function_from_stmt (&arg1, stmt)))
	empty_or_with_defined_p = false;
    }

  /* We need to know which is the true edge and which is the false
     edge so that we know if we have abs or negative abs.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);

  /* At this point we know we have a COND_EXPR with two successors.
     One successor is BB, the other successor is an empty block which
     falls through into BB.

     The condition for the COND_EXPR is known to be NE_EXPR or EQ_EXPR.

     There is a single PHI node at the join point (BB) with two arguments.

     We now need to verify that the two arguments in the PHI node match
     the two arguments to the equality comparison.  */

  bool equal_p = operand_equal_for_value_replacement (arg0, arg1, &code, cond);
  bool maybe_equal_p = false;
  if (!equal_p
      && empty_or_with_defined_p
      && TREE_CODE (gimple_cond_rhs (cond)) == INTEGER_CST
      && (operand_equal_for_phi_arg_p (gimple_cond_lhs (cond), arg0)
	  ? TREE_CODE (arg1) == INTEGER_CST
	  : (operand_equal_for_phi_arg_p (gimple_cond_lhs (cond), arg1)
	     && TREE_CODE (arg0) == INTEGER_CST)))
    maybe_equal_p = true;
  if (equal_p || maybe_equal_p)
    {
      edge e;
      tree arg;

      /* For NE_EXPR, we want to build an assignment result = arg where
	 arg is the PHI argument associated with the true edge.  For
	 EQ_EXPR we want the PHI argument associated with the false edge.  */
      e = (code == NE_EXPR ? true_edge : false_edge);

      /* Unfortunately, E may not reach BB (it may instead have gone to
	 OTHER_BLOCK).  If that is the case, then we want the single outgoing
	 edge from OTHER_BLOCK which reaches BB and represents the desired
	 path from COND_BLOCK.  */
      if (e->dest == middle_bb)
	e = single_succ_edge (e->dest);

      /* Now we know the incoming edge to BB that has the argument for the
	 RHS of our new assignment statement.  */
      if (e0 == e)
	arg = arg0;
      else
	arg = arg1;

      /* If the middle basic block was empty or is defining the
	 PHI arguments and this is a single phi where the args are different
	 for the edges e0 and e1 then we can remove the middle basic block.  */
      if (empty_or_with_defined_p
	  && single_non_singleton_phi_for_edges (phi_nodes (gimple_bb (phi)),
						 e0, e1) == phi)
	{
	  use_operand_p use_p;
	  gimple *use_stmt;

	  /* Even if arg0/arg1 isn't equal to second operand of cond, we
	     can optimize away the bb if we can prove it doesn't care whether
	     phi result is arg0/arg1 or second operand of cond.  Consider:
	       <bb 2> [local count: 118111600]:
	       if (i_2(D) == 4)
		 goto <bb 4>; [97.00%]
	       else
		 goto <bb 3>; [3.00%]

	       <bb 3> [local count: 3540129]:

	       <bb 4> [local count: 118111600]:
	       # i_6 = PHI <i_2(D)(3), 6(2)>
	       _3 = i_6 != 0;
	     Here, carg is 4, oarg is 6, crhs is 0, and because
	     (4 != 0) == (6 != 0), we don't care if i_6 is 4 or 6, both
	     have the same outcome.  So, we can optimize this to:
	       _3 = i_2(D) != 0;
	     If the single imm use of phi result is >, >=, < or <=, similarly
	     we can check if both carg and oarg compare the same against
	     crhs using ccode.  */
	  if (maybe_equal_p
	      && TREE_CODE (arg) != INTEGER_CST
	      && single_imm_use (gimple_phi_result (phi), &use_p, &use_stmt))
	    {
	      enum tree_code ccode = ERROR_MARK;
	      tree clhs = NULL_TREE, crhs = NULL_TREE;
	      tree carg = gimple_cond_rhs (cond);
	      tree oarg = e0 == e ? arg1 : arg0;
	      if (is_gimple_assign (use_stmt)
		  && (TREE_CODE_CLASS (gimple_assign_rhs_code (use_stmt))
		      == tcc_comparison))
		{
		  ccode = gimple_assign_rhs_code (use_stmt);
		  clhs = gimple_assign_rhs1 (use_stmt);
		  crhs = gimple_assign_rhs2 (use_stmt);
		}
	      else if (gimple_code (use_stmt) == GIMPLE_COND)
		{
		  ccode = gimple_cond_code (use_stmt);
		  clhs = gimple_cond_lhs (use_stmt);
		  crhs = gimple_cond_rhs (use_stmt);
		}
	      if (ccode != ERROR_MARK
		  && clhs == gimple_phi_result (phi)
		  && TREE_CODE (crhs) == INTEGER_CST)
		switch (ccode)
		  {
		  case EQ_EXPR:
		  case NE_EXPR:
		    if (!tree_int_cst_equal (crhs, carg)
			&& !tree_int_cst_equal (crhs, oarg))
		      equal_p = true;
		    break;
		  case GT_EXPR:
		    if (tree_int_cst_lt (crhs, carg)
			== tree_int_cst_lt (crhs, oarg))
		      equal_p = true;
		    break;
		  case GE_EXPR:
		    if (tree_int_cst_le (crhs, carg)
			== tree_int_cst_le (crhs, oarg))
		      equal_p = true;
		    break;
		  case LT_EXPR:
		    if (tree_int_cst_lt (carg, crhs)
			== tree_int_cst_lt (oarg, crhs))
		      equal_p = true;
		    break;
		  case LE_EXPR:
		    if (tree_int_cst_le (carg, crhs)
			== tree_int_cst_le (oarg, crhs))
		      equal_p = true;
		    break;
		  default:
		    break;
		  }
	      if (equal_p)
		{
		  tree phires = gimple_phi_result (phi);
		  if (SSA_NAME_RANGE_INFO (phires))
		    {
		      /* After the optimization PHI result can have value
			 which it couldn't have previously.  */
		      Value_Range r (TREE_TYPE (phires));
		      if (get_global_range_query ()->range_of_expr (r, phires,
								    phi))
			{
			  Value_Range tmp (carg, carg);
			  r.union_ (tmp);
			  reset_flow_sensitive_info (phires);
			  set_range_info (phires, r);
			}
		      else
			reset_flow_sensitive_info (phires);
		    }
		}
	      if (equal_p && MAY_HAVE_DEBUG_BIND_STMTS)
		{
		  imm_use_iterator imm_iter;
		  tree phires = gimple_phi_result (phi);
		  tree temp = NULL_TREE;
		  bool reset_p = false;

		  /* Add # DEBUG D#1 => arg != carg ? arg : oarg.  */
		  FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, phires)
		    {
		      if (!is_gimple_debug (use_stmt))
			continue;
		      if (temp == NULL_TREE)
			{
			  if (!single_pred_p (middle_bb)
			      || EDGE_COUNT (gimple_bb (phi)->preds) != 2)
			    {
			      /* But only if middle_bb has a single
				 predecessor and phi bb has two, otherwise
				 we could use a SSA_NAME not usable in that
				 place or wrong-debug.  */
			      reset_p = true;
			      break;
			    }
			  gimple_stmt_iterator gsi
			    = gsi_after_labels (gimple_bb (phi));
			  tree type = TREE_TYPE (phires);
			  temp = build_debug_expr_decl (type);
			  tree t = build2 (NE_EXPR, boolean_type_node,
					   arg, carg);
			  t = build3 (COND_EXPR, type, t, arg, oarg);
			  gimple *g = gimple_build_debug_bind (temp, t, phi);
			  gsi_insert_before (&gsi, g, GSI_SAME_STMT);
			}
		      FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
			replace_exp (use_p, temp);
		      update_stmt (use_stmt);
		    }
		  if (reset_p)
		    reset_debug_uses (phi);
		}
	    }
	  if (equal_p)
	    {
	      replace_phi_edge_with_variable (cond_bb, e1, phi, arg);
	      /* Note that we optimized this PHI.  */
	      return 2;
	    }
	}
      else if (equal_p)
	{
	  if (!single_pred_p (middle_bb))
	    return 0;
	  statistics_counter_event (cfun, "Replace PHI with "
				    "variable/value_replacement", 1);

	  /* Replace the PHI arguments with arg.  */
	  SET_PHI_ARG_DEF (phi, e0->dest_idx, arg);
	  SET_PHI_ARG_DEF (phi, e1->dest_idx, arg);
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "PHI ");
	      print_generic_expr (dump_file, gimple_phi_result (phi));
	      fprintf (dump_file, " reduced for COND_EXPR in block %d to ",
		       cond_bb->index);
	      print_generic_expr (dump_file, arg);
	      fprintf (dump_file, ".\n");
	    }
	  return 1;
	}
    }

  if (!single_pred_p (middle_bb))
    return 0;

  /* Now optimize (x != 0) ? x + y : y to just x + y.  */
  gsi = gsi_last_nondebug_bb (middle_bb);
  if (gsi_end_p (gsi))
    return 0;

  gimple *assign = gsi_stmt (gsi);
  if (!is_gimple_assign (assign)
      || (!INTEGRAL_TYPE_P (TREE_TYPE (arg0))
	  && !POINTER_TYPE_P (TREE_TYPE (arg0))))
    return 0;

  if (gimple_assign_rhs_class (assign) != GIMPLE_BINARY_RHS)
    {
      /* If the last stmt of the middle_bb is a conversion, handle it like
	 a preparation statement through constant evaluation with
	 checking for UB.  */
      enum tree_code sc = gimple_assign_rhs_code (assign);
      if (CONVERT_EXPR_CODE_P (sc))
	assign = NULL;
      else
	return 0;
    }

  /* Punt if there are (degenerate) PHIs in middle_bb, there should not be.  */
  if (!gimple_seq_empty_p (phi_nodes (middle_bb)))
    return 0;

  /* Allow up to 2 cheap preparation statements that prepare argument
     for assign, e.g.:
      if (y_4 != 0)
	goto <bb 3>;
      else
	goto <bb 4>;
     <bb 3>:
      _1 = (int) y_4;
      iftmp.0_6 = x_5(D) r<< _1;
     <bb 4>:
      # iftmp.0_2 = PHI <iftmp.0_6(3), x_5(D)(2)>

      if (y_3(D) == 0)
	goto <bb 4>;
      else
	goto <bb 3>;
     <bb 3>:
      y_4 = y_3(D) & 31;
      _1 = (int) y_4;
      _6 = x_5(D) r<< _1;
     <bb 4>:
      # _2 = PHI <x_5(D)(2), _6(3)>  */
  gimple *prep_stmt[2] = { NULL, NULL };
  int prep_cnt;
  for (prep_cnt = 0; ; prep_cnt++)
    {
      if (prep_cnt || assign)
	gsi_prev_nondebug (&gsi);
      if (gsi_end_p (gsi))
	break;

      gimple *g = gsi_stmt (gsi);
      if (gimple_code (g) == GIMPLE_LABEL)
	break;

      if (prep_cnt == 2 || !is_gimple_assign (g))
	return 0;

      tree lhs = gimple_assign_lhs (g);
      tree rhs1 = gimple_assign_rhs1 (g);
      use_operand_p use_p;
      gimple *use_stmt;
      if (TREE_CODE (lhs) != SSA_NAME
	  || TREE_CODE (rhs1) != SSA_NAME
	  || !INTEGRAL_TYPE_P (TREE_TYPE (lhs))
	  || !INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
	  || !single_imm_use (lhs, &use_p, &use_stmt)
	  || ((prep_cnt || assign)
	      && use_stmt != (prep_cnt ? prep_stmt[prep_cnt - 1] : assign)))
	return 0;
      switch (gimple_assign_rhs_code (g))
	{
	CASE_CONVERT:
	  break;
	case PLUS_EXPR:
	case BIT_AND_EXPR:
	case BIT_IOR_EXPR:
	case BIT_XOR_EXPR:
	  if (TREE_CODE (gimple_assign_rhs2 (g)) != INTEGER_CST)
	    return 0;
	  break;
	default:
	  return 0;
	}
      prep_stmt[prep_cnt] = g;
    }

  /* Only transform if it removes the condition.  */
  if (!single_non_singleton_phi_for_edges (phi_nodes (gimple_bb (phi)), e0, e1))
    return 0;

  /* Size-wise, this is always profitable.  */
  if (optimize_bb_for_speed_p (cond_bb)
      /* The special case is useless if it has a low probability.  */
      && profile_status_for_fn (cfun) != PROFILE_ABSENT
      && EDGE_PRED (middle_bb, 0)->probability < profile_probability::even ()
      /* If assign is cheap, there is no point avoiding it.  */
      && estimate_num_insns_seq (bb_seq (middle_bb), &eni_time_weights)
	 >= 3 * estimate_num_insns (cond, &eni_time_weights))
    return 0;

  tree cond_lhs = gimple_cond_lhs (cond);
  tree cond_rhs = gimple_cond_rhs (cond);

  /* Propagate the cond_rhs constant through preparation stmts,
     make sure UB isn't invoked while doing that.  */
  for (int i = prep_cnt - 1; i >= 0; --i)
    {
      gimple *g = prep_stmt[i];
      tree grhs1 = gimple_assign_rhs1 (g);
      if (!operand_equal_for_phi_arg_p (cond_lhs, grhs1))
	return 0;
      cond_lhs = gimple_assign_lhs (g);
      cond_rhs = fold_convert (TREE_TYPE (grhs1), cond_rhs);
      if (TREE_CODE (cond_rhs) != INTEGER_CST
	  || TREE_OVERFLOW (cond_rhs))
	return 0;
      if (gimple_assign_rhs_class (g) == GIMPLE_BINARY_RHS)
	{
	  cond_rhs = int_const_binop (gimple_assign_rhs_code (g), cond_rhs,
				      gimple_assign_rhs2 (g));
	  if (TREE_OVERFLOW (cond_rhs))
	    return 0;
	}
      cond_rhs = fold_convert (TREE_TYPE (cond_lhs), cond_rhs);
      if (TREE_CODE (cond_rhs) != INTEGER_CST
	  || TREE_OVERFLOW (cond_rhs))
	return 0;
    }

  tree lhs, rhs1, rhs2;
  enum tree_code code_def;
  if (assign)
    {
      lhs = gimple_assign_lhs (assign);
      rhs1 = gimple_assign_rhs1 (assign);
      rhs2 = gimple_assign_rhs2 (assign);
      code_def = gimple_assign_rhs_code (assign);
    }
  else
    {
      gcc_assert (prep_cnt > 0);
      lhs = cond_lhs;
      rhs1 = NULL_TREE;
      rhs2 = NULL_TREE;
      code_def = ERROR_MARK;
    }

  if (((code == NE_EXPR && e1 == false_edge)
       || (code == EQ_EXPR && e1 == true_edge))
      && arg0 == lhs
      && ((assign == NULL
	   && operand_equal_for_phi_arg_p (arg1, cond_rhs))
	  || (assign
	      && arg1 == rhs1
	      && operand_equal_for_phi_arg_p (rhs2, cond_lhs)
	      && neutral_element_p (code_def, cond_rhs, true))
	  || (assign
	      && arg1 == rhs2
	      && operand_equal_for_phi_arg_p (rhs1, cond_lhs)
	      && neutral_element_p (code_def, cond_rhs, false))
	  || (assign
	      && operand_equal_for_phi_arg_p (arg1, cond_rhs)
	      && ((operand_equal_for_phi_arg_p (rhs2, cond_lhs)
		   && absorbing_element_p (code_def, cond_rhs, true, rhs2))
		  || (operand_equal_for_phi_arg_p (rhs1, cond_lhs)
		      && absorbing_element_p (code_def,
					      cond_rhs, false, rhs2))))))
    {
      gsi = gsi_for_stmt (cond);
      /* Moving ASSIGN might change VR of lhs, e.g. when moving u_6
	 def-stmt in:
	   if (n_5 != 0)
	     goto <bb 3>;
	   else
	     goto <bb 4>;

	   <bb 3>:
	   # RANGE [0, 4294967294]
	   u_6 = n_5 + 4294967295;

	   <bb 4>:
	   # u_3 = PHI <u_6(3), 4294967295(2)>  */
      reset_flow_sensitive_info (lhs);
      gimple_stmt_iterator gsi_from;
      for (int i = prep_cnt - 1; i >= 0; --i)
	{
	  tree plhs = gimple_assign_lhs (prep_stmt[i]);
	  reset_flow_sensitive_info (plhs);
	  gsi_from = gsi_for_stmt (prep_stmt[i]);
	  gsi_move_before (&gsi_from, &gsi);
	}
      if (assign)
	{
	  gsi_from = gsi_for_stmt (assign);
	  gsi_move_before (&gsi_from, &gsi);
	}
      replace_phi_edge_with_variable (cond_bb, e1, phi, lhs);
      return 2;
    }

  return 0;
}

/* If VAR is an SSA_NAME that points to a BIT_NOT_EXPR then return the TREE
   for the value being inverted.  */
static tree
strip_bit_not (tree var)
{
  if (TREE_CODE (var) != SSA_NAME)
    return NULL_TREE;

  gimple *assign = SSA_NAME_DEF_STMT (var);
  if (gimple_code (assign) != GIMPLE_ASSIGN)
    return NULL_TREE;

  if (gimple_assign_rhs_code (assign) != BIT_NOT_EXPR)
    return NULL_TREE;

  return gimple_assign_rhs1 (assign);
}

/* Invert a MIN to a MAX or a MAX to a MIN expression CODE.  */

enum tree_code
invert_minmax_code (enum tree_code code)
{
  switch (code) {
  case MIN_EXPR:
    return MAX_EXPR;
  case MAX_EXPR:
    return MIN_EXPR;
  default:
    gcc_unreachable ();
  }
}

/* The function minmax_replacement does the main work of doing the minmax
   replacement.  Return true if the replacement is done, otherwise return
   false.
   BB is the basic block where the replacement is going to be done.  ARG0
   is argument 0 from the PHI.  Likewise for ARG1.

   If THREEWAY_P then expect the BB to be laid out in diamond shape with each
   BB containing only a MIN or MAX expression.  */
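
/* Illustrative sketch (not from the original source): the simple shape

     if (a_1 < b_2) goto bb4; else goto bb3;
     <bb 3>:
     <bb 4>: # r_3 = PHI <a_1, b_2>

   becomes r_3 = MIN_EXPR <a_1, b_2>; the THREEWAY_P diamond, with one
   MIN/MAX in each arm against a common bound, is folded into two chained
   MIN/MAX statements (see the case analysis in the body below).  */
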
1671 static bool
1672 minmax_replacement (basic_block cond_bb, basic_block middle_bb, basic_block alt_middle_bb,
1673 edge e0, edge e1, gphi *phi, tree arg0, tree arg1, bool threeway_p)
1675 tree result;
1676 edge true_edge, false_edge;
1677 enum tree_code minmax, ass_code;
1678 tree smaller, larger, arg_true, arg_false;
1679 gimple_stmt_iterator gsi, gsi_from;
1681 tree type = TREE_TYPE (PHI_RESULT (phi));
1683 gcond *cond = as_a <gcond *> (*gsi_last_bb (cond_bb));
1684 enum tree_code cmp = gimple_cond_code (cond);
1685 tree rhs = gimple_cond_rhs (cond);
1687 /* Turn EQ/NE of extreme values to order comparisons. */
1688 if ((cmp == NE_EXPR || cmp == EQ_EXPR)
1689 && TREE_CODE (rhs) == INTEGER_CST
1690 && INTEGRAL_TYPE_P (TREE_TYPE (rhs)))
1692 if (wi::eq_p (wi::to_wide (rhs), wi::min_value (TREE_TYPE (rhs))))
1694 cmp = (cmp == EQ_EXPR) ? LT_EXPR : GE_EXPR;
1695 rhs = wide_int_to_tree (TREE_TYPE (rhs),
1696 wi::min_value (TREE_TYPE (rhs)) + 1);
1698 else if (wi::eq_p (wi::to_wide (rhs), wi::max_value (TREE_TYPE (rhs))))
1700 cmp = (cmp == EQ_EXPR) ? GT_EXPR : LE_EXPR;
1701 rhs = wide_int_to_tree (TREE_TYPE (rhs),
1702 wi::max_value (TREE_TYPE (rhs)) - 1);
1706 /* This transformation is only valid for order comparisons. Record which
1707 operand is smaller/larger if the result of the comparison is true. */
1708 tree alt_smaller = NULL_TREE;
1709 tree alt_larger = NULL_TREE;
1710 if (cmp == LT_EXPR || cmp == LE_EXPR)
1712 smaller = gimple_cond_lhs (cond);
1713 larger = rhs;
1714 /* If we have smaller < CST it is equivalent to smaller <= CST-1.
1715 Likewise smaller <= CST is equivalent to smaller < CST+1. */
1716 if (TREE_CODE (larger) == INTEGER_CST
1717 && INTEGRAL_TYPE_P (TREE_TYPE (larger)))
1719 if (cmp == LT_EXPR)
1721 wi::overflow_type overflow;
1722 wide_int alt = wi::sub (wi::to_wide (larger), 1,
1723 TYPE_SIGN (TREE_TYPE (larger)),
1724 &overflow);
1725 if (! overflow)
1726 alt_larger = wide_int_to_tree (TREE_TYPE (larger), alt);
1728 else
1730 wi::overflow_type overflow;
1731 wide_int alt = wi::add (wi::to_wide (larger), 1,
1732 TYPE_SIGN (TREE_TYPE (larger)),
1733 &overflow);
1734 if (! overflow)
1735 alt_larger = wide_int_to_tree (TREE_TYPE (larger), alt);
1739 else if (cmp == GT_EXPR || cmp == GE_EXPR)
1741 smaller = rhs;
1742 larger = gimple_cond_lhs (cond);
1743 /* If we have larger > CST it is equivalent to larger >= CST+1.
1744 Likewise larger >= CST is equivalent to larger > CST-1. */
1745 if (TREE_CODE (smaller) == INTEGER_CST
1746 && INTEGRAL_TYPE_P (TREE_TYPE (smaller)))
1748 wi::overflow_type overflow;
1749 if (cmp == GT_EXPR)
1751 wide_int alt = wi::add (wi::to_wide (smaller), 1,
1752 TYPE_SIGN (TREE_TYPE (smaller)),
1753 &overflow);
1754 if (! overflow)
1755 alt_smaller = wide_int_to_tree (TREE_TYPE (smaller), alt);
1757 else
1759 wide_int alt = wi::sub (wi::to_wide (smaller), 1,
1760 TYPE_SIGN (TREE_TYPE (smaller)),
1761 &overflow);
1762 if (! overflow)
1763 alt_smaller = wide_int_to_tree (TREE_TYPE (smaller), alt);
1767 else
1768 return false;
1770 /* Handle the special case of (signed_type)x < 0 being equivalent
1771 to x > MAX_VAL(signed_type) and (signed_type)x >= 0 equivalent
1772 to x <= MAX_VAL(signed_type). */
1773 if ((cmp == GE_EXPR || cmp == LT_EXPR)
1774 && INTEGRAL_TYPE_P (type)
1775 && TYPE_UNSIGNED (type)
1776 && integer_zerop (rhs))
1778 tree op = gimple_cond_lhs (cond);
1779 if (TREE_CODE (op) == SSA_NAME
1780 && INTEGRAL_TYPE_P (TREE_TYPE (op))
1781 && !TYPE_UNSIGNED (TREE_TYPE (op)))
1783 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
1784 if (gimple_assign_cast_p (def_stmt))
1786 tree op1 = gimple_assign_rhs1 (def_stmt);
1787 if (INTEGRAL_TYPE_P (TREE_TYPE (op1))
1788 && TYPE_UNSIGNED (TREE_TYPE (op1))
1789 && (TYPE_PRECISION (TREE_TYPE (op))
1790 == TYPE_PRECISION (TREE_TYPE (op1)))
1791 && useless_type_conversion_p (type, TREE_TYPE (op1)))
1793 wide_int w1 = wi::max_value (TREE_TYPE (op));
1794 wide_int w2 = wi::add (w1, 1);
1795 if (cmp == LT_EXPR)
1797 larger = op1;
1798 smaller = wide_int_to_tree (TREE_TYPE (op1), w1);
1799 alt_smaller = wide_int_to_tree (TREE_TYPE (op1), w2);
1800 alt_larger = NULL_TREE;
1802 else
1804 smaller = op1;
1805 larger = wide_int_to_tree (TREE_TYPE (op1), w1);
1806 alt_larger = wide_int_to_tree (TREE_TYPE (op1), w2);
1807 alt_smaller = NULL_TREE;
1814 /* We need to know which is the true edge and which is the false
1815 edge so that we know if have abs or negative abs. */
1816 extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
1818 /* Forward the edges over the middle basic block. */
1819 if (true_edge->dest == middle_bb)
1820 true_edge = EDGE_SUCC (true_edge->dest, 0);
1821 if (false_edge->dest == middle_bb)
1822 false_edge = EDGE_SUCC (false_edge->dest, 0);
1824 /* When THREEWAY_P then e1 will point to the edge of the final transition
1825 from middle-bb to end. */
1826 if (true_edge == e0)
1828 if (!threeway_p)
1829 gcc_assert (false_edge == e1);
1830 arg_true = arg0;
1831 arg_false = arg1;
1833 else
1835 gcc_assert (false_edge == e0);
1836 if (!threeway_p)
1837 gcc_assert (true_edge == e1);
1838 arg_true = arg1;
1839 arg_false = arg0;
1842 if (empty_block_p (middle_bb)
1843 && (!threeway_p
1844 || empty_block_p (alt_middle_bb)))
1846 if ((operand_equal_for_phi_arg_p (arg_true, smaller)
1847 || (alt_smaller
1848 && operand_equal_for_phi_arg_p (arg_true, alt_smaller)))
1849 && (operand_equal_for_phi_arg_p (arg_false, larger)
1850 || (alt_larger
1851 && operand_equal_for_phi_arg_p (arg_true, alt_larger))))
1853 /* Case
1855 if (smaller < larger)
1856 rslt = smaller;
1857 else
1858 rslt = larger; */
1859 minmax = MIN_EXPR;
1861 else if ((operand_equal_for_phi_arg_p (arg_false, smaller)
1862 || (alt_smaller
1863 && operand_equal_for_phi_arg_p (arg_false, alt_smaller)))
1864 && (operand_equal_for_phi_arg_p (arg_true, larger)
1865 || (alt_larger
1866 && operand_equal_for_phi_arg_p (arg_true, alt_larger))))
1867 minmax = MAX_EXPR;
1868 else
1869 return false;
1871 else if (HONOR_NANS (type) || HONOR_SIGNED_ZEROS (type))
1872 /* The optimization may be unsafe due to NaNs. */
1873 return false;
1874 else if (middle_bb != alt_middle_bb && threeway_p)
1876 /* Recognize the following case:
1878 if (smaller < larger)
1879 a = MIN (smaller, c);
1880 else
1881 b = MIN (larger, c);
1882 x = PHI <a, b>
1884 This is equivalent to
1886 a = MIN (smaller, c);
1887 x = MIN (larger, a); */
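/* A hypothetical C source example of this shape (illustrative only):

   #define MIN(a, b) ((a) < (b) ? (a) : (b))
   int f (int x, int y, int c)
   {
     return x < y ? MIN (x, c) : MIN (y, c);  // == MIN (MIN (x, y), c)
   }
*/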
1889 gimple *assign = last_and_only_stmt (middle_bb);
1890 tree lhs, op0, op1, bound;
1891 tree alt_lhs, alt_op0, alt_op1;
1892 bool invert = false;
1894 /* When THREEWAY_P is true, e1 will point to the edge of the final
1895 transition from middle-bb to end. */
1896 if (true_edge == e0)
1897 gcc_assert (false_edge == EDGE_PRED (e1->src, 0));
1898 else
1899 gcc_assert (true_edge == EDGE_PRED (e1->src, 0));
1901 bool valid_minmax_p = false;
1902 gimple_stmt_iterator it1
1903 = gsi_start_nondebug_after_labels_bb (middle_bb);
1904 gimple_stmt_iterator it2
1905 = gsi_start_nondebug_after_labels_bb (alt_middle_bb);
1906 if (gsi_one_nondebug_before_end_p (it1)
1907 && gsi_one_nondebug_before_end_p (it2))
1909 gimple *stmt1 = gsi_stmt (it1);
1910 gimple *stmt2 = gsi_stmt (it2);
1911 if (is_gimple_assign (stmt1) && is_gimple_assign (stmt2))
1913 enum tree_code code1 = gimple_assign_rhs_code (stmt1);
1914 enum tree_code code2 = gimple_assign_rhs_code (stmt2);
1915 valid_minmax_p = (code1 == MIN_EXPR || code1 == MAX_EXPR)
1916 && (code2 == MIN_EXPR || code2 == MAX_EXPR);
1920 if (!valid_minmax_p)
1921 return false;
1923 if (!assign
1924 || gimple_code (assign) != GIMPLE_ASSIGN)
1925 return false;
1927 /* There cannot be any phi nodes in the middle bb. */
1928 if (!gimple_seq_empty_p (phi_nodes (middle_bb)))
1929 return false;
1931 lhs = gimple_assign_lhs (assign);
1932 ass_code = gimple_assign_rhs_code (assign);
1933 if (ass_code != MAX_EXPR && ass_code != MIN_EXPR)
1934 return false;
1936 op0 = gimple_assign_rhs1 (assign);
1937 op1 = gimple_assign_rhs2 (assign);
1939 assign = last_and_only_stmt (alt_middle_bb);
1940 if (!assign
1941 || gimple_code (assign) != GIMPLE_ASSIGN)
1942 return false;
1944 /* There cannot be any phi nodes in the alt middle bb. */
1945 if (!gimple_seq_empty_p (phi_nodes (alt_middle_bb)))
1946 return false;
1948 alt_lhs = gimple_assign_lhs (assign);
1949 if (ass_code != gimple_assign_rhs_code (assign))
1950 return false;
1952 if (!operand_equal_for_phi_arg_p (lhs, arg_true)
1953 || !operand_equal_for_phi_arg_p (alt_lhs, arg_false))
1954 return false;
1956 alt_op0 = gimple_assign_rhs1 (assign);
1957 alt_op1 = gimple_assign_rhs2 (assign);
1959 if ((operand_equal_for_phi_arg_p (op0, smaller)
1960 || (alt_smaller
1961 && operand_equal_for_phi_arg_p (op0, alt_smaller)))
1962 && (operand_equal_for_phi_arg_p (alt_op0, larger)
1963 || (alt_larger
1964 && operand_equal_for_phi_arg_p (alt_op0, alt_larger))))
1966 /* We got here if the condition is true, i.e., SMALLER < LARGER. */
1967 if (!operand_equal_for_phi_arg_p (op1, alt_op1))
1968 return false;
1970 if ((arg0 = strip_bit_not (op0)) != NULL
1971 && (arg1 = strip_bit_not (alt_op0)) != NULL
1972 && (bound = strip_bit_not (op1)) != NULL)
1974 minmax = MAX_EXPR;
1975 ass_code = invert_minmax_code (ass_code);
1976 invert = true;
1978 else
1980 bound = op1;
1981 minmax = MIN_EXPR;
1982 arg0 = op0;
1983 arg1 = alt_op0;
1986 else if ((operand_equal_for_phi_arg_p (op0, larger)
1987 || (alt_larger
1988 && operand_equal_for_phi_arg_p (op0, alt_larger)))
1989 && (operand_equal_for_phi_arg_p (alt_op0, smaller)
1990 || (alt_smaller
1991 && operand_equal_for_phi_arg_p (alt_op0, alt_smaller))))
1993 /* We got here if the condition is true, i.e., SMALLER > LARGER. */
1994 if (!operand_equal_for_phi_arg_p (op1, alt_op1))
1995 return false;
1997 if ((arg0 = strip_bit_not (op0)) != NULL
1998 && (arg1 = strip_bit_not (alt_op0)) != NULL
1999 && (bound = strip_bit_not (op1)) != NULL)
2001 minmax = MIN_EXPR;
2002 ass_code = invert_minmax_code (ass_code);
2003 invert = true;
2005 else
2007 bound = op1;
2008 minmax = MAX_EXPR;
2009 arg0 = op0;
2010 arg1 = alt_op0;
2013 else
2014 return false;
2016 /* Emit the statement to compute min/max. */
2017 location_t locus = gimple_location (last_nondebug_stmt (cond_bb));
2018 gimple_seq stmts = NULL;
2019 tree phi_result = PHI_RESULT (phi);
2020 result = gimple_build (&stmts, locus, minmax, TREE_TYPE (phi_result),
2021 arg0, arg1);
2022 result = gimple_build (&stmts, locus, ass_code, TREE_TYPE (phi_result),
2023 result, bound);
2024 if (invert)
2025 result = gimple_build (&stmts, locus, BIT_NOT_EXPR, TREE_TYPE (phi_result),
2026 result);
2028 gsi = gsi_last_bb (cond_bb);
2029 gsi_insert_seq_before (&gsi, stmts, GSI_NEW_STMT);
2031 replace_phi_edge_with_variable (cond_bb, e1, phi, result);
2033 return true;
2035 else if (!threeway_p
2036 || empty_block_p (alt_middle_bb))
2038 /* Recognize the following case, assuming d <= u:
2040 if (a <= u)
2041 b = MAX (a, d);
2042 x = PHI <b, u>
2044 This is equivalent to
2046 b = MAX (a, d);
2047 x = MIN (b, u); */
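/* A hypothetical C source example, a clamp assuming d <= u:

   #define MAX(a, b) ((a) > (b) ? (a) : (b))
   int clamp (int a, int d, int u)
   {
     return a <= u ? MAX (a, d) : u;  // == MIN (MAX (a, d), u)
   }
*/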
2049 gimple *assign = last_and_only_stmt (middle_bb);
2050 tree lhs, op0, op1, bound;
2052 if (!single_pred_p (middle_bb))
2053 return false;
2055 if (!assign
2056 || gimple_code (assign) != GIMPLE_ASSIGN)
2057 return false;
2059 /* There cannot be any phi nodes in the middle bb. */
2060 if (!gimple_seq_empty_p (phi_nodes (middle_bb)))
2061 return false;
2063 lhs = gimple_assign_lhs (assign);
2064 ass_code = gimple_assign_rhs_code (assign);
2065 if (ass_code != MAX_EXPR && ass_code != MIN_EXPR)
2066 return false;
2067 op0 = gimple_assign_rhs1 (assign);
2068 op1 = gimple_assign_rhs2 (assign);
2070 if (true_edge->src == middle_bb)
2072 /* We got here if the condition is true, i.e., SMALLER < LARGER. */
2073 if (!operand_equal_for_phi_arg_p (lhs, arg_true))
2074 return false;
2076 if (operand_equal_for_phi_arg_p (arg_false, larger)
2077 || (alt_larger
2078 && operand_equal_for_phi_arg_p (arg_false, alt_larger)))
2080 /* Case
2082 if (smaller < larger)
2084 r' = MAX_EXPR (smaller, bound)
2086 r = PHI <r', larger> --> to be turned to MIN_EXPR. */
2087 if (ass_code != MAX_EXPR)
2088 return false;
2090 minmax = MIN_EXPR;
2091 if (operand_equal_for_phi_arg_p (op0, smaller)
2092 || (alt_smaller
2093 && operand_equal_for_phi_arg_p (op0, alt_smaller)))
2094 bound = op1;
2095 else if (operand_equal_for_phi_arg_p (op1, smaller)
2096 || (alt_smaller
2097 && operand_equal_for_phi_arg_p (op1, alt_smaller)))
2098 bound = op0;
2099 else
2100 return false;
2102 /* We need BOUND <= LARGER. */
2103 if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
2104 bound, arg_false)))
2105 return false;
2107 else if (operand_equal_for_phi_arg_p (arg_false, smaller)
2108 || (alt_smaller
2109 && operand_equal_for_phi_arg_p (arg_false, alt_smaller)))
2111 /* Case
2113 if (smaller < larger)
2115 r' = MIN_EXPR (larger, bound)
2117 r = PHI <r', smaller> --> to be turned to MAX_EXPR. */
2118 if (ass_code != MIN_EXPR)
2119 return false;
2121 minmax = MAX_EXPR;
2122 if (operand_equal_for_phi_arg_p (op0, larger)
2123 || (alt_larger
2124 && operand_equal_for_phi_arg_p (op0, alt_larger)))
2125 bound = op1;
2126 else if (operand_equal_for_phi_arg_p (op1, larger)
2127 || (alt_larger
2128 && operand_equal_for_phi_arg_p (op1, alt_larger)))
2129 bound = op0;
2130 else
2131 return false;
2133 /* We need BOUND >= SMALLER. */
2134 if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
2135 bound, arg_false)))
2136 return false;
2138 else
2139 return false;
2141 else
2143 /* We got here if the condition is false, i.e., SMALLER > LARGER. */
2144 if (!operand_equal_for_phi_arg_p (lhs, arg_false))
2145 return false;
2147 if (operand_equal_for_phi_arg_p (arg_true, larger)
2148 || (alt_larger
2149 && operand_equal_for_phi_arg_p (arg_true, alt_larger)))
2151 /* Case
2153 if (smaller > larger)
2155 r' = MIN_EXPR (smaller, bound)
2157 r = PHI <r', larger> --> to be turned to MAX_EXPR. */
2158 if (ass_code != MIN_EXPR)
2159 return false;
2161 minmax = MAX_EXPR;
2162 if (operand_equal_for_phi_arg_p (op0, smaller)
2163 || (alt_smaller
2164 && operand_equal_for_phi_arg_p (op0, alt_smaller)))
2165 bound = op1;
2166 else if (operand_equal_for_phi_arg_p (op1, smaller)
2167 || (alt_smaller
2168 && operand_equal_for_phi_arg_p (op1, alt_smaller)))
2169 bound = op0;
2170 else
2171 return false;
2173 /* We need BOUND >= LARGER. */
2174 if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
2175 bound, arg_true)))
2176 return false;
2178 else if (operand_equal_for_phi_arg_p (arg_true, smaller)
2179 || (alt_smaller
2180 && operand_equal_for_phi_arg_p (arg_true, alt_smaller)))
2182 /* Case
2184 if (smaller > larger)
2186 r' = MAX_EXPR (larger, bound)
2188 r = PHI <r', smaller> --> to be turned to MIN_EXPR. */
2189 if (ass_code != MAX_EXPR)
2190 return false;
2192 minmax = MIN_EXPR;
2193 if (operand_equal_for_phi_arg_p (op0, larger))
2194 bound = op1;
2195 else if (operand_equal_for_phi_arg_p (op1, larger))
2196 bound = op0;
2197 else
2198 return false;
2200 /* We need BOUND <= SMALLER. */
2201 if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
2202 bound, arg_true)))
2203 return false;
2205 else
2206 return false;
2209 /* Move the statement from the middle block. */
2210 gsi = gsi_last_bb (cond_bb);
2211 gsi_from = gsi_last_nondebug_bb (middle_bb);
2212 reset_flow_sensitive_info (SINGLE_SSA_TREE_OPERAND (gsi_stmt (gsi_from),
2213 SSA_OP_DEF));
2214 gsi_move_before (&gsi_from, &gsi);
2216 else
2217 return false;
2219 /* Emit the statement to compute min/max. */
2220 gimple_seq stmts = NULL;
2221 tree phi_result = PHI_RESULT (phi);
2223 /* When we can't use a MIN/MAX_EXPR, still make sure the expression
2224 stays in a form recognized by ISAs that map to the IEEE
2225 x > y ? x : y semantics (which are not IEEE max semantics). */
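/* E.g. (hypothetical): double f (double x, double y) { return x < y ? x : y; }
   must return its second argument both for f (-0.0, 0.0) and for
   f (5.0, NaN), which a bare MIN_EXPR does not guarantee, so the
   COND_EXPR form is kept in that case.  */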
2226 if (HONOR_NANS (type) || HONOR_SIGNED_ZEROS (type))
2228 result = gimple_build (&stmts, cmp, boolean_type_node,
2229 gimple_cond_lhs (cond), rhs);
2230 result = gimple_build (&stmts, COND_EXPR, TREE_TYPE (phi_result),
2231 result, arg_true, arg_false);
2233 else
2234 result = gimple_build (&stmts, minmax, TREE_TYPE (phi_result), arg0, arg1);
2236 gsi = gsi_last_bb (cond_bb);
2237 gsi_insert_seq_before (&gsi, stmts, GSI_NEW_STMT);
2239 replace_phi_edge_with_variable (cond_bb, e1, phi, result);
2241 return true;
2244 /* Attempt to optimize (x <=> y) cmp 0 and similar comparisons.
2245 For strong ordering <=> try to match something like:
2246 <bb 2> : // cond3_bb (== cond2_bb)
2247 if (x_4(D) != y_5(D))
2248 goto <bb 3>; [INV]
2249 else
2250 goto <bb 6>; [INV]
2252 <bb 3> : // cond_bb
2253 if (x_4(D) < y_5(D))
2254 goto <bb 6>; [INV]
2255 else
2256 goto <bb 4>; [INV]
2258 <bb 4> : // middle_bb
2260 <bb 6> : // phi_bb
2261 # iftmp.0_2 = PHI <1(4), 0(2), -1(3)>
2262 _1 = iftmp.0_2 == 0;
2264 and for partial ordering <=> something like:
2266 <bb 2> : // cond3_bb
2267 if (a_3(D) == b_5(D))
2268 goto <bb 6>; [50.00%]
2269 else
2270 goto <bb 3>; [50.00%]
2272 <bb 3> [local count: 536870913]: // cond2_bb
2273 if (a_3(D) < b_5(D))
2274 goto <bb 6>; [50.00%]
2275 else
2276 goto <bb 4>; [50.00%]
2278 <bb 4> [local count: 268435456]: // cond_bb
2279 if (a_3(D) > b_5(D))
2280 goto <bb 6>; [50.00%]
2281 else
2282 goto <bb 5>; [50.00%]
2284 <bb 5> [local count: 134217728]: // middle_bb
2286 <bb 6> [local count: 1073741824]: // phi_bb
2287 # SR.27_4 = PHI <0(2), -1(3), 1(4), 2(5)>
2288 _2 = SR.27_4 > 0; */
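/* A hypothetical C++ source example of the strong ordering case:

   #include <compare>
   bool eq (int x, int y) { return (x <=> y) == 0; }

   Here the PHI of {-1, 0, 1} and the comparison of its result
   against 0 collapse back into the single test x == y.  */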
2290 static bool
2291 spaceship_replacement (basic_block cond_bb, basic_block middle_bb,
2292 edge e0, edge e1, gphi *phi,
2293 tree arg0, tree arg1)
2295 tree phires = PHI_RESULT (phi);
2296 if (!INTEGRAL_TYPE_P (TREE_TYPE (phires))
2297 || TYPE_UNSIGNED (TREE_TYPE (phires))
2298 || !tree_fits_shwi_p (arg0)
2299 || !tree_fits_shwi_p (arg1)
2300 || !IN_RANGE (tree_to_shwi (arg0), -1, 2)
2301 || !IN_RANGE (tree_to_shwi (arg1), -1, 2))
2302 return false;
2304 basic_block phi_bb = gimple_bb (phi);
2305 gcc_assert (phi_bb == e0->dest && phi_bb == e1->dest);
2306 if (!IN_RANGE (EDGE_COUNT (phi_bb->preds), 3, 4))
2307 return false;
2309 use_operand_p use_p;
2310 gimple *use_stmt;
2311 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (phires))
2312 return false;
2313 if (!single_imm_use (phires, &use_p, &use_stmt))
2314 return false;
2315 enum tree_code cmp;
2316 tree lhs, rhs;
2317 gimple *orig_use_stmt = use_stmt;
2318 tree orig_use_lhs = NULL_TREE;
2319 int prec = TYPE_PRECISION (TREE_TYPE (phires));
2320 bool is_cast = false;
2322 /* Deal with the case when match.pd has rewritten the (res & ~1) == 0
2323 into res <= 1 and has left a type-cast for signed types. */
2324 if (gimple_assign_cast_p (use_stmt))
2326 orig_use_lhs = gimple_assign_lhs (use_stmt);
2327 /* match.pd would have only done this for a signed type,
2328 so the conversion must be to an unsigned one. */
2329 tree ty1 = TREE_TYPE (gimple_assign_rhs1 (use_stmt));
2330 tree ty2 = TREE_TYPE (orig_use_lhs);
2332 if (!TYPE_UNSIGNED (ty2) || !INTEGRAL_TYPE_P (ty2))
2333 return false;
2334 if (TYPE_PRECISION (ty1) > TYPE_PRECISION (ty2))
2335 return false;
2336 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (orig_use_lhs))
2337 return false;
2338 if (!single_imm_use (orig_use_lhs, &use_p, &use_stmt))
2339 return false;
2341 is_cast = true;
2343 else if (is_gimple_assign (use_stmt)
2344 && gimple_assign_rhs_code (use_stmt) == BIT_AND_EXPR
2345 && TREE_CODE (gimple_assign_rhs2 (use_stmt)) == INTEGER_CST
2346 && (wi::to_wide (gimple_assign_rhs2 (use_stmt))
2347 == wi::shifted_mask (1, prec - 1, false, prec)))
2349 /* For partial_ordering result operator>= with unspec as second
2350 argument is (res & 1) == res, folded by match.pd into
2351 (res & ~1) == 0. */
2352 orig_use_lhs = gimple_assign_lhs (use_stmt);
2353 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (orig_use_lhs))
2354 return false;
2355 if (!single_imm_use (orig_use_lhs, &use_p, &use_stmt))
2356 return false;
2358 if (gimple_code (use_stmt) == GIMPLE_COND)
2360 cmp = gimple_cond_code (use_stmt);
2361 lhs = gimple_cond_lhs (use_stmt);
2362 rhs = gimple_cond_rhs (use_stmt);
2364 else if (is_gimple_assign (use_stmt))
2366 if (gimple_assign_rhs_class (use_stmt) == GIMPLE_BINARY_RHS)
2368 cmp = gimple_assign_rhs_code (use_stmt);
2369 lhs = gimple_assign_rhs1 (use_stmt);
2370 rhs = gimple_assign_rhs2 (use_stmt);
2372 else if (gimple_assign_rhs_code (use_stmt) == COND_EXPR)
2374 tree cond = gimple_assign_rhs1 (use_stmt);
2375 if (!COMPARISON_CLASS_P (cond))
2376 return false;
2377 cmp = TREE_CODE (cond);
2378 lhs = TREE_OPERAND (cond, 0);
2379 rhs = TREE_OPERAND (cond, 1);
2381 else
2382 return false;
2384 else
2385 return false;
2386 switch (cmp)
2388 case EQ_EXPR:
2389 case NE_EXPR:
2390 case LT_EXPR:
2391 case GT_EXPR:
2392 case LE_EXPR:
2393 case GE_EXPR:
2394 break;
2395 default:
2396 return false;
2398 if (lhs != (orig_use_lhs ? orig_use_lhs : phires)
2399 || !tree_fits_shwi_p (rhs)
2400 || !IN_RANGE (tree_to_shwi (rhs), -1, 1))
2401 return false;
2403 if (is_cast)
2405 if (TREE_CODE (rhs) != INTEGER_CST)
2406 return false;
2407 /* As for -ffast-math we assume a return value of 2 to be
2408 impossible, canonicalize (unsigned) res <= 1U and
2409 (unsigned) res < 2U into res >= 0, and (unsigned) res > 1U
2410 and (unsigned) res >= 2U into res < 0. */
2411 switch (cmp)
2413 case LE_EXPR:
2414 if (!integer_onep (rhs))
2415 return false;
2416 cmp = GE_EXPR;
2417 break;
2418 case LT_EXPR:
2419 if (wi::ne_p (wi::to_widest (rhs), 2))
2420 return false;
2421 cmp = GE_EXPR;
2422 break;
2423 case GT_EXPR:
2424 if (!integer_onep (rhs))
2425 return false;
2426 cmp = LT_EXPR;
2427 break;
2428 case GE_EXPR:
2429 if (wi::ne_p (wi::to_widest (rhs), 2))
2430 return false;
2431 cmp = LT_EXPR;
2432 break;
2433 default:
2434 return false;
2436 rhs = build_zero_cst (TREE_TYPE (phires));
2438 else if (orig_use_lhs)
2440 if ((cmp != EQ_EXPR && cmp != NE_EXPR) || !integer_zerop (rhs))
2441 return false;
2442 /* As for -ffast-math we assume a return value of 2 to be
2443 impossible, canonicalize (res & ~1) == 0 into res >= 0
2444 and (res & ~1) != 0 into res < 0. */
2445 cmp = cmp == EQ_EXPR ? GE_EXPR : LT_EXPR;
2448 if (!empty_block_p (middle_bb))
2449 return false;
2451 gcond *cond1 = as_a <gcond *> (*gsi_last_bb (cond_bb));
2452 enum tree_code cmp1 = gimple_cond_code (cond1);
2453 switch (cmp1)
2455 case LT_EXPR:
2456 case LE_EXPR:
2457 case GT_EXPR:
2458 case GE_EXPR:
2459 break;
2460 default:
2461 return false;
2463 tree lhs1 = gimple_cond_lhs (cond1);
2464 tree rhs1 = gimple_cond_rhs (cond1);
2465 /* The optimization may be unsafe due to NaNs. */
2466 if (HONOR_NANS (TREE_TYPE (lhs1)))
2467 return false;
2468 if (TREE_CODE (lhs1) == SSA_NAME && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs1))
2469 return false;
2470 if (TREE_CODE (rhs1) == SSA_NAME && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs1))
2471 return false;
2473 if (!single_pred_p (cond_bb) || !cond_only_block_p (cond_bb))
2474 return false;
2476 basic_block cond2_bb = single_pred (cond_bb);
2477 if (EDGE_COUNT (cond2_bb->succs) != 2)
2478 return false;
2479 edge cond2_phi_edge;
2480 if (EDGE_SUCC (cond2_bb, 0)->dest == cond_bb)
2482 if (EDGE_SUCC (cond2_bb, 1)->dest != phi_bb)
2483 return false;
2484 cond2_phi_edge = EDGE_SUCC (cond2_bb, 1);
2486 else if (EDGE_SUCC (cond2_bb, 0)->dest != phi_bb)
2487 return false;
2488 else
2489 cond2_phi_edge = EDGE_SUCC (cond2_bb, 0);
2490 tree arg2 = gimple_phi_arg_def (phi, cond2_phi_edge->dest_idx);
2491 if (!tree_fits_shwi_p (arg2))
2492 return false;
2493 gcond *cond2 = safe_dyn_cast <gcond *> (*gsi_last_bb (cond2_bb));
2494 if (!cond2)
2495 return false;
2496 enum tree_code cmp2 = gimple_cond_code (cond2);
2497 tree lhs2 = gimple_cond_lhs (cond2);
2498 tree rhs2 = gimple_cond_rhs (cond2);
2499 if (lhs2 == lhs1)
2501 if (!operand_equal_p (rhs2, rhs1, 0))
2503 if ((cmp2 == EQ_EXPR || cmp2 == NE_EXPR)
2504 && TREE_CODE (rhs1) == INTEGER_CST
2505 && TREE_CODE (rhs2) == INTEGER_CST)
2507 /* For integers, we can have cond2 x == 5
2508 and cond1 x < 5, x <= 4, x <= 5, x < 6,
2509 x > 5, x >= 6, x >= 5 or x > 4. */
2510 if (tree_int_cst_lt (rhs1, rhs2))
2512 if (wi::ne_p (wi::to_wide (rhs1) + 1, wi::to_wide (rhs2)))
2513 return false;
2514 if (cmp1 == LE_EXPR)
2515 cmp1 = LT_EXPR;
2516 else if (cmp1 == GT_EXPR)
2517 cmp1 = GE_EXPR;
2518 else
2519 return false;
2521 else
2523 gcc_checking_assert (tree_int_cst_lt (rhs2, rhs1));
2524 if (wi::ne_p (wi::to_wide (rhs2) + 1, wi::to_wide (rhs1)))
2525 return false;
2526 if (cmp1 == LT_EXPR)
2527 cmp1 = LE_EXPR;
2528 else if (cmp1 == GE_EXPR)
2529 cmp1 = GT_EXPR;
2530 else
2531 return false;
2533 rhs1 = rhs2;
2535 else
2536 return false;
2539 else if (lhs2 == rhs1)
2541 if (rhs2 != lhs1)
2542 return false;
2544 else
2545 return false;
2547 tree arg3 = arg2;
2548 basic_block cond3_bb = cond2_bb;
2549 edge cond3_phi_edge = cond2_phi_edge;
2550 gcond *cond3 = cond2;
2551 enum tree_code cmp3 = cmp2;
2552 tree lhs3 = lhs2;
2553 tree rhs3 = rhs2;
2554 if (EDGE_COUNT (phi_bb->preds) == 4)
2556 if (absu_hwi (tree_to_shwi (arg2)) != 1)
2557 return false;
2558 if (e1->flags & EDGE_TRUE_VALUE)
2560 if (tree_to_shwi (arg0) != 2
2561 || absu_hwi (tree_to_shwi (arg1)) != 1
2562 || wi::to_widest (arg1) == wi::to_widest (arg2))
2563 return false;
2565 else if (tree_to_shwi (arg1) != 2
2566 || absu_hwi (tree_to_shwi (arg0)) != 1
2567 || wi::to_widest (arg0) == wi::to_widest (arg2))
2568 return false;
2569 switch (cmp2)
2571 case LT_EXPR:
2572 case LE_EXPR:
2573 case GT_EXPR:
2574 case GE_EXPR:
2575 break;
2576 default:
2577 return false;
2579 /* if (x < y) goto phi_bb; else fallthru;
2580 if (x > y) goto phi_bb; else fallthru;
2581 bbx:;
2582 phi_bb:;
2583 is ok, but if x and y are swapped in one of the comparisons,
2584 or the comparisons are the same and operands not swapped,
2585 or the true and false edges are swapped, it is not. */
2586 if ((lhs2 == lhs1)
2587 ^ (((cond2_phi_edge->flags
2588 & ((cmp2 == LT_EXPR || cmp2 == LE_EXPR)
2589 ? EDGE_TRUE_VALUE : EDGE_FALSE_VALUE)) != 0)
2590 != ((e1->flags
2591 & ((cmp1 == LT_EXPR || cmp1 == LE_EXPR)
2592 ? EDGE_TRUE_VALUE : EDGE_FALSE_VALUE)) != 0)))
2593 return false;
2594 if (!single_pred_p (cond2_bb) || !cond_only_block_p (cond2_bb))
2595 return false;
2596 cond3_bb = single_pred (cond2_bb);
2598 if (EDGE_COUNT (cond3_bb->succs) != 2)
2598 return false;
2599 if (EDGE_SUCC (cond3_bb, 0)->dest == cond2_bb)
2601 if (EDGE_SUCC (cond3_bb, 1)->dest != phi_bb)
2602 return false;
2603 cond3_phi_edge = EDGE_SUCC (cond3_bb, 1);
2605 else if (EDGE_SUCC (cond3_bb, 0)->dest != phi_bb)
2606 return false;
2607 else
2608 cond3_phi_edge = EDGE_SUCC (cond3_bb, 0);
2609 arg3 = gimple_phi_arg_def (phi, cond3_phi_edge->dest_idx);
2610 cond3 = safe_dyn_cast <gcond *> (*gsi_last_bb (cond3_bb));
2611 if (!cond3)
2612 return false;
2613 cmp3 = gimple_cond_code (cond3);
2614 lhs3 = gimple_cond_lhs (cond3);
2615 rhs3 = gimple_cond_rhs (cond3);
2616 if (lhs3 == lhs1)
2618 if (!operand_equal_p (rhs3, rhs1, 0))
2619 return false;
2621 else if (lhs3 == rhs1)
2623 if (rhs3 != lhs1)
2624 return false;
2626 else
2627 return false;
2629 else if (absu_hwi (tree_to_shwi (arg0)) != 1
2630 || absu_hwi (tree_to_shwi (arg1)) != 1
2631 || wi::to_widest (arg0) == wi::to_widest (arg1))
2632 return false;
2634 if (!integer_zerop (arg3) || (cmp3 != EQ_EXPR && cmp3 != NE_EXPR))
2635 return false;
2636 if ((cond3_phi_edge->flags & (cmp3 == EQ_EXPR
2637 ? EDGE_TRUE_VALUE : EDGE_FALSE_VALUE)) == 0)
2638 return false;
2640 /* lhs1 one_cmp rhs1 results in phires of 1. */
2641 enum tree_code one_cmp;
2642 if ((cmp1 == LT_EXPR || cmp1 == LE_EXPR)
2643 ^ (!integer_onep ((e1->flags & EDGE_TRUE_VALUE) ? arg1 : arg0)))
2644 one_cmp = LT_EXPR;
2645 else
2646 one_cmp = GT_EXPR;
2648 enum tree_code res_cmp;
2649 switch (cmp)
2651 case EQ_EXPR:
2652 if (integer_zerop (rhs))
2653 res_cmp = EQ_EXPR;
2654 else if (integer_minus_onep (rhs))
2655 res_cmp = one_cmp == LT_EXPR ? GT_EXPR : LT_EXPR;
2656 else if (integer_onep (rhs))
2657 res_cmp = one_cmp;
2658 else
2659 return false;
2660 break;
2661 case NE_EXPR:
2662 if (integer_zerop (rhs))
2663 res_cmp = NE_EXPR;
2664 else if (integer_minus_onep (rhs))
2665 res_cmp = one_cmp == LT_EXPR ? LE_EXPR : GE_EXPR;
2666 else if (integer_onep (rhs))
2667 res_cmp = one_cmp == LT_EXPR ? GE_EXPR : LE_EXPR;
2668 else
2669 return false;
2670 break;
2671 case LT_EXPR:
2672 if (integer_onep (rhs))
2673 res_cmp = one_cmp == LT_EXPR ? GE_EXPR : LE_EXPR;
2674 else if (integer_zerop (rhs))
2675 res_cmp = one_cmp == LT_EXPR ? GT_EXPR : LT_EXPR;
2676 else
2677 return false;
2678 break;
2679 case LE_EXPR:
2680 if (integer_zerop (rhs))
2681 res_cmp = one_cmp == LT_EXPR ? GE_EXPR : LE_EXPR;
2682 else if (integer_minus_onep (rhs))
2683 res_cmp = one_cmp == LT_EXPR ? GT_EXPR : LT_EXPR;
2684 else
2685 return false;
2686 break;
2687 case GT_EXPR:
2688 if (integer_minus_onep (rhs))
2689 res_cmp = one_cmp == LT_EXPR ? LE_EXPR : GE_EXPR;
2690 else if (integer_zerop (rhs))
2691 res_cmp = one_cmp;
2692 else
2693 return false;
2694 break;
2695 case GE_EXPR:
2696 if (integer_zerop (rhs))
2697 res_cmp = one_cmp == LT_EXPR ? LE_EXPR : GE_EXPR;
2698 else if (integer_onep (rhs))
2699 res_cmp = one_cmp;
2700 else
2701 return false;
2702 break;
2703 default:
2704 gcc_unreachable ();
2707 if (gimple_code (use_stmt) == GIMPLE_COND)
2709 gcond *use_cond = as_a <gcond *> (use_stmt);
2710 gimple_cond_set_code (use_cond, res_cmp);
2711 gimple_cond_set_lhs (use_cond, lhs1);
2712 gimple_cond_set_rhs (use_cond, rhs1);
2714 else if (gimple_assign_rhs_class (use_stmt) == GIMPLE_BINARY_RHS)
2716 gimple_assign_set_rhs_code (use_stmt, res_cmp);
2717 gimple_assign_set_rhs1 (use_stmt, lhs1);
2718 gimple_assign_set_rhs2 (use_stmt, rhs1);
2720 else
2722 tree cond = build2 (res_cmp, TREE_TYPE (gimple_assign_rhs1 (use_stmt)),
2723 lhs1, rhs1);
2724 gimple_assign_set_rhs1 (use_stmt, cond);
2726 update_stmt (use_stmt);
2728 if (MAY_HAVE_DEBUG_BIND_STMTS)
2730 use_operand_p use_p;
2731 imm_use_iterator iter;
2732 bool has_debug_uses = false;
2733 bool has_cast_debug_uses = false;
2734 FOR_EACH_IMM_USE_FAST (use_p, iter, phires)
2736 gimple *use_stmt = USE_STMT (use_p);
2737 if (orig_use_lhs && use_stmt == orig_use_stmt)
2738 continue;
2739 gcc_assert (is_gimple_debug (use_stmt));
2740 has_debug_uses = true;
2741 break;
2743 if (orig_use_lhs)
2745 if (!has_debug_uses || is_cast)
2746 FOR_EACH_IMM_USE_FAST (use_p, iter, orig_use_lhs)
2748 gimple *use_stmt = USE_STMT (use_p);
2749 gcc_assert (is_gimple_debug (use_stmt));
2750 has_debug_uses = true;
2751 if (is_cast)
2752 has_cast_debug_uses = true;
2754 gimple_stmt_iterator gsi = gsi_for_stmt (orig_use_stmt);
2755 tree zero = build_zero_cst (TREE_TYPE (orig_use_lhs));
2756 gimple_assign_set_rhs_with_ops (&gsi, INTEGER_CST, zero);
2757 update_stmt (orig_use_stmt);
2760 if (has_debug_uses)
2762 /* If there are debug uses, emit something like:
2763 # DEBUG D#1 => i_2(D) > j_3(D) ? 1 : -1
2764 # DEBUG D#2 => i_2(D) == j_3(D) ? 0 : D#1
2765 where > stands for the comparison that yielded 1
2766 and replace debug uses of phi result with that D#2.
2767 Ignore the value of 2, because if NaNs aren't expected,
2768 all floating point numbers should be comparable. */
2769 gimple_stmt_iterator gsi = gsi_after_labels (gimple_bb (phi));
2770 tree type = TREE_TYPE (phires);
2771 tree temp1 = build_debug_expr_decl (type);
2772 tree t = build2 (one_cmp, boolean_type_node, lhs1, rhs2);
2773 t = build3 (COND_EXPR, type, t, build_one_cst (type),
2774 build_int_cst (type, -1));
2775 gimple *g = gimple_build_debug_bind (temp1, t, phi);
2776 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
2777 tree temp2 = build_debug_expr_decl (type);
2778 t = build2 (EQ_EXPR, boolean_type_node, lhs1, rhs2);
2779 t = build3 (COND_EXPR, type, t, build_zero_cst (type), temp1);
2780 g = gimple_build_debug_bind (temp2, t, phi);
2781 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
2782 replace_uses_by (phires, temp2);
2783 if (orig_use_lhs)
2785 if (has_cast_debug_uses)
2787 tree temp3 = make_node (DEBUG_EXPR_DECL);
2788 DECL_ARTIFICIAL (temp3) = 1;
2789 TREE_TYPE (temp3) = TREE_TYPE (orig_use_lhs);
2790 SET_DECL_MODE (temp3, TYPE_MODE (type));
2791 t = fold_convert (TREE_TYPE (temp3), temp2);
2792 g = gimple_build_debug_bind (temp3, t, phi);
2793 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
2794 replace_uses_by (orig_use_lhs, temp3);
2796 else
2797 replace_uses_by (orig_use_lhs, temp2);
2802 if (orig_use_lhs)
2804 gimple_stmt_iterator gsi = gsi_for_stmt (orig_use_stmt);
2805 gsi_remove (&gsi, true);
2808 gimple_stmt_iterator psi = gsi_for_stmt (phi);
2809 remove_phi_node (&psi, true);
2810 statistics_counter_event (cfun, "spaceship replacement", 1);
2812 return true;
2815 /* Optimize x ? __builtin_fun (x) : C, where C is __builtin_fun (0).
2816 Convert
2818 <bb 2>
2819 if (b_4(D) != 0)
2820 goto <bb 3>
2821 else
2822 goto <bb 4>
2824 <bb 3>
2825 _2 = (unsigned long) b_4(D);
2826 _9 = __builtin_popcountl (_2);
2828 _9 = __builtin_popcountl (b_4(D));
2830 <bb 4>
2831 c_12 = PHI <0(2), _9(3)>
2833 Into
2834 <bb 2>
2835 _2 = (unsigned long) b_4(D);
2836 _9 = __builtin_popcountl (_2);
2838 _9 = __builtin_popcountl (b_4(D));
2840 <bb 4>
2841 c_12 = PHI <_9(2)>
2843 Similarly for __builtin_clz or __builtin_ctz if
2844 C?Z_DEFINED_VALUE_AT_ZERO is 2 and the optab is present;
2845 instead of the 0 above, the value from that macro is used. */
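/* A hypothetical source-level example:

   int f (unsigned long b) { return b ? __builtin_popcountl (b) : 0; }

   can drop the test because __builtin_popcountl (0) is 0, leaving an
   unconditional call.  */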
2847 static bool
2848 cond_removal_in_builtin_zero_pattern (basic_block cond_bb,
2849 basic_block middle_bb,
2850 edge e1, edge e2, gphi *phi,
2851 tree arg0, tree arg1)
2853 gimple_stmt_iterator gsi, gsi_from;
2854 gimple *call;
2855 gimple *cast = NULL;
2856 tree lhs, arg;
2858 /* Check that
2859 _2 = (unsigned long) b_4(D);
2860 _9 = __builtin_popcountl (_2);
2862 _9 = __builtin_popcountl (b_4(D));
2863 are the only stmts in the middle_bb. */
2865 gsi = gsi_start_nondebug_after_labels_bb (middle_bb);
2866 if (gsi_end_p (gsi))
2867 return false;
2868 cast = gsi_stmt (gsi);
2869 gsi_next_nondebug (&gsi);
2870 if (!gsi_end_p (gsi))
2872 call = gsi_stmt (gsi);
2873 gsi_next_nondebug (&gsi);
2874 if (!gsi_end_p (gsi))
2875 return false;
2877 else
2879 call = cast;
2880 cast = NULL;
2883 /* Check that we have a popcount/clz/ctz builtin. */
2884 if (!is_gimple_call (call))
2885 return false;
2887 lhs = gimple_get_lhs (call);
2889 if (lhs == NULL_TREE)
2890 return false;
2892 combined_fn cfn = gimple_call_combined_fn (call);
2893 if (gimple_call_num_args (call) != 1
2894 && (gimple_call_num_args (call) != 2
2895 || (cfn != CFN_CLZ
2896 && cfn != CFN_CTZ)))
2897 return false;
2899 arg = gimple_call_arg (call, 0);
2901 internal_fn ifn = IFN_LAST;
2902 int val = 0;
2903 bool any_val = false;
2904 switch (cfn)
2906 case CFN_BUILT_IN_BSWAP16:
2907 case CFN_BUILT_IN_BSWAP32:
2908 case CFN_BUILT_IN_BSWAP64:
2909 case CFN_BUILT_IN_BSWAP128:
2910 CASE_CFN_FFS:
2911 CASE_CFN_PARITY:
2912 CASE_CFN_POPCOUNT:
2913 break;
2914 CASE_CFN_CLZ:
2915 if (INTEGRAL_TYPE_P (TREE_TYPE (arg)))
2917 tree type = TREE_TYPE (arg);
2918 if (TREE_CODE (type) == BITINT_TYPE)
2920 if (gimple_call_num_args (call) == 1)
2922 any_val = true;
2923 ifn = IFN_CLZ;
2924 break;
2926 if (!tree_fits_shwi_p (gimple_call_arg (call, 1)))
2927 return false;
2928 HOST_WIDE_INT at_zero = tree_to_shwi (gimple_call_arg (call, 1));
2929 if ((int) at_zero != at_zero)
2930 return false;
2931 ifn = IFN_CLZ;
2932 val = at_zero;
2933 break;
2935 if (direct_internal_fn_supported_p (IFN_CLZ, type, OPTIMIZE_FOR_BOTH)
2936 && CLZ_DEFINED_VALUE_AT_ZERO (SCALAR_INT_TYPE_MODE (type),
2937 val) == 2)
2939 ifn = IFN_CLZ;
2940 break;
2943 return false;
2944 CASE_CFN_CTZ:
2945 if (INTEGRAL_TYPE_P (TREE_TYPE (arg)))
2947 tree type = TREE_TYPE (arg);
2948 if (TREE_CODE (type) == BITINT_TYPE)
2950 if (gimple_call_num_args (call) == 1)
2952 any_val = true;
2953 ifn = IFN_CTZ;
2954 break;
2956 if (!tree_fits_shwi_p (gimple_call_arg (call, 1)))
2957 return false;
2958 HOST_WIDE_INT at_zero = tree_to_shwi (gimple_call_arg (call, 1));
2959 if ((int) at_zero != at_zero)
2960 return false;
2961 ifn = IFN_CTZ;
2962 val = at_zero;
2963 break;
2965 if (direct_internal_fn_supported_p (IFN_CTZ, type, OPTIMIZE_FOR_BOTH)
2966 && CTZ_DEFINED_VALUE_AT_ZERO (SCALAR_INT_TYPE_MODE (type),
2967 val) == 2)
2969 ifn = IFN_CTZ;
2970 break;
2973 return false;
2974 case CFN_BUILT_IN_CLRSB:
2975 val = TYPE_PRECISION (integer_type_node) - 1;
2976 break;
2977 case CFN_BUILT_IN_CLRSBL:
2978 val = TYPE_PRECISION (long_integer_type_node) - 1;
2979 break;
2980 case CFN_BUILT_IN_CLRSBLL:
2981 val = TYPE_PRECISION (long_long_integer_type_node) - 1;
2982 break;
2983 default:
2984 return false;
2987 if (cast)
2989 /* We have a stmt feeding the popcount/clz/ctz builtin;
2990 check that it really is a cast. */
2991 if (gimple_code (cast) != GIMPLE_ASSIGN
2992 || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (cast)))
2993 return false;
2994 /* Result of the cast stmt is the argument to the builtin. */
2995 if (arg != gimple_assign_lhs (cast))
2996 return false;
2997 arg = gimple_assign_rhs1 (cast);
3000 gcond *cond = dyn_cast <gcond *> (*gsi_last_bb (cond_bb));
3002 /* Cond_bb has a check for b_4 [!=|==] 0 before calling the popcount/clz/ctz
3003 builtin. */
3004 if (!cond
3005 || (gimple_cond_code (cond) != NE_EXPR
3006 && gimple_cond_code (cond) != EQ_EXPR)
3007 || !integer_zerop (gimple_cond_rhs (cond))
3008 || arg != gimple_cond_lhs (cond))
3009 return false;
3011 /* Canonicalize. */
3012 if ((e2->flags & EDGE_TRUE_VALUE
3013 && gimple_cond_code (cond) == NE_EXPR)
3014 || (e1->flags & EDGE_TRUE_VALUE
3015 && gimple_cond_code (cond) == EQ_EXPR))
3017 std::swap (arg0, arg1);
3018 std::swap (e1, e2);
3021 /* Check PHI arguments. */
3022 if (lhs != arg0
3023 || TREE_CODE (arg1) != INTEGER_CST)
3024 return false;
3025 if (any_val)
3027 if (!tree_fits_shwi_p (arg1))
3028 return false;
3029 HOST_WIDE_INT at_zero = tree_to_shwi (arg1);
3030 if ((int) at_zero != at_zero)
3031 return false;
3032 val = at_zero;
3034 else if (wi::to_wide (arg1) != val)
3035 return false;
3037 /* And insert the popcount/clz/ctz builtin and cast stmt before the
3038 cond_bb. */
3039 gsi = gsi_last_bb (cond_bb);
3040 if (cast)
3042 gsi_from = gsi_for_stmt (cast);
3043 gsi_move_before (&gsi_from, &gsi);
3044 reset_flow_sensitive_info (gimple_get_lhs (cast));
3046 gsi_from = gsi_for_stmt (call);
3047 if (ifn == IFN_LAST
3048 || (gimple_call_internal_p (call) && gimple_call_num_args (call) == 2))
3049 gsi_move_before (&gsi_from, &gsi);
3050 else
3052 /* For __builtin_c[lt]z* force .C[LT]Z ifn, because only
3053 the latter is well defined at zero. */
3054 call = gimple_build_call_internal (ifn, 2, gimple_call_arg (call, 0),
3055 build_int_cst (integer_type_node, val));
3056 gimple_call_set_lhs (call, lhs);
3057 gsi_insert_before (&gsi, call, GSI_SAME_STMT);
3058 gsi_remove (&gsi_from, true);
3060 reset_flow_sensitive_info (lhs);
3062 /* Now update the PHI and remove unneeded bbs. */
3063 replace_phi_edge_with_variable (cond_bb, e2, phi, lhs);
3064 return true;
3067 /* Auxiliary functions to determine the set of memory accesses which
3068 can't trap because they are preceded by accesses to the same memory
3069 portion. We do that for MEM_REFs, so we only need to track
3070 the SSA_NAME of the pointer indirectly referenced. The algorithm
3071 is simply a walk over all instructions in dominator order. When
3072 we see a MEM_REF we determine if we've already seen the same
3073 ref anywhere up to the root of the dominator tree. If we have, the
3074 current access can't trap. If we don't see any dominating access
3075 the current access might trap, but might also make later accesses
3076 non-trapping, so we remember it. We need to be careful with loads
3077 or stores, for instance a load might not trap, while a store would,
3078 so if we see a dominating read access this doesn't mean that a later
3079 write access would not trap. Hence we also need to differentiate the
3080 type of access(es) seen.
3082 ??? We currently are very conservative and assume that a load might
3083 trap even if a store doesn't (write-only memory). This probably is
3084 overly conservative.
3086 We currently support a special case: for !TREE_ADDRESSABLE automatic
3087 variables, we can ignore whether something is a load or store because
3088 the local stack is always writable. */
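/* For instance (hypothetical):

   int f (int *p, int flag)
   {
     int t = *p;
     if (flag)
       *p = t + 1;
     return t;
   }

   The unconditional load from *p dominates the conditional store to
   the same location, so the store cannot trap.  */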
3090 /* A hash-table of references (MEM_REF/ARRAY_REF/COMPONENT_REF), and in which
3091 basic block an *_REF through it was seen, which would constitute a
3092 no-trap region for same accesses.
3094 Size is needed to support 2 MEM_REFs of different types, like
3095 MEM<double>(s_1) and MEM<long>(s_1), which would compare equal with
3096 OEP_ADDRESS_OF. */
3097 struct ref_to_bb
3099 tree exp;
3100 HOST_WIDE_INT size;
3101 unsigned int phase;
3102 basic_block bb;
3105 /* Hashtable helpers. */
3107 struct refs_hasher : free_ptr_hash<ref_to_bb>
3109 static inline hashval_t hash (const ref_to_bb *);
3110 static inline bool equal (const ref_to_bb *, const ref_to_bb *);
3113 /* Used for quick clearing of the hash-table when we see calls.
3114 Hash entries with phase < nt_call_phase are invalid. */
3115 static unsigned int nt_call_phase;
3117 /* The hash function. */
3119 inline hashval_t
3120 refs_hasher::hash (const ref_to_bb *n)
3122 inchash::hash hstate;
3123 inchash::add_expr (n->exp, hstate, OEP_ADDRESS_OF);
3124 hstate.add_hwi (n->size);
3125 return hstate.end ();
3128 /* The equality function of *P1 and *P2. */
3130 inline bool
3131 refs_hasher::equal (const ref_to_bb *n1, const ref_to_bb *n2)
3133 return operand_equal_p (n1->exp, n2->exp, OEP_ADDRESS_OF)
3134 && n1->size == n2->size;
3137 class nontrapping_dom_walker : public dom_walker
3139 public:
3140 nontrapping_dom_walker (cdi_direction direction, hash_set<tree> *ps)
3141 : dom_walker (direction), m_nontrapping (ps), m_seen_refs (128)
3144 edge before_dom_children (basic_block) final override;
3145 void after_dom_children (basic_block) final override;
3147 private:
3149 /* We see the expression EXP in basic block BB. If it's an interesting
3150 expression (a MEM_REF through an SSA_NAME) possibly insert the
3151 expression into the set NONTRAP or the hash table of seen expressions.
3152 STORE is true if this expression is on the LHS, otherwise it's on
3153 the RHS. */
3154 void add_or_mark_expr (basic_block, tree, bool);
3156 hash_set<tree> *m_nontrapping;
3158 /* The hash table for remembering what we've seen. */
3159 hash_table<refs_hasher> m_seen_refs;
3162 /* Called by walk_dominator_tree, when entering the block BB. */
3163 edge
3164 nontrapping_dom_walker::before_dom_children (basic_block bb)
3166 edge e;
3167 edge_iterator ei;
3168 gimple_stmt_iterator gsi;
3170 /* If we haven't seen all our predecessors, clear the hash-table. */
3171 FOR_EACH_EDGE (e, ei, bb->preds)
3172 if ((((size_t)e->src->aux) & 2) == 0)
3174 nt_call_phase++;
3175 break;
3178 /* Mark this BB as being on the path to dominator root and as visited. */
3179 bb->aux = (void*)(1 | 2);
3181 /* And walk the statements in order. */
3182 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3184 gimple *stmt = gsi_stmt (gsi);
3186 if ((gimple_code (stmt) == GIMPLE_ASM && gimple_vdef (stmt))
3187 || (is_gimple_call (stmt)
3188 && (!nonfreeing_call_p (stmt) || !nonbarrier_call_p (stmt))))
3189 nt_call_phase++;
3190 else if (gimple_assign_single_p (stmt) && !gimple_has_volatile_ops (stmt))
3192 add_or_mark_expr (bb, gimple_assign_lhs (stmt), true);
3193 add_or_mark_expr (bb, gimple_assign_rhs1 (stmt), false);
3196 return NULL;
3199 /* Called by walk_dominator_tree, when basic block BB is exited. */
3200 void
3201 nontrapping_dom_walker::after_dom_children (basic_block bb)
3203 /* This BB isn't on the path to dominator root anymore. */
3204 bb->aux = (void*)2;
3207 /* We see the expression EXP in basic block BB. If it's an interesting
3208 expression of:
3209 1) MEM_REF
3210 2) ARRAY_REF
3211 3) COMPONENT_REF
3212 possibly insert the expression into the set NONTRAP or the hash table
3213 of seen expressions. STORE is true if this expression is on the LHS,
3214 otherwise it's on the RHS. */
3215 void
3216 nontrapping_dom_walker::add_or_mark_expr (basic_block bb, tree exp, bool store)
3218 HOST_WIDE_INT size;
3220 if ((TREE_CODE (exp) == MEM_REF || TREE_CODE (exp) == ARRAY_REF
3221 || TREE_CODE (exp) == COMPONENT_REF)
3222 && (size = int_size_in_bytes (TREE_TYPE (exp))) > 0)
3224 struct ref_to_bb map;
3225 ref_to_bb **slot;
3226 struct ref_to_bb *r2bb;
3227 basic_block found_bb = 0;
3229 if (!store)
3231 tree base = get_base_address (exp);
3232 /* Only record a LOAD of a local variable whose address is not
3233 taken, as the local stack is always writable. This allows cselim
3234 on a STORE with a dominating LOAD. */
3235 if (!auto_var_p (base) || TREE_ADDRESSABLE (base))
3236 return;
3239 /* Try to find the last seen *_REF, which can trap. */
3240 map.exp = exp;
3241 map.size = size;
3242 slot = m_seen_refs.find_slot (&map, INSERT);
3243 r2bb = *slot;
3244 if (r2bb && r2bb->phase >= nt_call_phase)
3245 found_bb = r2bb->bb;
3247 /* If we've found an earlier *_REF that might trap, _and_ it
3248 dominates EXP (it's in a basic block on the path from us to the
3249 dominator root), then EXP can't trap. */
3250 if (found_bb && (((size_t)found_bb->aux) & 1) == 1)
3252 m_nontrapping->add (exp);
3254 else
3256 /* EXP might trap, so insert it into the hash table. */
3257 if (r2bb)
3259 r2bb->phase = nt_call_phase;
3260 r2bb->bb = bb;
3262 else
3264 r2bb = XNEW (struct ref_to_bb);
3265 r2bb->phase = nt_call_phase;
3266 r2bb->bb = bb;
3267 r2bb->exp = exp;
3268 r2bb->size = size;
3269 *slot = r2bb;
3275 /* This is the entry point of gathering non-trapping memory accesses.
3276 It will do a dominator walk over the whole function, and it will
3277 make use of the bb->aux pointers. It returns a set of trees
3278 (the MEM_REFs themselves) which can't trap. */
3279 static hash_set<tree> *
3280 get_non_trapping (void)
3282 nt_call_phase = 0;
3283 hash_set<tree> *nontrap = new hash_set<tree>;
3285 nontrapping_dom_walker (CDI_DOMINATORS, nontrap)
3286 .walk (cfun->cfg->x_entry_block_ptr);
3288 clear_aux_for_blocks ();
3289 return nontrap;
3292 /* Do the main work of conditional store replacement. We already know
3293 that the recognized pattern looks like so:
3295 split:
3296 if (cond) goto MIDDLE_BB; else goto JOIN_BB (edge E1)
3297 MIDDLE_BB:
3298 something
3299 fallthrough (edge E0)
3300 JOIN_BB:
3301 some more
3303 We check that MIDDLE_BB contains only one store, that that store
3304 doesn't trap (not via NOTRAP, but via checking if an access to the same
3305 memory location dominates us, or the store is to a local addressable
3306 object) and that the store has a "simple" RHS. */
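/* A hypothetical source-level view of the transformation:

   if (cond)
     *p = v;

   becomes, once the access *p is known not to trap:

   tmp = cond ? v : *p;
   *p = tmp;

   turning the conditional store into an unconditional one.  */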
3308 static bool
3309 cond_store_replacement (basic_block middle_bb, basic_block join_bb,
3310 edge e0, edge e1, hash_set<tree> *nontrap)
3312 gimple *assign = last_and_only_stmt (middle_bb);
3313 tree lhs, rhs, name, name2;
3314 gphi *newphi;
3315 gassign *new_stmt;
3316 gimple_stmt_iterator gsi;
3317 location_t locus;
3319 /* Check if middle_bb contains only one store. */
3320 if (!assign
3321 || !gimple_assign_single_p (assign)
3322 || gimple_has_volatile_ops (assign))
3323 return false;
3325 /* And no PHI nodes so all uses in the single stmt are also
3326 available where we insert to. */
3327 if (!gimple_seq_empty_p (phi_nodes (middle_bb)))
3328 return false;
3330 locus = gimple_location (assign);
3331 lhs = gimple_assign_lhs (assign);
3332 rhs = gimple_assign_rhs1 (assign);
3333 if ((!REFERENCE_CLASS_P (lhs)
3334 && !DECL_P (lhs))
3335 || !is_gimple_reg_type (TREE_TYPE (lhs)))
3336 return false;
3338 /* Prove that we can move the store down. We could also check
3339 TREE_THIS_NOTRAP here, but in that case we also could move stores,
3340 whose value is not available readily, which we want to avoid. */
3341 if (!nontrap->contains (lhs))
3343 /* If LHS is an access to a local variable whose address is not
3344 taken (or when we allow data races) and known not to trap, we
3345 could always safely move down the store. */
3346 tree base = get_base_address (lhs);
3347 if (!auto_var_p (base)
3348 || (TREE_ADDRESSABLE (base) && !flag_store_data_races)
3349 || tree_could_trap_p (lhs))
3350 return false;
3353 /* Now we've checked the constraints, so do the transformation:
3354 1) Remove the single store. */
3355 gsi = gsi_for_stmt (assign);
3356 unlink_stmt_vdef (assign);
3357 gsi_remove (&gsi, true);
3358 release_defs (assign);
3360 /* Make both store and load use alias-set zero as we have to
3361 deal with the case of the store being a conditional change
3362 of the dynamic type. */
3363 lhs = unshare_expr (lhs);
3364 tree *basep = &lhs;
3365 while (handled_component_p (*basep))
3366 basep = &TREE_OPERAND (*basep, 0);
3367 if (TREE_CODE (*basep) == MEM_REF
3368 || TREE_CODE (*basep) == TARGET_MEM_REF)
3369 TREE_OPERAND (*basep, 1)
3370 = fold_convert (ptr_type_node, TREE_OPERAND (*basep, 1));
3371 else
3372 *basep = build2 (MEM_REF, TREE_TYPE (*basep),
3373 build_fold_addr_expr (*basep),
3374 build_zero_cst (ptr_type_node));
3376 /* 2) Insert a load from the memory of the store to the temporary
3377 on the edge which did not contain the store. */
3378 name = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
3379 new_stmt = gimple_build_assign (name, lhs);
3380 gimple_set_location (new_stmt, locus);
3381 lhs = unshare_expr (lhs);
3383 /* Set the no-warning bit on the rhs of the load to avoid uninit
3384 warnings. */
3385 tree rhs1 = gimple_assign_rhs1 (new_stmt);
3386 suppress_warning (rhs1, OPT_Wuninitialized);
3388 gsi_insert_on_edge (e1, new_stmt);
3390 /* 3) Create a PHI node at the join block, with one argument
3391 holding the old RHS, and the other holding the temporary
3392 where we stored the old memory contents. */
3393 name2 = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
3394 newphi = create_phi_node (name2, join_bb);
3395 add_phi_arg (newphi, rhs, e0, locus);
3396 add_phi_arg (newphi, name, e1, locus);
3398 new_stmt = gimple_build_assign (lhs, PHI_RESULT (newphi));
3400 /* 4) Insert that PHI node. */
3401 gsi = gsi_after_labels (join_bb);
3402 if (gsi_end_p (gsi))
3404 gsi = gsi_last_bb (join_bb);
3405 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
3407 else
3408 gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
3410 if (dump_file && (dump_flags & TDF_DETAILS))
3412 fprintf (dump_file, "\nConditional store replacement happened!");
3413 fprintf (dump_file, "\nReplaced the store with a load.");
3414 fprintf (dump_file, "\nInserted a new PHI statement in join block:\n");
3415 print_gimple_stmt (dump_file, new_stmt, 0, TDF_VOPS|TDF_MEMSYMS);
3417 statistics_counter_event (cfun, "conditional store replacement", 1);
3419 return true;
3422 /* Do the main work of conditional store replacement. */
3424 static bool
3425 cond_if_else_store_replacement_1 (basic_block then_bb, basic_block else_bb,
3426 basic_block join_bb, gimple *then_assign,
3427 gimple *else_assign)
3429 tree lhs_base, lhs, then_rhs, else_rhs, name;
3430 location_t then_locus, else_locus;
3431 gimple_stmt_iterator gsi;
3432 gphi *newphi;
3433 gassign *new_stmt;
3435 if (then_assign == NULL
3436 || !gimple_assign_single_p (then_assign)
3437 || gimple_clobber_p (then_assign)
3438 || gimple_has_volatile_ops (then_assign)
3439 || else_assign == NULL
3440 || !gimple_assign_single_p (else_assign)
3441 || gimple_clobber_p (else_assign)
3442 || gimple_has_volatile_ops (else_assign))
3443 return false;
3445 lhs = gimple_assign_lhs (then_assign);
3446 if (!is_gimple_reg_type (TREE_TYPE (lhs))
3447 || !operand_equal_p (lhs, gimple_assign_lhs (else_assign), 0))
3448 return false;
3450 lhs_base = get_base_address (lhs);
3451 if (lhs_base == NULL_TREE
3452 || (!DECL_P (lhs_base) && TREE_CODE (lhs_base) != MEM_REF))
3453 return false;
3455 then_rhs = gimple_assign_rhs1 (then_assign);
3456 else_rhs = gimple_assign_rhs1 (else_assign);
3457 then_locus = gimple_location (then_assign);
3458 else_locus = gimple_location (else_assign);
3460 /* Now we've checked the constraints, so do the transformation:
3461 1) Remove the stores. */
3462 gsi = gsi_for_stmt (then_assign);
3463 unlink_stmt_vdef (then_assign);
3464 gsi_remove (&gsi, true);
3465 release_defs (then_assign);
3467 gsi = gsi_for_stmt (else_assign);
3468 unlink_stmt_vdef (else_assign);
3469 gsi_remove (&gsi, true);
3470 release_defs (else_assign);
3472 /* 2) Create a PHI node at the join block, with one argument
3473 holding the old RHS, and the other holding the temporary
3474 where we stored the old memory contents. */
3475 name = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
3476 newphi = create_phi_node (name, join_bb);
3477 add_phi_arg (newphi, then_rhs, EDGE_SUCC (then_bb, 0), then_locus);
3478 add_phi_arg (newphi, else_rhs, EDGE_SUCC (else_bb, 0), else_locus);
3480 new_stmt = gimple_build_assign (lhs, PHI_RESULT (newphi));
3482 /* 3) Insert that PHI node. */
3483 gsi = gsi_after_labels (join_bb);
3484 if (gsi_end_p (gsi))
3486 gsi = gsi_last_bb (join_bb);
3487 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
3489 else
3490 gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
3492 statistics_counter_event (cfun, "if-then-else store replacement", 1);
3494 return true;
3497 /* Return the single store in BB with VDEF or NULL if there are
3498 other stores in the BB or loads following the store. */
3500 static gimple *
3501 single_trailing_store_in_bb (basic_block bb, tree vdef)
3503 if (SSA_NAME_IS_DEFAULT_DEF (vdef))
3504 return NULL;
3505 gimple *store = SSA_NAME_DEF_STMT (vdef);
3506 if (gimple_bb (store) != bb
3507 || gimple_code (store) == GIMPLE_PHI)
3508 return NULL;
3510 /* Verify there is no other store in this BB. */
3511 if (!SSA_NAME_IS_DEFAULT_DEF (gimple_vuse (store))
3512 && gimple_bb (SSA_NAME_DEF_STMT (gimple_vuse (store))) == bb
3513 && gimple_code (SSA_NAME_DEF_STMT (gimple_vuse (store))) != GIMPLE_PHI)
3514 return NULL;
3516 /* Verify there is no load or store after the store. */
3517 use_operand_p use_p;
3518 imm_use_iterator imm_iter;
3519 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, gimple_vdef (store))
3520 if (USE_STMT (use_p) != store
3521 && gimple_bb (USE_STMT (use_p)) == bb)
3522 return NULL;
3524 return store;
3527 /* Conditional store replacement. We already know
3528 that the recognized pattern looks like so:
3530 split:
3531 if (cond) goto THEN_BB; else goto ELSE_BB (edge E1)
3532 THEN_BB:
3534 X = Y;
3536 goto JOIN_BB;
3537 ELSE_BB:
3539 X = Z;
3541 fallthrough (edge E0)
3542 JOIN_BB:
3543 some more
3545 We check that it is safe to sink the store to JOIN_BB by verifying that
3546 there are no read-after-write or write-after-write dependencies in
3547 THEN_BB and ELSE_BB. */
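/* A hypothetical source-level view:

   if (cond)
     x = y;
   else
     x = z;

   becomes

   x = cond ? y : z;

   with the single store sunk into JOIN_BB.  */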
3549 static bool
3550 cond_if_else_store_replacement (basic_block then_bb, basic_block else_bb,
3551 basic_block join_bb)
3553 vec<data_reference_p> then_datarefs, else_datarefs;
3554 vec<ddr_p> then_ddrs, else_ddrs;
3555 gimple *then_store, *else_store;
3556 bool found, ok = false, res;
3557 struct data_dependence_relation *ddr;
3558 data_reference_p then_dr, else_dr;
3559 int i, j;
3560 tree then_lhs, else_lhs;
3561 basic_block blocks[3];
3563 /* Handle the case with single store in THEN_BB and ELSE_BB. That is
3564 cheap enough to always handle as it allows us to elide dependence
3565 checking. */
3566 gphi *vphi = NULL;
3567 for (gphi_iterator si = gsi_start_phis (join_bb); !gsi_end_p (si);
3568 gsi_next (&si))
3569 if (virtual_operand_p (gimple_phi_result (si.phi ())))
3571 vphi = si.phi ();
3572 break;
3574 if (!vphi)
3575 return false;
3576 tree then_vdef = PHI_ARG_DEF_FROM_EDGE (vphi, single_succ_edge (then_bb));
3577 tree else_vdef = PHI_ARG_DEF_FROM_EDGE (vphi, single_succ_edge (else_bb));
3578 gimple *then_assign = single_trailing_store_in_bb (then_bb, then_vdef);
3579 if (then_assign)
3581 gimple *else_assign = single_trailing_store_in_bb (else_bb, else_vdef);
3582 if (else_assign)
3583 return cond_if_else_store_replacement_1 (then_bb, else_bb, join_bb,
3584 then_assign, else_assign);
3587 /* If either vectorization or if-conversion is disabled then do
3588 not sink any stores. */
3589 if (param_max_stores_to_sink == 0
3590 || (!flag_tree_loop_vectorize && !flag_tree_slp_vectorize)
3591 || !flag_tree_loop_if_convert)
3592 return false;
3594 /* Find data references. */
3595 then_datarefs.create (1);
3596 else_datarefs.create (1);
3597 if ((find_data_references_in_bb (NULL, then_bb, &then_datarefs)
3598 == chrec_dont_know)
3599 || !then_datarefs.length ()
3600 || (find_data_references_in_bb (NULL, else_bb, &else_datarefs)
3601 == chrec_dont_know)
3602 || !else_datarefs.length ())
3604 free_data_refs (then_datarefs);
3605 free_data_refs (else_datarefs);
3606 return false;
3609 /* Find pairs of stores with equal LHS. */
3610 auto_vec<gimple *, 1> then_stores, else_stores;
3611 FOR_EACH_VEC_ELT (then_datarefs, i, then_dr)
3613 if (DR_IS_READ (then_dr))
3614 continue;
3616 then_store = DR_STMT (then_dr);
3617 then_lhs = gimple_get_lhs (then_store);
3618 if (then_lhs == NULL_TREE)
3619 continue;
3620 found = false;
3622 FOR_EACH_VEC_ELT (else_datarefs, j, else_dr)
3624 if (DR_IS_READ (else_dr))
3625 continue;
3627 else_store = DR_STMT (else_dr);
3628 else_lhs = gimple_get_lhs (else_store);
3629 if (else_lhs == NULL_TREE)
3630 continue;
3632 if (operand_equal_p (then_lhs, else_lhs, 0))
3634 found = true;
3635 break;
3639 if (!found)
3640 continue;
3642 then_stores.safe_push (then_store);
3643 else_stores.safe_push (else_store);
3646 /* No pairs of stores found. */
3647 if (!then_stores.length ()
3648 || then_stores.length () > (unsigned) param_max_stores_to_sink)
3650 free_data_refs (then_datarefs);
3651 free_data_refs (else_datarefs);
3652 return false;
3655 /* Compute and check data dependencies in both basic blocks. */
3656 then_ddrs.create (1);
3657 else_ddrs.create (1);
3658 if (!compute_all_dependences (then_datarefs, &then_ddrs,
3659 vNULL, false)
3660 || !compute_all_dependences (else_datarefs, &else_ddrs,
3661 vNULL, false))
3663 free_dependence_relations (then_ddrs);
3664 free_dependence_relations (else_ddrs);
3665 free_data_refs (then_datarefs);
3666 free_data_refs (else_datarefs);
3667 return false;
3669 blocks[0] = then_bb;
3670 blocks[1] = else_bb;
3671 blocks[2] = join_bb;
3672 renumber_gimple_stmt_uids_in_blocks (blocks, 3);
3674 /* Check that there are no read-after-write or write-after-write dependencies
3675 in THEN_BB. */
3676 FOR_EACH_VEC_ELT (then_ddrs, i, ddr)
3678 struct data_reference *dra = DDR_A (ddr);
3679 struct data_reference *drb = DDR_B (ddr);
3681 if (DDR_ARE_DEPENDENT (ddr) != chrec_known
3682 && ((DR_IS_READ (dra) && DR_IS_WRITE (drb)
3683 && gimple_uid (DR_STMT (dra)) > gimple_uid (DR_STMT (drb)))
3684 || (DR_IS_READ (drb) && DR_IS_WRITE (dra)
3685 && gimple_uid (DR_STMT (drb)) > gimple_uid (DR_STMT (dra)))
3686 || (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))))
3688 free_dependence_relations (then_ddrs);
3689 free_dependence_relations (else_ddrs);
3690 free_data_refs (then_datarefs);
3691 free_data_refs (else_datarefs);
3692 return false;
3696 /* Check that there are no read-after-write or write-after-write dependencies
3697 in ELSE_BB. */
3698 FOR_EACH_VEC_ELT (else_ddrs, i, ddr)
3700 struct data_reference *dra = DDR_A (ddr);
3701 struct data_reference *drb = DDR_B (ddr);
3703 if (DDR_ARE_DEPENDENT (ddr) != chrec_known
3704 && ((DR_IS_READ (dra) && DR_IS_WRITE (drb)
3705 && gimple_uid (DR_STMT (dra)) > gimple_uid (DR_STMT (drb)))
3706 || (DR_IS_READ (drb) && DR_IS_WRITE (dra)
3707 && gimple_uid (DR_STMT (drb)) > gimple_uid (DR_STMT (dra)))
3708 || (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))))
3710 free_dependence_relations (then_ddrs);
3711 free_dependence_relations (else_ddrs);
3712 free_data_refs (then_datarefs);
3713 free_data_refs (else_datarefs);
3714 return false;
3718 /* Sink stores with same LHS. */
3719 FOR_EACH_VEC_ELT (then_stores, i, then_store)
3721 else_store = else_stores[i];
3722 res = cond_if_else_store_replacement_1 (then_bb, else_bb, join_bb,
3723 then_store, else_store);
3724 ok = ok || res;
3727 free_dependence_relations (then_ddrs);
3728 free_dependence_relations (else_ddrs);
3729 free_data_refs (then_datarefs);
3730 free_data_refs (else_datarefs);
3732 return ok;
3735 /* Return TRUE if STMT has a VUSE whose corresponding VDEF is in BB. */
3737 static bool
3738 local_mem_dependence (gimple *stmt, basic_block bb)
3740 tree vuse = gimple_vuse (stmt);
3741 gimple *def;
3743 if (!vuse)
3744 return false;
3746 def = SSA_NAME_DEF_STMT (vuse);
3747 return (def && gimple_bb (def) == bb);
3750 /* Given a "diamond" control-flow pattern where BB0 tests a condition,
3751 BB1 and BB2 are "then" and "else" blocks dependent on this test,
3752 and BB3 rejoins control flow following BB1 and BB2, look for
3753 opportunities to hoist loads as follows. If BB3 contains a PHI of
3754 two loads, one each occurring in BB1 and BB2, and the loads are
3755 provably of adjacent fields in the same structure, then move both
3756 loads into BB0. Of course this can only be done if there are no
3757 dependencies preventing such motion.
3759 One of the hoisted loads will always be speculative, so the
3760 transformation is currently conservative:
3762 - The fields must be strictly adjacent.
3763 - The two fields must occupy a single memory block that is
3764 guaranteed to not cross a page boundary.
3766 The last is difficult to prove, as such memory blocks should be
3767 aligned on the minimum of the stack alignment boundary and the
3768 alignment guaranteed by heap allocation interfaces. Thus we rely
3769 on a parameter for the alignment value.
3771 Provided a good value is used for the last case, the first
3772 restriction could possibly be relaxed. */
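/* A hypothetical source example:

   struct S { int a; int b; };
   int f (struct S *s, int flag) { return flag ? s->a : s->b; }

   Both field loads can be hoisted ahead of the branch when a and b
   are adjacent and fit within one cache line, enabling a conditional
   move.  */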
3774 static void
3775 hoist_adjacent_loads (basic_block bb0, basic_block bb1,
3776 basic_block bb2, basic_block bb3)
3778 unsigned HOST_WIDE_INT param_align = param_l1_cache_line_size;
3779 unsigned HOST_WIDE_INT param_align_bits = param_align * BITS_PER_UNIT;
3780 gphi_iterator gsi;
3782 /* Walk the phis in bb3 looking for an opportunity. We are looking
3783 for phis of two SSA names, one each of which is defined in bb1 and
3784 bb2. */
3785 for (gsi = gsi_start_phis (bb3); !gsi_end_p (gsi); gsi_next (&gsi))
3787 gphi *phi_stmt = gsi.phi ();
3788 gimple *def1, *def2;
3789 tree arg1, arg2, ref1, ref2, field1, field2;
3790 tree tree_offset1, tree_offset2, tree_size2, next;
3791 unsigned HOST_WIDE_INT offset1, offset2, size2, align1;
3792 gimple_stmt_iterator gsi2;
3793 basic_block bb_for_def1, bb_for_def2;
3795 if (gimple_phi_num_args (phi_stmt) != 2
3796 || virtual_operand_p (gimple_phi_result (phi_stmt)))
3797 continue;
3799 arg1 = gimple_phi_arg_def (phi_stmt, 0);
3800 arg2 = gimple_phi_arg_def (phi_stmt, 1);
3802 if (TREE_CODE (arg1) != SSA_NAME
3803 || TREE_CODE (arg2) != SSA_NAME
3804 || SSA_NAME_IS_DEFAULT_DEF (arg1)
3805 || SSA_NAME_IS_DEFAULT_DEF (arg2))
3806 continue;
3808 def1 = SSA_NAME_DEF_STMT (arg1);
3809 def2 = SSA_NAME_DEF_STMT (arg2);
3811 if ((gimple_bb (def1) != bb1 || gimple_bb (def2) != bb2)
3812 && (gimple_bb (def2) != bb1 || gimple_bb (def1) != bb2))
3813 continue;
3815 /* Check the mode of the arguments to be sure a conditional move
3816 can be generated for it. */
3817 if (optab_handler (movcc_optab, TYPE_MODE (TREE_TYPE (arg1)))
3818 == CODE_FOR_nothing)
3819 continue;
3821 /* Both statements must be assignments whose RHS is a COMPONENT_REF. */
3822 if (!gimple_assign_single_p (def1)
3823 || !gimple_assign_single_p (def2)
3824 || gimple_has_volatile_ops (def1)
3825 || gimple_has_volatile_ops (def2))
3826 continue;
3828 ref1 = gimple_assign_rhs1 (def1);
3829 ref2 = gimple_assign_rhs1 (def2);
3831 if (TREE_CODE (ref1) != COMPONENT_REF
3832 || TREE_CODE (ref2) != COMPONENT_REF)
3833 continue;
3835 /* The zeroth operand of the two component references must be
3836 identical. It is not sufficient to compare get_base_address of
3837 the two references, because this could allow for different
3838 elements of the same array in the two trees. It is not safe to
3839 assume that the existence of one array element implies the
3840 existence of a different one. */
3841 if (!operand_equal_p (TREE_OPERAND (ref1, 0), TREE_OPERAND (ref2, 0), 0))
3842 continue;
3844 field1 = TREE_OPERAND (ref1, 1);
3845 field2 = TREE_OPERAND (ref2, 1);
3847 /* Check for field adjacency, and ensure field1 comes first. */
3848 for (next = DECL_CHAIN (field1);
3849 next && TREE_CODE (next) != FIELD_DECL;
3850 next = DECL_CHAIN (next))
3853 if (next != field2)
3855 for (next = DECL_CHAIN (field2);
3856 next && TREE_CODE (next) != FIELD_DECL;
3857 next = DECL_CHAIN (next))
3860 if (next != field1)
3861 continue;
3863 std::swap (field1, field2);
3864 std::swap (def1, def2);
3867 bb_for_def1 = gimple_bb (def1);
3868 bb_for_def2 = gimple_bb (def2);
3870 /* Check for proper alignment of the first field. */
3871 tree_offset1 = bit_position (field1);
3872 tree_offset2 = bit_position (field2);
3873 tree_size2 = DECL_SIZE (field2);
3875 if (!tree_fits_uhwi_p (tree_offset1)
3876 || !tree_fits_uhwi_p (tree_offset2)
3877 || !tree_fits_uhwi_p (tree_size2))
3878 continue;
3880 offset1 = tree_to_uhwi (tree_offset1);
3881 offset2 = tree_to_uhwi (tree_offset2);
3882 size2 = tree_to_uhwi (tree_size2);
3883 align1 = DECL_ALIGN (field1) % param_align_bits;
3885 if (offset1 % BITS_PER_UNIT != 0)
3886 continue;
3888 /* For profitability, the two field references should fit within
3889 a single cache line. */
3890 if (align1 + offset2 - offset1 + size2 > param_align_bits)
3891 continue;
3893 /* The two expressions cannot be dependent upon vdefs defined
3894 in bb1/bb2. */
3895 if (local_mem_dependence (def1, bb_for_def1)
3896 || local_mem_dependence (def2, bb_for_def2))
3897 continue;
3899 /* The conditions are satisfied; hoist the loads from bb1 and bb2 into
3900 bb0. We hoist the first one first so that a cache miss is handled
3901 efficiently regardless of hardware cache-fill policy. */
3902 gsi2 = gsi_for_stmt (def1);
3903 gsi_move_to_bb_end (&gsi2, bb0);
3904 gsi2 = gsi_for_stmt (def2);
3905 gsi_move_to_bb_end (&gsi2, bb0);
3906 statistics_counter_event (cfun, "hoisted loads", 1);
3908 if (dump_file && (dump_flags & TDF_DETAILS))
3910 fprintf (dump_file,
3911 "\nHoisting adjacent loads from %d and %d into %d: \n",
3912 bb_for_def1->index, bb_for_def2->index, bb0->index);
3913 print_gimple_stmt (dump_file, def1, 0, TDF_VOPS|TDF_MEMSYMS);
3914 print_gimple_stmt (dump_file, def2, 0, TDF_VOPS|TDF_MEMSYMS);
3919 /* Determine whether we should attempt to hoist adjacent loads out of
3920 diamond patterns in pass_phiopt. Always hoist loads if
3921 -fhoist-adjacent-loads is specified and the target machine has
3922 both a conditional move instruction and a defined cache line size. */
3924 static bool
3925 gate_hoist_loads (void)
3927 return (flag_hoist_adjacent_loads == 1
3928 && param_l1_cache_line_size
3929 && HAVE_conditional_move);
3932 /* This pass tries to replace an if-then-else block with an
3933 assignment. We have different kinds of transformations.
3934 Some of these transformations are also performed by the ifcvt
3935 RTL optimizer.
3937 PHI-OPT using Match-and-simplify infrastructure
3938 -----------------------------------------------
3940 The PHI-OPT pass will try to use match-and-simplify infrastructure
3941 (gimple_simplify) to do transformations. This is implemented in
3942 match_simplify_replacement.
3944 It works by replacing:
3945 bb0:
3946 if (cond) goto bb2; else goto bb1;
3947 bb1:
3948 bb2:
3949 x = PHI <a (bb1), b (bb0), ...>;
3951 with a statement if `cond ? b : a` gets simplified:
3953 bb0:
3954 x1 = cond ? b : a;
3955 bb2:
3956 x = PHI <a (bb1), x1 (bb0), ...>;
3957 Bb1 might be removed as it becomes unreachable when doing the replacement.
3958 Note that bb1 does not have to be a simple forwarding basic block from bb0.
3960 The pass will also try `(!cond) ? a : b`, but only when `!cond` itself
3961 simplifies; this avoids an explosion of patterns in match.pd.
3962 Note bb1 does not need to be completely empty; it can contain
3963 one statement which is known not to trap.
3965 It also can handle the case where we have two forwarding bbs (diamond):
3966 bb0:
3967 if (cond) goto bb2; else goto bb1;
3968 bb1: goto bb3;
3969 bb2: goto bb3;
3970 bb3:
3971 x = PHI <a (bb1), b (bb2), ...>;
3972 And that is replaced with a statement if it is simplified
3973 from `cond ? b : a`.
3974 Again bb1 and bb2 do not have to be completely empty, but
3975 each can contain one statement which is known not to trap.
3976 In this case, however, bb1/bb2 can only be forwarding basic blocks.
3978 This fully replaces the old "Conditional Replacement" and
3979 "ABS Replacement" transformations, as they are now
3980 implemented in match.pd.
3981 Some parts of the "MIN/MAX Replacement" are re-implemented in match.pd.
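   As a source-level illustration (hypothetical C, assuming match.pd
   simplifies the conditional):

     x = a < 0 ? -a : a;

   is recognized as ABS_EXPR <a> through this mechanism, which is why
   the old "ABS Replacement" code is no longer needed.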
3983 Value Replacement
3984 -----------------
3986 This transformation, implemented in value_replacement, replaces
3988 bb0:
3989 if (a != b) goto bb2; else goto bb1;
3990 bb1:
3991 bb2:
3992 x = PHI <a (bb1), b (bb0), ...>;
3994 with
3996 bb0:
3997 bb2:
3998 x = PHI <b (bb0), ...>;
4000 This opportunity can sometimes occur as a result of other
4001 optimizations.
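   In source terms (an illustrative example), this corresponds to:

     x = (a != b) ? b : a;

   Whichever arm is selected yields the same value as b, so the whole
   expression reduces to x = b.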
4004 Another case caught by value replacement looks like this:
4006 bb0:
4007 t1 = a == CONST;
4008 t2 = b > c;
4009 t3 = t1 & t2;
4010 if (t3 != 0) goto bb1; else goto bb2;
4011 bb1:
4012 bb2:
4013 x = PHI <CONST, a>
4015 Gets replaced with:
4016 bb0:
4017 bb2:
4018 t1 = a == CONST;
4019 t2 = b > c;
4020 t3 = t1 & t2;
4021 x = a;
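   In source terms (illustrative only), this is:

     x = (a == CONST && b > c) ? CONST : a;

   which always evaluates to a: when the condition holds, a == CONST,
   so both arms are equal.  The comparisons are kept, as shown above,
   and become dead code if they have no other uses.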
4023 MIN/MAX Replacement
4024 -------------------
4026 This transformation, implemented in minmax_replacement, replaces
4028 bb0:
4029 if (a <= b) goto bb2; else goto bb1;
4030 bb1:
4031 bb2:
4032 x = PHI <b (bb1), a (bb0), ...>;
4034 with
4036 bb0:
4037 x' = MIN_EXPR (a, b)
4038 bb2:
4039 x = PHI <x' (bb0), ...>;
4041 A similar transformation is done for MAX_EXPR.
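   In source terms (an illustrative example):

     x = (a <= b) ? a : b;

   becomes a single MIN_EXPR, and the symmetric form becomes a MAX_EXPR.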
4044 This pass also performs further transformations of a slightly different
4045 flavor.
4047 Factor operations in COND_EXPR
4048 ------------------------------
4050 This transformation factors the unary operations out of COND_EXPR with
4051 factor_out_conditional_operation.
4053 For example:
4054 if (a <= CST) goto <bb 3>; else goto <bb 4>;
4055 <bb 3>:
4056 tmp = (int) a;
4057 <bb 4>:
4058 tmp = PHI <tmp, CST>
4060 Into:
4061 if (a <= CST) goto <bb 3>; else goto <bb 4>;
4062 <bb 3>:
4063 <bb 4>:
4064 a = PHI <a, CST>
4065 tmp = (int) a;
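   In source terms this is, illustratively, rewriting

     tmp = (a <= CST) ? (int) a : (int) CST;

   as

     tmp = (int) ((a <= CST) ? a : CST);

   so that the inner conditional can then be recognized as a MIN_EXPR.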
4067 Adjacent Load Hoisting
4068 ----------------------
4070 This transformation replaces
4072 bb0:
4073 if (...) goto bb2; else goto bb1;
4074 bb1:
4075 x1 = (<expr>).field1;
4076 goto bb3;
4077 bb2:
4078 x2 = (<expr>).field2;
4079 bb3:
4080 # x = PHI <x1, x2>;
4082 with
4084 bb0:
4085 x1 = (<expr>).field1;
4086 x2 = (<expr>).field2;
4087 if (...) goto bb2; else goto bb1;
4088 bb1:
4089 goto bb3;
4090 bb2:
4091 bb3:
4092 # x = PHI <x1, x2>;
4094 The purpose of this transformation is to enable generation of conditional
4095 move instructions such as Intel CMOV or PowerPC ISEL. Because one of
4096 the loads is speculative, the transformation is restricted to very
4097 specific cases to avoid introducing a page fault. We are looking for
4098 the common idiom:
4100 if (...)
4101 x = y->left;
4102 else
4103 x = y->right;
4105 where left and right are typically adjacent pointers in a tree structure. */
4107 namespace {
4109 const pass_data pass_data_phiopt =
4111 GIMPLE_PASS, /* type */
4112 "phiopt", /* name */
4113 OPTGROUP_NONE, /* optinfo_flags */
4114 TV_TREE_PHIOPT, /* tv_id */
4115 ( PROP_cfg | PROP_ssa ), /* properties_required */
4116 0, /* properties_provided */
4117 0, /* properties_destroyed */
4118 0, /* todo_flags_start */
4119 0, /* todo_flags_finish */
4122 class pass_phiopt : public gimple_opt_pass
4124 public:
4125 pass_phiopt (gcc::context *ctxt)
4126 : gimple_opt_pass (pass_data_phiopt, ctxt), early_p (false)
4129 /* opt_pass methods: */
4130 opt_pass * clone () final override { return new pass_phiopt (m_ctxt); }
4131 void set_pass_param (unsigned n, bool param) final override
4133 gcc_assert (n == 0);
4134 early_p = param;
4136 bool gate (function *) final override { return flag_ssa_phiopt; }
4137 unsigned int execute (function *) final override;
4139 private:
4140 bool early_p;
4141 }; // class pass_phiopt
4143 } // anon namespace
4145 gimple_opt_pass *
4146 make_pass_phiopt (gcc::context *ctxt)
4148 return new pass_phiopt (ctxt);
4151 unsigned int
4152 pass_phiopt::execute (function *)
4154 bool do_hoist_loads = !early_p ? gate_hoist_loads () : false;
4155 basic_block bb;
4156 basic_block *bb_order;
4157 unsigned n, i;
4158 bool cfgchanged = false;
4160 calculate_dominance_info (CDI_DOMINATORS);
4161 mark_ssa_maybe_undefs ();
4163 /* Search every basic block for a COND_EXPR we may be able to optimize.
4165 We walk the blocks in an order that guarantees that a block with
4166 a single predecessor is processed before its predecessor.
4167 This ensures that we collapse inner ifs before visiting the
4168 outer ones, and also that we do not try to visit a removed
4169 block. */
4170 bb_order = single_pred_before_succ_order ();
4171 n = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS;
4173 for (i = 0; i < n; i++)
4175 gphi *phi;
4176 basic_block bb1, bb2;
4177 edge e1, e2;
4178 tree arg0, arg1;
4179 bool diamond_p = false;
4181 bb = bb_order[i];
4183 /* Check to see if the last statement is a GIMPLE_COND. */
4184 gcond *cond_stmt = safe_dyn_cast <gcond *> (*gsi_last_bb (bb));
4185 if (!cond_stmt)
4186 continue;
4188 e1 = EDGE_SUCC (bb, 0);
4189 bb1 = e1->dest;
4190 e2 = EDGE_SUCC (bb, 1);
4191 bb2 = e2->dest;
4193 /* We cannot do the optimization on abnormal edges. */
4194 if ((e1->flags & EDGE_ABNORMAL) != 0
4195 || (e2->flags & EDGE_ABNORMAL) != 0)
4196 continue;
4198 /* If either bb1 or bb2 has no successors, there is nothing to do. */
4199 if (EDGE_COUNT (bb1->succs) == 0
4200 || EDGE_COUNT (bb2->succs) == 0)
4201 continue;
4203 /* Find the bb which falls through to the other. */
4204 if (EDGE_SUCC (bb1, 0)->dest == bb2)
4206 else if (EDGE_SUCC (bb2, 0)->dest == bb1)
4208 std::swap (bb1, bb2);
4209 std::swap (e1, e2);
4211 else if (EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest
4212 && single_succ_p (bb2))
4214 diamond_p = true;
4215 e2 = EDGE_SUCC (bb2, 0);
4216 /* Make sure bb2 is just a fall through. */
4217 if ((e2->flags & EDGE_FALLTHRU) == 0)
4218 continue;
4220 else
4221 continue;
4223 e1 = EDGE_SUCC (bb1, 0);
4225 /* Make sure that bb1 is just a fall through. */
4226 if (!single_succ_p (bb1)
4227 || (e1->flags & EDGE_FALLTHRU) == 0)
4228 continue;
4230 if (diamond_p)
4232 basic_block bb3 = e1->dest;
4234 if (!single_pred_p (bb1)
4235 || !single_pred_p (bb2))
4236 continue;
4238 if (do_hoist_loads
4239 && !FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (cond_stmt)))
4240 && EDGE_COUNT (bb->succs) == 2
4241 && EDGE_COUNT (bb3->preds) == 2
4242 /* If one edge or the other is dominant, a conditional move
4243 is likely to perform worse than the well-predicted branch. */
4244 && !predictable_edge_p (EDGE_SUCC (bb, 0))
4245 && !predictable_edge_p (EDGE_SUCC (bb, 1)))
4246 hoist_adjacent_loads (bb, bb1, bb2, bb3);
4249 gimple_stmt_iterator gsi;
4250 bool candorest = true;
4252 /* Determine the merge block containing the PHIs we may optimize. */
4253 basic_block merge = diamond_p ? EDGE_SUCC (bb2, 0)->dest : bb2;
4254 gimple_seq phis = phi_nodes (merge);
4256 /* Value replacement can work with more than one PHI
4257 so try that first. */
4258 if (!early_p && !diamond_p)
4259 for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
4261 phi = as_a <gphi *> (gsi_stmt (gsi));
4262 arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
4263 arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
4264 if (value_replacement (bb, bb1, e1, e2, phi, arg0, arg1) == 2)
4266 candorest = false;
4267 cfgchanged = true;
4268 break;
4272 if (!candorest)
4273 continue;
4275 phi = single_non_singleton_phi_for_edges (phis, e1, e2);
4276 if (!phi)
4277 continue;
4279 arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
4280 arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
4282 /* Something is wrong if we cannot find the arguments in the PHI
4283 node. */
4284 gcc_assert (arg0 != NULL_TREE && arg1 != NULL_TREE);
4286 if (single_pred_p (bb1)
4287 && EDGE_COUNT (merge->preds) == 2)
4289 gphi *newphi = phi;
4290 while (newphi)
4292 phi = newphi;
4293 /* factor_out_conditional_operation may create a new PHI in
4294 BB2 and eliminate an existing PHI in BB2. Recompute values
4295 that may be affected by that change. */
4296 arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
4297 arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
4298 gcc_assert (arg0 != NULL_TREE && arg1 != NULL_TREE);
4299 newphi = factor_out_conditional_operation (e1, e2, phi,
4300 arg0, arg1,
4301 cond_stmt);
4305 /* Do the replacement of the conditional if it can be done. */
4306 if (match_simplify_replacement (bb, bb1, bb2, e1, e2, phi,
4307 arg0, arg1, early_p, diamond_p))
4308 cfgchanged = true;
4309 else if (!early_p
4310 && !diamond_p
4311 && single_pred_p (bb1)
4312 && cond_removal_in_builtin_zero_pattern (bb, bb1, e1, e2,
4313 phi, arg0, arg1))
4314 cfgchanged = true;
4315 else if (minmax_replacement (bb, bb1, bb2, e1, e2, phi, arg0, arg1,
4316 diamond_p))
4317 cfgchanged = true;
4318 else if (single_pred_p (bb1)
4319 && !diamond_p
4320 && spaceship_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
4321 cfgchanged = true;
4324 free (bb_order);
4326 if (cfgchanged)
4327 return TODO_cleanup_cfg;
4328 return 0;
4331 /* This pass tries to transform conditional stores into unconditional
4332 ones, enabling further simplifications in the now-simpler then and else
4333 blocks. In particular it replaces this:
4335 bb0:
4336 if (cond) goto bb2; else goto bb1;
4337 bb1:
4338 *p = RHS;
4339 bb2:
4341 with
4343 bb0:
4344 if (cond) goto bb1; else goto bb2;
4345 bb1:
4346 condtmp' = *p;
4347 bb2:
4348 condtmp = PHI <RHS, condtmp'>
4349 *p = condtmp;
4351 This transformation can only be done under several constraints,
4352 documented below. It also replaces:
4354 bb0:
4355 if (cond) goto bb2; else goto bb1;
4356 bb1:
4357 *p = RHS1;
4358 goto bb3;
4359 bb2:
4360 *p = RHS2;
4361 bb3:
4363 with
4365 bb0:
4366 if (cond) goto bb3; else goto bb1;
4367 bb1:
4368 bb3:
4369 condtmp = PHI <RHS1, RHS2>
4370 *p = condtmp; */
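/* In source terms (an illustrative example), the first form rewrites

     if (cond)
       *p = v;

   into the equivalent of

     tmp = cond ? v : *p;
     *p = tmp;

   which is only valid under the constraints checked by
   cond_store_replacement, e.g. that the load from *p cannot trap.  */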
4372 namespace {
4374 const pass_data pass_data_cselim =
4376 GIMPLE_PASS, /* type */
4377 "cselim", /* name */
4378 OPTGROUP_NONE, /* optinfo_flags */
4379 TV_TREE_PHIOPT, /* tv_id */
4380 ( PROP_cfg | PROP_ssa ), /* properties_required */
4381 0, /* properties_provided */
4382 0, /* properties_destroyed */
4383 0, /* todo_flags_start */
4384 0, /* todo_flags_finish */
4387 class pass_cselim : public gimple_opt_pass
4389 public:
4390 pass_cselim (gcc::context *ctxt)
4391 : gimple_opt_pass (pass_data_cselim, ctxt)
4394 /* opt_pass methods: */
4395 bool gate (function *) final override { return flag_tree_cselim; }
4396 unsigned int execute (function *) final override;
4398 }; // class pass_cselim
4400 } // anon namespace
4402 gimple_opt_pass *
4403 make_pass_cselim (gcc::context *ctxt)
4405 return new pass_cselim (ctxt);
4408 unsigned int
4409 pass_cselim::execute (function *)
4411 basic_block bb;
4412 basic_block *bb_order;
4413 unsigned n, i;
4414 bool cfgchanged = false;
4415 hash_set<tree> *nontrap = 0;
4416 unsigned todo = 0;
4418 /* ??? We are not interested in loop-related info, but the following
4419 will create it, and would ICE if loops were not initialized with
4420 pre-headers. This is an interfacing issue of find_data_references_in_bb. */
4421 loop_optimizer_init (LOOPS_NORMAL);
4422 scev_initialize ();
4424 calculate_dominance_info (CDI_DOMINATORS);
4426 /* Calculate the set of non-trapping memory accesses. */
4427 nontrap = get_non_trapping ();
4429 /* Search every basic block for a COND_EXPR we may be able to optimize.
4431 We walk the blocks in an order that guarantees that a block with
4432 a single predecessor is processed before its predecessor.
4433 This ensures that we collapse inner ifs before visiting the
4434 outer ones, and also that we do not try to visit a removed
4435 block. */
4436 bb_order = single_pred_before_succ_order ();
4437 n = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS;
4439 for (i = 0; i < n; i++)
4441 basic_block bb1, bb2;
4442 edge e1, e2;
4443 bool diamond_p = false;
4445 bb = bb_order[i];
4447 /* Check to see if the last statement is a GIMPLE_COND. */
4448 gcond *cond_stmt = safe_dyn_cast <gcond *> (*gsi_last_bb (bb));
4449 if (!cond_stmt)
4450 continue;
4452 e1 = EDGE_SUCC (bb, 0);
4453 bb1 = e1->dest;
4454 e2 = EDGE_SUCC (bb, 1);
4455 bb2 = e2->dest;
4457 /* We cannot do the optimization on abnormal edges. */
4458 if ((e1->flags & EDGE_ABNORMAL) != 0
4459 || (e2->flags & EDGE_ABNORMAL) != 0)
4460 continue;
4462 /* If either bb1 or bb2 has no successors, there is nothing to do. */
4463 if (EDGE_COUNT (bb1->succs) == 0
4464 || EDGE_COUNT (bb2->succs) == 0)
4465 continue;
4467 /* Find the bb which falls through to the other. */
4468 if (EDGE_SUCC (bb1, 0)->dest == bb2)
4470 else if (EDGE_SUCC (bb2, 0)->dest == bb1)
4472 std::swap (bb1, bb2);
4473 std::swap (e1, e2);
4475 else if (EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest
4476 && single_succ_p (bb2))
4478 diamond_p = true;
4479 e2 = EDGE_SUCC (bb2, 0);
4480 /* Make sure bb2 is just a fall through. */
4481 if ((e2->flags & EDGE_FALLTHRU) == 0)
4482 continue;
4484 else
4485 continue;
4487 e1 = EDGE_SUCC (bb1, 0);
4489 /* Make sure that bb1 is just a fall through. */
4490 if (!single_succ_p (bb1)
4491 || (e1->flags & EDGE_FALLTHRU) == 0)
4492 continue;
4494 if (diamond_p)
4496 basic_block bb3 = e1->dest;
4498 /* Only handle sinking of stores from exactly two bbs.
4499 Unlike hoisting, the middle bbs do not always need
4500 to come directly from the if, since we are sinking
4501 rather than hoisting. */
4502 if (EDGE_COUNT (bb3->preds) != 2)
4503 continue;
4504 if (cond_if_else_store_replacement (bb1, bb2, bb3))
4505 cfgchanged = true;
4506 continue;
4509 /* Also make sure that bb1 only has one predecessor and that it
4510 is bb. */
4511 if (!single_pred_p (bb1)
4512 || single_pred (bb1) != bb)
4513 continue;
4515 /* bb1 is the middle block, bb2 the join block, bb the split block,
4516 e1 the fallthrough edge from bb1 to bb2. We can't do the
4517 optimization if the join block has more than two predecessors. */
4518 if (EDGE_COUNT (bb2->preds) > 2)
4519 continue;
4520 if (cond_store_replacement (bb1, bb2, e1, e2, nontrap))
4521 cfgchanged = true;
4524 free (bb_order);
4526 delete nontrap;
4527 /* If the CFG has changed, we should clean up the CFG. */
4528 if (cfgchanged)
4530 /* In cond-store replacement we have added some loads on edges
4531 and new VOPS (as we moved the store, and created a load). */
4532 gsi_commit_edge_inserts ();
4533 todo = TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
4535 scev_finalize ();
4536 loop_optimizer_finalize ();
4537 return todo;