gcc/tree-ssa-phiopt.c
/* Optimization of PHI nodes by converting them into straightline code.
   Copyright (C) 2004-2021 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "insn-codes.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "tree-pass.h"
#include "ssa.h"
#include "tree-ssa.h"
#include "optabs-tree.h"
#include "insn-config.h"
#include "gimple-pretty-print.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "cfganal.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "tree-cfg.h"
#include "tree-dfa.h"
#include "domwalk.h"
#include "cfgloop.h"
#include "tree-data-ref.h"
#include "tree-scalar-evolution.h"
#include "tree-inline.h"
#include "case-cfn-macros.h"
#include "tree-eh.h"
#include "gimple-fold.h"
#include "internal-fn.h"
#include "gimple-range.h"
#include "gimple-match.h"
#include "dbgcnt.h"
static unsigned int tree_ssa_phiopt_worker (bool, bool, bool);
static bool two_value_replacement (basic_block, basic_block, edge, gphi *,
                                   tree, tree);
static bool match_simplify_replacement (basic_block, basic_block,
                                        edge, edge, gphi *, tree, tree, bool);
static gphi *factor_out_conditional_conversion (edge, edge, gphi *, tree, tree,
                                                gimple *);
static int value_replacement (basic_block, basic_block,
                              edge, edge, gphi *, tree, tree);
static bool minmax_replacement (basic_block, basic_block,
                                edge, edge, gphi *, tree, tree);
static bool spaceship_replacement (basic_block, basic_block,
                                   edge, edge, gphi *, tree, tree);
static bool cond_removal_in_builtin_zero_pattern (basic_block, basic_block,
                                                  edge, edge, gphi *,
                                                  tree, tree);
static bool cond_store_replacement (basic_block, basic_block, edge, edge,
                                    hash_set<tree> *);
static bool cond_if_else_store_replacement (basic_block, basic_block, basic_block);
static hash_set<tree> * get_non_trapping ();
static void replace_phi_edge_with_variable (basic_block, edge, gphi *, tree);
static void hoist_adjacent_loads (basic_block, basic_block,
                                  basic_block, basic_block);
static bool gate_hoist_loads (void);
/* This pass tries to transform conditional stores into unconditional
   ones, enabling further simplifications with the simpler then and else
   blocks.  In particular it replaces this:

     bb0:
       if (cond) goto bb2; else goto bb1;
     bb1:
       *p = RHS;
     bb2:

   with

     bb0:
       if (cond) goto bb1; else goto bb2;
     bb1:
       condtmp' = *p;
     bb2:
       condtmp = PHI <RHS, condtmp'>
       *p = condtmp;

   This transformation can only be done under several constraints,
   documented below.  It also replaces:

     bb0:
       if (cond) goto bb2; else goto bb1;
     bb1:
       *p = RHS1;
       goto bb3;
     bb2:
       *p = RHS2;
     bb3:

   with

     bb0:
       if (cond) goto bb3; else goto bb1;
     bb1:
     bb3:
       condtmp = PHI <RHS1, RHS2>
       *p = condtmp;  */
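/* As an illustrative source-level sketch (not part of the pass itself;
   identifiers are hypothetical): the second (diamond) form corresponds
   to rewriting

     void f (int *p, int c, int a, int b)
     {
       if (c)
         *p = a;
       else
         *p = b;
     }

   into the single-store form

     void f (int *p, int c, int a, int b)
     {
       int tmp = c ? a : b;   // the PHI <RHS1, RHS2> above
       *p = tmp;              // one unconditional store
     }

   after which later passes can turn the remaining selection into a
   conditional move.  */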
static unsigned int
tree_ssa_cs_elim (void)
{
  unsigned todo;
  /* ???  We are not interested in loop related info, but the following
     will create it, ICEing as we didn't init loops with pre-headers.
     An interfacing issue of find_data_references_in_bb.  */
  loop_optimizer_init (LOOPS_NORMAL);
  scev_initialize ();
  todo = tree_ssa_phiopt_worker (true, false, false);
  scev_finalize ();
  loop_optimizer_finalize ();
  return todo;
}
/* Return the singleton PHI in the SEQ of PHIs for edges E0 and E1.  */

static gphi *
single_non_singleton_phi_for_edges (gimple_seq seq, edge e0, edge e1)
{
  gimple_stmt_iterator i;
  gphi *phi = NULL;
  if (gimple_seq_singleton_p (seq))
    return as_a <gphi *> (gsi_stmt (gsi_start (seq)));
  for (i = gsi_start (seq); !gsi_end_p (i); gsi_next (&i))
    {
      gphi *p = as_a <gphi *> (gsi_stmt (i));
      /* If the PHI arguments are equal then we can skip this PHI.  */
      if (operand_equal_for_phi_arg_p (gimple_phi_arg_def (p, e0->dest_idx),
                                       gimple_phi_arg_def (p, e1->dest_idx)))
        continue;

      /* If we already found a PHI whose two edge arguments differ, then
         there is no singleton for these edges; return NULL.  */
      if (phi)
        return NULL;

      phi = p;
    }
  return phi;
}
/* The core routine of conditional store replacement and normal
   phi optimizations.  Both share much of the infrastructure in how
   to match applicable basic block patterns.  DO_STORE_ELIM is true
   when we want to do conditional store replacement, false otherwise.
   DO_HOIST_LOADS is true when we want to hoist adjacent loads out
   of diamond control flow patterns, false otherwise.  */
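/* For orientation, a sketch of the two CFG shapes the worker matches
   (bb/bb1/bb2/bb3 are the local naming conventions used below):

     triangle (PHI optimizations):      diamond (store elim / load hoisting):

          bb                                   bb
         /  \                                 /  \
       bb1   |                             bb1    bb2
         \  /                                 \  /
          bb2                                  bb3

   where bb ends in a GIMPLE_COND, bb1 (and bb2 in the diamond case) are
   single-successor fallthrough blocks, and the join block holds the PHIs.  */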
static unsigned int
tree_ssa_phiopt_worker (bool do_store_elim, bool do_hoist_loads, bool early_p)
{
  basic_block bb;
  basic_block *bb_order;
  unsigned n, i;
  bool cfgchanged = false;
  hash_set<tree> *nontrap = 0;

  calculate_dominance_info (CDI_DOMINATORS);

  if (do_store_elim)
    /* Calculate the set of non-trapping memory accesses.  */
    nontrap = get_non_trapping ();

  /* Search every basic block for COND_EXPRs we may be able to optimize.

     We walk the blocks in an order that guarantees that a block with
     a single predecessor is processed before the predecessor.
     This ensures that we collapse inner ifs before visiting the
     outer ones, and also that we do not try to visit a removed
     block.  */
  bb_order = single_pred_before_succ_order ();
  n = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS;

  for (i = 0; i < n; i++)
    {
      gimple *cond_stmt;
      gphi *phi;
      basic_block bb1, bb2;
      edge e1, e2;
      tree arg0, arg1;

      bb = bb_order[i];

      cond_stmt = last_stmt (bb);
      /* Check to see if the last statement is a GIMPLE_COND.  */
      if (!cond_stmt
          || gimple_code (cond_stmt) != GIMPLE_COND)
        continue;

      e1 = EDGE_SUCC (bb, 0);
      bb1 = e1->dest;
      e2 = EDGE_SUCC (bb, 1);
      bb2 = e2->dest;

      /* We cannot do the optimization on abnormal edges.  */
      if ((e1->flags & EDGE_ABNORMAL) != 0
          || (e2->flags & EDGE_ABNORMAL) != 0)
        continue;

      /* If either bb1 or bb2 has no successors, there is nothing to do.  */
      if (EDGE_COUNT (bb1->succs) == 0
          || EDGE_COUNT (bb2->succs) == 0)
        continue;

      /* Find which bb is the fall through to the other.  */
      if (EDGE_SUCC (bb1, 0)->dest == bb2)
        ;
      else if (EDGE_SUCC (bb2, 0)->dest == bb1)
        {
          std::swap (bb1, bb2);
          std::swap (e1, e2);
        }
      else if (do_store_elim
               && EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest)
        {
          basic_block bb3 = EDGE_SUCC (bb1, 0)->dest;

          if (!single_succ_p (bb1)
              || (EDGE_SUCC (bb1, 0)->flags & EDGE_FALLTHRU) == 0
              || !single_succ_p (bb2)
              || (EDGE_SUCC (bb2, 0)->flags & EDGE_FALLTHRU) == 0
              || EDGE_COUNT (bb3->preds) != 2)
            continue;
          if (cond_if_else_store_replacement (bb1, bb2, bb3))
            cfgchanged = true;
          continue;
        }
      else if (do_hoist_loads
               && EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest)
        {
          basic_block bb3 = EDGE_SUCC (bb1, 0)->dest;

          if (!FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (cond_stmt)))
              && single_succ_p (bb1)
              && single_succ_p (bb2)
              && single_pred_p (bb1)
              && single_pred_p (bb2)
              && EDGE_COUNT (bb->succs) == 2
              && EDGE_COUNT (bb3->preds) == 2
              /* If one edge or the other is dominant, a conditional move
                 is likely to perform worse than the well-predicted branch.  */
              && !predictable_edge_p (EDGE_SUCC (bb, 0))
              && !predictable_edge_p (EDGE_SUCC (bb, 1)))
            hoist_adjacent_loads (bb, bb1, bb2, bb3);
          continue;
        }
      else
        continue;
      e1 = EDGE_SUCC (bb1, 0);

      /* Make sure that bb1 is just a fall through.  */
      if (!single_succ_p (bb1)
          || (e1->flags & EDGE_FALLTHRU) == 0)
        continue;

      if (do_store_elim)
        {
          /* Also make sure that bb1 has only one predecessor and that it
             is bb.  */
          if (!single_pred_p (bb1)
              || single_pred (bb1) != bb)
            continue;

          /* bb1 is the middle block, bb2 the join block, bb the split block,
             e1 the fallthrough edge from bb1 to bb2.  We can't do the
             optimization if the join block has more than two predecessors.  */
          if (EDGE_COUNT (bb2->preds) > 2)
            continue;
          if (cond_store_replacement (bb1, bb2, e1, e2, nontrap))
            cfgchanged = true;
        }
      else
        {
          gimple_seq phis = phi_nodes (bb2);
          gimple_stmt_iterator gsi;
          bool candorest = true;

          /* Value replacement can work with more than one PHI
             so try that first.  */
          if (!early_p)
            for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
              {
                phi = as_a <gphi *> (gsi_stmt (gsi));
                arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
                arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
                if (value_replacement (bb, bb1, e1, e2, phi, arg0, arg1) == 2)
                  {
                    candorest = false;
                    cfgchanged = true;
                    break;
                  }
              }

          if (!candorest)
            continue;

          phi = single_non_singleton_phi_for_edges (phis, e1, e2);
          if (!phi)
            continue;

          arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
          arg1 = gimple_phi_arg_def (phi, e2->dest_idx);

          /* Something is wrong if we cannot find the arguments in the PHI
             node.  */
          gcc_assert (arg0 != NULL_TREE && arg1 != NULL_TREE);

          gphi *newphi;
          if (single_pred_p (bb1)
              && (newphi = factor_out_conditional_conversion (e1, e2, phi,
                                                              arg0, arg1,
                                                              cond_stmt)))
            {
              phi = newphi;
              /* factor_out_conditional_conversion may create a new PHI in
                 BB2 and eliminate an existing PHI in BB2.  Recompute values
                 that may be affected by that change.  */
              arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
              arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
              gcc_assert (arg0 != NULL_TREE && arg1 != NULL_TREE);
            }

          /* Do the replacement of conditional if it can be done.  */
          if (!early_p && two_value_replacement (bb, bb1, e2, phi, arg0, arg1))
            cfgchanged = true;
          else if (match_simplify_replacement (bb, bb1, e1, e2, phi,
                                               arg0, arg1,
                                               early_p))
            cfgchanged = true;
          else if (!early_p
                   && single_pred_p (bb1)
                   && cond_removal_in_builtin_zero_pattern (bb, bb1, e1, e2,
                                                            phi, arg0, arg1))
            cfgchanged = true;
          else if (minmax_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
            cfgchanged = true;
          else if (single_pred_p (bb1)
                   && spaceship_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
            cfgchanged = true;
        }
    }

  free (bb_order);

  if (do_store_elim)
    delete nontrap;
  /* If the CFG has changed, we should cleanup the CFG.  */
  if (cfgchanged && do_store_elim)
    {
      /* In cond-store replacement we have added some loads on edges
         and new VOPS (as we moved the store, and created a load).  */
      gsi_commit_edge_inserts ();
      return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
    }
  else if (cfgchanged)
    return TODO_cleanup_cfg;
  return 0;
}
/* Replace PHI node element whose edge is E in block BB with variable NEW.
   Remove the edge from COND_BLOCK which does not lead to BB (COND_BLOCK
   is known to have two edges, one of which must reach BB).  */
static void
replace_phi_edge_with_variable (basic_block cond_block,
                                edge e, gphi *phi, tree new_tree)
{
  basic_block bb = gimple_bb (phi);
  gimple_stmt_iterator gsi;
  tree phi_result = PHI_RESULT (phi);

  /* Duplicate range info if it is the only thing setting the target PHI.
     This is needed as later on, the new_tree will be replacing
     the assignment of the PHI.
     For an example:
     bb1:
     _4 = min<a_1, 255>
     goto bb2

     # RANGE [-INF, 255]
     a_3 = PHI<_4(1)>
     bb3:

     use(a_3)
     And _4 gets propagated into the use of a_3 and the range info is lost.
     This can't be done for more than 2 incoming edges as the propagation
     won't happen.
     The new_tree needs to be defined in the same basic block as the conditional.  */
  if (TREE_CODE (new_tree) == SSA_NAME
      && EDGE_COUNT (gimple_bb (phi)->preds) == 2
      && INTEGRAL_TYPE_P (TREE_TYPE (phi_result))
      && !SSA_NAME_RANGE_INFO (new_tree)
      && SSA_NAME_RANGE_INFO (phi_result)
      && gimple_bb (SSA_NAME_DEF_STMT (new_tree)) == cond_block
      && dbg_cnt (phiopt_edge_range))
    duplicate_ssa_name_range_info (new_tree,
                                   SSA_NAME_RANGE_TYPE (phi_result),
                                   SSA_NAME_RANGE_INFO (phi_result));

  /* Change the PHI argument to new.  */
  SET_USE (PHI_ARG_DEF_PTR (phi, e->dest_idx), new_tree);

  /* Remove the empty basic block.  */
  edge edge_to_remove;
  if (EDGE_SUCC (cond_block, 0)->dest == bb)
    edge_to_remove = EDGE_SUCC (cond_block, 1);
  else
    edge_to_remove = EDGE_SUCC (cond_block, 0);
  if (EDGE_COUNT (edge_to_remove->dest->preds) == 1)
    {
      e->flags |= EDGE_FALLTHRU;
      e->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
      e->probability = profile_probability::always ();
      delete_basic_block (edge_to_remove->dest);

      /* Eliminate the COND_EXPR at the end of COND_BLOCK.  */
      gsi = gsi_last_bb (cond_block);
      gsi_remove (&gsi, true);
    }
  else
    {
      /* If there are other edges into the middle block make
         CFG cleanup deal with the edge removal to avoid
         updating dominators here in a non-trivial way.  */
      gcond *cond = as_a <gcond *> (last_stmt (cond_block));
      if (edge_to_remove->flags & EDGE_TRUE_VALUE)
        gimple_cond_make_false (cond);
      else
        gimple_cond_make_true (cond);
    }

  statistics_counter_event (cfun, "Replace PHI with variable", 1);

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file,
             "COND_EXPR in block %d and PHI in block %d converted to straightline code.\n",
             cond_block->index,
             bb->index);
}
/* PR66726: Factor conversion out of COND_EXPR.  If the arguments of the PHI
   stmt are conversions, factor the conversion out and apply it to the
   result of the PHI stmt instead.  COND_STMT is the controlling predicate.
   Return the newly-created PHI, if any.  */
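/* A hedged source-level sketch of what this factoring does (identifiers
   below are illustrative only):

     x = cond ? (long) a : (long) b;

   becomes

     tmp = cond ? a : b;   // new PHI over the unconverted values
     x = (long) tmp;       // single conversion of the PHI result

   which exposes tmp to the min/max and other PHI patterns matched later.  */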
static gphi *
factor_out_conditional_conversion (edge e0, edge e1, gphi *phi,
                                   tree arg0, tree arg1, gimple *cond_stmt)
{
  gimple *arg0_def_stmt = NULL, *arg1_def_stmt = NULL, *new_stmt;
  tree new_arg0 = NULL_TREE, new_arg1 = NULL_TREE;
  tree temp, result;
  gphi *newphi;
  gimple_stmt_iterator gsi, gsi_for_def;
  location_t locus = gimple_location (phi);
  enum tree_code convert_code;

  /* Handle only PHI statements with two arguments.  TODO: If all
     other arguments to PHI are INTEGER_CST or if their defining
     statements have the same unary operation, we can handle more
     than two arguments too.  */
  if (gimple_phi_num_args (phi) != 2)
    return NULL;

  /* First canonicalize to simplify tests.  */
  if (TREE_CODE (arg0) != SSA_NAME)
    {
      std::swap (arg0, arg1);
      std::swap (e0, e1);
    }

  if (TREE_CODE (arg0) != SSA_NAME
      || (TREE_CODE (arg1) != SSA_NAME
          && TREE_CODE (arg1) != INTEGER_CST))
    return NULL;

  /* Check if arg0 is an SSA_NAME and the stmt which defines arg0 is
     a conversion.  */
  arg0_def_stmt = SSA_NAME_DEF_STMT (arg0);
  if (!gimple_assign_cast_p (arg0_def_stmt))
    return NULL;

  /* Use the RHS as new_arg0.  */
  convert_code = gimple_assign_rhs_code (arg0_def_stmt);
  new_arg0 = gimple_assign_rhs1 (arg0_def_stmt);
  if (convert_code == VIEW_CONVERT_EXPR)
    {
      new_arg0 = TREE_OPERAND (new_arg0, 0);
      if (!is_gimple_reg_type (TREE_TYPE (new_arg0)))
        return NULL;
    }
  if (TREE_CODE (new_arg0) == SSA_NAME
      && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_arg0))
    return NULL;

  if (TREE_CODE (arg1) == SSA_NAME)
    {
      /* Check if arg1 is an SSA_NAME and the stmt which defines arg1
         is a conversion.  */
      arg1_def_stmt = SSA_NAME_DEF_STMT (arg1);
      if (!is_gimple_assign (arg1_def_stmt)
          || gimple_assign_rhs_code (arg1_def_stmt) != convert_code)
        return NULL;

      /* Either arg1_def_stmt or arg0_def_stmt should be conditional.  */
      if (dominated_by_p (CDI_DOMINATORS, gimple_bb (phi), gimple_bb (arg0_def_stmt))
          && dominated_by_p (CDI_DOMINATORS,
                             gimple_bb (phi), gimple_bb (arg1_def_stmt)))
        return NULL;

      /* Use the RHS as new_arg1.  */
      new_arg1 = gimple_assign_rhs1 (arg1_def_stmt);
      if (convert_code == VIEW_CONVERT_EXPR)
        new_arg1 = TREE_OPERAND (new_arg1, 0);
      if (TREE_CODE (new_arg1) == SSA_NAME
          && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_arg1))
        return NULL;
    }
  else
    {
      /* arg0_def_stmt should be conditional.  */
      if (dominated_by_p (CDI_DOMINATORS, gimple_bb (phi), gimple_bb (arg0_def_stmt)))
        return NULL;
      /* If arg1 is an INTEGER_CST, fold it to new type.  */
      if (INTEGRAL_TYPE_P (TREE_TYPE (new_arg0))
          && int_fits_type_p (arg1, TREE_TYPE (new_arg0)))
        {
          if (gimple_assign_cast_p (arg0_def_stmt))
            {
              /* For the INTEGER_CST case, we are just moving the
                 conversion from one place to another, which can often
                 hurt as the conversion moves further away from the
                 statement that computes the value.  So, perform this
                 only if new_arg0 is an operand of COND_STMT, or
                 if arg0_def_stmt is the only non-debug stmt in
                 its basic block, because then it is possible this
                 could enable further optimizations (minmax replacement
                 etc.).  See PR71016.  */
              if (new_arg0 != gimple_cond_lhs (cond_stmt)
                  && new_arg0 != gimple_cond_rhs (cond_stmt)
                  && gimple_bb (arg0_def_stmt) == e0->src)
                {
                  gsi = gsi_for_stmt (arg0_def_stmt);
                  gsi_prev_nondebug (&gsi);
                  if (!gsi_end_p (gsi))
                    {
                      if (gassign *assign
                            = dyn_cast <gassign *> (gsi_stmt (gsi)))
                        {
                          tree lhs = gimple_assign_lhs (assign);
                          enum tree_code ass_code
                            = gimple_assign_rhs_code (assign);
                          if (ass_code != MAX_EXPR && ass_code != MIN_EXPR)
                            return NULL;
                          if (lhs != gimple_assign_rhs1 (arg0_def_stmt))
                            return NULL;
                          gsi_prev_nondebug (&gsi);
                          if (!gsi_end_p (gsi))
                            return NULL;
                        }
                      else
                        return NULL;
                    }
                  gsi = gsi_for_stmt (arg0_def_stmt);
                  gsi_next_nondebug (&gsi);
                  if (!gsi_end_p (gsi))
                    return NULL;
                }
              new_arg1 = fold_convert (TREE_TYPE (new_arg0), arg1);
            }
          else
            return NULL;
        }
      else
        return NULL;
    }

  /* If arg0/arg1 have > 1 use, then this transformation actually increases
     the number of expressions evaluated at runtime.  */
  if (!has_single_use (arg0)
      || (arg1_def_stmt && !has_single_use (arg1)))
    return NULL;

  /* If the types of new_arg0 and new_arg1 are different, bail out.  */
  if (!types_compatible_p (TREE_TYPE (new_arg0), TREE_TYPE (new_arg1)))
    return NULL;

  /* Create a new PHI stmt.  */
  result = PHI_RESULT (phi);
  temp = make_ssa_name (TREE_TYPE (new_arg0), NULL);
  newphi = create_phi_node (temp, gimple_bb (phi));

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "PHI ");
      print_generic_expr (dump_file, gimple_phi_result (phi));
      fprintf (dump_file,
               " changed to factor conversion out from COND_EXPR.\n");
      fprintf (dump_file, "New stmt with CAST that defines ");
      print_generic_expr (dump_file, result);
      fprintf (dump_file, ".\n");
    }

  /* Remove the old casts that have a single use.  */
  gsi_for_def = gsi_for_stmt (arg0_def_stmt);
  gsi_remove (&gsi_for_def, true);
  release_defs (arg0_def_stmt);

  if (arg1_def_stmt)
    {
      gsi_for_def = gsi_for_stmt (arg1_def_stmt);
      gsi_remove (&gsi_for_def, true);
      release_defs (arg1_def_stmt);
    }

  add_phi_arg (newphi, new_arg0, e0, locus);
  add_phi_arg (newphi, new_arg1, e1, locus);

  /* Create the conversion stmt and insert it.  */
  if (convert_code == VIEW_CONVERT_EXPR)
    {
      temp = fold_build1 (VIEW_CONVERT_EXPR, TREE_TYPE (result), temp);
      new_stmt = gimple_build_assign (result, temp);
    }
  else
    new_stmt = gimple_build_assign (result, convert_code, temp);
  gsi = gsi_after_labels (gimple_bb (phi));
  gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);

  /* Remove the original PHI stmt.  */
  gsi = gsi_for_stmt (phi);
  gsi_remove (&gsi, true);

  statistics_counter_event (cfun, "factored out cast", 1);

  return newphi;
}
/* Optimize
   # x_5 in range [cst1, cst2] where cst2 = cst1 + 1
   if (x_5 op cstN) # where op is == or != and N is 1 or 2
     goto bb3;
   else
     goto bb4;
   bb3:
   bb4:
   # r_6 = PHI<cst3(2), cst4(3)> # where cst3 == cst4 + 1 or cst4 == cst3 + 1

   to r_6 = x_5 + (min (cst3, cst4) - cst1) or
   r_6 = (min (cst3, cst4) + cst1) - x_5 depending on op, N and which
   of cst3 and cst4 is smaller.  */
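/* A concrete sketch with hypothetical values: with x known to be in [0, 1],

     r = (x == 0) ? 3 : 4;

   becomes the branchless

     r = x + 3;

   and with the constants swapped, r = (x == 0) ? 4 : 3 becomes the
   MINUS_EXPR form r = 4 - x.  */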
static bool
two_value_replacement (basic_block cond_bb, basic_block middle_bb,
                       edge e1, gphi *phi, tree arg0, tree arg1)
{
  /* Only look for adjacent integer constants.  */
  if (!INTEGRAL_TYPE_P (TREE_TYPE (arg0))
      || !INTEGRAL_TYPE_P (TREE_TYPE (arg1))
      || TREE_CODE (arg0) != INTEGER_CST
      || TREE_CODE (arg1) != INTEGER_CST
      || (tree_int_cst_lt (arg0, arg1)
          ? wi::to_widest (arg0) + 1 != wi::to_widest (arg1)
          : wi::to_widest (arg1) + 1 != wi::to_widest (arg0)))
    return false;

  if (!empty_block_p (middle_bb))
    return false;

  gimple *stmt = last_stmt (cond_bb);
  tree lhs = gimple_cond_lhs (stmt);
  tree rhs = gimple_cond_rhs (stmt);

  if (TREE_CODE (lhs) != SSA_NAME
      || !INTEGRAL_TYPE_P (TREE_TYPE (lhs))
      || TREE_CODE (rhs) != INTEGER_CST)
    return false;

  switch (gimple_cond_code (stmt))
    {
    case EQ_EXPR:
    case NE_EXPR:
      break;
    default:
      return false;
    }

  /* Defer boolean x ? 0 : {1,-1} or x ? {1,-1} : 0 to
     match_simplify_replacement.  */
  if (TREE_CODE (TREE_TYPE (lhs)) == BOOLEAN_TYPE
      && (integer_zerop (arg0)
          || integer_zerop (arg1)
          || TREE_CODE (TREE_TYPE (arg0)) == BOOLEAN_TYPE
          || (TYPE_PRECISION (TREE_TYPE (arg0))
              <= TYPE_PRECISION (TREE_TYPE (lhs)))))
    return false;

  wide_int min, max;
  value_range r;
  get_range_query (cfun)->range_of_expr (r, lhs);

  if (r.kind () == VR_RANGE)
    {
      min = r.lower_bound ();
      max = r.upper_bound ();
    }
  else
    {
      int prec = TYPE_PRECISION (TREE_TYPE (lhs));
      signop sgn = TYPE_SIGN (TREE_TYPE (lhs));
      min = wi::min_value (prec, sgn);
      max = wi::max_value (prec, sgn);
    }
  if (min + 1 != max
      || (wi::to_wide (rhs) != min
          && wi::to_wide (rhs) != max))
    return false;

  /* We need to know which is the true edge and which is the false
     edge so that we know when to invert the condition below.  */
  edge true_edge, false_edge;
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
  if ((gimple_cond_code (stmt) == EQ_EXPR)
      ^ (wi::to_wide (rhs) == max)
      ^ (e1 == false_edge))
    std::swap (arg0, arg1);

  tree type;
  if (TYPE_PRECISION (TREE_TYPE (lhs)) == TYPE_PRECISION (TREE_TYPE (arg0)))
    {
      /* Avoid performing the arithmetic in bool type which has different
         semantics, otherwise prefer unsigned types from the two with
         the same precision.  */
      if (TREE_CODE (TREE_TYPE (arg0)) == BOOLEAN_TYPE
          || !TYPE_UNSIGNED (TREE_TYPE (arg0)))
        type = TREE_TYPE (lhs);
      else
        type = TREE_TYPE (arg0);
    }
  else if (TYPE_PRECISION (TREE_TYPE (lhs)) > TYPE_PRECISION (TREE_TYPE (arg0)))
    type = TREE_TYPE (lhs);
  else
    type = TREE_TYPE (arg0);

  min = wide_int::from (min, TYPE_PRECISION (type),
                        TYPE_SIGN (TREE_TYPE (lhs)));
  wide_int a = wide_int::from (wi::to_wide (arg0), TYPE_PRECISION (type),
                               TYPE_SIGN (TREE_TYPE (arg0)));
  enum tree_code code;
  wi::overflow_type ovf;
  if (tree_int_cst_lt (arg0, arg1))
    {
      code = PLUS_EXPR;
      a -= min;
      if (!TYPE_UNSIGNED (type))
        {
          /* lhs is known to be in range [min, min+1] and we want to add a
             to it.  Check if that operation can overflow for those 2 values
             and if yes, force unsigned type.  */
          wi::add (min + (wi::neg_p (a) ? 0 : 1), a, SIGNED, &ovf);
          if (ovf)
            type = unsigned_type_for (type);
        }
    }
  else
    {
      code = MINUS_EXPR;
      a += min;
      if (!TYPE_UNSIGNED (type))
        {
          /* lhs is known to be in range [min, min+1] and we want to subtract
             it from a.  Check if that operation can overflow for those 2
             values and if yes, force unsigned type.  */
          wi::sub (a, min + (wi::neg_p (min) ? 0 : 1), SIGNED, &ovf);
          if (ovf)
            type = unsigned_type_for (type);
        }
    }

  tree arg = wide_int_to_tree (type, a);
  gimple_seq stmts = NULL;
  lhs = gimple_convert (&stmts, type, lhs);
  tree new_rhs;
  if (code == PLUS_EXPR)
    new_rhs = gimple_build (&stmts, PLUS_EXPR, type, lhs, arg);
  else
    new_rhs = gimple_build (&stmts, MINUS_EXPR, type, arg, lhs);
  new_rhs = gimple_convert (&stmts, TREE_TYPE (arg0), new_rhs);
  gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
  gsi_insert_seq_before (&gsi, stmts, GSI_SAME_STMT);

  replace_phi_edge_with_variable (cond_bb, e1, phi, new_rhs);

  /* Note that we optimized this PHI.  */
  return true;
}
/* Return TRUE if SEQ/OP pair should be allowed during early phiopt.
   Currently this is to allow MIN/MAX and ABS/NEGATE and constants.  */
static bool
phiopt_early_allow (gimple_seq &seq, gimple_match_op &op)
{
  /* Don't allow functions.  */
  if (!op.code.is_tree_code ())
    return false;
  tree_code code = (tree_code) op.code;

  /* For a non-empty sequence, only allow one statement.  */
  if (!gimple_seq_empty_p (seq))
    {
      /* Check to make sure op was already an SSA_NAME.  */
      if (code != SSA_NAME)
        return false;
      if (!gimple_seq_singleton_p (seq))
        return false;
      gimple *stmt = gimple_seq_first_stmt (seq);
      /* Only allow assignments.  */
      if (!is_gimple_assign (stmt))
        return false;
      if (gimple_assign_lhs (stmt) != op.ops[0])
        return false;
      code = gimple_assign_rhs_code (stmt);
    }

  switch (code)
    {
    case MIN_EXPR:
    case MAX_EXPR:
    case ABS_EXPR:
    case ABSU_EXPR:
    case NEGATE_EXPR:
    case SSA_NAME:
      return true;
    case INTEGER_CST:
    case REAL_CST:
    case VECTOR_CST:
    case FIXED_CST:
      return true;
    default:
      return false;
    }
}
/* gimple_simplify_phiopt is like gimple_simplify but designed for PHIOPT.
   Return NULL if nothing can be simplified, otherwise return the simplified
   value, with any preparation statements pushed onto SEQ.  Non-allowed tree
   codes are rejected if EARLY_P is set.
   Takes the comparison from COMP_STMT and two args, ARG0 and ARG1, and tries
   to simplify CMP ? ARG0 : ARG1.
   Also tries to simplify (!CMP) ? ARG1 : ARG0 if the direct form failed.  */
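/* One illustrative case (not an exhaustive list): given the condition
   a < b and PHI arguments a and b, the folded COND_EXPR

     a < b ? a : b

   simplifies to MIN_EXPR <a, b>, which is what this helper hands back
   to match_simplify_replacement.  */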
static tree
gimple_simplify_phiopt (bool early_p, tree type, gimple *comp_stmt,
                        tree arg0, tree arg1,
                        gimple_seq *seq)
{
  tree result;
  gimple_seq seq1 = NULL;
  enum tree_code comp_code = gimple_cond_code (comp_stmt);
  location_t loc = gimple_location (comp_stmt);
  tree cmp0 = gimple_cond_lhs (comp_stmt);
  tree cmp1 = gimple_cond_rhs (comp_stmt);
  /* To handle special cases like floating point comparison, it is easier and
     less error-prone to build a tree and gimplify it on the fly though it is
     less efficient.
     Don't use fold_build2 here as that might create (bool)a instead of just
     "a != 0".  */
  tree cond = build2_loc (loc, comp_code, boolean_type_node,
                          cmp0, cmp1);
  gimple_match_op op (gimple_match_cond::UNCOND,
                      COND_EXPR, type, cond, arg0, arg1);

  if (op.resimplify (&seq1, follow_all_ssa_edges))
    {
      /* In early phiopt we only want to allow some generated tree codes.  */
      if (!early_p
          || phiopt_early_allow (seq1, op))
        {
          result = maybe_push_res_to_seq (&op, &seq1);
          if (result)
            {
              if (loc != UNKNOWN_LOCATION)
                annotate_all_with_location (seq1, loc);
              gimple_seq_add_seq_without_update (seq, seq1);
              return result;
            }
        }
    }
  gimple_seq_discard (seq1);
  seq1 = NULL;

  /* Try the inverted comparison, that is !COMP ? ARG1 : ARG0.  */
  comp_code = invert_tree_comparison (comp_code, HONOR_NANS (cmp0));

  if (comp_code == ERROR_MARK)
    return NULL;

  cond = build2_loc (loc,
                     comp_code, boolean_type_node,
                     cmp0, cmp1);
  gimple_match_op op1 (gimple_match_cond::UNCOND,
                       COND_EXPR, type, cond, arg1, arg0);

  if (op1.resimplify (&seq1, follow_all_ssa_edges))
    {
      /* In early phiopt we only want to allow some generated tree codes.  */
      if (!early_p
          || phiopt_early_allow (seq1, op1))
        {
          result = maybe_push_res_to_seq (&op1, &seq1);
          if (result)
            {
              if (loc != UNKNOWN_LOCATION)
                annotate_all_with_location (seq1, loc);
              gimple_seq_add_seq_without_update (seq, seq1);
              return result;
            }
        }
    }

  gimple_seq_discard (seq1);

  return NULL;
}
/* The function match_simplify_replacement does the main work of doing the
   replacement using match and simplify.  Return true if the replacement is
   done.  Otherwise return false.
   BB is the basic block on which the replacement is going to be done.  ARG0
   is argument 0 from PHI.  Likewise for ARG1.  */
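/* A hedged sketch of the shape this handles:

     int f (int a)
     {
       int t;
       if (a < 0)
         t = -a;        // middle_bb: one cheap preparation statement
       else
         t = a;
       return t;
     }

   The PHI <-a, a> under the condition a < 0 simplifies to ABS_EXPR <a>,
   the negate is moved before the (removed) conditional, and the PHI is
   replaced by the simplified value.  */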
static bool
match_simplify_replacement (basic_block cond_bb, basic_block middle_bb,
                            edge e0, edge e1, gphi *phi,
                            tree arg0, tree arg1, bool early_p)
{
  gimple *stmt;
  gimple_stmt_iterator gsi;
  edge true_edge, false_edge;
  gimple_seq seq = NULL;
  tree result;
  gimple *stmt_to_move = NULL;

  /* Special case A ? B : B as this will always simplify to B.  */
  if (operand_equal_for_phi_arg_p (arg0, arg1))
    return false;

  /* If the basic block only has a cheap preparation statement,
     allow it and move it once the transformation is done.  */
  if (!empty_block_p (middle_bb))
    {
      if (!single_pred_p (middle_bb))
        return false;

      stmt_to_move = last_and_only_stmt (middle_bb);
      if (!stmt_to_move)
        return false;

      if (gimple_vuse (stmt_to_move))
        return false;

      if (gimple_could_trap_p (stmt_to_move)
          || gimple_has_side_effects (stmt_to_move))
        return false;

      if (gimple_uses_undefined_value_p (stmt_to_move))
        return false;

      /* Allow only assignments, no calls.
         As const calls don't match any of the above, yet they could
         still have some side-effects - they could contain
         gimple_could_trap_p statements, like floating point
         exceptions or integer division by zero.  See PR70586.
         FIXME: perhaps gimple_has_side_effects or gimple_could_trap_p
         should handle this.  */
      if (!is_gimple_assign (stmt_to_move))
        return false;

      tree lhs = gimple_assign_lhs (stmt_to_move);
      gimple *use_stmt;
      use_operand_p use_p;

      /* Allow only a statement which feeds into the phi.  */
      if (!lhs || TREE_CODE (lhs) != SSA_NAME
          || !single_imm_use (lhs, &use_p, &use_stmt)
          || use_stmt != phi)
        return false;
    }

  /* At this point we know we have a GIMPLE_COND with two successors.
     One successor is BB, the other successor is an empty block which
     falls through into BB.

     There is a single PHI node at the join point (BB).

     So, given the condition COND, and the two PHI arguments, match and
     simplify can happen on (COND) ? arg0 : arg1.  */

  stmt = last_stmt (cond_bb);

  /* We need to know which is the true edge and which is the false
     edge so that we know when to invert the condition below.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
  if (e1 == true_edge || e0 == false_edge)
    std::swap (arg0, arg1);

  tree type = TREE_TYPE (gimple_phi_result (phi));
  result = gimple_simplify_phiopt (early_p, type, stmt,
                                   arg0, arg1,
                                   &seq);
  if (!result)
    return false;

  gsi = gsi_last_bb (cond_bb);
  /* Insert the sequence generated from gimple_simplify_phiopt.  */
  if (seq)
    gsi_insert_seq_before (&gsi, seq, GSI_CONTINUE_LINKING);

  /* If there was a statement to move and the result of the statement
     is going to be used, move it to right before the original
     conditional.  */
  if (stmt_to_move
      && (gimple_assign_lhs (stmt_to_move) == result
          || !has_single_use (gimple_assign_lhs (stmt_to_move))))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "statement un-sinked:\n");
          print_gimple_stmt (dump_file, stmt_to_move, 0,
                             TDF_VOPS|TDF_MEMSYMS);
        }
      gimple_stmt_iterator gsi1 = gsi_for_stmt (stmt_to_move);
      gsi_move_before (&gsi1, &gsi);
      reset_flow_sensitive_info (gimple_assign_lhs (stmt_to_move));
    }

  replace_phi_edge_with_variable (cond_bb, e1, phi, result);

  /* Add the statistic here even though replace_phi_edge_with_variable
     already does it, as we want to be able to count when match-simplify
     happens vs the others.  */
  statistics_counter_event (cfun, "match-simplify PHI replacement", 1);

  /* Note that we optimized this PHI.  */
  return true;
}
/* Update *ARG which is defined in STMT so that it contains the
   computed value if that seems profitable.  Return true if the
   statement is made dead by that rewriting.  */

static bool
jump_function_from_stmt (tree *arg, gimple *stmt)
{
  enum tree_code code = gimple_assign_rhs_code (stmt);
  if (code == ADDR_EXPR)
    {
      /* For arg = &p->i transform it to p, if possible.  */
      tree rhs1 = gimple_assign_rhs1 (stmt);
      poly_int64 offset;
      tree tem = get_addr_base_and_unit_offset (TREE_OPERAND (rhs1, 0),
                                                &offset);
      if (tem
          && TREE_CODE (tem) == MEM_REF
          && known_eq (mem_ref_offset (tem) + offset, 0))
        {
          *arg = TREE_OPERAND (tem, 0);
          return true;
        }
    }
  /* TODO: Much like IPA-CP jump-functions we want to handle constant
     additions symbolically here, and we'd need to update the comparison
     code that compares the arg + cst tuples in our caller.  For now the
     code above exactly handles the VEC_BASE pattern from vec.h.  */
  return false;
}
/* RHS is a source argument in a BIT_AND_EXPR which feeds a conditional
   of the form SSA_NAME NE 0.

   If RHS is fed by a simple EQ_EXPR comparison of two values, see if
   the two input values of the EQ_EXPR match arg0 and arg1.

   If so update *code and return TRUE.  Otherwise return FALSE.  */

static bool
rhs_is_fed_for_value_replacement (const_tree arg0, const_tree arg1,
                                  enum tree_code *code, const_tree rhs)
{
  /* Obviously if RHS is not an SSA_NAME, we can't look at the defining
     statement.  */
  if (TREE_CODE (rhs) == SSA_NAME)
    {
      gimple *def1 = SSA_NAME_DEF_STMT (rhs);

      /* Verify the defining statement has an EQ_EXPR on the RHS.  */
      if (is_gimple_assign (def1) && gimple_assign_rhs_code (def1) == EQ_EXPR)
        {
          /* Finally verify the source operands of the EQ_EXPR are equal
             to arg0 and arg1.  */
          tree op0 = gimple_assign_rhs1 (def1);
          tree op1 = gimple_assign_rhs2 (def1);
          if ((operand_equal_for_phi_arg_p (arg0, op0)
               && operand_equal_for_phi_arg_p (arg1, op1))
              || (operand_equal_for_phi_arg_p (arg0, op1)
                  && operand_equal_for_phi_arg_p (arg1, op0)))
            {
              /* We will perform the optimization.  */
              *code = gimple_assign_rhs_code (def1);
              return true;
            }
        }
    }
  return false;
}
/* Return TRUE if arg0/arg1 are equal to the rhs/lhs or lhs/rhs of COND.

   Also return TRUE if arg0/arg1 are equal to the source arguments of an
   EQ comparison feeding a BIT_AND_EXPR which feeds COND.

   Return FALSE otherwise.  */
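/* Illustrative GIMPLE for the BIT_AND_EXPR case (a sketch only):

     _1 = a == b;
     _2 = _1 & c_3;
     if (_2 != 0) ...

   Here PHI arguments matching a/b let value replacement treat the guard
   as an equality test on a and b.  */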
static bool
operand_equal_for_value_replacement (const_tree arg0, const_tree arg1,
                                     enum tree_code *code, gimple *cond)
{
  gimple *def;
  tree lhs = gimple_cond_lhs (cond);
  tree rhs = gimple_cond_rhs (cond);

  if ((operand_equal_for_phi_arg_p (arg0, lhs)
       && operand_equal_for_phi_arg_p (arg1, rhs))
      || (operand_equal_for_phi_arg_p (arg1, lhs)
          && operand_equal_for_phi_arg_p (arg0, rhs)))
    return true;

  /* Now handle more complex case where we have an EQ comparison
     which feeds a BIT_AND_EXPR which feeds COND.

     First verify that COND is of the form SSA_NAME NE 0.  */
  if (*code != NE_EXPR || !integer_zerop (rhs)
      || TREE_CODE (lhs) != SSA_NAME)
    return false;

  /* Now ensure that SSA_NAME is set by a BIT_AND_EXPR.  */
  def = SSA_NAME_DEF_STMT (lhs);
  if (!is_gimple_assign (def) || gimple_assign_rhs_code (def) != BIT_AND_EXPR)
    return false;

  /* Now verify arg0/arg1 correspond to the source arguments of an
     EQ comparison feeding the BIT_AND_EXPR.  */

  tree tmp = gimple_assign_rhs1 (def);
  if (rhs_is_fed_for_value_replacement (arg0, arg1, code, tmp))
    return true;

  tmp = gimple_assign_rhs2 (def);
  if (rhs_is_fed_for_value_replacement (arg0, arg1, code, tmp))
    return true;

  return false;
}
/* Returns true if ARG is a neutral element for operation CODE
   on the RIGHT side.  */

static bool
neutral_element_p (tree_code code, tree arg, bool right)
{
  switch (code)
    {
    case PLUS_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
      return integer_zerop (arg);

    case LROTATE_EXPR:
    case RROTATE_EXPR:
    case LSHIFT_EXPR:
    case RSHIFT_EXPR:
    case MINUS_EXPR:
    case POINTER_PLUS_EXPR:
      return right && integer_zerop (arg);

    case MULT_EXPR:
      return integer_onep (arg);

    case TRUNC_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case EXACT_DIV_EXPR:
      return right && integer_onep (arg);

    case BIT_AND_EXPR:
      return integer_all_onesp (arg);

    default:
      return false;
    }
}
/* Returns true if ARG is an absorbing element for operation CODE.  */

static bool
absorbing_element_p (tree_code code, tree arg, bool right, tree rval)
{
  switch (code)
    {
    case BIT_IOR_EXPR:
      return integer_all_onesp (arg);

    case MULT_EXPR:
    case BIT_AND_EXPR:
      return integer_zerop (arg);

    case LSHIFT_EXPR:
    case RSHIFT_EXPR:
    case LROTATE_EXPR:
    case RROTATE_EXPR:
      return !right && integer_zerop (arg);

    case TRUNC_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case EXACT_DIV_EXPR:
    case TRUNC_MOD_EXPR:
    case CEIL_MOD_EXPR:
    case FLOOR_MOD_EXPR:
    case ROUND_MOD_EXPR:
      return (!right
              && integer_zerop (arg)
              && tree_single_nonzero_warnv_p (rval, NULL));

    default:
      return false;
    }
}
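/* Worked examples of the two predicates above (illustrative only):

     x + 0 == x, x | 0 == x, x * 1 == x    -- 0 and 1 are neutral elements
     x - 0 == x  but  0 - x != x           -- hence the RIGHT restriction

     x * 0 == 0, x & 0 == 0                -- 0 absorbs on either side
     0 << x == 0, 0 % x == 0 (for x != 0)  -- 0 absorbs only on the left

   These let value_replacement rewrite e.g. (x != 0) ? x + y : y as x + y.  */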
/* The function value_replacement does the main work of doing the value
   replacement.  Return non-zero if the replacement is done.  Otherwise
   return 0.  If we remove the middle basic block, return 2.
   BB is the basic block on which the replacement is going to be done.  ARG0
   is argument 0 from the PHI.  Likewise for ARG1.  */
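/* A minimal sketch of the simplest case handled here:

     int f (int x, int y)
     {
       int r;
       if (x == y)
         r = y;         // equal to x on this path anyway
       else
         r = x;
       return r;        // PHI <y, x> becomes just x
     }

   The PHI argument on the equality edge is interchangeable with the other
   comparison operand, so the whole conditional reduces to r = x.  */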
static int
value_replacement (basic_block cond_bb, basic_block middle_bb,
                   edge e0, edge e1, gphi *phi, tree arg0, tree arg1)
{
  gimple_stmt_iterator gsi;
  gimple *cond;
  edge true_edge, false_edge;
  enum tree_code code;
  bool empty_or_with_defined_p = true;

  /* If the type says honor signed zeros we cannot do this
     optimization.  */
  if (HONOR_SIGNED_ZEROS (arg1))
    return 0;

  /* If there is a statement in MIDDLE_BB that defines one of the PHI
     arguments, then adjust arg0 or arg1.  */
  gsi = gsi_start_nondebug_after_labels_bb (middle_bb);
  while (!gsi_end_p (gsi))
    {
      gimple *stmt = gsi_stmt (gsi);
      tree lhs;
      gsi_next_nondebug (&gsi);
      if (!is_gimple_assign (stmt))
        {
          if (gimple_code (stmt) != GIMPLE_PREDICT
              && gimple_code (stmt) != GIMPLE_NOP)
            empty_or_with_defined_p = false;
          continue;
        }
      /* Now try to adjust arg0 or arg1 according to the computation
         in the statement.  */
      lhs = gimple_assign_lhs (stmt);
      if (!(lhs == arg0
            && jump_function_from_stmt (&arg0, stmt))
          || (lhs == arg1
              && jump_function_from_stmt (&arg1, stmt)))
        empty_or_with_defined_p = false;
    }

  cond = last_stmt (cond_bb);
  code = gimple_cond_code (cond);

  /* This transformation is only valid for equality comparisons.  */
  if (code != NE_EXPR && code != EQ_EXPR)
    return 0;

  /* We need to know which is the true edge and which is the false
     edge so that we know whether we have abs or negative abs.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);

  /* At this point we know we have a COND_EXPR with two successors.
     One successor is BB, the other successor is an empty block which
     falls through into BB.

     The condition for the COND_EXPR is known to be NE_EXPR or EQ_EXPR.

     There is a single PHI node at the join point (BB) with two arguments.

     We now need to verify that the two arguments in the PHI node match
     the two arguments to the equality comparison.  */

  if (operand_equal_for_value_replacement (arg0, arg1, &code, cond))
    {
      edge e;
      tree arg;

      /* For NE_EXPR, we want to build an assignment result = arg where
         arg is the PHI argument associated with the true edge.  For
         EQ_EXPR we want the PHI argument associated with the false edge.  */
      e = (code == NE_EXPR ? true_edge : false_edge);

      /* Unfortunately, E may not reach BB (it may instead have gone to
         OTHER_BLOCK).  If that is the case, then we want the single outgoing
         edge from OTHER_BLOCK which reaches BB and represents the desired
         path from COND_BLOCK.  */
      if (e->dest == middle_bb)
        e = single_succ_edge (e->dest);

      /* Now we know the incoming edge to BB that has the argument for the
         RHS of our new assignment statement.  */
      if (e0 == e)
        arg = arg0;
      else
        arg = arg1;

      /* If the middle basic block was empty or is defining the PHI
         arguments, and this is a single PHI whose args differ for the
         edges e0 and e1, then we can remove the middle basic block.  */
      if (empty_or_with_defined_p
          && single_non_singleton_phi_for_edges (phi_nodes (gimple_bb (phi)),
                                                 e0, e1) == phi)
        {
          replace_phi_edge_with_variable (cond_bb, e1, phi, arg);
          /* Note that we optimized this PHI.  */
          return 2;
        }
      else
        {
          if (!single_pred_p (middle_bb))
            return 0;
          statistics_counter_event (cfun, "Replace PHI with "
                                    "variable/value_replacement", 1);

          /* Replace the PHI arguments with arg.  */
          SET_PHI_ARG_DEF (phi, e0->dest_idx, arg);
          SET_PHI_ARG_DEF (phi, e1->dest_idx, arg);
          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "PHI ");
              print_generic_expr (dump_file, gimple_phi_result (phi));
              fprintf (dump_file, " reduced for COND_EXPR in block %d to ",
                       cond_bb->index);
              print_generic_expr (dump_file, arg);
              fprintf (dump_file, ".\n");
            }
          return 1;
        }
    }

  if (!single_pred_p (middle_bb))
    return 0;

  /* Now optimize (x != 0) ? x + y : y to just x + y.  */
  gsi = gsi_last_nondebug_bb (middle_bb);
  if (gsi_end_p (gsi))
    return 0;

  gimple *assign = gsi_stmt (gsi);
  if (!is_gimple_assign (assign)
      || gimple_assign_rhs_class (assign) != GIMPLE_BINARY_RHS
      || (!INTEGRAL_TYPE_P (TREE_TYPE (arg0))
          && !POINTER_TYPE_P (TREE_TYPE (arg0))))
    return 0;

  /* Punt if there are (degenerate) PHIs in middle_bb, there should not be.  */
  if (!gimple_seq_empty_p (phi_nodes (middle_bb)))
    return 0;

  /* Allow up to 2 cheap preparation statements that prepare argument
     for assign, e.g.:
      if (y_4 != 0)
        goto <bb 3>;
      else
        goto <bb 4>;
     <bb 3>:
      _1 = (int) y_4;
      iftmp.0_6 = x_5(D) r<< _1;
     <bb 4>:
      # iftmp.0_2 = PHI <iftmp.0_6(3), x_5(D)(2)>
     or:
      if (y_3(D) == 0)
        goto <bb 4>;
      else
        goto <bb 3>;
     <bb 3>:
      y_4 = y_3(D) & 31;
      _1 = (int) y_4;
      _6 = x_5(D) r<< _1;
     <bb 4>:
      # _2 = PHI <x_5(D)(2), _6(3)>  */
  gimple *prep_stmt[2] = { NULL, NULL };
  int prep_cnt;
  for (prep_cnt = 0; ; prep_cnt++)
    {
      gsi_prev_nondebug (&gsi);
      if (gsi_end_p (gsi))
        break;

      gimple *g = gsi_stmt (gsi);
      if (gimple_code (g) == GIMPLE_LABEL)
        break;

      if (prep_cnt == 2 || !is_gimple_assign (g))
        return 0;

      tree lhs = gimple_assign_lhs (g);
      tree rhs1 = gimple_assign_rhs1 (g);
      use_operand_p use_p;
      gimple *use_stmt;
      if (TREE_CODE (lhs) != SSA_NAME
          || TREE_CODE (rhs1) != SSA_NAME
          || !INTEGRAL_TYPE_P (TREE_TYPE (lhs))
          || !INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
          || !single_imm_use (lhs, &use_p, &use_stmt)
          || use_stmt != (prep_cnt ? prep_stmt[prep_cnt - 1] : assign))
        return 0;
      switch (gimple_assign_rhs_code (g))
        {
        CASE_CONVERT:
          break;
        case PLUS_EXPR:
        case BIT_AND_EXPR:
        case BIT_IOR_EXPR:
        case BIT_XOR_EXPR:
          if (TREE_CODE (gimple_assign_rhs2 (g)) != INTEGER_CST)
            return 0;
          break;
        default:
          return 0;
        }
      prep_stmt[prep_cnt] = g;
    }

  /* Only transform if it removes the condition.  */
  if (!single_non_singleton_phi_for_edges (phi_nodes (gimple_bb (phi)), e0, e1))
    return 0;

  /* Size-wise, this is always profitable.  */
  if (optimize_bb_for_speed_p (cond_bb)
      /* The special case is useless if it has a low probability.  */
      && profile_status_for_fn (cfun) != PROFILE_ABSENT
      && EDGE_PRED (middle_bb, 0)->probability < profile_probability::even ()
      /* If assign is cheap, there is no point avoiding it.  */
      && estimate_num_insns_seq (bb_seq (middle_bb), &eni_time_weights)
         >= 3 * estimate_num_insns (cond, &eni_time_weights))
    return 0;

  tree lhs = gimple_assign_lhs (assign);
  tree rhs1 = gimple_assign_rhs1 (assign);
  tree rhs2 = gimple_assign_rhs2 (assign);
  enum tree_code code_def = gimple_assign_rhs_code (assign);
  tree cond_lhs = gimple_cond_lhs (cond);
  tree cond_rhs = gimple_cond_rhs (cond);

  /* Propagate the cond_rhs constant through preparation stmts,
     make sure UB isn't invoked while doing that.  */
  for (int i = prep_cnt - 1; i >= 0; --i)
    {
      gimple *g = prep_stmt[i];
      tree grhs1 = gimple_assign_rhs1 (g);
      if (!operand_equal_for_phi_arg_p (cond_lhs, grhs1))
        return 0;
      cond_lhs = gimple_assign_lhs (g);
      cond_rhs = fold_convert (TREE_TYPE (grhs1), cond_rhs);
      if (TREE_CODE (cond_rhs) != INTEGER_CST
          || TREE_OVERFLOW (cond_rhs))
        return 0;
      if (gimple_assign_rhs_class (g) == GIMPLE_BINARY_RHS)
        {
          cond_rhs = int_const_binop (gimple_assign_rhs_code (g), cond_rhs,
                                      gimple_assign_rhs2 (g));
          if (TREE_OVERFLOW (cond_rhs))
            return 0;
        }
      cond_rhs = fold_convert (TREE_TYPE (cond_lhs), cond_rhs);
      if (TREE_CODE (cond_rhs) != INTEGER_CST
          || TREE_OVERFLOW (cond_rhs))
        return 0;
    }

  if (((code == NE_EXPR && e1 == false_edge)
       || (code == EQ_EXPR && e1 == true_edge))
      && arg0 == lhs
      && ((arg1 == rhs1
           && operand_equal_for_phi_arg_p (rhs2, cond_lhs)
           && neutral_element_p (code_def, cond_rhs, true))
          || (arg1 == rhs2
              && operand_equal_for_phi_arg_p (rhs1, cond_lhs)
              && neutral_element_p (code_def, cond_rhs, false))
          || (operand_equal_for_phi_arg_p (arg1, cond_rhs)
              && ((operand_equal_for_phi_arg_p (rhs2, cond_lhs)
                   && absorbing_element_p (code_def, cond_rhs, true, rhs2))
                  || (operand_equal_for_phi_arg_p (rhs1, cond_lhs)
                      && absorbing_element_p (code_def,
                                              cond_rhs, false, rhs2))))))
    {
      gsi = gsi_for_stmt (cond);
      /* Moving ASSIGN might change VR of lhs, e.g. when moving u_6
         def-stmt in:
         if (n_5 != 0)
           goto <bb 3>;
         else
           goto <bb 4>;

         <bb 3>:
         # RANGE [0, 4294967294]
         u_6 = n_5 + 4294967295;

         <bb 4>:
         # u_3 = PHI <u_6(3), 4294967295(2)>  */
      reset_flow_sensitive_info (lhs);
      gimple_stmt_iterator gsi_from;
      for (int i = prep_cnt - 1; i >= 0; --i)
        {
          tree plhs = gimple_assign_lhs (prep_stmt[i]);
          reset_flow_sensitive_info (plhs);
          gsi_from = gsi_for_stmt (prep_stmt[i]);
          gsi_move_before (&gsi_from, &gsi);
        }
      gsi_from = gsi_for_stmt (assign);
      gsi_move_before (&gsi_from, &gsi);
      replace_phi_edge_with_variable (cond_bb, e1, phi, lhs);
      return 2;
    }

  return 0;
}
/* The function minmax_replacement does the main work of doing the minmax
   replacement.  Return true if the replacement is done.  Otherwise return
   false.
   BB is the basic block on which the replacement is going to be done.  ARG0
   is argument 0 from the PHI.  Likewise for ARG1.  */
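/* The canonical shape this turns into MIN_EXPR/MAX_EXPR (a sketch):

     int f (int a, int b)
     {
       int r;
       if (a < b)
         r = a;
       else
         r = b;
       return r;        // becomes r = MIN_EXPR <a, b>
     }

   plus the bounded variants where the middle block already contains one
   MIN/MAX (see the "Recognize the following case" comment below).  */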
static bool
minmax_replacement (basic_block cond_bb, basic_block middle_bb,
                    edge e0, edge e1, gphi *phi, tree arg0, tree arg1)
{
  tree result;
  edge true_edge, false_edge;
  enum tree_code minmax, ass_code;
  tree smaller, larger, arg_true, arg_false;
  gimple_stmt_iterator gsi, gsi_from;

  tree type = TREE_TYPE (PHI_RESULT (phi));

  /* The optimization may be unsafe due to NaNs.  */
  if (HONOR_NANS (type) || HONOR_SIGNED_ZEROS (type))
    return false;

  gcond *cond = as_a <gcond *> (last_stmt (cond_bb));
  enum tree_code cmp = gimple_cond_code (cond);
  tree rhs = gimple_cond_rhs (cond);

  /* Turn EQ/NE of extreme values to order comparisons.  */
  if ((cmp == NE_EXPR || cmp == EQ_EXPR)
      && TREE_CODE (rhs) == INTEGER_CST
      && INTEGRAL_TYPE_P (TREE_TYPE (rhs)))
    {
      if (wi::eq_p (wi::to_wide (rhs), wi::min_value (TREE_TYPE (rhs))))
        {
          cmp = (cmp == EQ_EXPR) ? LT_EXPR : GE_EXPR;
          rhs = wide_int_to_tree (TREE_TYPE (rhs),
                                  wi::min_value (TREE_TYPE (rhs)) + 1);
        }
      else if (wi::eq_p (wi::to_wide (rhs), wi::max_value (TREE_TYPE (rhs))))
        {
          cmp = (cmp == EQ_EXPR) ? GT_EXPR : LE_EXPR;
          rhs = wide_int_to_tree (TREE_TYPE (rhs),
                                  wi::max_value (TREE_TYPE (rhs)) - 1);
        }
    }

  /* This transformation is only valid for order comparisons.  Record which
     operand is smaller/larger if the result of the comparison is true.  */
  tree alt_smaller = NULL_TREE;
  tree alt_larger = NULL_TREE;
  if (cmp == LT_EXPR || cmp == LE_EXPR)
    {
      smaller = gimple_cond_lhs (cond);
      larger = rhs;
      /* If we have smaller < CST it is equivalent to smaller <= CST-1.
         Likewise smaller <= CST is equivalent to smaller < CST+1.  */
      if (TREE_CODE (larger) == INTEGER_CST
          && INTEGRAL_TYPE_P (TREE_TYPE (larger)))
        {
          if (cmp == LT_EXPR)
            {
              wi::overflow_type overflow;
              wide_int alt = wi::sub (wi::to_wide (larger), 1,
                                      TYPE_SIGN (TREE_TYPE (larger)),
                                      &overflow);
              if (! overflow)
                alt_larger = wide_int_to_tree (TREE_TYPE (larger), alt);
            }
          else
            {
              wi::overflow_type overflow;
              wide_int alt = wi::add (wi::to_wide (larger), 1,
                                      TYPE_SIGN (TREE_TYPE (larger)),
                                      &overflow);
              if (! overflow)
                alt_larger = wide_int_to_tree (TREE_TYPE (larger), alt);
            }
        }
    }
  else if (cmp == GT_EXPR || cmp == GE_EXPR)
    {
      smaller = rhs;
      larger = gimple_cond_lhs (cond);
      /* If we have larger > CST it is equivalent to larger >= CST+1.
         Likewise larger >= CST is equivalent to larger > CST-1.  */
      if (TREE_CODE (smaller) == INTEGER_CST
          && INTEGRAL_TYPE_P (TREE_TYPE (smaller)))
        {
          wi::overflow_type overflow;
          if (cmp == GT_EXPR)
            {
              wide_int alt = wi::add (wi::to_wide (smaller), 1,
                                      TYPE_SIGN (TREE_TYPE (smaller)),
                                      &overflow);
              if (! overflow)
                alt_smaller = wide_int_to_tree (TREE_TYPE (smaller), alt);
            }
          else
            {
              wide_int alt = wi::sub (wi::to_wide (smaller), 1,
                                      TYPE_SIGN (TREE_TYPE (smaller)),
                                      &overflow);
              if (! overflow)
                alt_smaller = wide_int_to_tree (TREE_TYPE (smaller), alt);
            }
        }
    }
  else
    return false;

  /* Handle the special case of (signed_type)x < 0 being equivalent
     to x > MAX_VAL(signed_type) and (signed_type)x >= 0 equivalent
     to x <= MAX_VAL(signed_type).  */
  if ((cmp == GE_EXPR || cmp == LT_EXPR)
      && INTEGRAL_TYPE_P (type)
      && TYPE_UNSIGNED (type)
      && integer_zerop (rhs))
    {
      tree op = gimple_cond_lhs (cond);
      if (TREE_CODE (op) == SSA_NAME
          && INTEGRAL_TYPE_P (TREE_TYPE (op))
          && !TYPE_UNSIGNED (TREE_TYPE (op)))
        {
          gimple *def_stmt = SSA_NAME_DEF_STMT (op);
          if (gimple_assign_cast_p (def_stmt))
            {
              tree op1 = gimple_assign_rhs1 (def_stmt);
              if (INTEGRAL_TYPE_P (TREE_TYPE (op1))
                  && TYPE_UNSIGNED (TREE_TYPE (op1))
                  && (TYPE_PRECISION (TREE_TYPE (op))
                      == TYPE_PRECISION (TREE_TYPE (op1)))
                  && useless_type_conversion_p (type, TREE_TYPE (op1)))
                {
                  wide_int w1 = wi::max_value (TREE_TYPE (op));
                  wide_int w2 = wi::add (w1, 1);
                  if (cmp == LT_EXPR)
                    {
                      larger = op1;
                      smaller = wide_int_to_tree (TREE_TYPE (op1), w1);
                      alt_smaller = wide_int_to_tree (TREE_TYPE (op1), w2);
                      alt_larger = NULL_TREE;
                    }
                  else
                    {
                      smaller = op1;
                      larger = wide_int_to_tree (TREE_TYPE (op1), w1);
                      alt_larger = wide_int_to_tree (TREE_TYPE (op1), w2);
                      alt_smaller = NULL_TREE;
                    }
                }
            }
        }
    }

  /* We need to know which is the true edge and which is the false
     edge so that we know whether we have abs or negative abs.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);

  /* Forward the edges over the middle basic block.  */
  if (true_edge->dest == middle_bb)
    true_edge = EDGE_SUCC (true_edge->dest, 0);
  if (false_edge->dest == middle_bb)
    false_edge = EDGE_SUCC (false_edge->dest, 0);

  if (true_edge == e0)
    {
      gcc_assert (false_edge == e1);
      arg_true = arg0;
      arg_false = arg1;
    }
  else
    {
      gcc_assert (false_edge == e0);
      gcc_assert (true_edge == e1);
      arg_true = arg1;
      arg_false = arg0;
    }

  if (empty_block_p (middle_bb))
    {
      if ((operand_equal_for_phi_arg_p (arg_true, smaller)
           || (alt_smaller
               && operand_equal_for_phi_arg_p (arg_true, alt_smaller)))
          && (operand_equal_for_phi_arg_p (arg_false, larger)
              || (alt_larger
                  && operand_equal_for_phi_arg_p (arg_false, alt_larger))))
        {
          /* Case

             if (smaller < larger)
               rslt = smaller;
             else
               rslt = larger;  */
          minmax = MIN_EXPR;
        }
      else if ((operand_equal_for_phi_arg_p (arg_false, smaller)
                || (alt_smaller
                    && operand_equal_for_phi_arg_p (arg_false, alt_smaller)))
               && (operand_equal_for_phi_arg_p (arg_true, larger)
                   || (alt_larger
                       && operand_equal_for_phi_arg_p (arg_true, alt_larger))))
        minmax = MAX_EXPR;
      else
        return false;
    }
  else
    {
      /* Recognize the following case, assuming d <= u:

         if (a <= u)
           b = MAX (a, d);
         x = PHI <b, u>

         This is equivalent to

         b = MAX (a, d);
         x = MIN (b, u);  */

      gimple *assign = last_and_only_stmt (middle_bb);
      tree lhs, op0, op1, bound;

      if (!single_pred_p (middle_bb))
        return false;

      if (!assign
          || gimple_code (assign) != GIMPLE_ASSIGN)
        return false;

      lhs = gimple_assign_lhs (assign);
      ass_code = gimple_assign_rhs_code (assign);
      if (ass_code != MAX_EXPR && ass_code != MIN_EXPR)
        return false;
      op0 = gimple_assign_rhs1 (assign);
      op1 = gimple_assign_rhs2 (assign);

      if (true_edge->src == middle_bb)
        {
          /* We got here if the condition is true, i.e., SMALLER < LARGER.  */
          if (!operand_equal_for_phi_arg_p (lhs, arg_true))
            return false;

          if (operand_equal_for_phi_arg_p (arg_false, larger)
              || (alt_larger
                  && operand_equal_for_phi_arg_p (arg_false, alt_larger)))
            {
              /* Case

                 if (smaller < larger)
                   {
                     r' = MAX_EXPR (smaller, bound)
                   }
                 r = PHI <r', larger>  --> to be turned to MIN_EXPR.  */
              if (ass_code != MAX_EXPR)
                return false;

              minmax = MIN_EXPR;
              if (operand_equal_for_phi_arg_p (op0, smaller)
                  || (alt_smaller
                      && operand_equal_for_phi_arg_p (op0, alt_smaller)))
                bound = op1;
              else if (operand_equal_for_phi_arg_p (op1, smaller)
                       || (alt_smaller
                           && operand_equal_for_phi_arg_p (op1, alt_smaller)))
                bound = op0;
              else
                return false;

              /* We need BOUND <= LARGER.  */
              if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
                                                  bound, larger)))
                return false;
            }
          else if (operand_equal_for_phi_arg_p (arg_false, smaller)
                   || (alt_smaller
                       && operand_equal_for_phi_arg_p (arg_false, alt_smaller)))
            {
              /* Case

                 if (smaller < larger)
                   {
                     r' = MIN_EXPR (larger, bound)
                   }
                 r = PHI <r', smaller>  --> to be turned to MAX_EXPR.  */
              if (ass_code != MIN_EXPR)
                return false;

              minmax = MAX_EXPR;
              if (operand_equal_for_phi_arg_p (op0, larger)
                  || (alt_larger
                      && operand_equal_for_phi_arg_p (op0, alt_larger)))
                bound = op1;
              else if (operand_equal_for_phi_arg_p (op1, larger)
                       || (alt_larger
                           && operand_equal_for_phi_arg_p (op1, alt_larger)))
                bound = op0;
              else
                return false;

              /* We need BOUND >= SMALLER.  */
              if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
                                                  bound, smaller)))
                return false;
            }
          else
            return false;
        }
      else
        {
          /* We got here if the condition is false, i.e., SMALLER > LARGER.  */
          if (!operand_equal_for_phi_arg_p (lhs, arg_false))
            return false;

          if (operand_equal_for_phi_arg_p (arg_true, larger)
              || (alt_larger
                  && operand_equal_for_phi_arg_p (arg_true, alt_larger)))
            {
              /* Case

                 if (smaller > larger)
                   {
                     r' = MIN_EXPR (smaller, bound)
                   }
                 r = PHI <r', larger>  --> to be turned to MAX_EXPR.  */
              if (ass_code != MIN_EXPR)
                return false;

              minmax = MAX_EXPR;
              if (operand_equal_for_phi_arg_p (op0, smaller)
                  || (alt_smaller
                      && operand_equal_for_phi_arg_p (op0, alt_smaller)))
                bound = op1;
              else if (operand_equal_for_phi_arg_p (op1, smaller)
                       || (alt_smaller
                           && operand_equal_for_phi_arg_p (op1, alt_smaller)))
                bound = op0;
              else
                return false;

              /* We need BOUND >= LARGER.  */
              if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
                                                  bound, larger)))
                return false;
            }
          else if (operand_equal_for_phi_arg_p (arg_true, smaller)
                   || (alt_smaller
                       && operand_equal_for_phi_arg_p (arg_true, alt_smaller)))
            {
              /* Case

                 if (smaller > larger)
                   {
                     r' = MAX_EXPR (larger, bound)
                   }
                 r = PHI <r', smaller>  --> to be turned to MIN_EXPR.  */
              if (ass_code != MAX_EXPR)
                return false;

              minmax = MIN_EXPR;
              if (operand_equal_for_phi_arg_p (op0, larger))
                bound = op1;
              else if (operand_equal_for_phi_arg_p (op1, larger))
                bound = op0;
              else
                return false;

              /* We need BOUND <= SMALLER.  */
              if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
                                                  bound, smaller)))
                return false;
            }
          else
            return false;
        }

      /* Move the statement from the middle block.  */
      gsi = gsi_last_bb (cond_bb);
      gsi_from = gsi_last_nondebug_bb (middle_bb);
      reset_flow_sensitive_info (SINGLE_SSA_TREE_OPERAND (gsi_stmt (gsi_from),
                                                          SSA_OP_DEF));
      gsi_move_before (&gsi_from, &gsi);
    }

  /* Emit the statement to compute min/max.  */
  gimple_seq stmts = NULL;
  tree phi_result = PHI_RESULT (phi);
  result = gimple_build (&stmts, minmax, TREE_TYPE (phi_result), arg0, arg1);

  gsi = gsi_last_bb (cond_bb);
  gsi_insert_seq_before (&gsi, stmts, GSI_NEW_STMT);

  replace_phi_edge_with_variable (cond_bb, e1, phi, result);

  return true;
}
1961 /* Return true if the only executable statement in BB is a GIMPLE_COND. */
1963 static bool
1964 cond_only_block_p (basic_block bb)
1966 /* BB must have no executable statements other than a final GIMPLE_COND. */
1967 gimple_stmt_iterator gsi = gsi_after_labels (bb);
1968 if (phi_nodes (bb))
1969 return false;
1970 while (!gsi_end_p (gsi))
1972 gimple *stmt = gsi_stmt (gsi);
1973 if (is_gimple_debug (stmt))
1975 else if (gimple_code (stmt) == GIMPLE_NOP
1976 || gimple_code (stmt) == GIMPLE_PREDICT
1977 || gimple_code (stmt) == GIMPLE_COND)
1979 else
1980 return false;
1981 gsi_next (&gsi);
1983 return true;
1986 /* Attempt to optimize (x <=> y) cmp 0 and similar comparisons.
1987 For strong ordering <=> try to match something like:
1988 <bb 2> : // cond3_bb (== cond2_bb)
1989 if (x_4(D) != y_5(D))
1990 goto <bb 3>; [INV]
1991 else
1992 goto <bb 6>; [INV]
1994 <bb 3> : // cond_bb
1995 if (x_4(D) < y_5(D))
1996 goto <bb 6>; [INV]
1997 else
1998 goto <bb 4>; [INV]
2000 <bb 4> : // middle_bb
2002 <bb 6> : // phi_bb
2003 # iftmp.0_2 = PHI <1(4), 0(2), -1(3)>
2004 _1 = iftmp.0_2 == 0;
2006 and for partial ordering <=> something like:
2008 <bb 2> : // cond3_bb
2009 if (a_3(D) == b_5(D))
2010 goto <bb 6>; [50.00%]
2011 else
2012 goto <bb 3>; [50.00%]
2014 <bb 3> [local count: 536870913]: // cond2_bb
2015 if (a_3(D) < b_5(D))
2016 goto <bb 6>; [50.00%]
2017 else
2018 goto <bb 4>; [50.00%]
2020 <bb 4> [local count: 268435456]: // cond_bb
2021 if (a_3(D) > b_5(D))
2022 goto <bb 6>; [50.00%]
2023 else
2024 goto <bb 5>; [50.00%]
2026 <bb 5> [local count: 134217728]: // middle_bb
2028 <bb 6> [local count: 1073741824]: // phi_bb
2029 # SR.27_4 = PHI <0(2), -1(3), 1(4), 2(5)>
2030 _2 = SR.27_4 > 0; */
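/* For instance, CFGs of the above shape typically come from C++20 source
   like this illustrative sketch:
     #include <compare>
     bool eq (int x, int y)
     {
       return (x <=> y) == 0;	// the comparison is the PHI's single use
     }
   where operator<=> expands into the chain of conditions above; the whole
   sequence is then replaced by the single comparison x == y. */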
2032 static bool
2033 spaceship_replacement (basic_block cond_bb, basic_block middle_bb,
2034 edge e0, edge e1, gphi *phi,
2035 tree arg0, tree arg1)
2037 tree phires = PHI_RESULT (phi);
2038 if (!INTEGRAL_TYPE_P (TREE_TYPE (phires))
2039 || TYPE_UNSIGNED (TREE_TYPE (phires))
2040 || !tree_fits_shwi_p (arg0)
2041 || !tree_fits_shwi_p (arg1)
2042 || !IN_RANGE (tree_to_shwi (arg0), -1, 2)
2043 || !IN_RANGE (tree_to_shwi (arg1), -1, 2))
2044 return false;
2046 basic_block phi_bb = gimple_bb (phi);
2047 gcc_assert (phi_bb == e0->dest && phi_bb == e1->dest);
2048 if (!IN_RANGE (EDGE_COUNT (phi_bb->preds), 3, 4))
2049 return false;
2051 use_operand_p use_p;
2052 gimple *use_stmt;
2053 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (phires))
2054 return false;
2055 if (!single_imm_use (phires, &use_p, &use_stmt))
2056 return false;
2057 enum tree_code cmp;
2058 tree lhs, rhs;
2059 gimple *orig_use_stmt = use_stmt;
2060 tree orig_use_lhs = NULL_TREE;
2061 int prec = TYPE_PRECISION (TREE_TYPE (phires));
2062 bool is_cast = false;
2064 /* Deal with the case when match.pd has rewritten the (res & ~1) == 0
2065 into res <= 1 and has left a type-cast for signed types. */
2066 if (gimple_assign_cast_p (use_stmt))
2068 orig_use_lhs = gimple_assign_lhs (use_stmt);
2069 /* match.pd would have only done this for a signed type,
2070 so the conversion must be to an unsigned one. */
2071 tree ty1 = TREE_TYPE (gimple_assign_rhs1 (use_stmt));
2072 tree ty2 = TREE_TYPE (orig_use_lhs);
2074 if (!TYPE_UNSIGNED (ty2) || !INTEGRAL_TYPE_P (ty2))
2075 return false;
2076 if (TYPE_PRECISION (ty1) != TYPE_PRECISION (ty2))
2077 return false;
2078 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (orig_use_lhs))
2079 return false;
2080 if (EDGE_COUNT (phi_bb->preds) != 4)
2081 return false;
2082 if (!single_imm_use (orig_use_lhs, &use_p, &use_stmt))
2083 return false;
2085 is_cast = true;
2087 else if (is_gimple_assign (use_stmt)
2088 && gimple_assign_rhs_code (use_stmt) == BIT_AND_EXPR
2089 && TREE_CODE (gimple_assign_rhs2 (use_stmt)) == INTEGER_CST
2090 && (wi::to_wide (gimple_assign_rhs2 (use_stmt))
2091 == wi::shifted_mask (1, prec - 1, false, prec)))
2093 /* For partial_ordering result operator>= with unspec as second
2094 argument is (res & 1) == res, folded by match.pd into
2095 (res & ~1) == 0. */
2096 orig_use_lhs = gimple_assign_lhs (use_stmt);
2097 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (orig_use_lhs))
2098 return false;
2099 if (EDGE_COUNT (phi_bb->preds) != 4)
2100 return false;
2101 if (!single_imm_use (orig_use_lhs, &use_p, &use_stmt))
2102 return false;
2104 if (gimple_code (use_stmt) == GIMPLE_COND)
2106 cmp = gimple_cond_code (use_stmt);
2107 lhs = gimple_cond_lhs (use_stmt);
2108 rhs = gimple_cond_rhs (use_stmt);
2110 else if (is_gimple_assign (use_stmt))
2112 if (gimple_assign_rhs_class (use_stmt) == GIMPLE_BINARY_RHS)
2114 cmp = gimple_assign_rhs_code (use_stmt);
2115 lhs = gimple_assign_rhs1 (use_stmt);
2116 rhs = gimple_assign_rhs2 (use_stmt);
2118 else if (gimple_assign_rhs_code (use_stmt) == COND_EXPR)
2120 tree cond = gimple_assign_rhs1 (use_stmt);
2121 if (!COMPARISON_CLASS_P (cond))
2122 return false;
2123 cmp = TREE_CODE (cond);
2124 lhs = TREE_OPERAND (cond, 0);
2125 rhs = TREE_OPERAND (cond, 1);
2127 else
2128 return false;
2130 else
2131 return false;
2132 switch (cmp)
2134 case EQ_EXPR:
2135 case NE_EXPR:
2136 case LT_EXPR:
2137 case GT_EXPR:
2138 case LE_EXPR:
2139 case GE_EXPR:
2140 break;
2141 default:
2142 return false;
2144 if (lhs != (orig_use_lhs ? orig_use_lhs : phires)
2145 || !tree_fits_shwi_p (rhs)
2146 || !IN_RANGE (tree_to_shwi (rhs), -1, 1))
2147 return false;
2149 if (is_cast)
2151 if (TREE_CODE (rhs) != INTEGER_CST)
2152 return false;
2153 /* As with -ffast-math we assume a return value of 2 to be
2154 impossible, canonicalize (unsigned) res <= 1U or
2155 (unsigned) res < 2U into res >= 0, and (unsigned) res > 1U
2156 or (unsigned) res >= 2U into res < 0. */
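/* Concretely: res is known to be -1, 0 or 1 (2 being excluded), so its
   unsigned value is 0, 1 or the all-ones maximum. Thus (unsigned) res <= 1U
   and (unsigned) res < 2U hold exactly for res in {0, 1}, i.e. res >= 0,
   while (unsigned) res > 1U and (unsigned) res >= 2U hold exactly for
   res == -1, i.e. res < 0. */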
2157 switch (cmp)
2159 case LE_EXPR:
2160 if (!integer_onep (rhs))
2161 return false;
2162 cmp = GE_EXPR;
2163 break;
2164 case LT_EXPR:
2165 if (wi::ne_p (wi::to_widest (rhs), 2))
2166 return false;
2167 cmp = GE_EXPR;
2168 break;
2169 case GT_EXPR:
2170 if (!integer_onep (rhs))
2171 return false;
2172 cmp = LT_EXPR;
2173 break;
2174 case GE_EXPR:
2175 if (wi::ne_p (wi::to_widest (rhs), 2))
2176 return false;
2177 cmp = LT_EXPR;
2178 break;
2179 default:
2180 return false;
2182 rhs = build_zero_cst (TREE_TYPE (phires));
2184 else if (orig_use_lhs)
2186 if ((cmp != EQ_EXPR && cmp != NE_EXPR) || !integer_zerop (rhs))
2187 return false;
2188 /* As with -ffast-math we assume a return value of 2 to be
2189 impossible, canonicalize (res & ~1) == 0 into
2190 res >= 0 and (res & ~1) != 0 into res < 0. */
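/* Concretely: res & ~1 clears only the lowest bit, so with res known to be
   -1, 0 or 1 (2 being excluded) it is 0 exactly for res in {0, 1}, i.e.
   res >= 0, and nonzero exactly for res == -1, i.e. res < 0. */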
2191 cmp = cmp == EQ_EXPR ? GE_EXPR : LT_EXPR;
2194 if (!empty_block_p (middle_bb))
2195 return false;
2197 gcond *cond1 = as_a <gcond *> (last_stmt (cond_bb));
2198 enum tree_code cmp1 = gimple_cond_code (cond1);
2199 switch (cmp1)
2201 case LT_EXPR:
2202 case LE_EXPR:
2203 case GT_EXPR:
2204 case GE_EXPR:
2205 break;
2206 default:
2207 return false;
2209 tree lhs1 = gimple_cond_lhs (cond1);
2210 tree rhs1 = gimple_cond_rhs (cond1);
2211 /* The optimization may be unsafe due to NaNs. */
2212 if (HONOR_NANS (TREE_TYPE (lhs1)))
2213 return false;
2214 if (TREE_CODE (lhs1) == SSA_NAME && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs1))
2215 return false;
2216 if (TREE_CODE (rhs1) == SSA_NAME && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs1))
2217 return false;
2219 if (!single_pred_p (cond_bb) || !cond_only_block_p (cond_bb))
2220 return false;
2222 basic_block cond2_bb = single_pred (cond_bb);
2223 if (EDGE_COUNT (cond2_bb->succs) != 2)
2224 return false;
2225 edge cond2_phi_edge;
2226 if (EDGE_SUCC (cond2_bb, 0)->dest == cond_bb)
2228 if (EDGE_SUCC (cond2_bb, 1)->dest != phi_bb)
2229 return false;
2230 cond2_phi_edge = EDGE_SUCC (cond2_bb, 1);
2232 else if (EDGE_SUCC (cond2_bb, 0)->dest != phi_bb)
2233 return false;
2234 else
2235 cond2_phi_edge = EDGE_SUCC (cond2_bb, 0);
2236 tree arg2 = gimple_phi_arg_def (phi, cond2_phi_edge->dest_idx);
2237 if (!tree_fits_shwi_p (arg2))
2238 return false;
2239 gimple *cond2 = last_stmt (cond2_bb);
2240 if (cond2 == NULL || gimple_code (cond2) != GIMPLE_COND)
2241 return false;
2242 enum tree_code cmp2 = gimple_cond_code (cond2);
2243 tree lhs2 = gimple_cond_lhs (cond2);
2244 tree rhs2 = gimple_cond_rhs (cond2);
2245 if (lhs2 == lhs1)
2247 if (!operand_equal_p (rhs2, rhs1, 0))
2249 if ((cmp2 == EQ_EXPR || cmp2 == NE_EXPR)
2250 && TREE_CODE (rhs1) == INTEGER_CST
2251 && TREE_CODE (rhs2) == INTEGER_CST)
2253 /* For integers, we can have cond2 x == 5
2254 and cond1 x < 5, x <= 4, x <= 5, x < 6,
2255 x > 5, x >= 6, x >= 5 or x > 4. */
2256 if (tree_int_cst_lt (rhs1, rhs2))
2258 if (wi::ne_p (wi::to_wide (rhs1) + 1, wi::to_wide (rhs2)))
2259 return false;
2260 if (cmp1 == LE_EXPR)
2261 cmp1 = LT_EXPR;
2262 else if (cmp1 == GT_EXPR)
2263 cmp1 = GE_EXPR;
2264 else
2265 return false;
2267 else
2269 gcc_checking_assert (tree_int_cst_lt (rhs2, rhs1));
2270 if (wi::ne_p (wi::to_wide (rhs2) + 1, wi::to_wide (rhs1)))
2271 return false;
2272 if (cmp1 == LT_EXPR)
2273 cmp1 = LE_EXPR;
2274 else if (cmp1 == GE_EXPR)
2275 cmp1 = GT_EXPR;
2276 else
2277 return false;
2279 rhs1 = rhs2;
2281 else
2282 return false;
2285 else if (lhs2 == rhs1)
2287 if (rhs2 != lhs1)
2288 return false;
2290 else
2291 return false;
2293 tree arg3 = arg2;
2294 basic_block cond3_bb = cond2_bb;
2295 edge cond3_phi_edge = cond2_phi_edge;
2296 gimple *cond3 = cond2;
2297 enum tree_code cmp3 = cmp2;
2298 tree lhs3 = lhs2;
2299 tree rhs3 = rhs2;
2300 if (EDGE_COUNT (phi_bb->preds) == 4)
2302 if (absu_hwi (tree_to_shwi (arg2)) != 1)
2303 return false;
2304 if (e1->flags & EDGE_TRUE_VALUE)
2306 if (tree_to_shwi (arg0) != 2
2307 || absu_hwi (tree_to_shwi (arg1)) != 1
2308 || wi::to_widest (arg1) == wi::to_widest (arg2))
2309 return false;
2311 else if (tree_to_shwi (arg1) != 2
2312 || absu_hwi (tree_to_shwi (arg0)) != 1
2313 || wi::to_widest (arg0) == wi::to_widest (arg2))
2314 return false;
2315 switch (cmp2)
2317 case LT_EXPR:
2318 case LE_EXPR:
2319 case GT_EXPR:
2320 case GE_EXPR:
2321 break;
2322 default:
2323 return false;
2325 /* if (x < y) goto phi_bb; else fallthru;
2326 if (x > y) goto phi_bb; else fallthru;
2327 bbx:;
2328 phi_bb:;
2329 is ok, but if x and y are swapped in one of the comparisons,
2330 or the comparisons are the same and operands not swapped,
2331 or the true and false edges are swapped, it is not. */
2332 if ((lhs2 == lhs1)
2333 ^ (((cond2_phi_edge->flags
2334 & ((cmp2 == LT_EXPR || cmp2 == LE_EXPR)
2335 ? EDGE_TRUE_VALUE : EDGE_FALSE_VALUE)) != 0)
2336 != ((e1->flags
2337 & ((cmp1 == LT_EXPR || cmp1 == LE_EXPR)
2338 ? EDGE_TRUE_VALUE : EDGE_FALSE_VALUE)) != 0)))
2339 return false;
2340 if (!single_pred_p (cond2_bb) || !cond_only_block_p (cond2_bb))
2341 return false;
2342 cond3_bb = single_pred (cond2_bb);
2343 if (EDGE_COUNT (cond2_bb->succs) != 2)
2344 return false;
2345 if (EDGE_SUCC (cond3_bb, 0)->dest == cond2_bb)
2347 if (EDGE_SUCC (cond3_bb, 1)->dest != phi_bb)
2348 return false;
2349 cond3_phi_edge = EDGE_SUCC (cond3_bb, 1);
2351 else if (EDGE_SUCC (cond3_bb, 0)->dest != phi_bb)
2352 return false;
2353 else
2354 cond3_phi_edge = EDGE_SUCC (cond3_bb, 0);
2355 arg3 = gimple_phi_arg_def (phi, cond3_phi_edge->dest_idx);
2356 cond3 = last_stmt (cond3_bb);
2357 if (cond3 == NULL || gimple_code (cond3) != GIMPLE_COND)
2358 return false;
2359 cmp3 = gimple_cond_code (cond3);
2360 lhs3 = gimple_cond_lhs (cond3);
2361 rhs3 = gimple_cond_rhs (cond3);
2362 if (lhs3 == lhs1)
2364 if (!operand_equal_p (rhs3, rhs1, 0))
2365 return false;
2367 else if (lhs3 == rhs1)
2369 if (rhs3 != lhs1)
2370 return false;
2372 else
2373 return false;
2375 else if (absu_hwi (tree_to_shwi (arg0)) != 1
2376 || absu_hwi (tree_to_shwi (arg1)) != 1
2377 || wi::to_widest (arg0) == wi::to_widest (arg1))
2378 return false;
2380 if (!integer_zerop (arg3) || (cmp3 != EQ_EXPR && cmp3 != NE_EXPR))
2381 return false;
2382 if ((cond3_phi_edge->flags & (cmp3 == EQ_EXPR
2383 ? EDGE_TRUE_VALUE : EDGE_FALSE_VALUE)) == 0)
2384 return false;
2386 /* lhs1 one_cmp rhs1 results in phires of 1. */
2387 enum tree_code one_cmp;
2388 if ((cmp1 == LT_EXPR || cmp1 == LE_EXPR)
2389 ^ (!integer_onep ((e1->flags & EDGE_TRUE_VALUE) ? arg1 : arg0)))
2390 one_cmp = LT_EXPR;
2391 else
2392 one_cmp = GT_EXPR;
2394 enum tree_code res_cmp;
2395 switch (cmp)
2397 case EQ_EXPR:
2398 if (integer_zerop (rhs))
2399 res_cmp = EQ_EXPR;
2400 else if (integer_minus_onep (rhs))
2401 res_cmp = one_cmp == LT_EXPR ? GT_EXPR : LT_EXPR;
2402 else if (integer_onep (rhs))
2403 res_cmp = one_cmp;
2404 else
2405 return false;
2406 break;
2407 case NE_EXPR:
2408 if (integer_zerop (rhs))
2409 res_cmp = NE_EXPR;
2410 else if (integer_minus_onep (rhs))
2411 res_cmp = one_cmp == LT_EXPR ? LE_EXPR : GE_EXPR;
2412 else if (integer_onep (rhs))
2413 res_cmp = one_cmp == LT_EXPR ? GE_EXPR : LE_EXPR;
2414 else
2415 return false;
2416 break;
2417 case LT_EXPR:
2418 if (integer_onep (rhs))
2419 res_cmp = one_cmp == LT_EXPR ? GE_EXPR : LE_EXPR;
2420 else if (integer_zerop (rhs))
2421 res_cmp = one_cmp == LT_EXPR ? GT_EXPR : LT_EXPR;
2422 else
2423 return false;
2424 break;
2425 case LE_EXPR:
2426 if (integer_zerop (rhs))
2427 res_cmp = one_cmp == LT_EXPR ? GE_EXPR : LE_EXPR;
2428 else if (integer_minus_onep (rhs))
2429 res_cmp = one_cmp == LT_EXPR ? GT_EXPR : LT_EXPR;
2430 else
2431 return false;
2432 break;
2433 case GT_EXPR:
2434 if (integer_minus_onep (rhs))
2435 res_cmp = one_cmp == LT_EXPR ? LE_EXPR : GE_EXPR;
2436 else if (integer_zerop (rhs))
2437 res_cmp = one_cmp;
2438 else
2439 return false;
2440 break;
2441 case GE_EXPR:
2442 if (integer_zerop (rhs))
2443 res_cmp = one_cmp == LT_EXPR ? LE_EXPR : GE_EXPR;
2444 else if (integer_onep (rhs))
2445 res_cmp = one_cmp;
2446 else
2447 return false;
2448 break;
2449 default:
2450 gcc_unreachable ();
2453 if (gimple_code (use_stmt) == GIMPLE_COND)
2455 gcond *use_cond = as_a <gcond *> (use_stmt);
2456 gimple_cond_set_code (use_cond, res_cmp);
2457 gimple_cond_set_lhs (use_cond, lhs1);
2458 gimple_cond_set_rhs (use_cond, rhs1);
2460 else if (gimple_assign_rhs_class (use_stmt) == GIMPLE_BINARY_RHS)
2462 gimple_assign_set_rhs_code (use_stmt, res_cmp);
2463 gimple_assign_set_rhs1 (use_stmt, lhs1);
2464 gimple_assign_set_rhs2 (use_stmt, rhs1);
2466 else
2468 tree cond = build2 (res_cmp, TREE_TYPE (gimple_assign_rhs1 (use_stmt)),
2469 lhs1, rhs1);
2470 gimple_assign_set_rhs1 (use_stmt, cond);
2472 update_stmt (use_stmt);
2474 if (MAY_HAVE_DEBUG_BIND_STMTS)
2476 use_operand_p use_p;
2477 imm_use_iterator iter;
2478 bool has_debug_uses = false;
2479 bool has_cast_debug_uses = false;
2480 FOR_EACH_IMM_USE_FAST (use_p, iter, phires)
2482 gimple *use_stmt = USE_STMT (use_p);
2483 if (orig_use_lhs && use_stmt == orig_use_stmt)
2484 continue;
2485 gcc_assert (is_gimple_debug (use_stmt));
2486 has_debug_uses = true;
2487 break;
2489 if (orig_use_lhs)
2491 if (!has_debug_uses || is_cast)
2492 FOR_EACH_IMM_USE_FAST (use_p, iter, orig_use_lhs)
2494 gimple *use_stmt = USE_STMT (use_p);
2495 gcc_assert (is_gimple_debug (use_stmt));
2496 has_debug_uses = true;
2497 if (is_cast)
2498 has_cast_debug_uses = true;
2500 gimple_stmt_iterator gsi = gsi_for_stmt (orig_use_stmt);
2501 tree zero = build_zero_cst (TREE_TYPE (orig_use_lhs));
2502 gimple_assign_set_rhs_with_ops (&gsi, INTEGER_CST, zero);
2503 update_stmt (orig_use_stmt);
2506 if (has_debug_uses)
2508 /* If there are debug uses, emit something like:
2509 # DEBUG D#1 => i_2(D) > j_3(D) ? 1 : -1
2510 # DEBUG D#2 => i_2(D) == j_3(D) ? 0 : D#1
2511 where > stands for the comparison that yielded 1
2512 and replace debug uses of phi result with that D#2.
2513 Ignore the value of 2, because if NaNs aren't expected,
2514 all floating point numbers should be comparable. */
2515 gimple_stmt_iterator gsi = gsi_after_labels (gimple_bb (phi));
2516 tree type = TREE_TYPE (phires);
2517 tree temp1 = build_debug_expr_decl (type);
2518 tree t = build2 (one_cmp, boolean_type_node, lhs1, rhs2);
2519 t = build3 (COND_EXPR, type, t, build_one_cst (type),
2520 build_int_cst (type, -1));
2521 gimple *g = gimple_build_debug_bind (temp1, t, phi);
2522 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
2523 tree temp2 = build_debug_expr_decl (type);
2524 t = build2 (EQ_EXPR, boolean_type_node, lhs1, rhs2);
2525 t = build3 (COND_EXPR, type, t, build_zero_cst (type), temp1);
2526 g = gimple_build_debug_bind (temp2, t, phi);
2527 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
2528 replace_uses_by (phires, temp2);
2529 if (orig_use_lhs)
2531 if (has_cast_debug_uses)
2533 tree temp3 = make_node (DEBUG_EXPR_DECL);
2534 DECL_ARTIFICIAL (temp3) = 1;
2535 TREE_TYPE (temp3) = TREE_TYPE (orig_use_lhs);
2536 SET_DECL_MODE (temp3, TYPE_MODE (type));
2537 t = fold_convert (TREE_TYPE (temp3), temp2);
2538 g = gimple_build_debug_bind (temp3, t, phi);
2539 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
2540 replace_uses_by (orig_use_lhs, temp3);
2542 else
2543 replace_uses_by (orig_use_lhs, temp2);
2548 if (orig_use_lhs)
2550 gimple_stmt_iterator gsi = gsi_for_stmt (orig_use_stmt);
2551 gsi_remove (&gsi, true);
2554 gimple_stmt_iterator psi = gsi_for_stmt (phi);
2555 remove_phi_node (&psi, true);
2556 statistics_counter_event (cfun, "spaceship replacement", 1);
2558 return true;
2561 /* Optimize x ? __builtin_fun (x) : C, where C is __builtin_fun (0).
2562 Convert
2564 <bb 2>
2565 if (b_4(D) != 0)
2566 goto <bb 3>
2567 else
2568 goto <bb 4>
2570 <bb 3>
2571 _2 = (unsigned long) b_4(D);
2572 _9 = __builtin_popcountl (_2);
2573 OR
2574 _9 = __builtin_popcountl (b_4(D));
2576 <bb 4>
2577 c_12 = PHI <0(2), _9(3)>
2579 Into
2580 <bb 2>
2581 _2 = (unsigned long) b_4(D);
2582 _9 = __builtin_popcountl (_2);
2583 OR
2584 _9 = __builtin_popcountl (b_4(D));
2586 <bb 4>
2587 c_12 = PHI <_9(2)>
2589 Similarly for __builtin_clz or __builtin_ctz if
2590 C?Z_DEFINED_VALUE_AT_ZERO is 2 and the optab is present;
2591 instead of 0 above, the value from that macro is used. */
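/* At the source level the pattern above typically comes from code like this
   illustrative sketch:
     int bits (unsigned long b)
     {
       return b ? __builtin_popcountl (b) : 0;
     }
   Since __builtin_popcountl (0) is 0, the test is redundant and the call can
   be made unconditional; for clz/ctz this is only valid when the target
   defines the result at zero (C?Z_DEFINED_VALUE_AT_ZERO == 2). */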
2593 static bool
2594 cond_removal_in_builtin_zero_pattern (basic_block cond_bb,
2595 basic_block middle_bb,
2596 edge e1, edge e2, gphi *phi,
2597 tree arg0, tree arg1)
2599 gimple *cond;
2600 gimple_stmt_iterator gsi, gsi_from;
2601 gimple *call;
2602 gimple *cast = NULL;
2603 tree lhs, arg;
2605 /* Check that
2606 _2 = (unsigned long) b_4(D);
2607 _9 = __builtin_popcountl (_2);
2608 OR
2609 _9 = __builtin_popcountl (b_4(D));
2610 are the only stmts in the middle_bb. */
2612 gsi = gsi_start_nondebug_after_labels_bb (middle_bb);
2613 if (gsi_end_p (gsi))
2614 return false;
2615 cast = gsi_stmt (gsi);
2616 gsi_next_nondebug (&gsi);
2617 if (!gsi_end_p (gsi))
2619 call = gsi_stmt (gsi);
2620 gsi_next_nondebug (&gsi);
2621 if (!gsi_end_p (gsi))
2622 return false;
2624 else
2626 call = cast;
2627 cast = NULL;
2630 /* Check that we have a popcount/clz/ctz builtin. */
2631 if (!is_gimple_call (call) || gimple_call_num_args (call) != 1)
2632 return false;
2634 arg = gimple_call_arg (call, 0);
2635 lhs = gimple_get_lhs (call);
2637 if (lhs == NULL_TREE)
2638 return false;
2640 combined_fn cfn = gimple_call_combined_fn (call);
2641 internal_fn ifn = IFN_LAST;
2642 int val = 0;
2643 switch (cfn)
2645 case CFN_BUILT_IN_BSWAP16:
2646 case CFN_BUILT_IN_BSWAP32:
2647 case CFN_BUILT_IN_BSWAP64:
2648 case CFN_BUILT_IN_BSWAP128:
2649 CASE_CFN_FFS:
2650 CASE_CFN_PARITY:
2651 CASE_CFN_POPCOUNT:
2652 break;
2653 CASE_CFN_CLZ:
2654 if (INTEGRAL_TYPE_P (TREE_TYPE (arg)))
2656 tree type = TREE_TYPE (arg);
2657 if (direct_internal_fn_supported_p (IFN_CLZ, type, OPTIMIZE_FOR_BOTH)
2658 && CLZ_DEFINED_VALUE_AT_ZERO (SCALAR_INT_TYPE_MODE (type),
2659 val) == 2)
2661 ifn = IFN_CLZ;
2662 break;
2665 return false;
2666 CASE_CFN_CTZ:
2667 if (INTEGRAL_TYPE_P (TREE_TYPE (arg)))
2669 tree type = TREE_TYPE (arg);
2670 if (direct_internal_fn_supported_p (IFN_CTZ, type, OPTIMIZE_FOR_BOTH)
2671 && CTZ_DEFINED_VALUE_AT_ZERO (SCALAR_INT_TYPE_MODE (type),
2672 val) == 2)
2674 ifn = IFN_CTZ;
2675 break;
2678 return false;
2679 case CFN_BUILT_IN_CLRSB:
2680 val = TYPE_PRECISION (integer_type_node) - 1;
2681 break;
2682 case CFN_BUILT_IN_CLRSBL:
2683 val = TYPE_PRECISION (long_integer_type_node) - 1;
2684 break;
2685 case CFN_BUILT_IN_CLRSBLL:
2686 val = TYPE_PRECISION (long_long_integer_type_node) - 1;
2687 break;
2688 default:
2689 return false;
2692 if (cast)
2694 /* We have a cast stmt feeding popcount/clz/ctz builtin. */
2695 /* Check that we have a cast prior to that. */
2696 if (gimple_code (cast) != GIMPLE_ASSIGN
2697 || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (cast)))
2698 return false;
2699 /* Result of the cast stmt is the argument to the builtin. */
2700 if (arg != gimple_assign_lhs (cast))
2701 return false;
2702 arg = gimple_assign_rhs1 (cast);
2705 cond = last_stmt (cond_bb);
2707 /* Cond_bb has a check for b_4 [!=|==] 0 before calling the popcount/clz/ctz
2708 builtin. */
2709 if (gimple_code (cond) != GIMPLE_COND
2710 || (gimple_cond_code (cond) != NE_EXPR
2711 && gimple_cond_code (cond) != EQ_EXPR)
2712 || !integer_zerop (gimple_cond_rhs (cond))
2713 || arg != gimple_cond_lhs (cond))
2714 return false;
2716 /* Canonicalize. */
2717 if ((e2->flags & EDGE_TRUE_VALUE
2718 && gimple_cond_code (cond) == NE_EXPR)
2719 || (e1->flags & EDGE_TRUE_VALUE
2720 && gimple_cond_code (cond) == EQ_EXPR))
2722 std::swap (arg0, arg1);
2723 std::swap (e1, e2);
2726 /* Check PHI arguments. */
2727 if (lhs != arg0
2728 || TREE_CODE (arg1) != INTEGER_CST
2729 || wi::to_wide (arg1) != val)
2730 return false;
2732 /* And insert the popcount/clz/ctz builtin and cast stmt before the
2733 cond_bb. */
2734 gsi = gsi_last_bb (cond_bb);
2735 if (cast)
2737 gsi_from = gsi_for_stmt (cast);
2738 gsi_move_before (&gsi_from, &gsi);
2739 reset_flow_sensitive_info (gimple_get_lhs (cast));
2741 gsi_from = gsi_for_stmt (call);
2742 if (ifn == IFN_LAST || gimple_call_internal_p (call))
2743 gsi_move_before (&gsi_from, &gsi);
2744 else
2746 /* For __builtin_c[lt]z* force .C[LT]Z ifn, because only
2747 the latter is well defined at zero. */
2748 call = gimple_build_call_internal (ifn, 1, gimple_call_arg (call, 0));
2749 gimple_call_set_lhs (call, lhs);
2750 gsi_insert_before (&gsi, call, GSI_SAME_STMT);
2751 gsi_remove (&gsi_from, true);
2753 reset_flow_sensitive_info (lhs);
2755 /* Now update the PHI and remove unneeded bbs. */
2756 replace_phi_edge_with_variable (cond_bb, e2, phi, lhs);
2757 return true;
2760 /* Auxiliary functions to determine the set of memory accesses which
2761 can't trap because they are preceded by accesses to the same memory
2762 portion. We do that for MEM_REFs, so we only need to track
2763 the SSA_NAME of the pointer indirectly referenced. The algorithm
2764 simply is a walk over all instructions in dominator order. When
2765 we see a MEM_REF we determine if we've already seen the same
2766 ref anywhere up to the root of the dominator tree. If we have,
2767 the current access can't trap. If we don't see any dominating access
2768 the current access might trap, but might also make later accesses
2769 non-trapping, so we remember it. We need to be careful with loads
2770 or stores, for instance a load might not trap, while a store would,
2771 so if we see a dominating read access this doesn't mean that a later
2772 write access would not trap. Hence we also need to differentiate the
2773 type of access(es) seen.
2775 ??? We currently are very conservative and assume that a load might
2776 trap even if a store doesn't (write-only memory). This probably is
2777 overly conservative.
2779 We currently support a special case: for !TREE_ADDRESSABLE automatic
2780 variables we can ignore whether an access is a load or a store, because
2781 the local stack is always writable. */
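/* For example, in this illustrative sketch
     void f (int *p, int c)
     {
       *p = 0;		// might trap
       if (c)
	 *p = 1;	// dominated by a store to the same ref: can't trap
     }
   the conditional store is recorded as non-trapping. Tracking the kind of
   access matters because a dominating load alone would not prove a later
   store safe. */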
2783 /* A hash-table of references (MEM_REF/ARRAY_REF/COMPONENT_REF), and in which
2784 basic block an *_REF through it was seen, which would constitute a
2785 no-trap region for same accesses.
2787 Size is needed to support 2 MEM_REFs of different types, like
2788 MEM<double>(s_1) and MEM<long>(s_1), which would compare equal with
2789 OEP_ADDRESS_OF. */
2790 struct ref_to_bb
2792 tree exp;
2793 HOST_WIDE_INT size;
2794 unsigned int phase;
2795 basic_block bb;
2798 /* Hashtable helpers. */
2800 struct refs_hasher : free_ptr_hash<ref_to_bb>
2802 static inline hashval_t hash (const ref_to_bb *);
2803 static inline bool equal (const ref_to_bb *, const ref_to_bb *);
2806 /* Used for quick clearing of the hash-table when we see calls.
2807 Hash entries with phase < nt_call_phase are invalid. */
2808 static unsigned int nt_call_phase;
2810 /* The hash function. */
2812 inline hashval_t
2813 refs_hasher::hash (const ref_to_bb *n)
2815 inchash::hash hstate;
2816 inchash::add_expr (n->exp, hstate, OEP_ADDRESS_OF);
2817 hstate.add_hwi (n->size);
2818 return hstate.end ();
2821 /* The equality function of *P1 and *P2. */
2823 inline bool
2824 refs_hasher::equal (const ref_to_bb *n1, const ref_to_bb *n2)
2826 return operand_equal_p (n1->exp, n2->exp, OEP_ADDRESS_OF)
2827 && n1->size == n2->size;
2830 class nontrapping_dom_walker : public dom_walker
2832 public:
2833 nontrapping_dom_walker (cdi_direction direction, hash_set<tree> *ps)
2834 : dom_walker (direction), m_nontrapping (ps), m_seen_refs (128)
2837 virtual edge before_dom_children (basic_block);
2838 virtual void after_dom_children (basic_block);
2840 private:
2842 /* We see the expression EXP in basic block BB. If it's an interesting
2843 expression (a MEM_REF through an SSA_NAME) possibly insert the
2844 expression into the set NONTRAP or the hash table of seen expressions.
2845 STORE is true if this expression is on the LHS, otherwise it's on
2846 the RHS. */
2847 void add_or_mark_expr (basic_block, tree, bool);
2849 hash_set<tree> *m_nontrapping;
2851 /* The hash table for remembering what we've seen. */
2852 hash_table<refs_hasher> m_seen_refs;
2855 /* Called by walk_dominator_tree, when entering the block BB. */
2856 edge
2857 nontrapping_dom_walker::before_dom_children (basic_block bb)
2859 edge e;
2860 edge_iterator ei;
2861 gimple_stmt_iterator gsi;
2863 /* If we haven't seen all our predecessors, clear the hash-table. */
2864 FOR_EACH_EDGE (e, ei, bb->preds)
2865 if ((((size_t)e->src->aux) & 2) == 0)
2867 nt_call_phase++;
2868 break;
2871 /* Mark this BB as being on the path to dominator root and as visited. */
2872 bb->aux = (void*)(1 | 2);
2874 /* And walk the statements in order. */
2875 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2877 gimple *stmt = gsi_stmt (gsi);
2879 if ((gimple_code (stmt) == GIMPLE_ASM && gimple_vdef (stmt))
2880 || (is_gimple_call (stmt)
2881 && (!nonfreeing_call_p (stmt) || !nonbarrier_call_p (stmt))))
2882 nt_call_phase++;
2883 else if (gimple_assign_single_p (stmt) && !gimple_has_volatile_ops (stmt))
2885 add_or_mark_expr (bb, gimple_assign_lhs (stmt), true);
2886 add_or_mark_expr (bb, gimple_assign_rhs1 (stmt), false);
2889 return NULL;
2892 /* Called by walk_dominator_tree, when basic block BB is exited. */
2893 void
2894 nontrapping_dom_walker::after_dom_children (basic_block bb)
2896 /* This BB isn't on the path to dominator root anymore. */
2897 bb->aux = (void*)2;
2900 /* We see the expression EXP in basic block BB. If it's an interesting
2901 expression of:
2902 1) MEM_REF
2903 2) ARRAY_REF
2904 3) COMPONENT_REF
2905 possibly insert the expression into the set NONTRAP or the hash table
2906 of seen expressions. STORE is true if this expression is on the LHS,
2907 otherwise it's on the RHS. */
2908 void
2909 nontrapping_dom_walker::add_or_mark_expr (basic_block bb, tree exp, bool store)
2911 HOST_WIDE_INT size;
2913 if ((TREE_CODE (exp) == MEM_REF || TREE_CODE (exp) == ARRAY_REF
2914 || TREE_CODE (exp) == COMPONENT_REF)
2915 && (size = int_size_in_bytes (TREE_TYPE (exp))) > 0)
2917 struct ref_to_bb map;
2918 ref_to_bb **slot;
2919 struct ref_to_bb *r2bb;
2920 basic_block found_bb = 0;
2922 if (!store)
2924 tree base = get_base_address (exp);
2925 /* Only record a LOAD of a local variable whose address is not taken,
2926 as the local stack is always writable. This allows cselim on a STORE
2927 with a dominating LOAD. */
2928 if (!auto_var_p (base) || TREE_ADDRESSABLE (base))
2929 return;
2932 /* Try to find the last seen *_REF, which can trap. */
2933 map.exp = exp;
2934 map.size = size;
2935 slot = m_seen_refs.find_slot (&map, INSERT);
2936 r2bb = *slot;
2937 if (r2bb && r2bb->phase >= nt_call_phase)
2938 found_bb = r2bb->bb;
2940 /* If we've found a trapping *_REF, _and_ it dominates EXP
2941 (it's in a basic block on the path from us to the dominator root)
2942 then we can't trap. */
2943 if (found_bb && (((size_t)found_bb->aux) & 1) == 1)
2945 m_nontrapping->add (exp);
2947 else
2949 /* EXP might trap, so insert it into the hash table. */
2950 if (r2bb)
2952 r2bb->phase = nt_call_phase;
2953 r2bb->bb = bb;
2955 else
2957 r2bb = XNEW (struct ref_to_bb);
2958 r2bb->phase = nt_call_phase;
2959 r2bb->bb = bb;
2960 r2bb->exp = exp;
2961 r2bb->size = size;
2962 *slot = r2bb;
2968 /* This is the entry point of gathering non-trapping memory accesses.
2969 It will do a dominator walk over the whole function, and it will
2970 make use of the bb->aux pointers. It returns a set of trees
2971 (the MEM_REFs themselves) which can't trap. */
2972 static hash_set<tree> *
2973 get_non_trapping (void)
2975 nt_call_phase = 0;
2976 hash_set<tree> *nontrap = new hash_set<tree>;
2978 nontrapping_dom_walker (CDI_DOMINATORS, nontrap)
2979 .walk (cfun->cfg->x_entry_block_ptr);
2981 clear_aux_for_blocks ();
2982 return nontrap;
2985 /* Do the main work of conditional store replacement. We already know
2986 that the recognized pattern looks like so:
2988 split:
2989 if (cond) goto MIDDLE_BB; else goto JOIN_BB (edge E1)
2990 MIDDLE_BB:
2991 something
2992 fallthrough (edge E0)
2993 JOIN_BB:
2994 some more
2996 We check that MIDDLE_BB contains only one store, that that store
2997 doesn't trap (not via NOTRAP, but via checking if an access to the same
2998 memory location dominates us, or the store is to a local automatic
2999 object whose address is not taken) and that the store has a "simple" RHS. */
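/* For instance (an illustrative sketch), a store to a local whose address
   is not taken qualifies without any dominating access:
     int f (int c)
     {
       int x[2] = { 0, 0 };
       if (c)
	 x[0] = 5;	// MIDDLE_BB: single simple store, cannot trap
       return x[0];	// JOIN_BB
     }
   The store is replaced by an unconditional store of a PHI of the new value
   and a load of the old memory contents. */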
3001 static bool
3002 cond_store_replacement (basic_block middle_bb, basic_block join_bb,
3003 edge e0, edge e1, hash_set<tree> *nontrap)
3005 gimple *assign = last_and_only_stmt (middle_bb);
3006 tree lhs, rhs, name, name2;
3007 gphi *newphi;
3008 gassign *new_stmt;
3009 gimple_stmt_iterator gsi;
3010 location_t locus;
3012 /* Check that middle_bb contains only one store. */
3013 if (!assign
3014 || !gimple_assign_single_p (assign)
3015 || gimple_has_volatile_ops (assign))
3016 return false;
3018 /* And no PHI nodes so all uses in the single stmt are also
3019 available where we insert to. */
3020 if (!gimple_seq_empty_p (phi_nodes (middle_bb)))
3021 return false;
3023 locus = gimple_location (assign);
3024 lhs = gimple_assign_lhs (assign);
3025 rhs = gimple_assign_rhs1 (assign);
3026 if ((!REFERENCE_CLASS_P (lhs)
3027 && !DECL_P (lhs))
3028 || !is_gimple_reg_type (TREE_TYPE (lhs)))
3029 return false;
3031 /* Prove that we can move the store down. We could also check
3032 TREE_THIS_NOTRAP here, but in that case we also could move stores,
3033 whose value is not available readily, which we want to avoid. */
3034 if (!nontrap->contains (lhs))
3036 /* If LHS is an access to a local variable whose address is not taken
3037 (or when we allow data races) and known not to trap, we can
3038 always safely move down the store. */
3039 tree base = get_base_address (lhs);
3040 if (!auto_var_p (base)
3041 || (TREE_ADDRESSABLE (base) && !flag_store_data_races)
3042 || tree_could_trap_p (lhs))
3043 return false;
3046 /* Now we've checked the constraints, so do the transformation:
3047 1) Remove the single store. */
3048 gsi = gsi_for_stmt (assign);
3049 unlink_stmt_vdef (assign);
3050 gsi_remove (&gsi, true);
3051 release_defs (assign);
3053 /* Make both store and load use alias-set zero as we have to
3054 deal with the case of the store being a conditional change
3055 of the dynamic type. */
3056 lhs = unshare_expr (lhs);
3057 tree *basep = &lhs;
3058 while (handled_component_p (*basep))
3059 basep = &TREE_OPERAND (*basep, 0);
3060 if (TREE_CODE (*basep) == MEM_REF
3061 || TREE_CODE (*basep) == TARGET_MEM_REF)
3062 TREE_OPERAND (*basep, 1)
3063 = fold_convert (ptr_type_node, TREE_OPERAND (*basep, 1));
3064 else
3065 *basep = build2 (MEM_REF, TREE_TYPE (*basep),
3066 build_fold_addr_expr (*basep),
3067 build_zero_cst (ptr_type_node));
3069 /* 2) Insert a load from the memory of the store to the temporary
3070 on the edge which did not contain the store. */
3071 name = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
3072 new_stmt = gimple_build_assign (name, lhs);
3073 gimple_set_location (new_stmt, locus);
3074 lhs = unshare_expr (lhs);
3076 /* Set the no-warning bit on the rhs of the load to avoid uninit
3077 warnings. */
3078 tree rhs1 = gimple_assign_rhs1 (new_stmt);
3079 suppress_warning (rhs1, OPT_Wuninitialized);
3081 gsi_insert_on_edge (e1, new_stmt);
3083 /* 3) Create a PHI node at the join block, with one argument
3084 holding the old RHS, and the other holding the temporary
3085 where we stored the old memory contents. */
3086 name2 = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
3087 newphi = create_phi_node (name2, join_bb);
3088 add_phi_arg (newphi, rhs, e0, locus);
3089 add_phi_arg (newphi, name, e1, locus);
3091 new_stmt = gimple_build_assign (lhs, PHI_RESULT (newphi));
3093 /* 4) Insert that PHI node. */
3094 gsi = gsi_after_labels (join_bb);
3095 if (gsi_end_p (gsi))
3097 gsi = gsi_last_bb (join_bb);
3098 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
3100 else
3101 gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
3103 if (dump_file && (dump_flags & TDF_DETAILS))
3105 fprintf (dump_file, "\nConditional store replacement happened!");
3106 fprintf (dump_file, "\nReplaced the store with a load.");
3107 fprintf (dump_file, "\nInserted a new PHI statement in join block:\n");
3108 print_gimple_stmt (dump_file, new_stmt, 0, TDF_VOPS|TDF_MEMSYMS);
3110 statistics_counter_event (cfun, "conditional store replacement", 1);
3112 return true;
3115 /* Do the main work of conditional store replacement. */
3117 static bool
3118 cond_if_else_store_replacement_1 (basic_block then_bb, basic_block else_bb,
3119 basic_block join_bb, gimple *then_assign,
3120 gimple *else_assign)
3122 tree lhs_base, lhs, then_rhs, else_rhs, name;
3123 location_t then_locus, else_locus;
3124 gimple_stmt_iterator gsi;
3125 gphi *newphi;
3126 gassign *new_stmt;
3128 if (then_assign == NULL
3129 || !gimple_assign_single_p (then_assign)
3130 || gimple_clobber_p (then_assign)
3131 || gimple_has_volatile_ops (then_assign)
3132 || else_assign == NULL
3133 || !gimple_assign_single_p (else_assign)
3134 || gimple_clobber_p (else_assign)
3135 || gimple_has_volatile_ops (else_assign))
3136 return false;
3138 lhs = gimple_assign_lhs (then_assign);
3139 if (!is_gimple_reg_type (TREE_TYPE (lhs))
3140 || !operand_equal_p (lhs, gimple_assign_lhs (else_assign), 0))
3141 return false;
3143 lhs_base = get_base_address (lhs);
3144 if (lhs_base == NULL_TREE
3145 || (!DECL_P (lhs_base) && TREE_CODE (lhs_base) != MEM_REF))
3146 return false;
3148 then_rhs = gimple_assign_rhs1 (then_assign);
3149 else_rhs = gimple_assign_rhs1 (else_assign);
3150 then_locus = gimple_location (then_assign);
3151 else_locus = gimple_location (else_assign);
3153 /* Now we've checked the constraints, so do the transformation:
3154 1) Remove the stores. */
3155 gsi = gsi_for_stmt (then_assign);
3156 unlink_stmt_vdef (then_assign);
3157 gsi_remove (&gsi, true);
3158 release_defs (then_assign);
3160 gsi = gsi_for_stmt (else_assign);
3161 unlink_stmt_vdef (else_assign);
3162 gsi_remove (&gsi, true);
3163 release_defs (else_assign);
3165 /* 2) Create a PHI node at the join block, with one argument
3166 holding the RHS stored in THEN_BB and the other holding the
3167 RHS stored in ELSE_BB. */
3168 name = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
3169 newphi = create_phi_node (name, join_bb);
3170 add_phi_arg (newphi, then_rhs, EDGE_SUCC (then_bb, 0), then_locus);
3171 add_phi_arg (newphi, else_rhs, EDGE_SUCC (else_bb, 0), else_locus);
3173 new_stmt = gimple_build_assign (lhs, PHI_RESULT (newphi));
3175 /* 3) Insert that PHI node. */
3176 gsi = gsi_after_labels (join_bb);
3177 if (gsi_end_p (gsi))
3179 gsi = gsi_last_bb (join_bb);
3180 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
3182 else
3183 gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
3185 statistics_counter_event (cfun, "if-then-else store replacement", 1);
3187 return true;
3190 /* Return the single store in BB with VDEF or NULL if there are
3191 other stores in the BB or loads following the store. */
3193 static gimple *
3194 single_trailing_store_in_bb (basic_block bb, tree vdef)
3196 if (SSA_NAME_IS_DEFAULT_DEF (vdef))
3197 return NULL;
3198 gimple *store = SSA_NAME_DEF_STMT (vdef);
3199 if (gimple_bb (store) != bb
3200 || gimple_code (store) == GIMPLE_PHI)
3201 return NULL;
3203 /* Verify there is no other store in this BB. */
3204 if (!SSA_NAME_IS_DEFAULT_DEF (gimple_vuse (store))
3205 && gimple_bb (SSA_NAME_DEF_STMT (gimple_vuse (store))) == bb
3206 && gimple_code (SSA_NAME_DEF_STMT (gimple_vuse (store))) != GIMPLE_PHI)
3207 return NULL;
3209 /* Verify there is no load or store after the store. */
3210 use_operand_p use_p;
3211 imm_use_iterator imm_iter;
3212 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, gimple_vdef (store))
3213 if (USE_STMT (use_p) != store
3214 && gimple_bb (USE_STMT (use_p)) == bb)
3215 return NULL;
3217 return store;
3220 /* Conditional store replacement. We already know
3221 that the recognized pattern looks like so:
3223 split:
3224 if (cond) goto THEN_BB; else goto ELSE_BB (edge E1)
3225 THEN_BB:
3227 X = Y;
3229 goto JOIN_BB;
3230 ELSE_BB:
3232 X = Z;
3234 fallthrough (edge E0)
3235 JOIN_BB:
3236 some more
3238 We check that it is safe to sink the store to JOIN_BB by verifying that
3239 there are no read-after-write or write-after-write dependencies in
3240 THEN_BB and ELSE_BB. */
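/* For instance (an illustrative sketch), both branches store to the same
   location:
     if (cond)
       x[i] = y;
     else
       x[i] = z;
   so the store can be sunk into JOIN_BB as x[i] = cond ? y : z; no
   speculation is introduced because a store happens on every path. */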
3242 static bool
3243 cond_if_else_store_replacement (basic_block then_bb, basic_block else_bb,
3244 basic_block join_bb)
3246 vec<data_reference_p> then_datarefs, else_datarefs;
3247 vec<ddr_p> then_ddrs, else_ddrs;
3248 gimple *then_store, *else_store;
3249 bool found, ok = false, res;
3250 struct data_dependence_relation *ddr;
3251 data_reference_p then_dr, else_dr;
3252 int i, j;
3253 tree then_lhs, else_lhs;
3254 basic_block blocks[3];
3256 /* Handle the case with single store in THEN_BB and ELSE_BB. That is
3257 cheap enough to always handle as it allows us to elide dependence
3258 checking. */
3259 gphi *vphi = NULL;
3260 for (gphi_iterator si = gsi_start_phis (join_bb); !gsi_end_p (si);
3261 gsi_next (&si))
3262 if (virtual_operand_p (gimple_phi_result (si.phi ())))
3264 vphi = si.phi ();
3265 break;
3267 if (!vphi)
3268 return false;
3269 tree then_vdef = PHI_ARG_DEF_FROM_EDGE (vphi, single_succ_edge (then_bb));
3270 tree else_vdef = PHI_ARG_DEF_FROM_EDGE (vphi, single_succ_edge (else_bb));
3271 gimple *then_assign = single_trailing_store_in_bb (then_bb, then_vdef);
3272 if (then_assign)
3274 gimple *else_assign = single_trailing_store_in_bb (else_bb, else_vdef);
3275 if (else_assign)
3276 return cond_if_else_store_replacement_1 (then_bb, else_bb, join_bb,
3277 then_assign, else_assign);
3280 /* If either vectorization or if-conversion is disabled then do
3281 not sink any stores. */
3282 if (param_max_stores_to_sink == 0
3283 || (!flag_tree_loop_vectorize && !flag_tree_slp_vectorize)
3284 || !flag_tree_loop_if_convert)
3285 return false;
3287 /* Find data references. */
3288 then_datarefs.create (1);
3289 else_datarefs.create (1);
3290 if ((find_data_references_in_bb (NULL, then_bb, &then_datarefs)
3291 == chrec_dont_know)
3292 || !then_datarefs.length ()
3293 || (find_data_references_in_bb (NULL, else_bb, &else_datarefs)
3294 == chrec_dont_know)
3295 || !else_datarefs.length ())
3297 free_data_refs (then_datarefs);
3298 free_data_refs (else_datarefs);
3299 return false;
3302 /* Find pairs of stores with equal LHS. */
3303 auto_vec<gimple *, 1> then_stores, else_stores;
3304 FOR_EACH_VEC_ELT (then_datarefs, i, then_dr)
3306 if (DR_IS_READ (then_dr))
3307 continue;
3309 then_store = DR_STMT (then_dr);
3310 then_lhs = gimple_get_lhs (then_store);
3311 if (then_lhs == NULL_TREE)
3312 continue;
3313 found = false;
3315 FOR_EACH_VEC_ELT (else_datarefs, j, else_dr)
3317 if (DR_IS_READ (else_dr))
3318 continue;
3320 else_store = DR_STMT (else_dr);
3321 else_lhs = gimple_get_lhs (else_store);
3322 if (else_lhs == NULL_TREE)
3323 continue;
3325 if (operand_equal_p (then_lhs, else_lhs, 0))
3327 found = true;
3328 break;
3332 if (!found)
3333 continue;
3335 then_stores.safe_push (then_store);
3336 else_stores.safe_push (else_store);
3339 /* No pairs of stores found, or too many stores to sink. */
3340 if (!then_stores.length ()
3341 || then_stores.length () > (unsigned) param_max_stores_to_sink)
3343 free_data_refs (then_datarefs);
3344 free_data_refs (else_datarefs);
3345 return false;
3348 /* Compute and check data dependencies in both basic blocks. */
3349 then_ddrs.create (1);
3350 else_ddrs.create (1);
3351 if (!compute_all_dependences (then_datarefs, &then_ddrs,
3352 vNULL, false)
3353 || !compute_all_dependences (else_datarefs, &else_ddrs,
3354 vNULL, false))
3356 free_dependence_relations (then_ddrs);
3357 free_dependence_relations (else_ddrs);
3358 free_data_refs (then_datarefs);
3359 free_data_refs (else_datarefs);
3360 return false;
3362 blocks[0] = then_bb;
3363 blocks[1] = else_bb;
3364 blocks[2] = join_bb;
3365 renumber_gimple_stmt_uids_in_blocks (blocks, 3);
3367 /* Check that there are no read-after-write or write-after-write dependencies
3368 in THEN_BB. */
3369 FOR_EACH_VEC_ELT (then_ddrs, i, ddr)
3371 struct data_reference *dra = DDR_A (ddr);
3372 struct data_reference *drb = DDR_B (ddr);
3374 if (DDR_ARE_DEPENDENT (ddr) != chrec_known
3375 && ((DR_IS_READ (dra) && DR_IS_WRITE (drb)
3376 && gimple_uid (DR_STMT (dra)) > gimple_uid (DR_STMT (drb)))
3377 || (DR_IS_READ (drb) && DR_IS_WRITE (dra)
3378 && gimple_uid (DR_STMT (drb)) > gimple_uid (DR_STMT (dra)))
3379 || (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))))
3381 free_dependence_relations (then_ddrs);
3382 free_dependence_relations (else_ddrs);
3383 free_data_refs (then_datarefs);
3384 free_data_refs (else_datarefs);
3385 return false;
3389 /* Check that there are no read-after-write or write-after-write dependencies
3390 in ELSE_BB. */
3391 FOR_EACH_VEC_ELT (else_ddrs, i, ddr)
3393 struct data_reference *dra = DDR_A (ddr);
3394 struct data_reference *drb = DDR_B (ddr);
3396 if (DDR_ARE_DEPENDENT (ddr) != chrec_known
3397 && ((DR_IS_READ (dra) && DR_IS_WRITE (drb)
3398 && gimple_uid (DR_STMT (dra)) > gimple_uid (DR_STMT (drb)))
3399 || (DR_IS_READ (drb) && DR_IS_WRITE (dra)
3400 && gimple_uid (DR_STMT (drb)) > gimple_uid (DR_STMT (dra)))
3401 || (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))))
3403 free_dependence_relations (then_ddrs);
3404 free_dependence_relations (else_ddrs);
3405 free_data_refs (then_datarefs);
3406 free_data_refs (else_datarefs);
3407 return false;
3411 /* Sink stores with same LHS. */
3412 FOR_EACH_VEC_ELT (then_stores, i, then_store)
3414 else_store = else_stores[i];
3415 res = cond_if_else_store_replacement_1 (then_bb, else_bb, join_bb,
3416 then_store, else_store);
3417 ok = ok || res;
3420 free_dependence_relations (then_ddrs);
3421 free_dependence_relations (else_ddrs);
3422 free_data_refs (then_datarefs);
3423 free_data_refs (else_datarefs);
3425 return ok;
3428 /* Return TRUE if STMT has a VUSE whose corresponding VDEF is in BB. */
3430 static bool
3431 local_mem_dependence (gimple *stmt, basic_block bb)
3433 tree vuse = gimple_vuse (stmt);
3434 gimple *def;
3436 if (!vuse)
3437 return false;
3439 def = SSA_NAME_DEF_STMT (vuse);
3440 return (def && gimple_bb (def) == bb);
3443 /* Given a "diamond" control-flow pattern where BB0 tests a condition,
3444 BB1 and BB2 are "then" and "else" blocks dependent on this test,
3445 and BB3 rejoins control flow following BB1 and BB2, look for
3446 opportunities to hoist loads as follows. If BB3 contains a PHI of
3447 two loads, one each occurring in BB1 and BB2, and the loads are
3448 provably of adjacent fields in the same structure, then move both
3449 loads into BB0. Of course this can only be done if there are no
3450 dependencies preventing such motion.
3452 One of the hoisted loads will always be speculative, so the
3453 transformation is currently conservative:
3455 - The fields must be strictly adjacent.
3456 - The two fields must occupy a single memory block that is
3457 guaranteed to not cross a page boundary.
3459 The last is difficult to prove, as such memory blocks should be
3460 aligned on the minimum of the stack alignment boundary and the
3461 alignment guaranteed by heap allocation interfaces. Thus we rely
3462 on a parameter for the alignment value.
3464 Provided a good value is used for the last case, the first
3465 restriction could possibly be relaxed. */
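/* For instance (an illustrative sketch):
     struct node { struct node *left, *right; };
     ... x = cond ? n->left : n->right; ...
   Assuming an LP64 target with 64-byte (512-bit) cache lines, we get
   offset1 = 0, offset2 = 64, size2 = 64 and align1 = DECL_ALIGN (left)
   % 512 = 64, so align1 + offset2 - offset1 + size2 = 192 <= 512 and
   both loads may be hoisted; fields that are not strictly adjacent are
   left alone. */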
3467 static void
3468 hoist_adjacent_loads (basic_block bb0, basic_block bb1,
3469 basic_block bb2, basic_block bb3)
3471 int param_align = param_l1_cache_line_size;
3472 unsigned param_align_bits = (unsigned) (param_align * BITS_PER_UNIT);
3473 gphi_iterator gsi;
3475 /* Walk the phis in bb3 looking for an opportunity. We are looking
3476 for phis of two SSA names, one each of which is defined in bb1 and
3477 bb2. */
3478 for (gsi = gsi_start_phis (bb3); !gsi_end_p (gsi); gsi_next (&gsi))
3480 gphi *phi_stmt = gsi.phi ();
3481 gimple *def1, *def2;
3482 tree arg1, arg2, ref1, ref2, field1, field2;
3483 tree tree_offset1, tree_offset2, tree_size2, next;
3484 int offset1, offset2, size2;
3485 unsigned align1;
3486 gimple_stmt_iterator gsi2;
3487 basic_block bb_for_def1, bb_for_def2;
3489 if (gimple_phi_num_args (phi_stmt) != 2
3490 || virtual_operand_p (gimple_phi_result (phi_stmt)))
3491 continue;
3493 arg1 = gimple_phi_arg_def (phi_stmt, 0);
3494 arg2 = gimple_phi_arg_def (phi_stmt, 1);
3496 if (TREE_CODE (arg1) != SSA_NAME
3497 || TREE_CODE (arg2) != SSA_NAME
3498 || SSA_NAME_IS_DEFAULT_DEF (arg1)
3499 || SSA_NAME_IS_DEFAULT_DEF (arg2))
3500 continue;
3502 def1 = SSA_NAME_DEF_STMT (arg1);
3503 def2 = SSA_NAME_DEF_STMT (arg2);
3505 if ((gimple_bb (def1) != bb1 || gimple_bb (def2) != bb2)
3506 && (gimple_bb (def2) != bb1 || gimple_bb (def1) != bb2))
3507 continue;
3509 /* Check the mode of the arguments to be sure a conditional move
3510 can be generated for it. */
3511 if (optab_handler (movcc_optab, TYPE_MODE (TREE_TYPE (arg1)))
3512 == CODE_FOR_nothing)
3513 continue;
3515 /* Both statements must be assignments whose RHS is a COMPONENT_REF. */
3516 if (!gimple_assign_single_p (def1)
3517 || !gimple_assign_single_p (def2)
3518 || gimple_has_volatile_ops (def1)
3519 || gimple_has_volatile_ops (def2))
3520 continue;
3522 ref1 = gimple_assign_rhs1 (def1);
3523 ref2 = gimple_assign_rhs1 (def2);
3525 if (TREE_CODE (ref1) != COMPONENT_REF
3526 || TREE_CODE (ref2) != COMPONENT_REF)
3527 continue;
3529 /* The zeroth operand of the two component references must be
3530 identical. It is not sufficient to compare get_base_address of
3531 the two references, because this could allow for different
3532 elements of the same array in the two trees. It is not safe to
3533 assume that the existence of one array element implies the
3534 existence of a different one. */
3535 if (!operand_equal_p (TREE_OPERAND (ref1, 0), TREE_OPERAND (ref2, 0), 0))
3536 continue;
3538 field1 = TREE_OPERAND (ref1, 1);
3539 field2 = TREE_OPERAND (ref2, 1);
3541 /* Check for field adjacency, and ensure field1 comes first. */
3542 for (next = DECL_CHAIN (field1);
3543 next && TREE_CODE (next) != FIELD_DECL;
3544 next = DECL_CHAIN (next))
3547 if (next != field2)
3549 for (next = DECL_CHAIN (field2);
3550 next && TREE_CODE (next) != FIELD_DECL;
3551 next = DECL_CHAIN (next))
3554 if (next != field1)
3555 continue;
3557 std::swap (field1, field2);
3558 std::swap (def1, def2);
3561 bb_for_def1 = gimple_bb (def1);
3562 bb_for_def2 = gimple_bb (def2);
3564 /* Check for proper alignment of the first field. */
3565 tree_offset1 = bit_position (field1);
3566 tree_offset2 = bit_position (field2);
3567 tree_size2 = DECL_SIZE (field2);
3569 if (!tree_fits_uhwi_p (tree_offset1)
3570 || !tree_fits_uhwi_p (tree_offset2)
3571 || !tree_fits_uhwi_p (tree_size2))
3572 continue;
3574 offset1 = tree_to_uhwi (tree_offset1);
3575 offset2 = tree_to_uhwi (tree_offset2);
3576 size2 = tree_to_uhwi (tree_size2);
3577 align1 = DECL_ALIGN (field1) % param_align_bits;
3579 if (offset1 % BITS_PER_UNIT != 0)
3580 continue;
3582 /* For profitability, the two field references should fit within
3583 a single cache line. */
3584 if (align1 + offset2 - offset1 + size2 > param_align_bits)
3585 continue;
3587 /* The two expressions cannot be dependent upon vdefs defined
3588 in bb1/bb2. */
3589 if (local_mem_dependence (def1, bb_for_def1)
3590 || local_mem_dependence (def2, bb_for_def2))
3591 continue;
3593 /* The conditions are satisfied; hoist the loads from bb1 and bb2 into
3594 bb0. We hoist the first one first so that a cache miss is handled
3595 efficiently regardless of hardware cache-fill policy. */
3596 gsi2 = gsi_for_stmt (def1);
3597 gsi_move_to_bb_end (&gsi2, bb0);
3598 gsi2 = gsi_for_stmt (def2);
3599 gsi_move_to_bb_end (&gsi2, bb0);
3600 statistics_counter_event (cfun, "hoisted loads", 1);
3602 if (dump_file && (dump_flags & TDF_DETAILS))
3604 fprintf (dump_file,
3605 "\nHoisting adjacent loads from %d and %d into %d: \n",
3606 bb_for_def1->index, bb_for_def2->index, bb0->index);
3607 print_gimple_stmt (dump_file, def1, 0, TDF_VOPS|TDF_MEMSYMS);
3608 print_gimple_stmt (dump_file, def2, 0, TDF_VOPS|TDF_MEMSYMS);
3613 /* Determine whether we should attempt to hoist adjacent loads out of
3614 diamond patterns in pass_phiopt. Always hoist loads if
3615 -fhoist-adjacent-loads is specified and the target machine has
3616 both a conditional move instruction and a defined cache line size. */
3618 static bool
3619 gate_hoist_loads (void)
3621 return (flag_hoist_adjacent_loads == 1
3622 && param_l1_cache_line_size
3623 && HAVE_conditional_move);
3626 /* This pass tries to replace an if-then-else block with an
3627 assignment. We have four kinds of transformations. Some of these
3628 transformations are also performed by the ifcvt RTL optimizer.
3630 Conditional Replacement
3631 -----------------------
3633 This transformation, implemented in match_simplify_replacement,
3634 replaces
3636 bb0:
3637 if (cond) goto bb2; else goto bb1;
3638 bb1:
3639 bb2:
3640 x = PHI <0 (bb1), 1 (bb0), ...>;
3642 with
3644 bb0:
3645 x' = cond;
3646 goto bb2;
3647 bb2:
3648 x = PHI <x' (bb0), ...>;
3650 We remove bb1 as it becomes unreachable. This occurs often due to
3651 gimplification of conditionals.
3653 Value Replacement
3654 -----------------
3656 This transformation, implemented in value_replacement, replaces
3658 bb0:
3659 if (a != b) goto bb2; else goto bb1;
3660 bb1:
3661 bb2:
3662 x = PHI <a (bb1), b (bb0), ...>;
3664 with
3666 bb0:
3667 bb2:
3668 x = PHI <b (bb0), ...>;
3670 This opportunity can sometimes occur as a result of other
3671 optimizations.
3674 Another case caught by value replacement looks like this:
3676 bb0:
3677 t1 = a == CONST;
3678 t2 = b > c;
3679 t3 = t1 & t2;
3680 if (t3 != 0) goto bb1; else goto bb2;
3681 bb1:
3682 bb2:
3683 x = PHI (CONST, a)
3685 Gets replaced with:
3686 bb0:
3687 bb2:
3688 t1 = a == CONST;
3689 t2 = b > c;
3690 t3 = t1 & t2;
3691 x = a;
3693 ABS Replacement
3694 ---------------
3696 This transformation, implemented in match_simplify_replacement, replaces
3698 bb0:
3699 if (a >= 0) goto bb2; else goto bb1;
3700 bb1:
3701 x = -a;
3702 bb2:
3703 x = PHI <x (bb1), a (bb0), ...>;
3705 with
3707 bb0:
3708 x' = ABS_EXPR< a >;
3709 bb2:
3710 x = PHI <x' (bb0), ...>;
3712 MIN/MAX Replacement
3713 -------------------
3715 This transformation, implemented in minmax_replacement, replaces
3717 bb0:
3718 if (a <= b) goto bb2; else goto bb1;
3719 bb1:
3720 bb2:
3721 x = PHI <b (bb1), a (bb0), ...>;
3723 with
3725 bb0:
3726 x' = MIN_EXPR (a, b)
3727 bb2:
3728 x = PHI <x' (bb0), ...>;
3730 A similar transformation is done for MAX_EXPR.
3733 This pass also performs a fifth transformation of a slightly different
3734 flavor.
3736 Factor conversion in COND_EXPR
3737 ------------------------------
3739 This transformation factors the conversion out of COND_EXPR with
3740 factor_out_conditional_conversion.
3742 For example:
3743 if (a <= CST) goto <bb 3>; else goto <bb 4>;
3744 <bb 3>:
3745 tmp = (int) a;
3746 <bb 4>:
3747 tmp = PHI <tmp, CST>
3749 Into:
3750 if (a <= CST) goto <bb 3>; else goto <bb 4>;
3751 <bb 3>:
3752 <bb 4>:
3753 a = PHI <a, CST>
3754 tmp = (int) a;
3756 Adjacent Load Hoisting
3757 ----------------------
3759 This transformation replaces
3761 bb0:
3762 if (...) goto bb2; else goto bb1;
3763 bb1:
3764 x1 = (<expr>).field1;
3765 goto bb3;
3766 bb2:
3767 x2 = (<expr>).field2;
3768 bb3:
3769 # x = PHI <x1, x2>;
3771 with
3773 bb0:
3774 x1 = (<expr>).field1;
3775 x2 = (<expr>).field2;
3776 if (...) goto bb2; else goto bb1;
3777 bb1:
3778 goto bb3;
3779 bb2:
3780 bb3:
3781 # x = PHI <x1, x2>;
3783 The purpose of this transformation is to enable generation of conditional
3784 move instructions such as Intel CMOVE or PowerPC ISEL. Because one of
3785 the loads is speculative, the transformation is restricted to very
3786 specific cases to avoid introducing a page fault. We are looking for
3787 the common idiom:
3789 if (...)
3790 x = y->left;
3791 else
3792 x = y->right;
3794 where left and right are typically adjacent pointers in a tree structure. */
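/* At the source level the first four transformations correspond to idioms
   like the following illustrative sketches (names made up):
     int r1 = cond ? 1 : 0;		// Conditional replacement: r1 = cond
     int r2 = (a == b) ? a : b;		// Value replacement:       r2 = b
     int r3 = (a >= 0) ? a : -a;	// ABS replacement:         r3 = ABS (a)
     int r4 = (a <= b) ? a : b;		// MIN/MAX replacement:     r4 = MIN (a, b)
   each of which becomes a single straight-line statement. */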
3796 namespace {
3798 const pass_data pass_data_phiopt =
3800 GIMPLE_PASS, /* type */
3801 "phiopt", /* name */
3802 OPTGROUP_NONE, /* optinfo_flags */
3803 TV_TREE_PHIOPT, /* tv_id */
3804 ( PROP_cfg | PROP_ssa ), /* properties_required */
3805 0, /* properties_provided */
3806 0, /* properties_destroyed */
3807 0, /* todo_flags_start */
3808 0, /* todo_flags_finish */
3811 class pass_phiopt : public gimple_opt_pass
3813 public:
3814 pass_phiopt (gcc::context *ctxt)
3815 : gimple_opt_pass (pass_data_phiopt, ctxt), early_p (false)
3818 /* opt_pass methods: */
3819 opt_pass * clone () { return new pass_phiopt (m_ctxt); }
3820 void set_pass_param (unsigned n, bool param)
3822 gcc_assert (n == 0);
3823 early_p = param;
3825 virtual bool gate (function *) { return flag_ssa_phiopt; }
3826 virtual unsigned int execute (function *)
3828 return tree_ssa_phiopt_worker (false,
3829 !early_p ? gate_hoist_loads () : false,
3830 early_p);
3833 private:
3834 bool early_p;
3835 }; // class pass_phiopt
3837 } // anon namespace
3839 gimple_opt_pass *
3840 make_pass_phiopt (gcc::context *ctxt)
3842 return new pass_phiopt (ctxt);
3845 namespace {
3847 const pass_data pass_data_cselim =
3849 GIMPLE_PASS, /* type */
3850 "cselim", /* name */
3851 OPTGROUP_NONE, /* optinfo_flags */
3852 TV_TREE_PHIOPT, /* tv_id */
3853 ( PROP_cfg | PROP_ssa ), /* properties_required */
3854 0, /* properties_provided */
3855 0, /* properties_destroyed */
3856 0, /* todo_flags_start */
3857 0, /* todo_flags_finish */
3860 class pass_cselim : public gimple_opt_pass
3862 public:
3863 pass_cselim (gcc::context *ctxt)
3864 : gimple_opt_pass (pass_data_cselim, ctxt)
3867 /* opt_pass methods: */
3868 virtual bool gate (function *) { return flag_tree_cselim; }
3869 virtual unsigned int execute (function *) { return tree_ssa_cs_elim (); }
3871 }; // class pass_cselim
3873 } // anon namespace
3875 gimple_opt_pass *
3876 make_pass_cselim (gcc::context *ctxt)
3878 return new pass_cselim (ctxt);