/* Optimization of PHI nodes by converting them into straightline code.
   Copyright (C) 2004-2020 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "insn-codes.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "tree-pass.h"
#include "ssa.h"
#include "optabs-tree.h"
#include "insn-config.h"
#include "gimple-pretty-print.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "cfganal.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "tree-cfg.h"
#include "tree-dfa.h"
#include "domwalk.h"
#include "cfgloop.h"
#include "tree-data-ref.h"
#include "tree-scalar-evolution.h"
#include "tree-inline.h"
#include "case-cfn-macros.h"
#include "tree-eh.h"
#include "gimple-fold.h"
#include "internal-fn.h"

static unsigned int tree_ssa_phiopt_worker (bool, bool, bool);
static bool two_value_replacement (basic_block, basic_block, edge, gphi *,
                                   tree, tree);
static bool conditional_replacement (basic_block, basic_block,
                                     edge, edge, gphi *, tree, tree);
static gphi *factor_out_conditional_conversion (edge, edge, gphi *, tree, tree,
                                                gimple *);
static int value_replacement (basic_block, basic_block,
                              edge, edge, gimple *, tree, tree);
static bool minmax_replacement (basic_block, basic_block,
                                edge, edge, gimple *, tree, tree);
static bool abs_replacement (basic_block, basic_block,
                             edge, edge, gimple *, tree, tree);
static bool cond_removal_in_popcount_clz_ctz_pattern (basic_block, basic_block,
                                                      edge, edge, gimple *,
                                                      tree, tree);
static bool cond_store_replacement (basic_block, basic_block, edge, edge,
                                    hash_set<tree> *);
static bool cond_if_else_store_replacement (basic_block, basic_block, basic_block);
static hash_set<tree> * get_non_trapping ();
static void replace_phi_edge_with_variable (basic_block, edge, gimple *, tree);
static void hoist_adjacent_loads (basic_block, basic_block,
                                  basic_block, basic_block);
static bool gate_hoist_loads (void);

/* This pass tries to transform conditional stores into unconditional
   ones, enabling further simplifications with the simpler then and else
   blocks.  In particular it replaces this:

     bb0:
       if (cond) goto bb2; else goto bb1;
     bb1:
       *p = RHS;
     bb2:

   with

     bb0:
       if (cond) goto bb1; else goto bb2;
     bb1:
       condtmp' = *p;
     bb2:
       condtmp = PHI <RHS, condtmp'>
       *p = condtmp;

   This transformation can only be done under several constraints,
   documented below.  It also replaces:

     bb0:
       if (cond) goto bb2; else goto bb1;
     bb1:
       *p = RHS1;
       goto bb3;
     bb2:
       *p = RHS2;
     bb3:

   with

     bb0:
       if (cond) goto bb3; else goto bb1;
     bb1:
     bb3:
       condtmp = PHI <RHS1, RHS2>
       *p = condtmp;  */
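
/* As a source-level illustration (an editor's sketch, not part of the
   original pass documentation), the first shape above corresponds to

     if (cond)
       *p = x;

   becoming the branchless

     tmp = cond ? x : *p;
     *p = tmp;

   This is only valid when the load from *p cannot introduce a trap
   that the original program did not have; get_non_trapping () below
   computes the set of accesses known not to trap.  */
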
static unsigned int
tree_ssa_cs_elim (void)
{
  unsigned todo;
  /* ???  We are not interested in loop related info, but the following
     will create it, ICEing as we didn't init loops with pre-headers.
     An interfacing issue of find_data_references_in_bb.  */
  loop_optimizer_init (LOOPS_NORMAL);
  scev_initialize ();
  todo = tree_ssa_phiopt_worker (true, false, false);
  scev_finalize ();
  loop_optimizer_finalize ();
  return todo;
}

/* Return the singleton PHI in the SEQ of PHIs for edges E0 and E1.  */

static gphi *
single_non_singleton_phi_for_edges (gimple_seq seq, edge e0, edge e1)
{
  gimple_stmt_iterator i;
  gphi *phi = NULL;
  if (gimple_seq_singleton_p (seq))
    return as_a <gphi *> (gsi_stmt (gsi_start (seq)));
  for (i = gsi_start (seq); !gsi_end_p (i); gsi_next (&i))
    {
      gphi *p = as_a <gphi *> (gsi_stmt (i));
      /* If the PHI arguments are equal then we can skip this PHI.  */
      if (operand_equal_for_phi_arg_p (gimple_phi_arg_def (p, e0->dest_idx),
                                       gimple_phi_arg_def (p, e1->dest_idx)))
        continue;

      /* If we already found a PHI whose two edge arguments differ, then
         there is no singleton for these PHIs; return NULL.  */
      if (phi)
        return NULL;

      phi = p;
    }
  return phi;
}

/* The core routine of conditional store replacement and normal
   phi optimizations.  Both share much of the infrastructure in how
   to match applicable basic block patterns.  DO_STORE_ELIM is true
   when we want to do conditional store replacement, false otherwise.
   DO_HOIST_LOADS is true when we want to hoist adjacent loads out
   of diamond control flow patterns, false otherwise.  */
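
/* For orientation (an editor's sketch, not from the original sources),
   the two CFG shapes matched below are the triangle

        bb0
       /   \
     bb1    |
       \   /
        bb2

   used by the PHI optimizations, and the diamond

        bb0
       /   \
     bb1   bb2
       \   /
        bb3

   used by cond_if_else_store_replacement and hoist_adjacent_loads.  */
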
static unsigned int
tree_ssa_phiopt_worker (bool do_store_elim, bool do_hoist_loads, bool early_p)
{
  basic_block bb;
  basic_block *bb_order;
  unsigned n, i;
  bool cfgchanged = false;
  hash_set<tree> *nontrap = 0;

  if (do_store_elim)
    /* Calculate the set of non-trapping memory accesses.  */
    nontrap = get_non_trapping ();

  /* Search every basic block for COND_EXPR we may be able to optimize.

     We walk the blocks in order that guarantees that a block with
     a single predecessor is processed before the predecessor.
     This ensures that we collapse inner ifs before visiting the
     outer ones, and also that we do not try to visit a removed
     block.  */
  bb_order = single_pred_before_succ_order ();
  n = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS;

  for (i = 0; i < n; i++)
    {
      gimple *cond_stmt;
      gphi *phi;
      basic_block bb1, bb2;
      edge e1, e2;
      tree arg0, arg1;

      bb = bb_order[i];

      cond_stmt = last_stmt (bb);
      /* Check to see if the last statement is a GIMPLE_COND.  */
      if (!cond_stmt
          || gimple_code (cond_stmt) != GIMPLE_COND)
        continue;

      e1 = EDGE_SUCC (bb, 0);
      bb1 = e1->dest;
      e2 = EDGE_SUCC (bb, 1);
      bb2 = e2->dest;

      /* We cannot do the optimization on abnormal edges.  */
      if ((e1->flags & EDGE_ABNORMAL) != 0
          || (e2->flags & EDGE_ABNORMAL) != 0)
        continue;

      /* If bb1 has no successors, or bb2 is NULL or has no successors,
         there is nothing to do.  */
      if (EDGE_COUNT (bb1->succs) == 0
          || bb2 == NULL
          || EDGE_COUNT (bb2->succs) == 0)
        continue;

      /* Find the bb which is the fall through to the other.  */
      if (EDGE_SUCC (bb1, 0)->dest == bb2)
        ;
      else if (EDGE_SUCC (bb2, 0)->dest == bb1)
        {
          std::swap (bb1, bb2);
          std::swap (e1, e2);
        }
      else if (do_store_elim
               && EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest)
        {
          basic_block bb3 = EDGE_SUCC (bb1, 0)->dest;

          if (!single_succ_p (bb1)
              || (EDGE_SUCC (bb1, 0)->flags & EDGE_FALLTHRU) == 0
              || !single_succ_p (bb2)
              || (EDGE_SUCC (bb2, 0)->flags & EDGE_FALLTHRU) == 0
              || EDGE_COUNT (bb3->preds) != 2)
            continue;
          if (cond_if_else_store_replacement (bb1, bb2, bb3))
            cfgchanged = true;
          continue;
        }
      else if (do_hoist_loads
               && EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest)
        {
          basic_block bb3 = EDGE_SUCC (bb1, 0)->dest;

          if (!FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (cond_stmt)))
              && single_succ_p (bb1)
              && single_succ_p (bb2)
              && single_pred_p (bb1)
              && single_pred_p (bb2)
              && EDGE_COUNT (bb->succs) == 2
              && EDGE_COUNT (bb3->preds) == 2
              /* If one edge or the other is dominant, a conditional move
                 is likely to perform worse than the well-predicted branch.  */
              && !predictable_edge_p (EDGE_SUCC (bb, 0))
              && !predictable_edge_p (EDGE_SUCC (bb, 1)))
            hoist_adjacent_loads (bb, bb1, bb2, bb3);
          continue;
        }
      else
        continue;

      e1 = EDGE_SUCC (bb1, 0);

      /* Make sure that bb1 is just a fall through.  */
      if (!single_succ_p (bb1)
          || (e1->flags & EDGE_FALLTHRU) == 0)
        continue;

      /* Also make sure that bb1 only has one predecessor and that it
         is bb.  */
      if (!single_pred_p (bb1)
          || single_pred (bb1) != bb)
        continue;

      if (do_store_elim)
        {
          /* bb1 is the middle block, bb2 the join block, bb the split block,
             e1 the fallthrough edge from bb1 to bb2.  We can't do the
             optimization if the join block has more than two predecessors.  */
          if (EDGE_COUNT (bb2->preds) > 2)
            continue;
          if (cond_store_replacement (bb1, bb2, e1, e2, nontrap))
            cfgchanged = true;
        }
      else
        {
          gimple_seq phis = phi_nodes (bb2);
          gimple_stmt_iterator gsi;
          bool candorest = true;

          /* Value replacement can work with more than one PHI
             so try that first.  */
          if (!early_p)
            for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
              {
                phi = as_a <gphi *> (gsi_stmt (gsi));
                arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
                arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
                if (value_replacement (bb, bb1, e1, e2, phi, arg0, arg1) == 2)
                  {
                    candorest = false;
                    cfgchanged = true;
                    break;
                  }
              }

          if (!candorest)
            continue;

          phi = single_non_singleton_phi_for_edges (phis, e1, e2);
          if (!phi)
            continue;

          arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
          arg1 = gimple_phi_arg_def (phi, e2->dest_idx);

          /* Something is wrong if we cannot find the arguments in the PHI
             node.  */
          gcc_assert (arg0 != NULL_TREE && arg1 != NULL_TREE);

          gphi *newphi = factor_out_conditional_conversion (e1, e2, phi,
                                                            arg0, arg1,
                                                            cond_stmt);
          if (newphi != NULL)
            {
              phi = newphi;
              /* factor_out_conditional_conversion may create a new PHI in
                 BB2 and eliminate an existing PHI in BB2.  Recompute values
                 that may be affected by that change.  */
              arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
              arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
              gcc_assert (arg0 != NULL_TREE && arg1 != NULL_TREE);
            }

          /* Do the replacement of conditional if it can be done.  */
          if (two_value_replacement (bb, bb1, e2, phi, arg0, arg1))
            cfgchanged = true;
          else if (!early_p
                   && conditional_replacement (bb, bb1, e1, e2, phi,
                                               arg0, arg1))
            cfgchanged = true;
          else if (abs_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
            cfgchanged = true;
          else if (!early_p
                   && cond_removal_in_popcount_clz_ctz_pattern (bb, bb1, e1,
                                                                e2, phi, arg0,
                                                                arg1))
            cfgchanged = true;
          else if (minmax_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
            cfgchanged = true;
        }
    }

  free (bb_order);

  if (do_store_elim)
    delete nontrap;
  /* If the CFG has changed, we should clean up the CFG.  */
  if (cfgchanged && do_store_elim)
    {
      /* In cond-store replacement we have added some loads on edges
         and new VOPS (as we moved the store, and created a load).  */
      gsi_commit_edge_inserts ();
      return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
    }
  else if (cfgchanged)
    return TODO_cleanup_cfg;
  return 0;
}

/* Replace PHI node element whose edge is E in block BB with variable NEW.
   Remove the edge from COND_BLOCK which does not lead to BB (COND_BLOCK
   is known to have two edges, one of which must reach BB).  */

static void
replace_phi_edge_with_variable (basic_block cond_block,
                                edge e, gimple *phi, tree new_tree)
{
  basic_block bb = gimple_bb (phi);
  basic_block block_to_remove;
  gimple_stmt_iterator gsi;

  /* Change the PHI argument to new.  */
  SET_USE (PHI_ARG_DEF_PTR (phi, e->dest_idx), new_tree);

  /* Remove the empty basic block.  */
  if (EDGE_SUCC (cond_block, 0)->dest == bb)
    {
      EDGE_SUCC (cond_block, 0)->flags |= EDGE_FALLTHRU;
      EDGE_SUCC (cond_block, 0)->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
      EDGE_SUCC (cond_block, 0)->probability = profile_probability::always ();

      block_to_remove = EDGE_SUCC (cond_block, 1)->dest;
    }
  else
    {
      EDGE_SUCC (cond_block, 1)->flags |= EDGE_FALLTHRU;
      EDGE_SUCC (cond_block, 1)->flags
        &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
      EDGE_SUCC (cond_block, 1)->probability = profile_probability::always ();

      block_to_remove = EDGE_SUCC (cond_block, 0)->dest;
    }
  delete_basic_block (block_to_remove);

  /* Eliminate the COND_EXPR at the end of COND_BLOCK.  */
  gsi = gsi_last_bb (cond_block);
  gsi_remove (&gsi, true);

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file,
             "COND_EXPR in block %d and PHI in block %d converted to straightline code.\n",
             cond_block->index,
             bb->index);
}

/* PR66726: Factor conversion out of COND_EXPR.  If the arguments of the PHI
   stmt are CONVERT_STMT, factor out the conversion and perform the conversion
   to the result of PHI stmt.  COND_STMT is the controlling predicate.
   Return the newly-created PHI, if any.  */

static gphi *
factor_out_conditional_conversion (edge e0, edge e1, gphi *phi,
                                   tree arg0, tree arg1, gimple *cond_stmt)
{
  gimple *arg0_def_stmt = NULL, *arg1_def_stmt = NULL, *new_stmt;
  tree new_arg0 = NULL_TREE, new_arg1 = NULL_TREE;
  tree temp, result;
  gphi *newphi;
  gimple_stmt_iterator gsi, gsi_for_def;
  location_t locus = gimple_location (phi);
  enum tree_code convert_code;

  /* Handle only PHI statements with two arguments.  TODO: If all
     other arguments to PHI are INTEGER_CST or if their defining
     statements have the same unary operation, we can handle more
     than two arguments too.  */
  if (gimple_phi_num_args (phi) != 2)
    return NULL;

  /* First canonicalize to simplify tests.  */
  if (TREE_CODE (arg0) != SSA_NAME)
    {
      std::swap (arg0, arg1);
      std::swap (e0, e1);
    }

  if (TREE_CODE (arg0) != SSA_NAME
      || (TREE_CODE (arg1) != SSA_NAME
          && TREE_CODE (arg1) != INTEGER_CST))
    return NULL;

  /* Check if arg0 is an SSA_NAME and the stmt which defines arg0 is
     a conversion.  */
  arg0_def_stmt = SSA_NAME_DEF_STMT (arg0);
  if (!gimple_assign_cast_p (arg0_def_stmt))
    return NULL;

  /* Use the RHS as new_arg0.  */
  convert_code = gimple_assign_rhs_code (arg0_def_stmt);
  new_arg0 = gimple_assign_rhs1 (arg0_def_stmt);
  if (convert_code == VIEW_CONVERT_EXPR)
    {
      new_arg0 = TREE_OPERAND (new_arg0, 0);
      if (!is_gimple_reg_type (TREE_TYPE (new_arg0)))
        return NULL;
    }

  if (TREE_CODE (arg1) == SSA_NAME)
    {
      /* Check if arg1 is an SSA_NAME and the stmt which defines arg1
         is a conversion.  */
      arg1_def_stmt = SSA_NAME_DEF_STMT (arg1);
      if (!is_gimple_assign (arg1_def_stmt)
          || gimple_assign_rhs_code (arg1_def_stmt) != convert_code)
        return NULL;

      /* Use the RHS as new_arg1.  */
      new_arg1 = gimple_assign_rhs1 (arg1_def_stmt);
      if (convert_code == VIEW_CONVERT_EXPR)
        new_arg1 = TREE_OPERAND (new_arg1, 0);
    }
  else
    {
      /* If arg1 is an INTEGER_CST, fold it to new type.  */
      if (INTEGRAL_TYPE_P (TREE_TYPE (new_arg0))
          && int_fits_type_p (arg1, TREE_TYPE (new_arg0)))
        {
          if (gimple_assign_cast_p (arg0_def_stmt))
            {
              /* For the INTEGER_CST case, we are just moving the
                 conversion from one place to another, which can often
                 hurt as the conversion moves further away from the
                 statement that computes the value.  So, perform this
                 only if new_arg0 is an operand of COND_STMT, or
                 if arg0_def_stmt is the only non-debug stmt in
                 its basic block, because then it is possible this
                 could enable further optimizations (minmax replacement
                 etc.).  See PR71016.  */
              if (new_arg0 != gimple_cond_lhs (cond_stmt)
                  && new_arg0 != gimple_cond_rhs (cond_stmt)
                  && gimple_bb (arg0_def_stmt) == e0->src)
                {
                  gsi = gsi_for_stmt (arg0_def_stmt);
                  gsi_prev_nondebug (&gsi);
                  if (!gsi_end_p (gsi))
                    {
                      if (gassign *assign
                            = dyn_cast <gassign *> (gsi_stmt (gsi)))
                        {
                          tree lhs = gimple_assign_lhs (assign);
                          enum tree_code ass_code
                            = gimple_assign_rhs_code (assign);
                          if (ass_code != MAX_EXPR && ass_code != MIN_EXPR)
                            return NULL;
                          if (lhs != gimple_assign_rhs1 (arg0_def_stmt))
                            return NULL;
                          gsi_prev_nondebug (&gsi);
                          if (!gsi_end_p (gsi))
                            return NULL;
                        }
                      else
                        return NULL;
                    }
                  gsi = gsi_for_stmt (arg0_def_stmt);
                  gsi_next_nondebug (&gsi);
                  if (!gsi_end_p (gsi))
                    return NULL;
                }
              new_arg1 = fold_convert (TREE_TYPE (new_arg0), arg1);
            }
          else
            return NULL;
        }
      else
        return NULL;
    }

  /* If arg0/arg1 have > 1 use, then this transformation actually increases
     the number of expressions evaluated at runtime.  */
  if (!has_single_use (arg0)
      || (arg1_def_stmt && !has_single_use (arg1)))
    return NULL;

  /* If types of new_arg0 and new_arg1 are different bailout.  */
  if (!types_compatible_p (TREE_TYPE (new_arg0), TREE_TYPE (new_arg1)))
    return NULL;

  /* Create a new PHI stmt.  */
  result = PHI_RESULT (phi);
  temp = make_ssa_name (TREE_TYPE (new_arg0), NULL);
  newphi = create_phi_node (temp, gimple_bb (phi));

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "PHI ");
      print_generic_expr (dump_file, gimple_phi_result (phi));
      fprintf (dump_file,
               " changed to factor conversion out from COND_EXPR.\n");
      fprintf (dump_file, "New stmt with CAST that defines ");
      print_generic_expr (dump_file, result);
      fprintf (dump_file, ".\n");
    }

  /* Remove the old cast(s) that have a single use.  */
  gsi_for_def = gsi_for_stmt (arg0_def_stmt);
  gsi_remove (&gsi_for_def, true);
  release_defs (arg0_def_stmt);

  if (arg1_def_stmt)
    {
      gsi_for_def = gsi_for_stmt (arg1_def_stmt);
      gsi_remove (&gsi_for_def, true);
      release_defs (arg1_def_stmt);
    }

  add_phi_arg (newphi, new_arg0, e0, locus);
  add_phi_arg (newphi, new_arg1, e1, locus);

  /* Create the conversion stmt and insert it.  */
  if (convert_code == VIEW_CONVERT_EXPR)
    {
      temp = fold_build1 (VIEW_CONVERT_EXPR, TREE_TYPE (result), temp);
      new_stmt = gimple_build_assign (result, temp);
    }
  else
    new_stmt = gimple_build_assign (result, convert_code, temp);
  gsi = gsi_after_labels (gimple_bb (phi));
  gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);

  /* Remove the original PHI stmt.  */
  gsi = gsi_for_stmt (phi);
  gsi_remove (&gsi, true);
  return newphi;
}

/* Optimize
   # x_5 in range [cst1, cst2] where cst2 = cst1 + 1
   if (x_5 op cstN) # where op is == or != and N is 1 or 2
     goto bb3;
   else
     goto bb4;
   bb3:
   bb4:
   # r_6 = PHI<cst3(2), cst4(3)> # where cst3 == cst4 + 1 or cst4 == cst3 + 1

   to r_6 = x_5 + (min (cst3, cst4) - cst1) or
   r_6 = (min (cst3, cst4) + cst1) - x_5 depending on op, N and which
   of cst3 and cst4 is smaller.  */
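
/* A concrete instance (an editor's sketch, not from the original
   sources): if x_5 is known to be in the range [0, 1], then

     r_6 = x_5 == 0 ? 3 : 4;

   becomes r_6 = x_5 + 3, removing both the branch and the PHI.  */
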
static bool
two_value_replacement (basic_block cond_bb, basic_block middle_bb,
                       edge e1, gphi *phi, tree arg0, tree arg1)
{
  /* Only look for adjacent integer constants.  */
  if (!INTEGRAL_TYPE_P (TREE_TYPE (arg0))
      || !INTEGRAL_TYPE_P (TREE_TYPE (arg1))
      || TREE_CODE (arg0) != INTEGER_CST
      || TREE_CODE (arg1) != INTEGER_CST
      || (tree_int_cst_lt (arg0, arg1)
          ? wi::to_widest (arg0) + 1 != wi::to_widest (arg1)
          : wi::to_widest (arg1) + 1 != wi::to_widest (arg0)))
    return false;

  if (!empty_block_p (middle_bb))
    return false;

  gimple *stmt = last_stmt (cond_bb);
  tree lhs = gimple_cond_lhs (stmt);
  tree rhs = gimple_cond_rhs (stmt);

  if (TREE_CODE (lhs) != SSA_NAME
      || !INTEGRAL_TYPE_P (TREE_TYPE (lhs))
      || TREE_CODE (TREE_TYPE (lhs)) == BOOLEAN_TYPE
      || TREE_CODE (rhs) != INTEGER_CST)
    return false;

  switch (gimple_cond_code (stmt))
    {
    case EQ_EXPR:
    case NE_EXPR:
      break;
    default:
      return false;
    }

  wide_int min, max;
  if (get_range_info (lhs, &min, &max) != VR_RANGE
      || min + 1 != max
      || (wi::to_wide (rhs) != min
          && wi::to_wide (rhs) != max))
    return false;

  /* We need to know which is the true edge and which is the false
     edge so that we know when to invert the condition below.  */
  edge true_edge, false_edge;
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
  if ((gimple_cond_code (stmt) == EQ_EXPR)
      ^ (wi::to_wide (rhs) == max)
      ^ (e1 == false_edge))
    std::swap (arg0, arg1);

  tree type;
  if (TYPE_PRECISION (TREE_TYPE (lhs)) == TYPE_PRECISION (TREE_TYPE (arg0)))
    {
      /* Avoid performing the arithmetics in bool type which has different
         semantics, otherwise prefer unsigned types from the two with
         the same precision.  */
      if (TREE_CODE (TREE_TYPE (arg0)) == BOOLEAN_TYPE
          || !TYPE_UNSIGNED (TREE_TYPE (arg0)))
        type = TREE_TYPE (lhs);
      else
        type = TREE_TYPE (arg0);
    }
  else if (TYPE_PRECISION (TREE_TYPE (lhs)) > TYPE_PRECISION (TREE_TYPE (arg0)))
    type = TREE_TYPE (lhs);
  else
    type = TREE_TYPE (arg0);

  min = wide_int::from (min, TYPE_PRECISION (type),
                        TYPE_SIGN (TREE_TYPE (lhs)));
  wide_int a = wide_int::from (wi::to_wide (arg0), TYPE_PRECISION (type),
                               TYPE_SIGN (TREE_TYPE (arg0)));
  enum tree_code code;
  wi::overflow_type ovf;
  if (tree_int_cst_lt (arg0, arg1))
    {
      code = PLUS_EXPR;
      a -= min;
      if (!TYPE_UNSIGNED (type))
        {
          /* lhs is known to be in range [min, min+1] and we want to add a
             to it.  Check if that operation can overflow for those 2 values
             and if yes, force unsigned type.  */
          wi::add (min + (wi::neg_p (a) ? 0 : 1), a, SIGNED, &ovf);
          if (ovf)
            type = unsigned_type_for (type);
        }
    }
  else
    {
      code = MINUS_EXPR;
      a += min;
      if (!TYPE_UNSIGNED (type))
        {
          /* lhs is known to be in range [min, min+1] and we want to subtract
             it from a.  Check if that operation can overflow for those 2
             values and if yes, force unsigned type.  */
          wi::sub (a, min + (wi::neg_p (min) ? 0 : 1), SIGNED, &ovf);
          if (ovf)
            type = unsigned_type_for (type);
        }
    }

  tree arg = wide_int_to_tree (type, a);
  gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
  if (!useless_type_conversion_p (type, TREE_TYPE (lhs)))
    lhs = gimplify_build1 (&gsi, NOP_EXPR, type, lhs);
  tree new_rhs;
  if (code == PLUS_EXPR)
    new_rhs = gimplify_build2 (&gsi, PLUS_EXPR, type, lhs, arg);
  else
    new_rhs = gimplify_build2 (&gsi, MINUS_EXPR, type, arg, lhs);
  if (!useless_type_conversion_p (TREE_TYPE (arg0), type))
    new_rhs = gimplify_build1 (&gsi, NOP_EXPR, TREE_TYPE (arg0), new_rhs);

  replace_phi_edge_with_variable (cond_bb, e1, phi, new_rhs);

  /* Note that we optimized this PHI.  */
  return true;
}

/* The function conditional_replacement does the main work of doing the
   conditional replacement.  Return true if the replacement is done.
   Otherwise return false.
   BB is the basic block where the replacement is going to be done.  ARG0
   is argument 0 from PHI.  Likewise for ARG1.  */
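
/* For instance (an editor's sketch, not from the original sources),

     r = (a < b) ? 1 : 0;

   becomes r = (int) (a < b), and

     r = (a < b) ? -1 : 0;

   becomes r = -(int) (a < b), replacing the branch by the comparison
   result itself, possibly converted and negated.  */
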
static bool
conditional_replacement (basic_block cond_bb, basic_block middle_bb,
                         edge e0, edge e1, gphi *phi,
                         tree arg0, tree arg1)
{
  tree result;
  gimple *stmt;
  gassign *new_stmt;
  tree cond;
  gimple_stmt_iterator gsi;
  edge true_edge, false_edge;
  tree new_var, new_var2;
  bool neg;

  /* FIXME: Gimplification of complex type is too hard for now.  */
  /* We aren't prepared to handle vectors either (and it is a question
     if it would be worthwhile anyway).  */
  if (!(INTEGRAL_TYPE_P (TREE_TYPE (arg0))
        || POINTER_TYPE_P (TREE_TYPE (arg0)))
      || !(INTEGRAL_TYPE_P (TREE_TYPE (arg1))
           || POINTER_TYPE_P (TREE_TYPE (arg1))))
    return false;

  /* If the PHI arguments are the constants 0 and 1, or 0 and -1, then
     convert the PHI to the conditional.  */
  if ((integer_zerop (arg0) && integer_onep (arg1))
      || (integer_zerop (arg1) && integer_onep (arg0)))
    neg = false;
  else if ((integer_zerop (arg0) && integer_all_onesp (arg1))
           || (integer_zerop (arg1) && integer_all_onesp (arg0)))
    neg = true;
  else
    return false;

  if (!empty_block_p (middle_bb))
    return false;

  /* At this point we know we have a GIMPLE_COND with two successors.
     One successor is BB, the other successor is an empty block which
     falls through into BB.

     There is a single PHI node at the join point (BB) and its arguments
     are constants (0, 1) or (0, -1).

     So, given the condition COND, and the two PHI arguments, we can
     rewrite this PHI into non-branching code:

       dest = (COND) or dest = COND'

     We use the condition as-is if the argument associated with the
     true edge has the value one or the argument associated with the
     false edge has the value zero.  Note that those conditions are not
     the same since only one of the outgoing edges from the GIMPLE_COND
     will directly reach BB and thus be associated with an argument.  */

  stmt = last_stmt (cond_bb);
  result = PHI_RESULT (phi);

  /* To handle special cases like floating point comparison, it is easier and
     less error-prone to build a tree and gimplify it on the fly though it is
     less efficient.  */
  cond = fold_build2_loc (gimple_location (stmt),
                          gimple_cond_code (stmt), boolean_type_node,
                          gimple_cond_lhs (stmt), gimple_cond_rhs (stmt));

  /* We need to know which is the true edge and which is the false
     edge so that we know when to invert the condition below.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
  if ((e0 == true_edge && integer_zerop (arg0))
      || (e0 == false_edge && !integer_zerop (arg0))
      || (e1 == true_edge && integer_zerop (arg1))
      || (e1 == false_edge && !integer_zerop (arg1)))
    cond = fold_build1_loc (gimple_location (stmt),
                            TRUTH_NOT_EXPR, TREE_TYPE (cond), cond);

  if (neg)
    {
      cond = fold_convert_loc (gimple_location (stmt),
                               TREE_TYPE (result), cond);
      cond = fold_build1_loc (gimple_location (stmt),
                              NEGATE_EXPR, TREE_TYPE (cond), cond);
    }

  /* Insert our new statements at the end of conditional block before the
     COND_STMT.  */
  gsi = gsi_for_stmt (stmt);
  new_var = force_gimple_operand_gsi (&gsi, cond, true, NULL, true,
                                      GSI_SAME_STMT);

  if (!useless_type_conversion_p (TREE_TYPE (result), TREE_TYPE (new_var)))
    {
      location_t locus_0, locus_1;

      new_var2 = make_ssa_name (TREE_TYPE (result));
      new_stmt = gimple_build_assign (new_var2, CONVERT_EXPR, new_var);
      gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
      new_var = new_var2;

      /* Set the locus to the first argument, unless it doesn't have one.  */
      locus_0 = gimple_phi_arg_location (phi, 0);
      locus_1 = gimple_phi_arg_location (phi, 1);
      if (locus_0 == UNKNOWN_LOCATION)
        locus_0 = locus_1;
      gimple_set_location (new_stmt, locus_0);
    }

  replace_phi_edge_with_variable (cond_bb, e1, phi, new_var);

  /* Note that we optimized this PHI.  */
  return true;
}

/* Update *ARG which is defined in STMT so that it contains the
   computed value if that seems profitable.  Return true if the
   statement is made dead by that rewriting.  */

static bool
jump_function_from_stmt (tree *arg, gimple *stmt)
{
  enum tree_code code = gimple_assign_rhs_code (stmt);
  if (code == ADDR_EXPR)
    {
      /* For arg = &p->i transform it to p, if possible.  */
      tree rhs1 = gimple_assign_rhs1 (stmt);
      poly_int64 offset;
      tree tem = get_addr_base_and_unit_offset (TREE_OPERAND (rhs1, 0),
                                                &offset);
      if (tem
          && TREE_CODE (tem) == MEM_REF
          && known_eq (mem_ref_offset (tem) + offset, 0))
        {
          *arg = TREE_OPERAND (tem, 0);
          return true;
        }
    }
  /* TODO: Much like IPA-CP jump-functions we want to handle constant
     additions symbolically here, and we'd need to update the comparison
     code that compares the arg + cst tuples in our caller.  For now the
     code above exactly handles the VEC_BASE pattern from vec.h.  */
  return false;
}

/* RHS is a source argument in a BIT_AND_EXPR which feeds a conditional
   of the form SSA_NAME NE 0.

   If RHS is fed by a simple EQ_EXPR comparison of two values, see if
   the two input values of the EQ_EXPR match arg0 and arg1.

   If so update *code and return TRUE.  Otherwise return FALSE.  */

static bool
rhs_is_fed_for_value_replacement (const_tree arg0, const_tree arg1,
                                  enum tree_code *code, const_tree rhs)
{
  /* Obviously if RHS is not an SSA_NAME, we can't look at the defining
     statement.  */
  if (TREE_CODE (rhs) == SSA_NAME)
    {
      gimple *def1 = SSA_NAME_DEF_STMT (rhs);

      /* Verify the defining statement has an EQ_EXPR on the RHS.  */
      if (is_gimple_assign (def1) && gimple_assign_rhs_code (def1) == EQ_EXPR)
        {
          /* Finally verify the source operands of the EQ_EXPR are equal
             to arg0 and arg1.  */
          tree op0 = gimple_assign_rhs1 (def1);
          tree op1 = gimple_assign_rhs2 (def1);
          if ((operand_equal_for_phi_arg_p (arg0, op0)
               && operand_equal_for_phi_arg_p (arg1, op1))
              || (operand_equal_for_phi_arg_p (arg0, op1)
                  && operand_equal_for_phi_arg_p (arg1, op0)))
            {
              /* We will perform the optimization.  */
              *code = gimple_assign_rhs_code (def1);
              return true;
            }
        }
    }
  return false;
}

/* Return TRUE if arg0/arg1 are equal to the rhs/lhs or lhs/rhs of COND.

   Also return TRUE if arg0/arg1 are equal to the source arguments of an
   EQ comparison feeding a BIT_AND_EXPR which feeds COND.

   Return FALSE otherwise.  */
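
/* As an illustration (an editor's sketch, not from the original
   sources), the second form covers GIMPLE like

     _1 = a_2 == b_3;
     _4 = _1 & c_5;
     if (_4 != 0) ...
     # x_6 = PHI <a_2, b_3>

   where the equality test feeding the BIT_AND_EXPR still lets value
   replacement treat the condition as a_2 == b_3.  */
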
static bool
operand_equal_for_value_replacement (const_tree arg0, const_tree arg1,
                                     enum tree_code *code, gimple *cond)
{
  gimple *def;
  tree lhs = gimple_cond_lhs (cond);
  tree rhs = gimple_cond_rhs (cond);

  if ((operand_equal_for_phi_arg_p (arg0, lhs)
       && operand_equal_for_phi_arg_p (arg1, rhs))
      || (operand_equal_for_phi_arg_p (arg1, lhs)
          && operand_equal_for_phi_arg_p (arg0, rhs)))
    return true;

  /* Now handle more complex case where we have an EQ comparison
     which feeds a BIT_AND_EXPR which feeds COND.

     First verify that COND is of the form SSA_NAME NE 0.  */
  if (*code != NE_EXPR || !integer_zerop (rhs)
      || TREE_CODE (lhs) != SSA_NAME)
    return false;

  /* Now ensure that SSA_NAME is set by a BIT_AND_EXPR.  */
  def = SSA_NAME_DEF_STMT (lhs);
  if (!is_gimple_assign (def) || gimple_assign_rhs_code (def) != BIT_AND_EXPR)
    return false;

  /* Now verify arg0/arg1 correspond to the source arguments of an
     EQ comparison feeding the BIT_AND_EXPR.  */

  tree tmp = gimple_assign_rhs1 (def);
  if (rhs_is_fed_for_value_replacement (arg0, arg1, code, tmp))
    return true;

  tmp = gimple_assign_rhs2 (def);
  if (rhs_is_fed_for_value_replacement (arg0, arg1, code, tmp))
    return true;

  return false;
}

/* Returns true if ARG is a neutral element for operation CODE
   on the RIGHT side.  */

static bool
neutral_element_p (tree_code code, tree arg, bool right)
{
  switch (code)
    {
    case PLUS_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
      return integer_zerop (arg);

    case LROTATE_EXPR:
    case RROTATE_EXPR:
    case LSHIFT_EXPR:
    case RSHIFT_EXPR:
    case MINUS_EXPR:
    case POINTER_PLUS_EXPR:
      return right && integer_zerop (arg);

    case MULT_EXPR:
      return integer_onep (arg);

    case TRUNC_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case EXACT_DIV_EXPR:
      return right && integer_onep (arg);

    case BIT_AND_EXPR:
      return integer_all_onesp (arg);

    default:
      return false;
    }
}

/* Returns true if ARG is an absorbing element for operation CODE.  */

static bool
absorbing_element_p (tree_code code, tree arg, bool right, tree rval)
{
  switch (code)
    {
    case BIT_IOR_EXPR:
      return integer_all_onesp (arg);

    case MULT_EXPR:
    case BIT_AND_EXPR:
      return integer_zerop (arg);

    case LSHIFT_EXPR:
    case RSHIFT_EXPR:
    case LROTATE_EXPR:
    case RROTATE_EXPR:
      return !right && integer_zerop (arg);

    case TRUNC_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case EXACT_DIV_EXPR:
    case TRUNC_MOD_EXPR:
    case CEIL_MOD_EXPR:
    case FLOOR_MOD_EXPR:
    case ROUND_MOD_EXPR:
      return (!right
              && integer_zerop (arg)
              && tree_single_nonzero_warnv_p (rval, NULL));

    default:
      return false;
    }
}

/* The function value_replacement does the main work of doing the value
   replacement.  Return non-zero if the replacement is done.  Otherwise
   return 0.  If we remove the middle basic block, return 2.
   BB is the basic block where the replacement is going to be done.  ARG0
   is argument 0 from the PHI.  Likewise for ARG1.  */
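
/* The canonical case (an editor's sketch, not from the original
   sources) is

     r = (a == b) ? a : b;

   which is just b (and likewise just a for !=), so the conditional
   and the PHI can be replaced by the argument itself.  */
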
static int
value_replacement (basic_block cond_bb, basic_block middle_bb,
                   edge e0, edge e1, gimple *phi,
                   tree arg0, tree arg1)
{
  gimple_stmt_iterator gsi;
  gimple *cond;
  edge true_edge, false_edge;
  enum tree_code code;
  bool empty_or_with_defined_p = true;

  /* If the type says honor signed zeros we cannot do this
     optimization.  */
  if (HONOR_SIGNED_ZEROS (arg1))
    return 0;

  /* If there is a statement in MIDDLE_BB that defines one of the PHI
     arguments, then adjust arg0 or arg1.  */
  gsi = gsi_start_nondebug_after_labels_bb (middle_bb);
  while (!gsi_end_p (gsi))
    {
      gimple *stmt = gsi_stmt (gsi);
      tree lhs;
      gsi_next_nondebug (&gsi);
      if (!is_gimple_assign (stmt))
        {
          if (gimple_code (stmt) != GIMPLE_PREDICT
              && gimple_code (stmt) != GIMPLE_NOP)
            empty_or_with_defined_p = false;
          continue;
        }
      /* Now try to adjust arg0 or arg1 according to the computation
         in the statement.  */
      lhs = gimple_assign_lhs (stmt);
      if (!(lhs == arg0
            && jump_function_from_stmt (&arg0, stmt))
          || (lhs == arg1
              && jump_function_from_stmt (&arg1, stmt)))
        empty_or_with_defined_p = false;
    }

  cond = last_stmt (cond_bb);
  code = gimple_cond_code (cond);

  /* This transformation is only valid for equality comparisons.  */
  if (code != NE_EXPR && code != EQ_EXPR)
    return 0;

  /* We need to know which is the true edge and which is the false
     edge so that we know which PHI argument goes with which edge.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);

  /* At this point we know we have a COND_EXPR with two successors.
     One successor is BB, the other successor is an empty block which
     falls through into BB.

     The condition for the COND_EXPR is known to be NE_EXPR or EQ_EXPR.

     There is a single PHI node at the join point (BB) with two arguments.

     We now need to verify that the two arguments in the PHI node match
     the two arguments to the equality comparison.  */

  if (operand_equal_for_value_replacement (arg0, arg1, &code, cond))
    {
      edge e;
      tree arg;

      /* For NE_EXPR, we want to build an assignment result = arg where
         arg is the PHI argument associated with the true edge.  For
         EQ_EXPR we want the PHI argument associated with the false edge.  */
      e = (code == NE_EXPR ? true_edge : false_edge);

      /* Unfortunately, E may not reach BB (it may instead have gone to
         OTHER_BLOCK).  If that is the case, then we want the single outgoing
         edge from OTHER_BLOCK which reaches BB and represents the desired
         path from COND_BLOCK.  */
      if (e->dest == middle_bb)
        e = single_succ_edge (e->dest);

      /* Now we know the incoming edge to BB that has the argument for the
         RHS of our new assignment statement.  */
      if (e0 == e)
        arg = arg0;
      else
        arg = arg1;

      /* If the middle basic block was empty or only defines the PHI
         arguments, and this is the only PHI whose arguments differ for
         the edges e0 and e1, then we can remove the middle basic block.  */
      if (empty_or_with_defined_p
          && single_non_singleton_phi_for_edges (phi_nodes (gimple_bb (phi)),
                                                 e0, e1) == phi)
        {
          replace_phi_edge_with_variable (cond_bb, e1, phi, arg);
          /* Note that we optimized this PHI.  */
          return 2;
        }
      else
        {
          /* Replace the PHI arguments with arg.  */
          SET_PHI_ARG_DEF (phi, e0->dest_idx, arg);
          SET_PHI_ARG_DEF (phi, e1->dest_idx, arg);
          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "PHI ");
              print_generic_expr (dump_file, gimple_phi_result (phi));
              fprintf (dump_file, " reduced for COND_EXPR in block %d to ",
                       cond_bb->index);
              print_generic_expr (dump_file, arg);
              fprintf (dump_file, ".\n");
            }
          return 1;
        }
    }

  /* Now optimize (x != 0) ? x + y : y to just x + y.  */
  gsi = gsi_last_nondebug_bb (middle_bb);
  if (gsi_end_p (gsi))
    return 0;

  gimple *assign = gsi_stmt (gsi);
  if (!is_gimple_assign (assign)
      || gimple_assign_rhs_class (assign) != GIMPLE_BINARY_RHS
      || (!INTEGRAL_TYPE_P (TREE_TYPE (arg0))
          && !POINTER_TYPE_P (TREE_TYPE (arg0))))
    return 0;

  /* Punt if there are (degenerate) PHIs in middle_bb; there should not be.  */
  if (!gimple_seq_empty_p (phi_nodes (middle_bb)))
    return 0;

  /* Allow up to 2 cheap preparation statements that prepare argument
     for assign, e.g.:
      if (y_4 != 0)
        goto <bb 3>;
      else
        goto <bb 4>;
     <bb 3>:
      _1 = (int) y_4;
      iftmp.0_6 = x_5(D) r<< _1;
     <bb 4>:
      # iftmp.0_2 = PHI <iftmp.0_6(3), x_5(D)(2)>
     or:
      if (y_3(D) == 0)
        goto <bb 4>;
      else
        goto <bb 3>;
     <bb 3>:
      y_4 = y_3(D) & 31;
      _1 = (int) y_4;
      _6 = x_5(D) r<< _1;
     <bb 4>:
      # _2 = PHI <x_5(D)(2), _6(3)>  */
  gimple *prep_stmt[2] = { NULL, NULL };
  int prep_cnt;
  for (prep_cnt = 0; ; prep_cnt++)
    {
      gsi_prev_nondebug (&gsi);
      if (gsi_end_p (gsi))
        break;

      gimple *g = gsi_stmt (gsi);
      if (gimple_code (g) == GIMPLE_LABEL)
        break;

      if (prep_cnt == 2 || !is_gimple_assign (g))
        return 0;

      tree lhs = gimple_assign_lhs (g);
      tree rhs1 = gimple_assign_rhs1 (g);
      use_operand_p use_p;
      gimple *use_stmt;
      if (TREE_CODE (lhs) != SSA_NAME
          || TREE_CODE (rhs1) != SSA_NAME
          || !INTEGRAL_TYPE_P (TREE_TYPE (lhs))
          || !INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
          || !single_imm_use (lhs, &use_p, &use_stmt)
          || use_stmt != (prep_cnt ? prep_stmt[prep_cnt - 1] : assign))
        return 0;
      switch (gimple_assign_rhs_code (g))
        {
        CASE_CONVERT:
          break;
        case PLUS_EXPR:
        case BIT_AND_EXPR:
        case BIT_IOR_EXPR:
        case BIT_XOR_EXPR:
          if (TREE_CODE (gimple_assign_rhs2 (g)) != INTEGER_CST)
            return 0;
          break;
        default:
          return 0;
        }
      prep_stmt[prep_cnt] = g;
    }

  /* Only transform if it removes the condition.  */
  if (!single_non_singleton_phi_for_edges (phi_nodes (gimple_bb (phi)), e0, e1))
    return 0;

  /* Size-wise, this is always profitable.  */
  if (optimize_bb_for_speed_p (cond_bb)
      /* The special case is useless if it has a low probability.  */
      && profile_status_for_fn (cfun) != PROFILE_ABSENT
      && EDGE_PRED (middle_bb, 0)->probability < profile_probability::even ()
      /* If assign is cheap, there is no point avoiding it.  */
      && estimate_num_insns_seq (bb_seq (middle_bb), &eni_time_weights)
         >= 3 * estimate_num_insns (cond, &eni_time_weights))
    return 0;

  tree lhs = gimple_assign_lhs (assign);
  tree rhs1 = gimple_assign_rhs1 (assign);
  tree rhs2 = gimple_assign_rhs2 (assign);
  enum tree_code code_def = gimple_assign_rhs_code (assign);
  tree cond_lhs = gimple_cond_lhs (cond);
  tree cond_rhs = gimple_cond_rhs (cond);

  /* Propagate the cond_rhs constant through preparation stmts,
     make sure UB isn't invoked while doing that.  */
  for (int i = prep_cnt - 1; i >= 0; --i)
    {
      gimple *g = prep_stmt[i];
      tree grhs1 = gimple_assign_rhs1 (g);
      if (!operand_equal_for_phi_arg_p (cond_lhs, grhs1))
        return 0;
      cond_lhs = gimple_assign_lhs (g);
      cond_rhs = fold_convert (TREE_TYPE (grhs1), cond_rhs);
      if (TREE_CODE (cond_rhs) != INTEGER_CST
          || TREE_OVERFLOW (cond_rhs))
        return 0;
      if (gimple_assign_rhs_class (g) == GIMPLE_BINARY_RHS)
        {
          cond_rhs = int_const_binop (gimple_assign_rhs_code (g), cond_rhs,
                                      gimple_assign_rhs2 (g));
          if (TREE_OVERFLOW (cond_rhs))
            return 0;
        }
      cond_rhs = fold_convert (TREE_TYPE (cond_lhs), cond_rhs);
      if (TREE_CODE (cond_rhs) != INTEGER_CST
          || TREE_OVERFLOW (cond_rhs))
        return 0;
    }

  if (((code == NE_EXPR && e1 == false_edge)
       || (code == EQ_EXPR && e1 == true_edge))
      && arg0 == lhs
      && ((arg1 == rhs1
           && operand_equal_for_phi_arg_p (rhs2, cond_lhs)
           && neutral_element_p (code_def, cond_rhs, true))
          || (arg1 == rhs2
              && operand_equal_for_phi_arg_p (rhs1, cond_lhs)
              && neutral_element_p (code_def, cond_rhs, false))
          || (operand_equal_for_phi_arg_p (arg1, cond_rhs)
              && ((operand_equal_for_phi_arg_p (rhs2, cond_lhs)
                   && absorbing_element_p (code_def, cond_rhs, true, rhs2))
                  || (operand_equal_for_phi_arg_p (rhs1, cond_lhs)
                      && absorbing_element_p (code_def,
                                              cond_rhs, false, rhs2))))))
    {
      gsi = gsi_for_stmt (cond);
      /* Moving ASSIGN might change VR of lhs, e.g. when moving u_6
         def-stmt in:
         if (n_5 != 0)
           goto <bb 3>;
         else
           goto <bb 4>;

         <bb 3>:
         # RANGE [0, 4294967294]
         u_6 = n_5 + 4294967295;

         <bb 4>:
         # u_3 = PHI <u_6(3), 4294967295(2)>  */
      reset_flow_sensitive_info (lhs);
      if (INTEGRAL_TYPE_P (TREE_TYPE (lhs)))
        {
          /* If available, we can use VR of phi result at least.  */
          tree phires = gimple_phi_result (phi);
          struct range_info_def *phires_range_info
            = SSA_NAME_RANGE_INFO (phires);
          if (phires_range_info)
            duplicate_ssa_name_range_info (lhs, SSA_NAME_RANGE_TYPE (phires),
                                           phires_range_info);
        }
      gimple_stmt_iterator gsi_from;
      for (int i = prep_cnt - 1; i >= 0; --i)
        {
          tree plhs = gimple_assign_lhs (prep_stmt[i]);
          reset_flow_sensitive_info (plhs);
          gsi_from = gsi_for_stmt (prep_stmt[i]);
          gsi_move_before (&gsi_from, &gsi);
        }
      gsi_from = gsi_for_stmt (assign);
      gsi_move_before (&gsi_from, &gsi);
      replace_phi_edge_with_variable (cond_bb, e1, phi, lhs);
      return 2;
    }

  return 0;
}

/* The function minmax_replacement does the main work of doing the minmax
   replacement.  Return true if the replacement is done.  Otherwise return
   false.
   BB is the basic block where the replacement is going to be done.  ARG0
   is argument 0 from the PHI.  Likewise for ARG1.  */
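
/* The basic shapes (an editor's sketch, not from the original
   sources) are

     r = (a < b) ? a : b;   becomes MIN_EXPR <a, b>
     r = (a < b) ? b : a;   becomes MAX_EXPR <a, b>

   The HONOR_NANS/HONOR_SIGNED_ZEROS checks below rule this out for
   floating point types where the selection is not equivalent to
   min/max.  */
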
static bool
minmax_replacement (basic_block cond_bb, basic_block middle_bb,
                    edge e0, edge e1, gimple *phi,
                    tree arg0, tree arg1)
{
  tree result;
  edge true_edge, false_edge;
  enum tree_code minmax, ass_code;
  tree smaller, larger, arg_true, arg_false;
  gimple_stmt_iterator gsi, gsi_from;

  tree type = TREE_TYPE (PHI_RESULT (phi));

  /* The optimization may be unsafe due to NaNs.  */
  if (HONOR_NANS (type) || HONOR_SIGNED_ZEROS (type))
    return false;

  gcond *cond = as_a <gcond *> (last_stmt (cond_bb));
  enum tree_code cmp = gimple_cond_code (cond);
  tree rhs = gimple_cond_rhs (cond);

  /* Turn EQ/NE of extreme values to order comparisons.  */
  if ((cmp == NE_EXPR || cmp == EQ_EXPR)
      && TREE_CODE (rhs) == INTEGER_CST
      && INTEGRAL_TYPE_P (TREE_TYPE (rhs)))
    {
      if (wi::eq_p (wi::to_wide (rhs), wi::min_value (TREE_TYPE (rhs))))
        {
          cmp = (cmp == EQ_EXPR) ? LT_EXPR : GE_EXPR;
          rhs = wide_int_to_tree (TREE_TYPE (rhs),
                                  wi::min_value (TREE_TYPE (rhs)) + 1);
        }
      else if (wi::eq_p (wi::to_wide (rhs), wi::max_value (TREE_TYPE (rhs))))
        {
          cmp = (cmp == EQ_EXPR) ? GT_EXPR : LE_EXPR;
          rhs = wide_int_to_tree (TREE_TYPE (rhs),
                                  wi::max_value (TREE_TYPE (rhs)) - 1);
        }
    }

  /* This transformation is only valid for order comparisons.  Record which
     operand is smaller/larger if the result of the comparison is true.  */
  tree alt_smaller = NULL_TREE;
  tree alt_larger = NULL_TREE;
  if (cmp == LT_EXPR || cmp == LE_EXPR)
    {
      smaller = gimple_cond_lhs (cond);
      larger = rhs;
      /* If we have smaller < CST it is equivalent to smaller <= CST-1.
         Likewise smaller <= CST is equivalent to smaller < CST+1.  */
      if (TREE_CODE (larger) == INTEGER_CST
          && INTEGRAL_TYPE_P (TREE_TYPE (larger)))
        {
          if (cmp == LT_EXPR)
            {
              wi::overflow_type overflow;
              wide_int alt = wi::sub (wi::to_wide (larger), 1,
                                      TYPE_SIGN (TREE_TYPE (larger)),
                                      &overflow);
              if (! overflow)
                alt_larger = wide_int_to_tree (TREE_TYPE (larger), alt);
            }
          else
            {
              wi::overflow_type overflow;
              wide_int alt = wi::add (wi::to_wide (larger), 1,
                                      TYPE_SIGN (TREE_TYPE (larger)),
                                      &overflow);
              if (! overflow)
                alt_larger = wide_int_to_tree (TREE_TYPE (larger), alt);
            }
        }
    }
  else if (cmp == GT_EXPR || cmp == GE_EXPR)
    {
      smaller = rhs;
      larger = gimple_cond_lhs (cond);
      /* If we have larger > CST it is equivalent to larger >= CST+1.
         Likewise larger >= CST is equivalent to larger > CST-1.  */
      if (TREE_CODE (smaller) == INTEGER_CST
          && INTEGRAL_TYPE_P (TREE_TYPE (smaller)))
        {
          wi::overflow_type overflow;
          if (cmp == GT_EXPR)
            {
              wide_int alt = wi::add (wi::to_wide (smaller), 1,
                                      TYPE_SIGN (TREE_TYPE (smaller)),
                                      &overflow);
              if (! overflow)
                alt_smaller = wide_int_to_tree (TREE_TYPE (smaller), alt);
            }
          else
            {
              wide_int alt = wi::sub (wi::to_wide (smaller), 1,
                                      TYPE_SIGN (TREE_TYPE (smaller)),
                                      &overflow);
              if (! overflow)
                alt_smaller = wide_int_to_tree (TREE_TYPE (smaller), alt);
            }
        }
    }
  else
    return false;

  /* Handle the special case of (signed_type)x < 0 being equivalent
     to x > MAX_VAL(signed_type) and (signed_type)x >= 0 equivalent
     to x <= MAX_VAL(signed_type).  */
  if ((cmp == GE_EXPR || cmp == LT_EXPR)
      && INTEGRAL_TYPE_P (type)
      && TYPE_UNSIGNED (type)
      && integer_zerop (rhs))
    {
      tree op = gimple_cond_lhs (cond);
      if (TREE_CODE (op) == SSA_NAME
          && INTEGRAL_TYPE_P (TREE_TYPE (op))
          && !TYPE_UNSIGNED (TREE_TYPE (op)))
        {
          gimple *def_stmt = SSA_NAME_DEF_STMT (op);
          if (gimple_assign_cast_p (def_stmt))
            {
              tree op1 = gimple_assign_rhs1 (def_stmt);
              if (INTEGRAL_TYPE_P (TREE_TYPE (op1))
                  && TYPE_UNSIGNED (TREE_TYPE (op1))
                  && (TYPE_PRECISION (TREE_TYPE (op))
                      == TYPE_PRECISION (TREE_TYPE (op1)))
                  && useless_type_conversion_p (type, TREE_TYPE (op1)))
                {
                  wide_int w1 = wi::max_value (TREE_TYPE (op));
                  wide_int w2 = wi::add (w1, 1);
                  if (cmp == LT_EXPR)
                    {
                      larger = op1;
                      smaller = wide_int_to_tree (TREE_TYPE (op1), w1);
                      alt_smaller = wide_int_to_tree (TREE_TYPE (op1), w2);
                      alt_larger = NULL_TREE;
                    }
                  else
                    {
                      smaller = op1;
                      larger = wide_int_to_tree (TREE_TYPE (op1), w1);
                      alt_larger = wide_int_to_tree (TREE_TYPE (op1), w2);
                      alt_smaller = NULL_TREE;
                    }
                }
            }
        }
    }

  /* We need to know which is the true edge and which is the false
     edge so that we know which PHI argument goes with which edge.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);

  /* Forward the edges over the middle basic block.  */
  if (true_edge->dest == middle_bb)
    true_edge = EDGE_SUCC (true_edge->dest, 0);
  if (false_edge->dest == middle_bb)
    false_edge = EDGE_SUCC (false_edge->dest, 0);

  if (true_edge == e0)
    {
      gcc_assert (false_edge == e1);
      arg_true = arg0;
      arg_false = arg1;
    }
  else
    {
      gcc_assert (false_edge == e0);
      gcc_assert (true_edge == e1);
      arg_true = arg1;
      arg_false = arg0;
    }

  if (empty_block_p (middle_bb))
    {
      if ((operand_equal_for_phi_arg_p (arg_true, smaller)
           || (alt_smaller
               && operand_equal_for_phi_arg_p (arg_true, alt_smaller)))
          && (operand_equal_for_phi_arg_p (arg_false, larger)
              || (alt_larger
                  && operand_equal_for_phi_arg_p (arg_false, alt_larger))))
        {
          /* Case

             if (smaller < larger)
             rslt = smaller;
             else
             rslt = larger;  */
          minmax = MIN_EXPR;
        }
      else if ((operand_equal_for_phi_arg_p (arg_false, smaller)
                || (alt_smaller
                    && operand_equal_for_phi_arg_p (arg_false, alt_smaller)))
               && (operand_equal_for_phi_arg_p (arg_true, larger)
                   || (alt_larger
                       && operand_equal_for_phi_arg_p (arg_true, alt_larger))))
        minmax = MAX_EXPR;
      else
        return false;
    }
  else
    {
      /* Recognize the following case, assuming d <= u:

         if (a <= u)
           b = MAX (a, d);
         x = PHI <b, u>

         This is equivalent to

         b = MAX (a, d);
         x = MIN (b, u);  */

      gimple *assign = last_and_only_stmt (middle_bb);
      tree lhs, op0, op1, bound;

      if (!assign
          || gimple_code (assign) != GIMPLE_ASSIGN)
        return false;

      lhs = gimple_assign_lhs (assign);
      ass_code = gimple_assign_rhs_code (assign);
      if (ass_code != MAX_EXPR && ass_code != MIN_EXPR)
        return false;
      op0 = gimple_assign_rhs1 (assign);
      op1 = gimple_assign_rhs2 (assign);

      if (true_edge->src == middle_bb)
        {
          /* We got here if the condition is true, i.e., SMALLER < LARGER.  */
          if (!operand_equal_for_phi_arg_p (lhs, arg_true))
            return false;

          if (operand_equal_for_phi_arg_p (arg_false, larger)
              || (alt_larger
                  && operand_equal_for_phi_arg_p (arg_false, alt_larger)))
            {
              /* Case

                 if (smaller < larger)
                   {
                     r' = MAX_EXPR (smaller, bound)
                   }
                 r = PHI <r', larger>  --> to be turned to MIN_EXPR.  */
              if (ass_code != MAX_EXPR)
                return false;

              minmax = MIN_EXPR;
              if (operand_equal_for_phi_arg_p (op0, smaller)
                  || (alt_smaller
                      && operand_equal_for_phi_arg_p (op0, alt_smaller)))
                bound = op1;
              else if (operand_equal_for_phi_arg_p (op1, smaller)
                       || (alt_smaller
                           && operand_equal_for_phi_arg_p (op1, alt_smaller)))
                bound = op0;
              else
                return false;

              /* We need BOUND <= LARGER.  */
              if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
                                                  bound, larger)))
                return false;
            }
          else if (operand_equal_for_phi_arg_p (arg_false, smaller)
                   || (alt_smaller
                       && operand_equal_for_phi_arg_p (arg_false, alt_smaller)))
            {
              /* Case

                 if (smaller < larger)
                   {
                     r' = MIN_EXPR (larger, bound)
                   }
                 r = PHI <r', smaller>  --> to be turned to MAX_EXPR.  */
              if (ass_code != MIN_EXPR)
                return false;

              minmax = MAX_EXPR;
              if (operand_equal_for_phi_arg_p (op0, larger)
                  || (alt_larger
                      && operand_equal_for_phi_arg_p (op0, alt_larger)))
                bound = op1;
              else if (operand_equal_for_phi_arg_p (op1, larger)
                       || (alt_larger
                           && operand_equal_for_phi_arg_p (op1, alt_larger)))
                bound = op0;
              else
                return false;

              /* We need BOUND >= SMALLER.  */
              if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
                                                  bound, smaller)))
                return false;
            }
          else
            return false;
        }
      else
        {
          /* We got here if the condition is false, i.e., SMALLER > LARGER.  */
          if (!operand_equal_for_phi_arg_p (lhs, arg_false))
            return false;

          if (operand_equal_for_phi_arg_p (arg_true, larger)
              || (alt_larger
                  && operand_equal_for_phi_arg_p (arg_true, alt_larger)))
            {
              /* Case

                 if (smaller > larger)
                   {
                     r' = MIN_EXPR (smaller, bound)
                   }
                 r = PHI <r', larger>  --> to be turned to MAX_EXPR.  */
              if (ass_code != MIN_EXPR)
                return false;

              minmax = MAX_EXPR;
              if (operand_equal_for_phi_arg_p (op0, smaller)
                  || (alt_smaller
                      && operand_equal_for_phi_arg_p (op0, alt_smaller)))
                bound = op1;
              else if (operand_equal_for_phi_arg_p (op1, smaller)
                       || (alt_smaller
                           && operand_equal_for_phi_arg_p (op1, alt_smaller)))
                bound = op0;
              else
                return false;

              /* We need BOUND >= LARGER.  */
              if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
                                                  bound, larger)))
                return false;
            }
          else if (operand_equal_for_phi_arg_p (arg_true, smaller)
                   || (alt_smaller
                       && operand_equal_for_phi_arg_p (arg_true, alt_smaller)))
            {
              /* Case

                 if (smaller > larger)
                   {
                     r' = MAX_EXPR (larger, bound)
                   }
                 r = PHI <r', smaller>  --> to be turned to MIN_EXPR.  */
              if (ass_code != MAX_EXPR)
                return false;

              minmax = MIN_EXPR;
              if (operand_equal_for_phi_arg_p (op0, larger))
                bound = op1;
              else if (operand_equal_for_phi_arg_p (op1, larger))
                bound = op0;
              else
                return false;

              /* We need BOUND <= SMALLER.  */
              if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
                                                  bound, smaller)))
                return false;
            }
          else
            return false;
        }

      /* Move the statement from the middle block.  */
      gsi = gsi_last_bb (cond_bb);
      gsi_from = gsi_last_nondebug_bb (middle_bb);
      reset_flow_sensitive_info (SINGLE_SSA_TREE_OPERAND (gsi_stmt (gsi_from),
                                                          SSA_OP_DEF));
      gsi_move_before (&gsi_from, &gsi);
    }

  /* Emit the statement to compute min/max.  */
  gimple_seq stmts = NULL;
  tree phi_result = PHI_RESULT (phi);
  result = gimple_build (&stmts, minmax, TREE_TYPE (phi_result), arg0, arg1);
  /* Duplicate range info if we're the only thing setting the target PHI.  */
  if (!gimple_seq_empty_p (stmts)
      && EDGE_COUNT (gimple_bb (phi)->preds) == 2
      && !POINTER_TYPE_P (TREE_TYPE (phi_result))
      && SSA_NAME_RANGE_INFO (phi_result))
    duplicate_ssa_name_range_info (result, SSA_NAME_RANGE_TYPE (phi_result),
                                   SSA_NAME_RANGE_INFO (phi_result));

  gsi = gsi_last_bb (cond_bb);
  gsi_insert_seq_before (&gsi, stmts, GSI_NEW_STMT);

  replace_phi_edge_with_variable (cond_bb, e1, phi, result);

  return true;
}

/* Convert

   <bb 2>
   if (b_4(D) != 0)
   goto <bb 3>
   else
   goto <bb 4>

   <bb 3>
   _2 = (unsigned long) b_4(D);
   _9 = __builtin_popcountl (_2);
   OR
   _9 = __builtin_popcountl (b_4(D));

   <bb 4>
   c_12 = PHI <0(2), _9(3)>

   Into
   <bb 2>
   _2 = (unsigned long) b_4(D);
   _9 = __builtin_popcountl (_2);
   OR
   _9 = __builtin_popcountl (b_4(D));

   <bb 4>
   c_12 = PHI <_9(2)>

   Similarly for __builtin_clz or __builtin_ctz if
   C?Z_DEFINED_VALUE_AT_ZERO is 2, optab is present and
   instead of 0 above it uses the value from that macro.  */
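
/* At the source level (an editor's sketch, not from the original
   sources) this covers

     int c = b ? __builtin_popcountl (b) : 0;

   which becomes an unconditional __builtin_popcountl (b), since
   popcount of zero is zero anyway; for clz/ctz the same holds only
   when the target defines a value at zero, hence the checks below.  */
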
1788 static bool
1789 cond_removal_in_popcount_clz_ctz_pattern (basic_block cond_bb,
1790 basic_block middle_bb,
1791 edge e1, edge e2, gimple *phi,
1792 tree arg0, tree arg1)
1794 gimple *cond;
1795 gimple_stmt_iterator gsi, gsi_from;
1796 gimple *call;
1797 gimple *cast = NULL;
1798 tree lhs, arg;
1800 /* Check that
1801 _2 = (unsigned long) b_4(D);
1802 _9 = __builtin_popcountl (_2);
1804 _9 = __builtin_popcountl (b_4(D));
1805 are the only stmts in the middle_bb. */
1807 gsi = gsi_start_nondebug_after_labels_bb (middle_bb);
1808 if (gsi_end_p (gsi))
1809 return false;
1810 cast = gsi_stmt (gsi);
1811 gsi_next_nondebug (&gsi);
1812 if (!gsi_end_p (gsi))
1814 call = gsi_stmt (gsi);
1815 gsi_next_nondebug (&gsi);
1816 if (!gsi_end_p (gsi))
1817 return false;
1819 else
1821 call = cast;
1822 cast = NULL;
1825 /* Check that we have a popcount/clz/ctz builtin. */
1826 if (!is_gimple_call (call) || gimple_call_num_args (call) != 1)
1827 return false;
1829 arg = gimple_call_arg (call, 0);
1830 lhs = gimple_get_lhs (call);
1832 if (lhs == NULL_TREE)
1833 return false;
1835 combined_fn cfn = gimple_call_combined_fn (call);
1836 internal_fn ifn = IFN_LAST;
1837 int val = 0;
1838 switch (cfn)
1840 CASE_CFN_POPCOUNT:
1841 break;
1842 CASE_CFN_CLZ:
1843 if (INTEGRAL_TYPE_P (TREE_TYPE (arg)))
1845 tree type = TREE_TYPE (arg);
1846 if (direct_internal_fn_supported_p (IFN_CLZ, type, OPTIMIZE_FOR_BOTH)
1847 && CLZ_DEFINED_VALUE_AT_ZERO (SCALAR_INT_TYPE_MODE (type),
1848 val) == 2)
1850 ifn = IFN_CLZ;
1851 break;
1854 return false;
1855 CASE_CFN_CTZ:
1856 if (INTEGRAL_TYPE_P (TREE_TYPE (arg)))
1858 tree type = TREE_TYPE (arg);
1859 if (direct_internal_fn_supported_p (IFN_CTZ, type, OPTIMIZE_FOR_BOTH)
1860 && CTZ_DEFINED_VALUE_AT_ZERO (SCALAR_INT_TYPE_MODE (type),
1861 val) == 2)
1863 ifn = IFN_CTZ;
1864 break;
1867 return false;
1868 default:
1869 return false;
1872 if (cast)
1874 /* We have a cast stmt feeding popcount/clz/ctz builtin. */
1875 /* Check that we have a cast prior to that. */
1876 if (gimple_code (cast) != GIMPLE_ASSIGN
1877 || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (cast)))
1878 return false;
1879 /* Result of the cast stmt is the argument to the builtin. */
1880 if (arg != gimple_assign_lhs (cast))
1881 return false;
1882 arg = gimple_assign_rhs1 (cast);
1885 cond = last_stmt (cond_bb);
1887 /* Cond_bb has a check for b_4 [!=|==] 0 before calling the popcount/clz/ctz
1888 builtin. */
1889 if (gimple_code (cond) != GIMPLE_COND
1890 || (gimple_cond_code (cond) != NE_EXPR
1891 && gimple_cond_code (cond) != EQ_EXPR)
1892 || !integer_zerop (gimple_cond_rhs (cond))
1893 || arg != gimple_cond_lhs (cond))
1894 return false;
1896 /* Canonicalize. */
1897 if ((e2->flags & EDGE_TRUE_VALUE
1898 && gimple_cond_code (cond) == NE_EXPR)
1899 || (e1->flags & EDGE_TRUE_VALUE
1900 && gimple_cond_code (cond) == EQ_EXPR))
1902 std::swap (arg0, arg1);
1903 std::swap (e1, e2);
1906 /* Check PHI arguments. */
1907 if (lhs != arg0
1908 || TREE_CODE (arg1) != INTEGER_CST
1909 || wi::to_wide (arg1) != val)
1910 return false;
1912 /* And insert the popcount/clz/ctz builtin and cast stmt at the end of
1913 cond_bb, just before its final condition. */
1914 gsi = gsi_last_bb (cond_bb);
1915 if (cast)
1917 gsi_from = gsi_for_stmt (cast);
1918 gsi_move_before (&gsi_from, &gsi);
1919 reset_flow_sensitive_info (gimple_get_lhs (cast));
1921 gsi_from = gsi_for_stmt (call);
1922 if (ifn == IFN_LAST || gimple_call_internal_p (call))
1923 gsi_move_before (&gsi_from, &gsi);
1924 else
1926 /* For __builtin_c[lt]z* force .C[LT]Z ifn, because only
1927 the latter is well defined at zero. */
1928 call = gimple_build_call_internal (ifn, 1, gimple_call_arg (call, 0));
1929 gimple_call_set_lhs (call, lhs);
1930 gsi_insert_before (&gsi, call, GSI_SAME_STMT);
1931 gsi_remove (&gsi_from, true);
1933 reset_flow_sensitive_info (lhs);
1935 /* Now update the PHI and remove unneeded bbs. */
1936 replace_phi_edge_with_variable (cond_bb, e2, phi, lhs);
1937 return true;
1940 /* The function abs_replacement does the main work of absolute value
1941 replacement. Return true if the replacement is done. Otherwise return
1942 false.
1943 COND_BB is the basic block where the replacement is going to be done.
1944 ARG0 is argument 0 from the phi. Likewise for ARG1. */
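/* As an illustration (a sketch; the exact GIMPLE will differ), this
   handles C source of the form

     x = a < 0 ? -a : a;    (becomes x = ABS_EXPR <a>)
     y = a < 0 ? a : -a;    (becomes a negated ABS_EXPR)

   subject to the signed-zero and overflow checks below.  */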
1946 static bool
1947 abs_replacement (basic_block cond_bb, basic_block middle_bb,
1948 edge e0 ATTRIBUTE_UNUSED, edge e1,
1949 gimple *phi, tree arg0, tree arg1)
1951 tree result;
1952 gassign *new_stmt;
1953 gimple *cond;
1954 gimple_stmt_iterator gsi;
1955 edge true_edge, false_edge;
1956 gimple *assign;
1957 edge e;
1958 tree rhs, lhs;
1959 bool negate;
1960 enum tree_code cond_code;
1962 /* If the type says honor signed zeros we cannot do this
1963 optimization. */
1964 if (HONOR_SIGNED_ZEROS (arg1))
1965 return false;
1967 /* MIDDLE_BB must have only one executable statement, which must have the
1968 form arg0 = -arg1 or arg1 = -arg0. */
1970 assign = last_and_only_stmt (middle_bb);
1971 /* If we did not find the proper negation assignment, then we cannot
1972 optimize. */
1973 if (assign == NULL)
1974 return false;
1976 /* If we got here, then we have found the only executable statement
1977 in MIDDLE_BB. If it is anything other than arg0 = -arg1 or
1978 arg1 = -arg0, then we cannot optimize. */
1979 if (gimple_code (assign) != GIMPLE_ASSIGN)
1980 return false;
1982 lhs = gimple_assign_lhs (assign);
1984 if (gimple_assign_rhs_code (assign) != NEGATE_EXPR)
1985 return false;
1987 rhs = gimple_assign_rhs1 (assign);
1989 /* The assignment has to be arg0 = -arg1 or arg1 = -arg0. */
1990 if (!(lhs == arg0 && rhs == arg1)
1991 && !(lhs == arg1 && rhs == arg0))
1992 return false;
1994 cond = last_stmt (cond_bb);
1995 result = PHI_RESULT (phi);
1997 /* Only relationals comparing arg[01] against zero are interesting. */
1998 cond_code = gimple_cond_code (cond);
1999 if (cond_code != GT_EXPR && cond_code != GE_EXPR
2000 && cond_code != LT_EXPR && cond_code != LE_EXPR)
2001 return false;
2003 /* Make sure the conditional is arg[01] OP y. */
2004 if (gimple_cond_lhs (cond) != rhs)
2005 return false;
2007 if (FLOAT_TYPE_P (TREE_TYPE (gimple_cond_rhs (cond)))
2008 ? real_zerop (gimple_cond_rhs (cond))
2009 : integer_zerop (gimple_cond_rhs (cond)))
2010 ;
2011 else
2012 return false;
2014 /* We need to know which is the true edge and which is the false
2015 edge so that we know whether we have abs or negated abs. */
2016 extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
2018 /* For GT_EXPR/GE_EXPR, if the true edge goes to MIDDLE_BB, then we
2019 will need to negate the result. Similarly for LT_EXPR/LE_EXPR if
2020 the false edge goes to MIDDLE_BB. */
2021 if (cond_code == GT_EXPR || cond_code == GE_EXPR)
2022 e = true_edge;
2023 else
2024 e = false_edge;
2026 if (e->dest == middle_bb)
2027 negate = true;
2028 else
2029 negate = false;
2031 /* If the code negates only if the operand is positive, make sure not to
2032 introduce undefined behavior when negating or computing the absolute.
2033 ??? We could use range info if present to check for arg1 == INT_MIN. */
2034 if (negate
2035 && (ANY_INTEGRAL_TYPE_P (TREE_TYPE (arg1))
2036 && ! TYPE_OVERFLOW_WRAPS (TREE_TYPE (arg1))))
2037 return false;
2039 result = duplicate_ssa_name (result, NULL);
2041 if (negate)
2042 lhs = make_ssa_name (TREE_TYPE (result));
2043 else
2044 lhs = result;
2046 /* Build the modify expression with abs expression. */
2047 new_stmt = gimple_build_assign (lhs, ABS_EXPR, rhs);
2049 gsi = gsi_last_bb (cond_bb);
2050 gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
2052 if (negate)
2054 /* Get the right GSI. We want to insert after the recently
2055 added ABS_EXPR statement (which we know is the first statement
2056 in the block). */
2057 new_stmt = gimple_build_assign (result, NEGATE_EXPR, lhs);
2059 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
2062 replace_phi_edge_with_variable (cond_bb, e1, phi, result);
2064 /* Note that we optimized this PHI. */
2065 return true;
2068 /* Auxiliary functions to determine the set of memory accesses which
2069 can't trap because they are preceded by accesses to the same memory
2070 portion. We do that for MEM_REFs, so we only need to track
2071 the SSA_NAME of the pointer indirectly referenced. The algorithm
2072 is simply a walk over all instructions in dominator order. When
2073 we see a MEM_REF we determine if we've already seen the same
2074 ref anywhere up to the root of the dominator tree. If we have,
2075 the current access can't trap. If we don't see any dominating access,
2076 the current access might trap, but might also make later accesses
2077 non-trapping, so we remember it. We need to be careful with loads
2078 vs. stores: for instance a load might not trap while a store would,
2079 so if we see a dominating read access this doesn't mean that a later
2080 write access would not trap. Hence we also need to differentiate the
2081 type of access(es) seen.
2083 ??? We are currently very conservative and assume that a load might
2084 trap even if a store doesn't (write-only memory). This is probably
2085 overly conservative.
2087 As a special case, for !TREE_ADDRESSABLE automatic variables we can
2088 ignore whether an access is a load or a store, because the
2089 local stack should always be writable. */
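/* As an illustration (a sketch, with made-up names):

     *p_1 = 1;        <- seen store, remembered
     if (cond_2)
       *p_1 = 2;      <- dominated by the store above: can't trap

   The second store ends up in the non-trapping set.  Note that a
   dominating load alone would not prove a store non-trapping (the
   memory might be read-only), except for the automatic-variable
   special case above.  */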
2091 /* A hash-table of references (MEM_REF/ARRAY_REF/COMPONENT_REF), and in which
2092 basic block an *_REF through it was seen, which would constitute a
2093 no-trap region for same accesses.
2095 Size is needed to support 2 MEM_REFs of different types, like
2096 MEM<double>(s_1) and MEM<long>(s_1), which would compare equal with
2097 OEP_ADDRESS_OF. */
2098 struct ref_to_bb
2100 tree exp;
2101 HOST_WIDE_INT size;
2102 unsigned int phase;
2103 basic_block bb;
2106 /* Hashtable helpers. */
2108 struct refs_hasher : free_ptr_hash<ref_to_bb>
2110 static inline hashval_t hash (const ref_to_bb *);
2111 static inline bool equal (const ref_to_bb *, const ref_to_bb *);
2114 /* Used for quick clearing of the hash-table when we see calls.
2115 Hash entries with phase < nt_call_phase are invalid. */
2116 static unsigned int nt_call_phase;
2118 /* The hash function. */
2120 inline hashval_t
2121 refs_hasher::hash (const ref_to_bb *n)
2123 inchash::hash hstate;
2124 inchash::add_expr (n->exp, hstate, OEP_ADDRESS_OF);
2125 hstate.add_hwi (n->size);
2126 return hstate.end ();
2129 /* The equality function of *P1 and *P2. */
2131 inline bool
2132 refs_hasher::equal (const ref_to_bb *n1, const ref_to_bb *n2)
2134 return operand_equal_p (n1->exp, n2->exp, OEP_ADDRESS_OF)
2135 && n1->size == n2->size;
2138 class nontrapping_dom_walker : public dom_walker
2140 public:
2141 nontrapping_dom_walker (cdi_direction direction, hash_set<tree> *ps)
2142 : dom_walker (direction), m_nontrapping (ps), m_seen_refs (128)
2145 virtual edge before_dom_children (basic_block);
2146 virtual void after_dom_children (basic_block);
2148 private:
2150 /* We see the expression EXP in basic block BB. If it's an interesting
2151 expression (a MEM_REF, ARRAY_REF or COMPONENT_REF), possibly insert the
2152 expression into the set NONTRAP or the hash table of seen expressions.
2153 STORE is true if this expression is on the LHS, otherwise it's on
2154 the RHS. */
2155 void add_or_mark_expr (basic_block, tree, bool);
2157 hash_set<tree> *m_nontrapping;
2159 /* The hash table for remembering what we've seen. */
2160 hash_table<refs_hasher> m_seen_refs;
2163 /* Called by walk_dominator_tree, when entering the block BB. */
2164 edge
2165 nontrapping_dom_walker::before_dom_children (basic_block bb)
2167 edge e;
2168 edge_iterator ei;
2169 gimple_stmt_iterator gsi;
2171 /* If we haven't seen all our predecessors, effectively clear the hash-table by bumping nt_call_phase. */
2172 FOR_EACH_EDGE (e, ei, bb->preds)
2173 if ((((size_t)e->src->aux) & 2) == 0)
2175 nt_call_phase++;
2176 break;
2179 /* Mark this BB as being on the path to dominator root and as visited. */
2180 bb->aux = (void*)(1 | 2);
2182 /* And walk the statements in order. */
2183 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2185 gimple *stmt = gsi_stmt (gsi);
2187 if ((gimple_code (stmt) == GIMPLE_ASM && gimple_vdef (stmt))
2188 || (is_gimple_call (stmt)
2189 && (!nonfreeing_call_p (stmt) || !nonbarrier_call_p (stmt))))
2190 nt_call_phase++;
2191 else if (gimple_assign_single_p (stmt) && !gimple_has_volatile_ops (stmt))
2193 add_or_mark_expr (bb, gimple_assign_lhs (stmt), true);
2194 add_or_mark_expr (bb, gimple_assign_rhs1 (stmt), false);
2197 return NULL;
2200 /* Called by walk_dominator_tree, when basic block BB is exited. */
2201 void
2202 nontrapping_dom_walker::after_dom_children (basic_block bb)
2204 /* This BB isn't on the path to dominator root anymore. */
2205 bb->aux = (void*)2;
2208 /* We see the expression EXP in basic block BB. If it's an interesting
2209 expression of:
2210 1) MEM_REF
2211 2) ARRAY_REF
2212 3) COMPONENT_REF
2213 possibly insert the expression into the set NONTRAP or the hash table
2214 of seen expressions. STORE is true if this expression is on the LHS,
2215 otherwise it's on the RHS. */
2216 void
2217 nontrapping_dom_walker::add_or_mark_expr (basic_block bb, tree exp, bool store)
2219 HOST_WIDE_INT size;
2221 if ((TREE_CODE (exp) == MEM_REF || TREE_CODE (exp) == ARRAY_REF
2222 || TREE_CODE (exp) == COMPONENT_REF)
2223 && (size = int_size_in_bytes (TREE_TYPE (exp))) > 0)
2225 struct ref_to_bb map;
2226 ref_to_bb **slot;
2227 struct ref_to_bb *r2bb;
2228 basic_block found_bb = 0;
2230 if (!store)
2232 tree base = get_base_address (exp);
2233 /* Only record a LOAD of a local variable whose address is not taken,
2234 as the local stack is always writable. This allows cselim on a STORE
2235 with a dominating LOAD. */
2236 if (!auto_var_p (base) || TREE_ADDRESSABLE (base))
2237 return;
2240 /* Try to find the last seen *_REF, which can trap. */
2241 map.exp = exp;
2242 map.size = size;
2243 slot = m_seen_refs.find_slot (&map, INSERT);
2244 r2bb = *slot;
2245 if (r2bb && r2bb->phase >= nt_call_phase)
2246 found_bb = r2bb->bb;
2248 /* If we've found a trapping *_REF, _and_ it dominates EXP
2249 (it's in a basic block on the path from us to the dominator root),
2250 then EXP can't trap. */
2251 if (found_bb && (((size_t)found_bb->aux) & 1) == 1)
2253 m_nontrapping->add (exp);
2255 else
2257 /* EXP might trap, so insert it into the hash table. */
2258 if (r2bb)
2260 r2bb->phase = nt_call_phase;
2261 r2bb->bb = bb;
2263 else
2265 r2bb = XNEW (struct ref_to_bb);
2266 r2bb->phase = nt_call_phase;
2267 r2bb->bb = bb;
2268 r2bb->exp = exp;
2269 r2bb->size = size;
2270 *slot = r2bb;
2276 /* This is the entry point of gathering non-trapping memory accesses.
2277 It will do a dominator walk over the whole function, and it will
2278 make use of the bb->aux pointers. It returns a set of trees
2279 (the *_REFs themselves) which can't trap. */
2280 static hash_set<tree> *
2281 get_non_trapping (void)
2283 nt_call_phase = 0;
2284 hash_set<tree> *nontrap = new hash_set<tree>;
2285 /* We're going to do a dominator walk, so ensure that we have
2286 dominance information. */
2287 calculate_dominance_info (CDI_DOMINATORS);
2289 nontrapping_dom_walker (CDI_DOMINATORS, nontrap)
2290 .walk (cfun->cfg->x_entry_block_ptr);
2292 clear_aux_for_blocks ();
2293 return nontrap;
2296 /* Do the main work of conditional store replacement. We already know
2297 that the recognized pattern looks like so:
2299 split:
2300 if (cond) goto MIDDLE_BB; else goto JOIN_BB (edge E1)
2301 MIDDLE_BB:
2302 something
2303 fallthrough (edge E0)
2304 JOIN_BB:
2305 some more
2307 We check that MIDDLE_BB contains only one store, that that store
2308 doesn't trap (not via NOTRAP, but via checking if an access to the same
2309 memory location dominates us, or the store is to a local addressable
2310 object) and that the store has a "simple" RHS. */
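/* As an illustration of the no-dominating-access case (a sketch, with
   made-up names): for

     int a[4];
     ...
     if (cond)
       a[1] = v;

   the store can be sunk even without a dominating access, because A is
   a non-address-taken automatic and the local stack is always writable;
   a store through an arbitrary pointer instead needs the dominating
   access recorded by get_non_trapping.  */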
2312 static bool
2313 cond_store_replacement (basic_block middle_bb, basic_block join_bb,
2314 edge e0, edge e1, hash_set<tree> *nontrap)
2316 gimple *assign = last_and_only_stmt (middle_bb);
2317 tree lhs, rhs, name, name2;
2318 gphi *newphi;
2319 gassign *new_stmt;
2320 gimple_stmt_iterator gsi;
2321 location_t locus;
2323 /* Check if middle_bb contains only one store. */
2324 if (!assign
2325 || !gimple_assign_single_p (assign)
2326 || gimple_has_volatile_ops (assign))
2327 return false;
2329 /* And there are no PHI nodes, so all uses in the single stmt are also
2330 available at the point where we insert. */
2331 if (!gimple_seq_empty_p (phi_nodes (middle_bb)))
2332 return false;
2334 locus = gimple_location (assign);
2335 lhs = gimple_assign_lhs (assign);
2336 rhs = gimple_assign_rhs1 (assign);
2337 if ((TREE_CODE (lhs) != MEM_REF
2338 && TREE_CODE (lhs) != ARRAY_REF
2339 && TREE_CODE (lhs) != COMPONENT_REF)
2340 || !is_gimple_reg_type (TREE_TYPE (lhs)))
2341 return false;
2343 /* Prove that we can move the store down. We could also check
2344 TREE_THIS_NOTRAP here, but in that case we could also move stores
2345 whose value is not readily available, which we want to avoid. */
2346 if (!nontrap->contains (lhs))
2348 /* If LHS is an access to a local variable whose address is not taken
2349 (or when we allow data races) and known not to trap, we can
2350 always safely move the store down. */
2351 tree base = get_base_address (lhs);
2352 if (!auto_var_p (base)
2353 || (TREE_ADDRESSABLE (base) && !flag_store_data_races)
2354 || tree_could_trap_p (lhs))
2355 return false;
2358 /* Now we've checked the constraints, so do the transformation:
2359 1) Remove the single store. */
2360 gsi = gsi_for_stmt (assign);
2361 unlink_stmt_vdef (assign);
2362 gsi_remove (&gsi, true);
2363 release_defs (assign);
2365 /* Make both store and load use alias-set zero as we have to
2366 deal with the case of the store being a conditional change
2367 of the dynamic type. */
2368 lhs = unshare_expr (lhs);
2369 tree *basep = &lhs;
2370 while (handled_component_p (*basep))
2371 basep = &TREE_OPERAND (*basep, 0);
2372 if (TREE_CODE (*basep) == MEM_REF
2373 || TREE_CODE (*basep) == TARGET_MEM_REF)
2374 TREE_OPERAND (*basep, 1)
2375 = fold_convert (ptr_type_node, TREE_OPERAND (*basep, 1));
2376 else
2377 *basep = build2 (MEM_REF, TREE_TYPE (*basep),
2378 build_fold_addr_expr (*basep),
2379 build_zero_cst (ptr_type_node));
2381 /* 2) Insert a load from the memory of the store to the temporary
2382 on the edge which did not contain the store. */
2383 name = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
2384 new_stmt = gimple_build_assign (name, lhs);
2385 gimple_set_location (new_stmt, locus);
2386 lhs = unshare_expr (lhs);
2387 /* Set TREE_NO_WARNING on the rhs of the load to avoid uninit
2388 warnings. */
2389 TREE_NO_WARNING (gimple_assign_rhs1 (new_stmt)) = 1;
2390 gsi_insert_on_edge (e1, new_stmt);
2392 /* 3) Create a PHI node at the join block, with one argument
2393 holding the old RHS, and the other holding the temporary
2394 where we stored the old memory contents. */
2395 name2 = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
2396 newphi = create_phi_node (name2, join_bb);
2397 add_phi_arg (newphi, rhs, e0, locus);
2398 add_phi_arg (newphi, name, e1, locus);
2400 new_stmt = gimple_build_assign (lhs, PHI_RESULT (newphi));
2402 /* 4) Insert that PHI node. */
2403 gsi = gsi_after_labels (join_bb);
2404 if (gsi_end_p (gsi))
2406 gsi = gsi_last_bb (join_bb);
2407 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
2409 else
2410 gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
2412 if (dump_file && (dump_flags & TDF_DETAILS))
2414 fprintf (dump_file, "\nConditional store replacement happened!");
2415 fprintf (dump_file, "\nReplaced the store with a load.");
2416 fprintf (dump_file, "\nInserted a new PHI statement in join block:\n");
2417 print_gimple_stmt (dump_file, new_stmt, 0, TDF_VOPS|TDF_MEMSYMS);
2420 return true;
2423 /* Do the main work of conditional store replacement. */
2425 static bool
2426 cond_if_else_store_replacement_1 (basic_block then_bb, basic_block else_bb,
2427 basic_block join_bb, gimple *then_assign,
2428 gimple *else_assign)
2430 tree lhs_base, lhs, then_rhs, else_rhs, name;
2431 location_t then_locus, else_locus;
2432 gimple_stmt_iterator gsi;
2433 gphi *newphi;
2434 gassign *new_stmt;
2436 if (then_assign == NULL
2437 || !gimple_assign_single_p (then_assign)
2438 || gimple_clobber_p (then_assign)
2439 || gimple_has_volatile_ops (then_assign)
2440 || else_assign == NULL
2441 || !gimple_assign_single_p (else_assign)
2442 || gimple_clobber_p (else_assign)
2443 || gimple_has_volatile_ops (else_assign))
2444 return false;
2446 lhs = gimple_assign_lhs (then_assign);
2447 if (!is_gimple_reg_type (TREE_TYPE (lhs))
2448 || !operand_equal_p (lhs, gimple_assign_lhs (else_assign), 0))
2449 return false;
2451 lhs_base = get_base_address (lhs);
2452 if (lhs_base == NULL_TREE
2453 || (!DECL_P (lhs_base) && TREE_CODE (lhs_base) != MEM_REF))
2454 return false;
2456 then_rhs = gimple_assign_rhs1 (then_assign);
2457 else_rhs = gimple_assign_rhs1 (else_assign);
2458 then_locus = gimple_location (then_assign);
2459 else_locus = gimple_location (else_assign);
2461 /* Now we've checked the constraints, so do the transformation:
2462 1) Remove the stores. */
2463 gsi = gsi_for_stmt (then_assign);
2464 unlink_stmt_vdef (then_assign);
2465 gsi_remove (&gsi, true);
2466 release_defs (then_assign);
2468 gsi = gsi_for_stmt (else_assign);
2469 unlink_stmt_vdef (else_assign);
2470 gsi_remove (&gsi, true);
2471 release_defs (else_assign);
2473 /* 2) Create a PHI node at the join block, with one argument
2474 holding the RHS of the THEN branch and the other holding the
2475 RHS of the ELSE branch. */
2476 name = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
2477 newphi = create_phi_node (name, join_bb);
2478 add_phi_arg (newphi, then_rhs, EDGE_SUCC (then_bb, 0), then_locus);
2479 add_phi_arg (newphi, else_rhs, EDGE_SUCC (else_bb, 0), else_locus);
2481 new_stmt = gimple_build_assign (lhs, PHI_RESULT (newphi));
2483 /* 3) Insert that PHI node. */
2484 gsi = gsi_after_labels (join_bb);
2485 if (gsi_end_p (gsi))
2487 gsi = gsi_last_bb (join_bb);
2488 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
2490 else
2491 gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
2493 return true;
2496 /* Return the single store in BB with VDEF or NULL if there are
2497 other stores in the BB or loads following the store. */
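/* As an illustration (a sketch, with made-up names): in a block
   containing

     *p_1 = a_2;
     t_3 = *q_4;

   the store is not trailing: the following load uses its VDEF in the
   same block, so NULL is returned and the caller falls back to the
   full data-dependence analysis.  */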
2499 static gimple *
2500 single_trailing_store_in_bb (basic_block bb, tree vdef)
2502 if (SSA_NAME_IS_DEFAULT_DEF (vdef))
2503 return NULL;
2504 gimple *store = SSA_NAME_DEF_STMT (vdef);
2505 if (gimple_bb (store) != bb
2506 || gimple_code (store) == GIMPLE_PHI)
2507 return NULL;
2509 /* Verify there is no other store in this BB. */
2510 if (!SSA_NAME_IS_DEFAULT_DEF (gimple_vuse (store))
2511 && gimple_bb (SSA_NAME_DEF_STMT (gimple_vuse (store))) == bb
2512 && gimple_code (SSA_NAME_DEF_STMT (gimple_vuse (store))) != GIMPLE_PHI)
2513 return NULL;
2515 /* Verify there is no load or store after the store. */
2516 use_operand_p use_p;
2517 imm_use_iterator imm_iter;
2518 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, gimple_vdef (store))
2519 if (USE_STMT (use_p) != store
2520 && gimple_bb (USE_STMT (use_p)) == bb)
2521 return NULL;
2523 return store;
2526 /* Conditional store replacement. We already know
2527 that the recognized pattern looks like so:
2529 split:
2530 if (cond) goto THEN_BB; else goto ELSE_BB (edge E1)
2531 THEN_BB:
2533 X = Y;
2535 goto JOIN_BB;
2536 ELSE_BB:
2538 X = Z;
2540 fallthrough (edge E0)
2541 JOIN_BB:
2542 some more
2544 We check that it is safe to sink the store to JOIN_BB by verifying that
2545 there are no read-after-write or write-after-write dependencies in
2546 THEN_BB and ELSE_BB. */
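/* As an illustration (a sketch, with made-up names), the cheap
   single-store case:

     if (cond_1)
       *p_2 = a_3;
     else
       *p_2 = b_4;

   becomes an unconditional *p_2 = PHI <a_3, b_4> in JOIN_BB, with no
   dependence analysis needed.  */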
2548 static bool
2549 cond_if_else_store_replacement (basic_block then_bb, basic_block else_bb,
2550 basic_block join_bb)
2552 vec<data_reference_p> then_datarefs, else_datarefs;
2553 vec<ddr_p> then_ddrs, else_ddrs;
2554 gimple *then_store, *else_store;
2555 bool found, ok = false, res;
2556 struct data_dependence_relation *ddr;
2557 data_reference_p then_dr, else_dr;
2558 int i, j;
2559 tree then_lhs, else_lhs;
2560 basic_block blocks[3];
2562 /* Handle the case with a single store in THEN_BB and ELSE_BB. That is
2563 cheap enough to always handle, as it allows us to elide dependence
2564 checking. */
2565 gphi *vphi = NULL;
2566 for (gphi_iterator si = gsi_start_phis (join_bb); !gsi_end_p (si);
2567 gsi_next (&si))
2568 if (virtual_operand_p (gimple_phi_result (si.phi ())))
2570 vphi = si.phi ();
2571 break;
2573 if (!vphi)
2574 return false;
2575 tree then_vdef = PHI_ARG_DEF_FROM_EDGE (vphi, single_succ_edge (then_bb));
2576 tree else_vdef = PHI_ARG_DEF_FROM_EDGE (vphi, single_succ_edge (else_bb));
2577 gimple *then_assign = single_trailing_store_in_bb (then_bb, then_vdef);
2578 if (then_assign)
2580 gimple *else_assign = single_trailing_store_in_bb (else_bb, else_vdef);
2581 if (else_assign)
2582 return cond_if_else_store_replacement_1 (then_bb, else_bb, join_bb,
2583 then_assign, else_assign);
2586 /* If either vectorization or if-conversion is disabled then do
2587 not sink any stores. */
2588 if (param_max_stores_to_sink == 0
2589 || (!flag_tree_loop_vectorize && !flag_tree_slp_vectorize)
2590 || !flag_tree_loop_if_convert)
2591 return false;
2593 /* Find data references. */
2594 then_datarefs.create (1);
2595 else_datarefs.create (1);
2596 if ((find_data_references_in_bb (NULL, then_bb, &then_datarefs)
2597 == chrec_dont_know)
2598 || !then_datarefs.length ()
2599 || (find_data_references_in_bb (NULL, else_bb, &else_datarefs)
2600 == chrec_dont_know)
2601 || !else_datarefs.length ())
2603 free_data_refs (then_datarefs);
2604 free_data_refs (else_datarefs);
2605 return false;
2608 /* Find pairs of stores with equal LHS. */
2609 auto_vec<gimple *, 1> then_stores, else_stores;
2610 FOR_EACH_VEC_ELT (then_datarefs, i, then_dr)
2612 if (DR_IS_READ (then_dr))
2613 continue;
2615 then_store = DR_STMT (then_dr);
2616 then_lhs = gimple_get_lhs (then_store);
2617 if (then_lhs == NULL_TREE)
2618 continue;
2619 found = false;
2621 FOR_EACH_VEC_ELT (else_datarefs, j, else_dr)
2623 if (DR_IS_READ (else_dr))
2624 continue;
2626 else_store = DR_STMT (else_dr);
2627 else_lhs = gimple_get_lhs (else_store);
2628 if (else_lhs == NULL_TREE)
2629 continue;
2631 if (operand_equal_p (then_lhs, else_lhs, 0))
2633 found = true;
2634 break;
2638 if (!found)
2639 continue;
2641 then_stores.safe_push (then_store);
2642 else_stores.safe_push (else_store);
2645 /* No pairs of stores found, or more pairs than param_max_stores_to_sink allows. */
2646 if (!then_stores.length ()
2647 || then_stores.length () > (unsigned) param_max_stores_to_sink)
2649 free_data_refs (then_datarefs);
2650 free_data_refs (else_datarefs);
2651 return false;
2654 /* Compute and check data dependencies in both basic blocks. */
2655 then_ddrs.create (1);
2656 else_ddrs.create (1);
2657 if (!compute_all_dependences (then_datarefs, &then_ddrs,
2658 vNULL, false)
2659 || !compute_all_dependences (else_datarefs, &else_ddrs,
2660 vNULL, false))
2662 free_dependence_relations (then_ddrs);
2663 free_dependence_relations (else_ddrs);
2664 free_data_refs (then_datarefs);
2665 free_data_refs (else_datarefs);
2666 return false;
2668 blocks[0] = then_bb;
2669 blocks[1] = else_bb;
2670 blocks[2] = join_bb;
2671 renumber_gimple_stmt_uids_in_blocks (blocks, 3);
2673 /* Check that there are no read-after-write or write-after-write dependencies
2674 in THEN_BB. */
2675 FOR_EACH_VEC_ELT (then_ddrs, i, ddr)
2677 struct data_reference *dra = DDR_A (ddr);
2678 struct data_reference *drb = DDR_B (ddr);
2680 if (DDR_ARE_DEPENDENT (ddr) != chrec_known
2681 && ((DR_IS_READ (dra) && DR_IS_WRITE (drb)
2682 && gimple_uid (DR_STMT (dra)) > gimple_uid (DR_STMT (drb)))
2683 || (DR_IS_READ (drb) && DR_IS_WRITE (dra)
2684 && gimple_uid (DR_STMT (drb)) > gimple_uid (DR_STMT (dra)))
2685 || (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))))
2687 free_dependence_relations (then_ddrs);
2688 free_dependence_relations (else_ddrs);
2689 free_data_refs (then_datarefs);
2690 free_data_refs (else_datarefs);
2691 return false;
2695 /* Check that there are no read-after-write or write-after-write dependencies
2696 in ELSE_BB. */
2697 FOR_EACH_VEC_ELT (else_ddrs, i, ddr)
2699 struct data_reference *dra = DDR_A (ddr);
2700 struct data_reference *drb = DDR_B (ddr);
2702 if (DDR_ARE_DEPENDENT (ddr) != chrec_known
2703 && ((DR_IS_READ (dra) && DR_IS_WRITE (drb)
2704 && gimple_uid (DR_STMT (dra)) > gimple_uid (DR_STMT (drb)))
2705 || (DR_IS_READ (drb) && DR_IS_WRITE (dra)
2706 && gimple_uid (DR_STMT (drb)) > gimple_uid (DR_STMT (dra)))
2707 || (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))))
2709 free_dependence_relations (then_ddrs);
2710 free_dependence_relations (else_ddrs);
2711 free_data_refs (then_datarefs);
2712 free_data_refs (else_datarefs);
2713 return false;
2717 /* Sink stores with same LHS. */
2718 FOR_EACH_VEC_ELT (then_stores, i, then_store)
2720 else_store = else_stores[i];
2721 res = cond_if_else_store_replacement_1 (then_bb, else_bb, join_bb,
2722 then_store, else_store);
2723 ok = ok || res;
2726 free_dependence_relations (then_ddrs);
2727 free_dependence_relations (else_ddrs);
2728 free_data_refs (then_datarefs);
2729 free_data_refs (else_datarefs);
2731 return ok;
2734 /* Return TRUE if STMT has a VUSE whose corresponding VDEF is in BB. */
2736 static bool
2737 local_mem_dependence (gimple *stmt, basic_block bb)
2739 tree vuse = gimple_vuse (stmt);
2740 gimple *def;
2742 if (!vuse)
2743 return false;
2745 def = SSA_NAME_DEF_STMT (vuse);
2746 return (def && gimple_bb (def) == bb);
2749 /* Given a "diamond" control-flow pattern where BB0 tests a condition,
2750 BB1 and BB2 are "then" and "else" blocks dependent on this test,
2751 and BB3 rejoins control flow following BB1 and BB2, look for
2752 opportunities to hoist loads as follows. If BB3 contains a PHI of
2753 two loads, one each occurring in BB1 and BB2, and the loads are
2754 provably of adjacent fields in the same structure, then move both
2755 loads into BB0. Of course this can only be done if there are no
2756 dependencies preventing such motion.
2758 One of the hoisted loads will always be speculative, so the
2759 transformation is currently conservative:
2761 - The fields must be strictly adjacent.
2762 - The two fields must occupy a single memory block that is
2763 guaranteed to not cross a page boundary.
2765 The latter is difficult to prove, as such memory blocks should be
2766 aligned on the minimum of the stack alignment boundary and the
2767 alignment guaranteed by heap allocation interfaces. Thus we rely
2768 on a parameter for the alignment value.
2770 Provided a good value is used for the latter case, the former
2771 restriction could possibly be relaxed. */
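/* As an illustration (a sketch, with made-up names): for

     struct S { int a; int b; };
     ...
     x = cond ? s->a : s->b;

   A and B are strictly adjacent fields, so if both 4-byte loads fit in
   one cache line (per the parameter above), both are hoisted into BB0
   ahead of the branch, enabling a conditional move on targets that
   have one.  */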
2773 static void
2774 hoist_adjacent_loads (basic_block bb0, basic_block bb1,
2775 basic_block bb2, basic_block bb3)
2777 int param_align = param_l1_cache_line_size;
2778 unsigned param_align_bits = (unsigned) (param_align * BITS_PER_UNIT);
2779 gphi_iterator gsi;
2781 /* Walk the phis in bb3 looking for an opportunity. We are looking
2782 for phis of two SSA names, one each of which is defined in bb1 and
2783 bb2. */
2784 for (gsi = gsi_start_phis (bb3); !gsi_end_p (gsi); gsi_next (&gsi))
2786 gphi *phi_stmt = gsi.phi ();
2787 gimple *def1, *def2;
2788 tree arg1, arg2, ref1, ref2, field1, field2;
2789 tree tree_offset1, tree_offset2, tree_size2, next;
2790 int offset1, offset2, size2;
2791 unsigned align1;
2792 gimple_stmt_iterator gsi2;
2793 basic_block bb_for_def1, bb_for_def2;
2795 if (gimple_phi_num_args (phi_stmt) != 2
2796 || virtual_operand_p (gimple_phi_result (phi_stmt)))
2797 continue;
2799 arg1 = gimple_phi_arg_def (phi_stmt, 0);
2800 arg2 = gimple_phi_arg_def (phi_stmt, 1);
2802 if (TREE_CODE (arg1) != SSA_NAME
2803 || TREE_CODE (arg2) != SSA_NAME
2804 || SSA_NAME_IS_DEFAULT_DEF (arg1)
2805 || SSA_NAME_IS_DEFAULT_DEF (arg2))
2806 continue;
2808 def1 = SSA_NAME_DEF_STMT (arg1);
2809 def2 = SSA_NAME_DEF_STMT (arg2);
2811 if ((gimple_bb (def1) != bb1 || gimple_bb (def2) != bb2)
2812 && (gimple_bb (def2) != bb1 || gimple_bb (def1) != bb2))
2813 continue;
2815 /* Check the mode of the arguments to be sure a conditional move
2816 can be generated for it. */
2817 if (optab_handler (movcc_optab, TYPE_MODE (TREE_TYPE (arg1)))
2818 == CODE_FOR_nothing)
2819 continue;
2821 /* Both statements must be assignments whose RHS is a COMPONENT_REF. */
2822 if (!gimple_assign_single_p (def1)
2823 || !gimple_assign_single_p (def2)
2824 || gimple_has_volatile_ops (def1)
2825 || gimple_has_volatile_ops (def2))
2826 continue;
2828 ref1 = gimple_assign_rhs1 (def1);
2829 ref2 = gimple_assign_rhs1 (def2);
2831 if (TREE_CODE (ref1) != COMPONENT_REF
2832 || TREE_CODE (ref2) != COMPONENT_REF)
2833 continue;
2835 /* The zeroth operand of the two component references must be
2836 identical. It is not sufficient to compare get_base_address of
2837 the two references, because this could allow for different
2838 elements of the same array in the two trees. It is not safe to
2839 assume that the existence of one array element implies the
2840 existence of a different one. */
2841 if (!operand_equal_p (TREE_OPERAND (ref1, 0), TREE_OPERAND (ref2, 0), 0))
2842 continue;
2844 field1 = TREE_OPERAND (ref1, 1);
2845 field2 = TREE_OPERAND (ref2, 1);
2847 /* Check for field adjacency, and ensure field1 comes first. */
2848 for (next = DECL_CHAIN (field1);
2849 next && TREE_CODE (next) != FIELD_DECL;
2850 next = DECL_CHAIN (next))
2851 ;
2853 if (next != field2)
2855 for (next = DECL_CHAIN (field2);
2856 next && TREE_CODE (next) != FIELD_DECL;
2857 next = DECL_CHAIN (next))
2858 ;
2860 if (next != field1)
2861 continue;
2863 std::swap (field1, field2);
2864 std::swap (def1, def2);
2867 bb_for_def1 = gimple_bb (def1);
2868 bb_for_def2 = gimple_bb (def2);
2870 /* Check for proper alignment of the first field. */
2871 tree_offset1 = bit_position (field1);
2872 tree_offset2 = bit_position (field2);
2873 tree_size2 = DECL_SIZE (field2);
2875 if (!tree_fits_uhwi_p (tree_offset1)
2876 || !tree_fits_uhwi_p (tree_offset2)
2877 || !tree_fits_uhwi_p (tree_size2))
2878 continue;
2880 offset1 = tree_to_uhwi (tree_offset1);
2881 offset2 = tree_to_uhwi (tree_offset2);
2882 size2 = tree_to_uhwi (tree_size2);
2883 align1 = DECL_ALIGN (field1) % param_align_bits;
2885 if (offset1 % BITS_PER_UNIT != 0)
2886 continue;
2888 /* For profitability, the two field references should fit within
2889 a single cache line. */
2890 if (align1 + offset2 - offset1 + size2 > param_align_bits)
2891 continue;
2893 /* The two expressions cannot be dependent upon vdefs defined
2894 in bb1/bb2. */
2895 if (local_mem_dependence (def1, bb_for_def1)
2896 || local_mem_dependence (def2, bb_for_def2))
2897 continue;
2899 /* The conditions are satisfied; hoist the loads from bb1 and bb2 into
2900 bb0. We hoist the first one first so that a cache miss is handled
2901 efficiently regardless of hardware cache-fill policy. */
2902 gsi2 = gsi_for_stmt (def1);
2903 gsi_move_to_bb_end (&gsi2, bb0);
2904 gsi2 = gsi_for_stmt (def2);
2905 gsi_move_to_bb_end (&gsi2, bb0);
2907 if (dump_file && (dump_flags & TDF_DETAILS))
2909 fprintf (dump_file,
2910 "\nHoisting adjacent loads from %d and %d into %d: \n",
2911 bb_for_def1->index, bb_for_def2->index, bb0->index);
2912 print_gimple_stmt (dump_file, def1, 0, TDF_VOPS|TDF_MEMSYMS);
2913 print_gimple_stmt (dump_file, def2, 0, TDF_VOPS|TDF_MEMSYMS);
2918 /* Determine whether we should attempt to hoist adjacent loads out of
2919 diamond patterns in pass_phiopt. Always hoist loads if
2920 -fhoist-adjacent-loads is specified and the target machine has
2921 both a conditional move instruction and a defined cache line size. */
2923 static bool
2924 gate_hoist_loads (void)
2926 return (flag_hoist_adjacent_loads == 1
2927 && param_l1_cache_line_size
2928 && HAVE_conditional_move);
2931 /* This pass tries to replace an if-then-else block with an
2932 assignment. We have four kinds of transformations. Some of these
2933 transformations are also performed by the ifcvt RTL optimizer.
2935 Conditional Replacement
2936 -----------------------
2938 This transformation, implemented in conditional_replacement,
2939 replaces
2941 bb0:
2942 if (cond) goto bb2; else goto bb1;
2943 bb1:
2944 bb2:
2945 x = PHI <0 (bb1), 1 (bb0), ...>;
2947 with
2949 bb0:
2950 x' = cond;
2951 goto bb2;
2952 bb2:
2953 x = PHI <x' (bb0), ...>;
2955 We remove bb1 as it becomes unreachable. This occurs often due to
2956 gimplification of conditionals.
2958 Value Replacement
2959 -----------------
2961 This transformation, implemented in value_replacement, replaces
2963 bb0:
2964 if (a != b) goto bb2; else goto bb1;
2965 bb1:
2966 bb2:
2967 x = PHI <a (bb1), b (bb0), ...>;
2969 with
2971 bb0:
2972 bb2:
2973 x = PHI <b (bb0), ...>;
2975 This opportunity can sometimes occur as a result of other
2976 optimizations.
2979 Another case caught by value replacement looks like this:
2981 bb0:
2982 t1 = a == CONST;
2983 t2 = b > c;
2984 t3 = t1 & t2;
2985 if (t3 != 0) goto bb1; else goto bb2;
2986 bb1:
2987 bb2:
2988 x = PHI <CONST, a>
2990 Gets replaced with:
2991 bb0:
2992 bb2:
2993 t1 = a == CONST;
2994 t2 = b > c;
2995 t3 = t1 & t2;
2996 x = a;
2998 ABS Replacement
2999 ---------------
3001 This transformation, implemented in abs_replacement, replaces
3003 bb0:
3004 if (a >= 0) goto bb2; else goto bb1;
3005 bb1:
3006 x = -a;
3007 bb2:
3008 x = PHI <x (bb1), a (bb0), ...>;
3010 with
3012 bb0:
3013 x' = ABS_EXPR< a >;
3014 bb2:
3015 x = PHI <x' (bb0), ...>;
3017 MIN/MAX Replacement
3018 -------------------
3020 This transformation, implemented in minmax_replacement, replaces
3022 bb0:
3023 if (a <= b) goto bb2; else goto bb1;
3024 bb1:
3025 bb2:
3026 x = PHI <b (bb1), a (bb0), ...>;
3028 with
3030 bb0:
3031 x' = MIN_EXPR (a, b)
3032 bb2:
3033 x = PHI <x' (bb0), ...>;
3035 A similar transformation is done for MAX_EXPR.
3038 This pass also performs a fifth transformation of a slightly different
3039 flavor.
3041 Factor conversion in COND_EXPR
3042 ------------------------------
3044 This transformation factors the conversion out of COND_EXPR with
3045 factor_out_conditional_conversion.
3047 For example:
3048 if (a <= CST) goto <bb 3>; else goto <bb 4>;
3049 <bb 3>:
3050 tmp = (int) a;
3051 <bb 4>:
3052 tmp = PHI <tmp, CST>
3054 Into:
3055 if (a <= CST) goto <bb 3>; else goto <bb 4>;
3056 <bb 3>:
3057 <bb 4>:
3058 a = PHI <a, CST>
3059 tmp = (int) a;
3061 Adjacent Load Hoisting
3062 ----------------------
3064 This transformation replaces
3066 bb0:
3067 if (...) goto bb2; else goto bb1;
3068 bb1:
3069 x1 = (<expr>).field1;
3070 goto bb3;
3071 bb2:
3072 x2 = (<expr>).field2;
3073 bb3:
3074 # x = PHI <x1, x2>;
3076 with
3078 bb0:
3079 x1 = (<expr>).field1;
3080 x2 = (<expr>).field2;
3081 if (...) goto bb2; else goto bb1;
3082 bb1:
3083 goto bb3;
3084 bb2:
3085 bb3:
3086 # x = PHI <x1, x2>;
3088 The purpose of this transformation is to enable generation of conditional
3089 move instructions such as Intel CMOVE or PowerPC ISEL. Because one of
3090 the loads is speculative, the transformation is restricted to very
3091 specific cases to avoid introducing a page fault. We are looking for
3092 the common idiom:
3094 if (...)
3095 x = y->left;
3096 else
3097 x = y->right;
3099 where left and right are typically adjacent pointers in a tree structure. */
3101 namespace {
3103 const pass_data pass_data_phiopt =
3105 GIMPLE_PASS, /* type */
3106 "phiopt", /* name */
3107 OPTGROUP_NONE, /* optinfo_flags */
3108 TV_TREE_PHIOPT, /* tv_id */
3109 ( PROP_cfg | PROP_ssa ), /* properties_required */
3110 0, /* properties_provided */
3111 0, /* properties_destroyed */
3112 0, /* todo_flags_start */
3113 0, /* todo_flags_finish */
3116 class pass_phiopt : public gimple_opt_pass
3118 public:
3119 pass_phiopt (gcc::context *ctxt)
3120 : gimple_opt_pass (pass_data_phiopt, ctxt), early_p (false)
3123 /* opt_pass methods: */
3124 opt_pass * clone () { return new pass_phiopt (m_ctxt); }
3125 void set_pass_param (unsigned n, bool param)
3127 gcc_assert (n == 0);
3128 early_p = param;
3130 virtual bool gate (function *) { return flag_ssa_phiopt; }
3131 virtual unsigned int execute (function *)
3133 return tree_ssa_phiopt_worker (false,
3134 !early_p ? gate_hoist_loads () : false,
3135 early_p);
3138 private:
3139 bool early_p;
3140 }; // class pass_phiopt
3142 } // anon namespace
3144 gimple_opt_pass *
3145 make_pass_phiopt (gcc::context *ctxt)
3147 return new pass_phiopt (ctxt);
3150 namespace {
3152 const pass_data pass_data_cselim =
3154 GIMPLE_PASS, /* type */
3155 "cselim", /* name */
3156 OPTGROUP_NONE, /* optinfo_flags */
3157 TV_TREE_PHIOPT, /* tv_id */
3158 ( PROP_cfg | PROP_ssa ), /* properties_required */
3159 0, /* properties_provided */
3160 0, /* properties_destroyed */
3161 0, /* todo_flags_start */
3162 0, /* todo_flags_finish */
3165 class pass_cselim : public gimple_opt_pass
3167 public:
3168 pass_cselim (gcc::context *ctxt)
3169 : gimple_opt_pass (pass_data_cselim, ctxt)
3172 /* opt_pass methods: */
3173 virtual bool gate (function *) { return flag_tree_cselim; }
3174 virtual unsigned int execute (function *) { return tree_ssa_cs_elim (); }
3176 }; // class pass_cselim
3178 } // anon namespace
3180 gimple_opt_pass *
3181 make_pass_cselim (gcc::context *ctxt)
3183 return new pass_cselim (ctxt);