gcc/tree-ssa-phiopt.c

/* Optimization of PHI nodes by converting them into straightline code.
   Copyright (C) 2004-2020 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "insn-codes.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "tree-pass.h"
#include "ssa.h"
#include "optabs-tree.h"
#include "insn-config.h"
#include "gimple-pretty-print.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "cfganal.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "tree-cfg.h"
#include "tree-dfa.h"
#include "domwalk.h"
#include "cfgloop.h"
#include "tree-data-ref.h"
#include "tree-scalar-evolution.h"
#include "tree-inline.h"
#include "case-cfn-macros.h"
#include "tree-eh.h"
#include "gimple-fold.h"
#include "internal-fn.h"

static unsigned int tree_ssa_phiopt_worker (bool, bool, bool);
static bool two_value_replacement (basic_block, basic_block, edge, gphi *,
				   tree, tree);
static bool conditional_replacement (basic_block, basic_block,
				     edge, edge, gphi *, tree, tree);
static gphi *factor_out_conditional_conversion (edge, edge, gphi *, tree, tree,
						gimple *);
static int value_replacement (basic_block, basic_block,
			      edge, edge, gimple *, tree, tree);
static bool minmax_replacement (basic_block, basic_block,
				edge, edge, gimple *, tree, tree);
static bool abs_replacement (basic_block, basic_block,
			     edge, edge, gimple *, tree, tree);
static bool cond_removal_in_popcount_clz_ctz_pattern (basic_block, basic_block,
						      edge, edge, gimple *,
						      tree, tree);
static bool cond_store_replacement (basic_block, basic_block, edge, edge,
				    hash_set<tree> *);
static bool cond_if_else_store_replacement (basic_block, basic_block, basic_block);
static hash_set<tree> * get_non_trapping ();
static void replace_phi_edge_with_variable (basic_block, edge, gimple *, tree);
static void hoist_adjacent_loads (basic_block, basic_block,
				  basic_block, basic_block);
static bool gate_hoist_loads (void);

/* This pass tries to transform conditional stores into unconditional
   ones, enabling further simplifications with the simpler then and else
   blocks.  In particular it replaces this:

     bb0:
       if (cond) goto bb2; else goto bb1;
     bb1:
       *p = RHS;
     bb2:

   with

     bb0:
       if (cond) goto bb1; else goto bb2;
     bb1:
       condtmp' = *p;
     bb2:
       condtmp = PHI <RHS, condtmp'>
       *p = condtmp;

   This transformation can only be done under several constraints,
   documented below.  It also replaces:

     bb0:
       if (cond) goto bb2; else goto bb1;
     bb1:
       *p = RHS1;
       goto bb3;
     bb2:
       *p = RHS2;
     bb3:

   with

     bb0:
       if (cond) goto bb3; else goto bb1;
     bb1:
     bb3:
       condtmp = PHI <RHS1, RHS2>
       *p = condtmp;  */

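/* As a source-level illustration (not part of the pass itself), the
   first shape corresponds, modulo the polarity of the condition, to
   rewriting

     if (cond)
       *p = x;

   as

     tmp = cond ? x : *p;
     *p = tmp;

   which makes the store unconditional at the cost of an extra load.  */
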
static unsigned int
tree_ssa_cs_elim (void)
{
  unsigned todo;
  /* ???  We are not interested in loop related info, but the following
     will create it, ICEing as we didn't init loops with pre-headers.
     An interfacing issue of find_data_references_in_bb.  */
  loop_optimizer_init (LOOPS_NORMAL);
  scev_initialize ();
  todo = tree_ssa_phiopt_worker (true, false, false);
  scev_finalize ();
  loop_optimizer_finalize ();
  return todo;
}

/* Return the singleton PHI in the SEQ of PHIs for edges E0 and E1.  */

static gphi *
single_non_singleton_phi_for_edges (gimple_seq seq, edge e0, edge e1)
{
  gimple_stmt_iterator i;
  gphi *phi = NULL;
  if (gimple_seq_singleton_p (seq))
    return as_a <gphi *> (gsi_stmt (gsi_start (seq)));
  for (i = gsi_start (seq); !gsi_end_p (i); gsi_next (&i))
    {
      gphi *p = as_a <gphi *> (gsi_stmt (i));
      /* If the PHI arguments are equal then we can skip this PHI.  */
      if (operand_equal_for_phi_arg_p (gimple_phi_arg_def (p, e0->dest_idx),
				       gimple_phi_arg_def (p, e1->dest_idx)))
	continue;

      /* If we already found a PHI whose two edge arguments differ, then
	 there is no singleton among these PHIs; return NULL.  */
      if (phi)
	return NULL;

      phi = p;
    }
  return phi;
}

/* The core routine of conditional store replacement and normal
   phi optimizations.  Both share much of the infrastructure in how
   to match applicable basic block patterns.  DO_STORE_ELIM is true
   when we want to do conditional store replacement, false otherwise.
   DO_HOIST_LOADS is true when we want to hoist adjacent loads out
   of diamond control flow patterns, false otherwise.  */

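/* Illustrative shapes (terminology used below, not additional checks):
   a "triangle" is bb0 -> bb1 -> bb2 plus a direct edge bb0 -> bb2,
   which is what the PHI replacements match; a "diamond" is
   bb0 -> {bb1, bb2} -> bb3, which is what the if/else store
   replacement and adjacent-load hoisting match.  */
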
static unsigned int
tree_ssa_phiopt_worker (bool do_store_elim, bool do_hoist_loads, bool early_p)
{
  basic_block bb;
  basic_block *bb_order;
  unsigned n, i;
  bool cfgchanged = false;
  hash_set<tree> *nontrap = 0;

  if (do_store_elim)
    /* Calculate the set of non-trapping memory accesses.  */
    nontrap = get_non_trapping ();

  /* Search every basic block for COND_EXPR we may be able to optimize.

     We walk the blocks in order that guarantees that a block with
     a single predecessor is processed before the predecessor.
     This ensures that we collapse inner ifs before visiting the
     outer ones, and also that we do not try to visit a removed
     block.  */
  bb_order = single_pred_before_succ_order ();
  n = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS;

  for (i = 0; i < n; i++)
    {
      gimple *cond_stmt;
      gphi *phi;
      basic_block bb1, bb2;
      edge e1, e2;
      tree arg0, arg1;

      bb = bb_order[i];

      cond_stmt = last_stmt (bb);
      /* Check to see if the last statement is a GIMPLE_COND.  */
      if (!cond_stmt
	  || gimple_code (cond_stmt) != GIMPLE_COND)
	continue;

      e1 = EDGE_SUCC (bb, 0);
      bb1 = e1->dest;
      e2 = EDGE_SUCC (bb, 1);
      bb2 = e2->dest;

      /* We cannot do the optimization on abnormal edges.  */
      if ((e1->flags & EDGE_ABNORMAL) != 0
	  || (e2->flags & EDGE_ABNORMAL) != 0)
	continue;

      /* If bb1 has no successors, bb2 does not exist, or bb2 has no
	 successors, there is nothing to do.  */
      if (EDGE_COUNT (bb1->succs) == 0
	  || bb2 == NULL
	  || EDGE_COUNT (bb2->succs) == 0)
	continue;

      /* Find the bb which is the fall through to the other.  */
      if (EDGE_SUCC (bb1, 0)->dest == bb2)
	;
      else if (EDGE_SUCC (bb2, 0)->dest == bb1)
	{
	  std::swap (bb1, bb2);
	  std::swap (e1, e2);
	}
      else if (do_store_elim
	       && EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest)
	{
	  basic_block bb3 = EDGE_SUCC (bb1, 0)->dest;

	  if (!single_succ_p (bb1)
	      || (EDGE_SUCC (bb1, 0)->flags & EDGE_FALLTHRU) == 0
	      || !single_succ_p (bb2)
	      || (EDGE_SUCC (bb2, 0)->flags & EDGE_FALLTHRU) == 0
	      || EDGE_COUNT (bb3->preds) != 2)
	    continue;
	  if (cond_if_else_store_replacement (bb1, bb2, bb3))
	    cfgchanged = true;
	  continue;
	}
      else if (do_hoist_loads
	       && EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest)
	{
	  basic_block bb3 = EDGE_SUCC (bb1, 0)->dest;

	  if (!FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (cond_stmt)))
	      && single_succ_p (bb1)
	      && single_succ_p (bb2)
	      && single_pred_p (bb1)
	      && single_pred_p (bb2)
	      && EDGE_COUNT (bb->succs) == 2
	      && EDGE_COUNT (bb3->preds) == 2
	      /* If one edge or the other is dominant, a conditional move
		 is likely to perform worse than the well-predicted branch.  */
	      && !predictable_edge_p (EDGE_SUCC (bb, 0))
	      && !predictable_edge_p (EDGE_SUCC (bb, 1)))
	    hoist_adjacent_loads (bb, bb1, bb2, bb3);
	  continue;
	}
      else
	continue;

      e1 = EDGE_SUCC (bb1, 0);

      /* Make sure that bb1 is just a fall through.  */
      if (!single_succ_p (bb1)
	  || (e1->flags & EDGE_FALLTHRU) == 0)
	continue;

      /* Also make sure that bb1 only has one predecessor and that it
	 is bb.  */
      if (!single_pred_p (bb1)
	  || single_pred (bb1) != bb)
	continue;

      if (do_store_elim)
	{
	  /* bb1 is the middle block, bb2 the join block, bb the split block,
	     e1 the fallthrough edge from bb1 to bb2.  We can't do the
	     optimization if the join block has more than two predecessors.  */
	  if (EDGE_COUNT (bb2->preds) > 2)
	    continue;
	  if (cond_store_replacement (bb1, bb2, e1, e2, nontrap))
	    cfgchanged = true;
	}
      else
	{
	  gimple_seq phis = phi_nodes (bb2);
	  gimple_stmt_iterator gsi;
	  bool candorest = true;

	  /* Value replacement can work with more than one PHI
	     so try that first.  */
	  if (!early_p)
	    for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
	      {
		phi = as_a <gphi *> (gsi_stmt (gsi));
		arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
		arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
		if (value_replacement (bb, bb1, e1, e2, phi, arg0, arg1) == 2)
		  {
		    candorest = false;
		    cfgchanged = true;
		    break;
		  }
	      }

	  if (!candorest)
	    continue;

	  phi = single_non_singleton_phi_for_edges (phis, e1, e2);
	  if (!phi)
	    continue;

	  arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
	  arg1 = gimple_phi_arg_def (phi, e2->dest_idx);

	  /* Something is wrong if we cannot find the arguments in the PHI
	     node.  */
	  gcc_assert (arg0 != NULL_TREE && arg1 != NULL_TREE);

	  gphi *newphi = factor_out_conditional_conversion (e1, e2, phi,
							    arg0, arg1,
							    cond_stmt);
	  if (newphi != NULL)
	    {
	      phi = newphi;
	      /* factor_out_conditional_conversion may create a new PHI in
		 BB2 and eliminate an existing PHI in BB2.  Recompute values
		 that may be affected by that change.  */
	      arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
	      arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
	      gcc_assert (arg0 != NULL_TREE && arg1 != NULL_TREE);
	    }

	  /* Do the replacement of conditional if it can be done.  */
	  if (two_value_replacement (bb, bb1, e2, phi, arg0, arg1))
	    cfgchanged = true;
	  else if (!early_p
		   && conditional_replacement (bb, bb1, e1, e2, phi,
					       arg0, arg1))
	    cfgchanged = true;
	  else if (abs_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
	    cfgchanged = true;
	  else if (!early_p
		   && cond_removal_in_popcount_clz_ctz_pattern (bb, bb1, e1,
								e2, phi, arg0,
								arg1))
	    cfgchanged = true;
	  else if (minmax_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
	    cfgchanged = true;
	}
    }

  free (bb_order);

  if (do_store_elim)
    delete nontrap;
  /* If the CFG has changed, we should cleanup the CFG.  */
  if (cfgchanged && do_store_elim)
    {
      /* In cond-store replacement we have added some loads on edges
	 and new VOPS (as we moved the store, and created a load).  */
      gsi_commit_edge_inserts ();
      return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
    }
  else if (cfgchanged)
    return TODO_cleanup_cfg;
  return 0;
}

/* Replace PHI node element whose edge is E in block BB with variable NEW.
   Remove the edge from COND_BLOCK which does not lead to BB (COND_BLOCK
   is known to have two edges, one of which must reach BB).  */

static void
replace_phi_edge_with_variable (basic_block cond_block,
				edge e, gimple *phi, tree new_tree)
{
  basic_block bb = gimple_bb (phi);
  basic_block block_to_remove;
  gimple_stmt_iterator gsi;

  /* Change the PHI argument to new.  */
  SET_USE (PHI_ARG_DEF_PTR (phi, e->dest_idx), new_tree);

  /* Remove the empty basic block.  */
  if (EDGE_SUCC (cond_block, 0)->dest == bb)
    {
      EDGE_SUCC (cond_block, 0)->flags |= EDGE_FALLTHRU;
      EDGE_SUCC (cond_block, 0)->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
      EDGE_SUCC (cond_block, 0)->probability = profile_probability::always ();

      block_to_remove = EDGE_SUCC (cond_block, 1)->dest;
    }
  else
    {
      EDGE_SUCC (cond_block, 1)->flags |= EDGE_FALLTHRU;
      EDGE_SUCC (cond_block, 1)->flags
	&= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
      EDGE_SUCC (cond_block, 1)->probability = profile_probability::always ();

      block_to_remove = EDGE_SUCC (cond_block, 0)->dest;
    }
  delete_basic_block (block_to_remove);

  /* Eliminate the COND_EXPR at the end of COND_BLOCK.  */
  gsi = gsi_last_bb (cond_block);
  gsi_remove (&gsi, true);

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file,
	     "COND_EXPR in block %d and PHI in block %d converted to straightline code.\n",
	     cond_block->index,
	     bb->index);
}

/* PR66726: Factor conversion out of COND_EXPR.  If the arguments of the PHI
   stmt are CONVERT_STMT, factor out the conversion and perform the conversion
   to the result of PHI stmt.  COND_STMT is the controlling predicate.
   Return the newly-created PHI, if any.  */

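/* For instance, a PHI like

     # t_1 = PHI <(int) a_2, 5>

   whose first argument is defined by a cast can become

     # t'_3 = PHI <a_2, 5>
     t_1 = (int) t'_3;

   moving the single conversion after the merge point (a sketch; the
   SSA names are illustrative).  */
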
static gphi *
factor_out_conditional_conversion (edge e0, edge e1, gphi *phi,
				   tree arg0, tree arg1, gimple *cond_stmt)
{
  gimple *arg0_def_stmt = NULL, *arg1_def_stmt = NULL, *new_stmt;
  tree new_arg0 = NULL_TREE, new_arg1 = NULL_TREE;
  tree temp, result;
  gphi *newphi;
  gimple_stmt_iterator gsi, gsi_for_def;
  location_t locus = gimple_location (phi);
  enum tree_code convert_code;

  /* Handle only PHI statements with two arguments.  TODO: If all
     other arguments to PHI are INTEGER_CST or if their defining
     statement have the same unary operation, we can handle more
     than two arguments too.  */
  if (gimple_phi_num_args (phi) != 2)
    return NULL;

  /* First canonicalize to simplify tests.  */
  if (TREE_CODE (arg0) != SSA_NAME)
    {
      std::swap (arg0, arg1);
      std::swap (e0, e1);
    }

  if (TREE_CODE (arg0) != SSA_NAME
      || (TREE_CODE (arg1) != SSA_NAME
	  && TREE_CODE (arg1) != INTEGER_CST))
    return NULL;

  /* Check if arg0 is an SSA_NAME and the stmt which defines arg0 is
     a conversion.  */
  arg0_def_stmt = SSA_NAME_DEF_STMT (arg0);
  if (!gimple_assign_cast_p (arg0_def_stmt))
    return NULL;

  /* Use the RHS as new_arg0.  */
  convert_code = gimple_assign_rhs_code (arg0_def_stmt);
  new_arg0 = gimple_assign_rhs1 (arg0_def_stmt);
  if (convert_code == VIEW_CONVERT_EXPR)
    {
      new_arg0 = TREE_OPERAND (new_arg0, 0);
      if (!is_gimple_reg_type (TREE_TYPE (new_arg0)))
	return NULL;
    }

  if (TREE_CODE (arg1) == SSA_NAME)
    {
      /* Check if arg1 is an SSA_NAME and the stmt which defines arg1
	 is a conversion.  */
      arg1_def_stmt = SSA_NAME_DEF_STMT (arg1);
      if (!is_gimple_assign (arg1_def_stmt)
	  || gimple_assign_rhs_code (arg1_def_stmt) != convert_code)
	return NULL;

      /* Use the RHS as new_arg1.  */
      new_arg1 = gimple_assign_rhs1 (arg1_def_stmt);
      if (convert_code == VIEW_CONVERT_EXPR)
	new_arg1 = TREE_OPERAND (new_arg1, 0);
    }
  else
    {
      /* If arg1 is an INTEGER_CST, fold it to new type.  */
      if (INTEGRAL_TYPE_P (TREE_TYPE (new_arg0))
	  && int_fits_type_p (arg1, TREE_TYPE (new_arg0)))
	{
	  if (gimple_assign_cast_p (arg0_def_stmt))
	    {
	      /* For the INTEGER_CST case, we are just moving the
		 conversion from one place to another, which can often
		 hurt as the conversion moves further away from the
		 statement that computes the value.  So, perform this
		 only if new_arg0 is an operand of COND_STMT, or
		 if arg0_def_stmt is the only non-debug stmt in
		 its basic block, because then it is possible this
		 could enable further optimizations (minmax replacement
		 etc.).  See PR71016.  */
	      if (new_arg0 != gimple_cond_lhs (cond_stmt)
		  && new_arg0 != gimple_cond_rhs (cond_stmt)
		  && gimple_bb (arg0_def_stmt) == e0->src)
		{
		  gsi = gsi_for_stmt (arg0_def_stmt);
		  gsi_prev_nondebug (&gsi);
		  if (!gsi_end_p (gsi))
		    {
		      if (gassign *assign
			    = dyn_cast <gassign *> (gsi_stmt (gsi)))
			{
			  tree lhs = gimple_assign_lhs (assign);
			  enum tree_code ass_code
			    = gimple_assign_rhs_code (assign);
			  if (ass_code != MAX_EXPR && ass_code != MIN_EXPR)
			    return NULL;
			  if (lhs != gimple_assign_rhs1 (arg0_def_stmt))
			    return NULL;
			  gsi_prev_nondebug (&gsi);
			  if (!gsi_end_p (gsi))
			    return NULL;
			}
		      else
			return NULL;
		    }
		  gsi = gsi_for_stmt (arg0_def_stmt);
		  gsi_next_nondebug (&gsi);
		  if (!gsi_end_p (gsi))
		    return NULL;
		}
	      new_arg1 = fold_convert (TREE_TYPE (new_arg0), arg1);
	    }
	  else
	    return NULL;
	}
      else
	return NULL;
    }

  /* If arg0/arg1 have > 1 use, then this transformation actually increases
     the number of expressions evaluated at runtime.  */
  if (!has_single_use (arg0)
      || (arg1_def_stmt && !has_single_use (arg1)))
    return NULL;

  /* If types of new_arg0 and new_arg1 are different bailout.  */
  if (!types_compatible_p (TREE_TYPE (new_arg0), TREE_TYPE (new_arg1)))
    return NULL;

  /* Create a new PHI stmt.  */
  result = PHI_RESULT (phi);
  temp = make_ssa_name (TREE_TYPE (new_arg0), NULL);
  newphi = create_phi_node (temp, gimple_bb (phi));

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "PHI ");
      print_generic_expr (dump_file, gimple_phi_result (phi));
      fprintf (dump_file,
	       " changed to factor conversion out from COND_EXPR.\n");
      fprintf (dump_file, "New stmt with CAST that defines ");
      print_generic_expr (dump_file, result);
      fprintf (dump_file, ".\n");
    }

  /* Remove the old cast(s) that has single use.  */
  gsi_for_def = gsi_for_stmt (arg0_def_stmt);
  gsi_remove (&gsi_for_def, true);
  release_defs (arg0_def_stmt);

  if (arg1_def_stmt)
    {
      gsi_for_def = gsi_for_stmt (arg1_def_stmt);
      gsi_remove (&gsi_for_def, true);
      release_defs (arg1_def_stmt);
    }

  add_phi_arg (newphi, new_arg0, e0, locus);
  add_phi_arg (newphi, new_arg1, e1, locus);

  /* Create the conversion stmt and insert it.  */
  if (convert_code == VIEW_CONVERT_EXPR)
    {
      temp = fold_build1 (VIEW_CONVERT_EXPR, TREE_TYPE (result), temp);
      new_stmt = gimple_build_assign (result, temp);
    }
  else
    new_stmt = gimple_build_assign (result, convert_code, temp);
  gsi = gsi_after_labels (gimple_bb (phi));
  gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);

  /* Remove the original PHI stmt.  */
  gsi = gsi_for_stmt (phi);
  gsi_remove (&gsi, true);
  return newphi;
}

/* Optimize
   # x_5 in range [cst1, cst2] where cst2 = cst1 + 1
   if (x_5 op cstN) # where op is == or != and N is 1 or 2
     goto bb3;
   else
     goto bb4;
   bb3:
   bb4:
   # r_6 = PHI<cst3(2), cst4(3)> # where cst3 == cst4 + 1 or cst4 == cst3 + 1

   to r_6 = x_5 + (min (cst3, cst4) - cst1) or
   r_6 = (min (cst3, cst4) + cst1) - x_5 depending on op, N and which
   of cst3 and cst4 is smaller.  */

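/* A concrete instance: with x_5 known to be in [0, 1],

     r_6 = (x_5 == 0) ? 3 : 4

   becomes r_6 = x_5 + 3, removing the branch entirely (names are
   illustrative).  */
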
static bool
two_value_replacement (basic_block cond_bb, basic_block middle_bb,
		       edge e1, gphi *phi, tree arg0, tree arg1)
{
  /* Only look for adjacent integer constants.  */
  if (!INTEGRAL_TYPE_P (TREE_TYPE (arg0))
      || !INTEGRAL_TYPE_P (TREE_TYPE (arg1))
      || TREE_CODE (arg0) != INTEGER_CST
      || TREE_CODE (arg1) != INTEGER_CST
      || (tree_int_cst_lt (arg0, arg1)
	  ? wi::to_widest (arg0) + 1 != wi::to_widest (arg1)
	  : wi::to_widest (arg1) + 1 != wi::to_widest (arg0)))
    return false;

  if (!empty_block_p (middle_bb))
    return false;

  gimple *stmt = last_stmt (cond_bb);
  tree lhs = gimple_cond_lhs (stmt);
  tree rhs = gimple_cond_rhs (stmt);

  if (TREE_CODE (lhs) != SSA_NAME
      || !INTEGRAL_TYPE_P (TREE_TYPE (lhs))
      || TREE_CODE (TREE_TYPE (lhs)) == BOOLEAN_TYPE
      || TREE_CODE (rhs) != INTEGER_CST)
    return false;

  switch (gimple_cond_code (stmt))
    {
    case EQ_EXPR:
    case NE_EXPR:
      break;
    default:
      return false;
    }

  wide_int min, max;
  if (get_range_info (lhs, &min, &max) != VR_RANGE
      || min + 1 != max
      || (wi::to_wide (rhs) != min
	  && wi::to_wide (rhs) != max))
    return false;

  /* We need to know which is the true edge and which is the false
     edge so that we know when to invert the condition below.  */
  edge true_edge, false_edge;
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
  if ((gimple_cond_code (stmt) == EQ_EXPR)
      ^ (wi::to_wide (rhs) == max)
      ^ (e1 == false_edge))
    std::swap (arg0, arg1);

  tree type;
  if (TYPE_PRECISION (TREE_TYPE (lhs)) == TYPE_PRECISION (TREE_TYPE (arg0)))
    {
      /* Avoid performing the arithmetics in bool type which has different
	 semantics, otherwise prefer unsigned types from the two with
	 the same precision.  */
      if (TREE_CODE (TREE_TYPE (arg0)) == BOOLEAN_TYPE
	  || !TYPE_UNSIGNED (TREE_TYPE (arg0)))
	type = TREE_TYPE (lhs);
      else
	type = TREE_TYPE (arg0);
    }
  else if (TYPE_PRECISION (TREE_TYPE (lhs)) > TYPE_PRECISION (TREE_TYPE (arg0)))
    type = TREE_TYPE (lhs);
  else
    type = TREE_TYPE (arg0);

  min = wide_int::from (min, TYPE_PRECISION (type),
			TYPE_SIGN (TREE_TYPE (lhs)));
  wide_int a = wide_int::from (wi::to_wide (arg0), TYPE_PRECISION (type),
			       TYPE_SIGN (TREE_TYPE (arg0)));
  enum tree_code code;
  wi::overflow_type ovf;
  if (tree_int_cst_lt (arg0, arg1))
    {
      code = PLUS_EXPR;
      a -= min;
      if (!TYPE_UNSIGNED (type))
	{
	  /* lhs is known to be in range [min, min+1] and we want to add a
	     to it.  Check if that operation can overflow for those 2 values
	     and if yes, force unsigned type.  */
	  wi::add (min + (wi::neg_p (a) ? 0 : 1), a, SIGNED, &ovf);
	  if (ovf)
	    type = unsigned_type_for (type);
	}
    }
  else
    {
      code = MINUS_EXPR;
      a += min;
      if (!TYPE_UNSIGNED (type))
	{
	  /* lhs is known to be in range [min, min+1] and we want to subtract
	     it from a.  Check if that operation can overflow for those 2
	     values and if yes, force unsigned type.  */
	  wi::sub (a, min + (wi::neg_p (min) ? 0 : 1), SIGNED, &ovf);
	  if (ovf)
	    type = unsigned_type_for (type);
	}
    }

  tree arg = wide_int_to_tree (type, a);
  gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
  if (!useless_type_conversion_p (type, TREE_TYPE (lhs)))
    lhs = gimplify_build1 (&gsi, NOP_EXPR, type, lhs);
  tree new_rhs;
  if (code == PLUS_EXPR)
    new_rhs = gimplify_build2 (&gsi, PLUS_EXPR, type, lhs, arg);
  else
    new_rhs = gimplify_build2 (&gsi, MINUS_EXPR, type, arg, lhs);
  if (!useless_type_conversion_p (TREE_TYPE (arg0), type))
    new_rhs = gimplify_build1 (&gsi, NOP_EXPR, TREE_TYPE (arg0), new_rhs);

  replace_phi_edge_with_variable (cond_bb, e1, phi, new_rhs);

  /* Note that we optimized this PHI.  */
  return true;
}

/* The function conditional_replacement does the main work of doing the
   conditional replacement.  Return true if the replacement is done.
   Otherwise return false.
   BB is the basic block where the replacement is going to be done.  ARG0
   is argument 0 from PHI.  Likewise for ARG1.  */

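/* For example (illustrative source forms):

     r = cond ? 1 : 0;   becomes  r = cond;
     r = cond ? 0 : 1;   becomes  r = !cond;
     r = cond ? -1 : 0;  becomes  r = -(type of r) cond;
     r = cond ? 8 : 0;   becomes  r = (type of r) cond << 3;  */
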
static bool
conditional_replacement (basic_block cond_bb, basic_block middle_bb,
			 edge e0, edge e1, gphi *phi,
			 tree arg0, tree arg1)
{
  tree result;
  gimple *stmt;
  gassign *new_stmt;
  tree cond;
  gimple_stmt_iterator gsi;
  edge true_edge, false_edge;
  tree new_var, new_var2;
  bool neg = false;
  int shift = 0;
  tree nonzero_arg;

  /* FIXME: Gimplification of complex type is too hard for now.  */
  /* We aren't prepared to handle vectors either (and it is a question
     if it would be worthwhile anyway).  */
  if (!(INTEGRAL_TYPE_P (TREE_TYPE (arg0))
	|| POINTER_TYPE_P (TREE_TYPE (arg0)))
      || !(INTEGRAL_TYPE_P (TREE_TYPE (arg1))
	   || POINTER_TYPE_P (TREE_TYPE (arg1))))
    return false;

  /* If the PHI arguments are the constants 0 and 1, or 0 and -1, or
     0 and (1 << cst), then convert it to the conditional.  */
  if (integer_zerop (arg0))
    nonzero_arg = arg1;
  else if (integer_zerop (arg1))
    nonzero_arg = arg0;
  else
    return false;
  if (integer_all_onesp (nonzero_arg))
    neg = true;
  else if (integer_pow2p (nonzero_arg))
    {
      shift = tree_log2 (nonzero_arg);
      if (shift && POINTER_TYPE_P (TREE_TYPE (nonzero_arg)))
	return false;
    }
  else
    return false;

  if (!empty_block_p (middle_bb))
    return false;

  /* At this point we know we have a GIMPLE_COND with two successors.
     One successor is BB, the other successor is an empty block which
     falls through into BB.

     There is a single PHI node at the join point (BB) and its arguments
     are constants (0, 1) or (0, -1) or (0, (1 << shift)).

     So, given the condition COND, and the two PHI arguments, we can
     rewrite this PHI into non-branching code:

       dest = (COND) or dest = COND' or dest = (COND) << shift

     We use the condition as-is if the argument associated with the
     true edge has the value one or the argument associated with the
     false edge as the value zero.  Note that those conditions are not
     the same since only one of the outgoing edges from the GIMPLE_COND
     will directly reach BB and thus be associated with an argument.  */

  stmt = last_stmt (cond_bb);
  result = PHI_RESULT (phi);

  /* To handle special cases like floating point comparison, it is easier and
     less error-prone to build a tree and gimplify it on the fly though it is
     less efficient.  */
  cond = fold_build2_loc (gimple_location (stmt),
			  gimple_cond_code (stmt), boolean_type_node,
			  gimple_cond_lhs (stmt), gimple_cond_rhs (stmt));

  /* We need to know which is the true edge and which is the false
     edge so that we know when to invert the condition below.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
  if ((e0 == true_edge && integer_zerop (arg0))
      || (e0 == false_edge && !integer_zerop (arg0))
      || (e1 == true_edge && integer_zerop (arg1))
      || (e1 == false_edge && !integer_zerop (arg1)))
    cond = fold_build1_loc (gimple_location (stmt),
			    TRUTH_NOT_EXPR, TREE_TYPE (cond), cond);

  if (neg)
    {
      cond = fold_convert_loc (gimple_location (stmt),
			       TREE_TYPE (result), cond);
      cond = fold_build1_loc (gimple_location (stmt),
			      NEGATE_EXPR, TREE_TYPE (cond), cond);
    }
  else if (shift)
    {
      cond = fold_convert_loc (gimple_location (stmt),
			       TREE_TYPE (result), cond);
      cond = fold_build2_loc (gimple_location (stmt),
			      LSHIFT_EXPR, TREE_TYPE (cond), cond,
			      build_int_cst (integer_type_node, shift));
    }

  /* Insert our new statements at the end of conditional block before the
     COND_STMT.  */
  gsi = gsi_for_stmt (stmt);
  new_var = force_gimple_operand_gsi (&gsi, cond, true, NULL, true,
				      GSI_SAME_STMT);

  if (!useless_type_conversion_p (TREE_TYPE (result), TREE_TYPE (new_var)))
    {
      location_t locus_0, locus_1;

      new_var2 = make_ssa_name (TREE_TYPE (result));
      new_stmt = gimple_build_assign (new_var2, CONVERT_EXPR, new_var);
      gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
      new_var = new_var2;

      /* Set the locus to the first argument, unless it doesn't have one.  */
      locus_0 = gimple_phi_arg_location (phi, 0);
      locus_1 = gimple_phi_arg_location (phi, 1);
      if (locus_0 == UNKNOWN_LOCATION)
	locus_0 = locus_1;
      gimple_set_location (new_stmt, locus_0);
    }

  replace_phi_edge_with_variable (cond_bb, e1, phi, new_var);

  /* Note that we optimized this PHI.  */
  return true;
}

/* Update *ARG which is defined in STMT so that it contains the
   computed value if that seems profitable.  Return true if the
   statement is made dead by that rewriting.  */

static bool
jump_function_from_stmt (tree *arg, gimple *stmt)
{
  enum tree_code code = gimple_assign_rhs_code (stmt);
  if (code == ADDR_EXPR)
    {
      /* For arg = &p->i transform it to p, if possible.  */
      tree rhs1 = gimple_assign_rhs1 (stmt);
      poly_int64 offset;
      tree tem = get_addr_base_and_unit_offset (TREE_OPERAND (rhs1, 0),
						&offset);
      if (tem
	  && TREE_CODE (tem) == MEM_REF
	  && known_eq (mem_ref_offset (tem) + offset, 0))
	{
	  *arg = TREE_OPERAND (tem, 0);
	  return true;
	}
    }
  /* TODO: Much like IPA-CP jump-functions we want to handle constant
     additions symbolically here, and we'd need to update the comparison
     code that compares the arg + cst tuples in our caller.  For now the
     code above exactly handles the VEC_BASE pattern from vec.h.  */
  return false;
}

/* RHS is a source argument in a BIT_AND_EXPR which feeds a conditional
   of the form SSA_NAME NE 0.

   If RHS is fed by a simple EQ_EXPR comparison of two values, see if
   the two input values of the EQ_EXPR match arg0 and arg1.

   If so update *code and return TRUE.  Otherwise return FALSE.  */

static bool
rhs_is_fed_for_value_replacement (const_tree arg0, const_tree arg1,
				  enum tree_code *code, const_tree rhs)
{
  /* Obviously if RHS is not an SSA_NAME, we can't look at the defining
     statement.  */
  if (TREE_CODE (rhs) == SSA_NAME)
    {
      gimple *def1 = SSA_NAME_DEF_STMT (rhs);

      /* Verify the defining statement has an EQ_EXPR on the RHS.  */
      if (is_gimple_assign (def1) && gimple_assign_rhs_code (def1) == EQ_EXPR)
	{
	  /* Finally verify the source operands of the EQ_EXPR are equal
	     to arg0 and arg1.  */
	  tree op0 = gimple_assign_rhs1 (def1);
	  tree op1 = gimple_assign_rhs2 (def1);
	  if ((operand_equal_for_phi_arg_p (arg0, op0)
	       && operand_equal_for_phi_arg_p (arg1, op1))
	      || (operand_equal_for_phi_arg_p (arg0, op1)
		  && operand_equal_for_phi_arg_p (arg1, op0)))
	    {
	      /* We will perform the optimization.  */
	      *code = gimple_assign_rhs_code (def1);
	      return true;
	    }
	}
    }
  return false;
}

/* Return TRUE if arg0/arg1 are equal to the rhs/lhs or lhs/rhs of COND.

   Also return TRUE if arg0/arg1 are equal to the source arguments of
   an EQ comparison feeding a BIT_AND_EXPR which feeds COND.

   Return FALSE otherwise.  */

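/* E.g. for

     _1 = a == b;
     _2 = _1 & c_3;
     if (_2 != 0)

   arg0/arg1 matching a and b satisfies the second form, and *code is
   updated to EQ_EXPR (an illustrative sketch of the matched shape).  */
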
static bool
operand_equal_for_value_replacement (const_tree arg0, const_tree arg1,
				     enum tree_code *code, gimple *cond)
{
  gimple *def;
  tree lhs = gimple_cond_lhs (cond);
  tree rhs = gimple_cond_rhs (cond);

  if ((operand_equal_for_phi_arg_p (arg0, lhs)
       && operand_equal_for_phi_arg_p (arg1, rhs))
      || (operand_equal_for_phi_arg_p (arg1, lhs)
	  && operand_equal_for_phi_arg_p (arg0, rhs)))
    return true;

  /* Now handle more complex case where we have an EQ comparison
     which feeds a BIT_AND_EXPR which feeds COND.

     First verify that COND is of the form SSA_NAME NE 0.  */
  if (*code != NE_EXPR || !integer_zerop (rhs)
      || TREE_CODE (lhs) != SSA_NAME)
    return false;

  /* Now ensure that SSA_NAME is set by a BIT_AND_EXPR.  */
  def = SSA_NAME_DEF_STMT (lhs);
  if (!is_gimple_assign (def) || gimple_assign_rhs_code (def) != BIT_AND_EXPR)
    return false;

  /* Now verify arg0/arg1 correspond to the source arguments of an
     EQ comparison feeding the BIT_AND_EXPR.  */

  tree tmp = gimple_assign_rhs1 (def);
  if (rhs_is_fed_for_value_replacement (arg0, arg1, code, tmp))
    return true;

  tmp = gimple_assign_rhs2 (def);
  if (rhs_is_fed_for_value_replacement (arg0, arg1, code, tmp))
    return true;

  return false;
}

/* Returns true if ARG is a neutral element for operation CODE
   on the RIGHT side.  */

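/* For instance x + 0, x - 0, x << 0, x * 1, x / 1 and x & -1 all yield
   x, so 0 (resp. 1, -1) is neutral for those codes; for the
   non-commutative codes 0 is only neutral on the right (0 - x is not
   x).  */
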
static bool
neutral_element_p (tree_code code, tree arg, bool right)
{
  switch (code)
    {
    case PLUS_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
      return integer_zerop (arg);

    case LROTATE_EXPR:
    case RROTATE_EXPR:
    case LSHIFT_EXPR:
    case RSHIFT_EXPR:
    case MINUS_EXPR:
    case POINTER_PLUS_EXPR:
      return right && integer_zerop (arg);

    case MULT_EXPR:
      return integer_onep (arg);

    case TRUNC_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case EXACT_DIV_EXPR:
      return right && integer_onep (arg);

    case BIT_AND_EXPR:
      return integer_all_onesp (arg);

    default:
      return false;
    }
}

/* Returns true if ARG is an absorbing element for operation CODE.  */

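/* For instance x * 0 and x & 0 are 0, and x | -1 is -1; 0 << x and
   0 / x absorb only with the 0 on the LEFT, and the division/modulo
   cases additionally require the other operand to be known nonzero so
   the transformation cannot hide undefined behavior.  */
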
static bool
absorbing_element_p (tree_code code, tree arg, bool right, tree rval)
{
  switch (code)
    {
    case BIT_IOR_EXPR:
      return integer_all_onesp (arg);

    case MULT_EXPR:
    case BIT_AND_EXPR:
      return integer_zerop (arg);

    case LSHIFT_EXPR:
    case RSHIFT_EXPR:
    case LROTATE_EXPR:
    case RROTATE_EXPR:
      return !right && integer_zerop (arg);

    case TRUNC_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case EXACT_DIV_EXPR:
    case TRUNC_MOD_EXPR:
    case CEIL_MOD_EXPR:
    case FLOOR_MOD_EXPR:
    case ROUND_MOD_EXPR:
      return (!right
	      && integer_zerop (arg)
	      && tree_single_nonzero_warnv_p (rval, NULL));

    default:
      return false;
    }
}

/* The function value_replacement does the main work of doing the value
   replacement.  Return non-zero if the replacement is done.  Otherwise return
   0.  If we remove the middle basic block, return 2.
   BB is the basic block where the replacement is going to be done.  ARG0
   is argument 0 from the PHI.  Likewise for ARG1.  */

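/* The classic instance is

     r = (x == y) ? x : y;

   where r is always y (on the true edge x equals y), so the branch and
   the PHI collapse to r = y.  */
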
static int
value_replacement (basic_block cond_bb, basic_block middle_bb,
		   edge e0, edge e1, gimple *phi,
		   tree arg0, tree arg1)
{
  gimple_stmt_iterator gsi;
  gimple *cond;
  edge true_edge, false_edge;
  enum tree_code code;
  bool empty_or_with_defined_p = true;

  /* If the type says honor signed zeros we cannot do this
     optimization.  */
  if (HONOR_SIGNED_ZEROS (arg1))
    return 0;

  /* If there is a statement in MIDDLE_BB that defines one of the PHI
     arguments, then adjust arg0 or arg1.  */
  gsi = gsi_start_nondebug_after_labels_bb (middle_bb);
  while (!gsi_end_p (gsi))
    {
      gimple *stmt = gsi_stmt (gsi);
      tree lhs;
      gsi_next_nondebug (&gsi);
      if (!is_gimple_assign (stmt))
	{
	  if (gimple_code (stmt) != GIMPLE_PREDICT
	      && gimple_code (stmt) != GIMPLE_NOP)
	    empty_or_with_defined_p = false;
	  continue;
	}
      /* Now try to adjust arg0 or arg1 according to the computation
	 in the statement.  */
      lhs = gimple_assign_lhs (stmt);
      if (!(lhs == arg0
	    && jump_function_from_stmt (&arg0, stmt))
	  || (lhs == arg1
	      && jump_function_from_stmt (&arg1, stmt)))
	empty_or_with_defined_p = false;
    }

  cond = last_stmt (cond_bb);
  code = gimple_cond_code (cond);

  /* This transformation is only valid for equality comparisons.  */
  if (code != NE_EXPR && code != EQ_EXPR)
    return 0;

  /* We need to know which is the true edge and which is the false
     edge so that we know which PHI argument is associated with each
     edge.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);

  /* At this point we know we have a COND_EXPR with two successors.
     One successor is BB, the other successor is an empty block which
     falls through into BB.

     The condition for the COND_EXPR is known to be NE_EXPR or EQ_EXPR.

     There is a single PHI node at the join point (BB) with two arguments.

     We now need to verify that the two arguments in the PHI node match
     the two arguments to the equality comparison.  */

  if (operand_equal_for_value_replacement (arg0, arg1, &code, cond))
    {
      edge e;
      tree arg;

      /* For NE_EXPR, we want to build an assignment result = arg where
	 arg is the PHI argument associated with the true edge.  For
	 EQ_EXPR we want the PHI argument associated with the false edge.  */
      e = (code == NE_EXPR ? true_edge : false_edge);

      /* Unfortunately, E may not reach BB (it may instead have gone to
	 OTHER_BLOCK).  If that is the case, then we want the single outgoing
	 edge from OTHER_BLOCK which reaches BB and represents the desired
	 path from COND_BLOCK.  */
      if (e->dest == middle_bb)
	e = single_succ_edge (e->dest);

      /* Now we know the incoming edge to BB that has the argument for the
	 RHS of our new assignment statement.  */
      if (e0 == e)
	arg = arg0;
      else
	arg = arg1;

      /* If the middle basic block was empty or is defining the
	 PHI arguments, and this is a single phi where the args are different
	 for the edges e0 and e1, then we can remove the middle basic block.  */
      if (empty_or_with_defined_p
	  && single_non_singleton_phi_for_edges (phi_nodes (gimple_bb (phi)),
						 e0, e1) == phi)
	{
	  replace_phi_edge_with_variable (cond_bb, e1, phi, arg);
	  /* Note that we optimized this PHI.  */
	  return 2;
	}
      else
	{
	  /* Replace the PHI arguments with arg.  */
	  SET_PHI_ARG_DEF (phi, e0->dest_idx, arg);
	  SET_PHI_ARG_DEF (phi, e1->dest_idx, arg);
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "PHI ");
	      print_generic_expr (dump_file, gimple_phi_result (phi));
	      fprintf (dump_file, " reduced for COND_EXPR in block %d to ",
		       cond_bb->index);
	      print_generic_expr (dump_file, arg);
	      fprintf (dump_file, ".\n");
	    }
	  return 1;
	}
    }

  /* Now optimize (x != 0) ? x + y : y to just x + y.  */
  gsi = gsi_last_nondebug_bb (middle_bb);
  if (gsi_end_p (gsi))
    return 0;

  gimple *assign = gsi_stmt (gsi);
  if (!is_gimple_assign (assign)
      || gimple_assign_rhs_class (assign) != GIMPLE_BINARY_RHS
      || (!INTEGRAL_TYPE_P (TREE_TYPE (arg0))
	  && !POINTER_TYPE_P (TREE_TYPE (arg0))))
    return 0;

  /* Punt if there are (degenerate) PHIs in middle_bb, there should not be.  */
  if (!gimple_seq_empty_p (phi_nodes (middle_bb)))
    return 0;

  /* Allow up to 2 cheap preparation statements that prepare argument
     for assign, e.g.:
      if (y_4 != 0)
	goto <bb 3>;
      else
	goto <bb 4>;
     <bb 3>:
      _1 = (int) y_4;
      iftmp.0_6 = x_5(D) r<< _1;
     <bb 4>:
      # iftmp.0_2 = PHI <iftmp.0_6(3), x_5(D)(2)>

      if (y_3(D) == 0)
	goto <bb 4>;
      else
	goto <bb 3>;
     <bb 3>:
      y_4 = y_3(D) & 31;
      _1 = (int) y_4;
      _6 = x_5(D) r<< _1;
     <bb 4>:
      # _2 = PHI <x_5(D)(2), _6(3)>  */
  gimple *prep_stmt[2] = { NULL, NULL };
  int prep_cnt;
  for (prep_cnt = 0; ; prep_cnt++)
    {
      gsi_prev_nondebug (&gsi);
      if (gsi_end_p (gsi))
	break;

      gimple *g = gsi_stmt (gsi);
      if (gimple_code (g) == GIMPLE_LABEL)
	break;

      if (prep_cnt == 2 || !is_gimple_assign (g))
	return 0;

      tree lhs = gimple_assign_lhs (g);
      tree rhs1 = gimple_assign_rhs1 (g);
      use_operand_p use_p;
      gimple *use_stmt;
      if (TREE_CODE (lhs) != SSA_NAME
	  || TREE_CODE (rhs1) != SSA_NAME
	  || !INTEGRAL_TYPE_P (TREE_TYPE (lhs))
	  || !INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
	  || !single_imm_use (lhs, &use_p, &use_stmt)
	  || use_stmt != (prep_cnt ? prep_stmt[prep_cnt - 1] : assign))
	return 0;
      switch (gimple_assign_rhs_code (g))
	{
	CASE_CONVERT:
	  break;
	case PLUS_EXPR:
	case BIT_AND_EXPR:
	case BIT_IOR_EXPR:
	case BIT_XOR_EXPR:
	  if (TREE_CODE (gimple_assign_rhs2 (g)) != INTEGER_CST)
	    return 0;
	  break;
	default:
	  return 0;
	}
      prep_stmt[prep_cnt] = g;
    }

  /* Only transform if it removes the condition.  */
  if (!single_non_singleton_phi_for_edges (phi_nodes (gimple_bb (phi)), e0, e1))
    return 0;

  /* Size-wise, this is always profitable.  */
  if (optimize_bb_for_speed_p (cond_bb)
      /* The special case is useless if it has a low probability.  */
      && profile_status_for_fn (cfun) != PROFILE_ABSENT
      && EDGE_PRED (middle_bb, 0)->probability < profile_probability::even ()
      /* If assign is cheap, there is no point avoiding it.  */
      && estimate_num_insns_seq (bb_seq (middle_bb), &eni_time_weights)
	 >= 3 * estimate_num_insns (cond, &eni_time_weights))
    return 0;

  tree lhs = gimple_assign_lhs (assign);
  tree rhs1 = gimple_assign_rhs1 (assign);
  tree rhs2 = gimple_assign_rhs2 (assign);
  enum tree_code code_def = gimple_assign_rhs_code (assign);
  tree cond_lhs = gimple_cond_lhs (cond);
  tree cond_rhs = gimple_cond_rhs (cond);

  /* Propagate the cond_rhs constant through preparation stmts,
     make sure UB isn't invoked while doing that.  */
  for (int i = prep_cnt - 1; i >= 0; --i)
    {
      gimple *g = prep_stmt[i];
      tree grhs1 = gimple_assign_rhs1 (g);
      if (!operand_equal_for_phi_arg_p (cond_lhs, grhs1))
	return 0;
      cond_lhs = gimple_assign_lhs (g);
      cond_rhs = fold_convert (TREE_TYPE (grhs1), cond_rhs);
      if (TREE_CODE (cond_rhs) != INTEGER_CST
	  || TREE_OVERFLOW (cond_rhs))
	return 0;
      if (gimple_assign_rhs_class (g) == GIMPLE_BINARY_RHS)
	{
	  cond_rhs = int_const_binop (gimple_assign_rhs_code (g), cond_rhs,
				      gimple_assign_rhs2 (g));
	  if (TREE_OVERFLOW (cond_rhs))
	    return 0;
	}
      cond_rhs = fold_convert (TREE_TYPE (cond_lhs), cond_rhs);
      if (TREE_CODE (cond_rhs) != INTEGER_CST
	  || TREE_OVERFLOW (cond_rhs))
	return 0;
    }

  if (((code == NE_EXPR && e1 == false_edge)
       || (code == EQ_EXPR && e1 == true_edge))
      && arg0 == lhs
      && ((arg1 == rhs1
	   && operand_equal_for_phi_arg_p (rhs2, cond_lhs)
	   && neutral_element_p (code_def, cond_rhs, true))
	  || (arg1 == rhs2
	      && operand_equal_for_phi_arg_p (rhs1, cond_lhs)
	      && neutral_element_p (code_def, cond_rhs, false))
	  || (operand_equal_for_phi_arg_p (arg1, cond_rhs)
	      && ((operand_equal_for_phi_arg_p (rhs2, cond_lhs)
		   && absorbing_element_p (code_def, cond_rhs, true, rhs2))
		  || (operand_equal_for_phi_arg_p (rhs1, cond_lhs)
		      && absorbing_element_p (code_def,
					      cond_rhs, false, rhs2))))))
    {
      gsi = gsi_for_stmt (cond);
      /* Moving ASSIGN might change VR of lhs, e.g. when moving u_6
	 def-stmt in:
	 if (n_5 != 0)
	   goto <bb 3>;
	 else
	   goto <bb 4>;

	 <bb 3>:
	 # RANGE [0, 4294967294]
	 u_6 = n_5 + 4294967295;

	 <bb 4>:
	 # u_3 = PHI <u_6(3), 4294967295(2)>  */
      reset_flow_sensitive_info (lhs);
      if (INTEGRAL_TYPE_P (TREE_TYPE (lhs)))
	{
	  /* If available, we can use VR of phi result at least.  */
	  tree phires = gimple_phi_result (phi);
	  struct range_info_def *phires_range_info
	    = SSA_NAME_RANGE_INFO (phires);
	  if (phires_range_info)
	    duplicate_ssa_name_range_info (lhs, SSA_NAME_RANGE_TYPE (phires),
					   phires_range_info);
	}
      gimple_stmt_iterator gsi_from;
      for (int i = prep_cnt - 1; i >= 0; --i)
	{
	  tree plhs = gimple_assign_lhs (prep_stmt[i]);
	  reset_flow_sensitive_info (plhs);
	  gsi_from = gsi_for_stmt (prep_stmt[i]);
	  gsi_move_before (&gsi_from, &gsi);
	}
      gsi_from = gsi_for_stmt (assign);
      gsi_move_before (&gsi_from, &gsi);
      replace_phi_edge_with_variable (cond_bb, e1, phi, lhs);
      return 2;
    }

  return 0;
}

/* The function minmax_replacement does the main work of doing the minmax
   replacement.  Return true if the replacement is done.  Otherwise return
   false.
   BB is the basic block where the replacement is going to be done.  ARG0
   is argument 0 from the PHI.  Likewise for ARG1.  */

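/* In the simplest shape, with an empty middle block,

     if (a < b)
       r = a;
     else
       r = b;

   becomes r = MIN_EXPR <a, b> (and symmetrically MAX_EXPR).  */
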
static bool
minmax_replacement (basic_block cond_bb, basic_block middle_bb,
		    edge e0, edge e1, gimple *phi,
		    tree arg0, tree arg1)
{
  tree result;
  edge true_edge, false_edge;
  enum tree_code minmax, ass_code;
  tree smaller, larger, arg_true, arg_false;
  gimple_stmt_iterator gsi, gsi_from;

  tree type = TREE_TYPE (PHI_RESULT (phi));

  /* The optimization may be unsafe due to NaNs.  */
  if (HONOR_NANS (type) || HONOR_SIGNED_ZEROS (type))
    return false;

  gcond *cond = as_a <gcond *> (last_stmt (cond_bb));
  enum tree_code cmp = gimple_cond_code (cond);
  tree rhs = gimple_cond_rhs (cond);

  /* Turn EQ/NE of extreme values to order comparisons.  */
  if ((cmp == NE_EXPR || cmp == EQ_EXPR)
      && TREE_CODE (rhs) == INTEGER_CST
      && INTEGRAL_TYPE_P (TREE_TYPE (rhs)))
    {
      if (wi::eq_p (wi::to_wide (rhs), wi::min_value (TREE_TYPE (rhs))))
	{
	  cmp = (cmp == EQ_EXPR) ? LT_EXPR : GE_EXPR;
	  rhs = wide_int_to_tree (TREE_TYPE (rhs),
				  wi::min_value (TREE_TYPE (rhs)) + 1);
	}
      else if (wi::eq_p (wi::to_wide (rhs), wi::max_value (TREE_TYPE (rhs))))
	{
	  cmp = (cmp == EQ_EXPR) ? GT_EXPR : LE_EXPR;
	  rhs = wide_int_to_tree (TREE_TYPE (rhs),
				  wi::max_value (TREE_TYPE (rhs)) - 1);
	}
    }

  /* This transformation is only valid for order comparisons.  Record which
     operand is smaller/larger if the result of the comparison is true.  */
  tree alt_smaller = NULL_TREE;
  tree alt_larger = NULL_TREE;
  if (cmp == LT_EXPR || cmp == LE_EXPR)
    {
      smaller = gimple_cond_lhs (cond);
      larger = rhs;
      /* If we have smaller < CST it is equivalent to smaller <= CST-1.
	 Likewise smaller <= CST is equivalent to smaller < CST+1.  */
      if (TREE_CODE (larger) == INTEGER_CST
	  && INTEGRAL_TYPE_P (TREE_TYPE (larger)))
	{
	  if (cmp == LT_EXPR)
	    {
	      wi::overflow_type overflow;
	      wide_int alt = wi::sub (wi::to_wide (larger), 1,
				      TYPE_SIGN (TREE_TYPE (larger)),
				      &overflow);
	      if (! overflow)
		alt_larger = wide_int_to_tree (TREE_TYPE (larger), alt);
	    }
	  else
	    {
	      wi::overflow_type overflow;
	      wide_int alt = wi::add (wi::to_wide (larger), 1,
				      TYPE_SIGN (TREE_TYPE (larger)),
				      &overflow);
	      if (! overflow)
		alt_larger = wide_int_to_tree (TREE_TYPE (larger), alt);
	    }
	}
    }
  else if (cmp == GT_EXPR || cmp == GE_EXPR)
    {
      smaller = rhs;
      larger = gimple_cond_lhs (cond);
      /* If we have larger > CST it is equivalent to larger >= CST+1.
	 Likewise larger >= CST is equivalent to larger > CST-1.  */
      if (TREE_CODE (smaller) == INTEGER_CST
	  && INTEGRAL_TYPE_P (TREE_TYPE (smaller)))
	{
	  wi::overflow_type overflow;
	  if (cmp == GT_EXPR)
	    {
	      wide_int alt = wi::add (wi::to_wide (smaller), 1,
				      TYPE_SIGN (TREE_TYPE (smaller)),
				      &overflow);
	      if (! overflow)
		alt_smaller = wide_int_to_tree (TREE_TYPE (smaller), alt);
	    }
	  else
	    {
	      wide_int alt = wi::sub (wi::to_wide (smaller), 1,
				      TYPE_SIGN (TREE_TYPE (smaller)),
				      &overflow);
	      if (! overflow)
		alt_smaller = wide_int_to_tree (TREE_TYPE (smaller), alt);
	    }
	}
    }
  else
    return false;

  /* Handle the special case of (signed_type)x < 0 being equivalent
     to x > MAX_VAL(signed_type) and (signed_type)x >= 0 equivalent
     to x <= MAX_VAL(signed_type).  */
  if ((cmp == GE_EXPR || cmp == LT_EXPR)
      && INTEGRAL_TYPE_P (type)
      && TYPE_UNSIGNED (type)
      && integer_zerop (rhs))
    {
      tree op = gimple_cond_lhs (cond);
      if (TREE_CODE (op) == SSA_NAME
	  && INTEGRAL_TYPE_P (TREE_TYPE (op))
	  && !TYPE_UNSIGNED (TREE_TYPE (op)))
	{
	  gimple *def_stmt = SSA_NAME_DEF_STMT (op);
	  if (gimple_assign_cast_p (def_stmt))
	    {
	      tree op1 = gimple_assign_rhs1 (def_stmt);
	      if (INTEGRAL_TYPE_P (TREE_TYPE (op1))
		  && TYPE_UNSIGNED (TREE_TYPE (op1))
		  && (TYPE_PRECISION (TREE_TYPE (op))
		      == TYPE_PRECISION (TREE_TYPE (op1)))
		  && useless_type_conversion_p (type, TREE_TYPE (op1)))
		{
		  wide_int w1 = wi::max_value (TREE_TYPE (op));
		  wide_int w2 = wi::add (w1, 1);
		  if (cmp == LT_EXPR)
		    {
		      larger = op1;
		      smaller = wide_int_to_tree (TREE_TYPE (op1), w1);
		      alt_smaller = wide_int_to_tree (TREE_TYPE (op1), w2);
		      alt_larger = NULL_TREE;
		    }
		  else
		    {
		      smaller = op1;
		      larger = wide_int_to_tree (TREE_TYPE (op1), w1);
		      alt_larger = wide_int_to_tree (TREE_TYPE (op1), w2);
		      alt_smaller = NULL_TREE;
		    }
		}
	    }
	}
    }

  /* We need to know which is the true edge and which is the false
     edge so that we know which PHI argument corresponds to each edge.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);

  /* Forward the edges over the middle basic block.  */
  if (true_edge->dest == middle_bb)
    true_edge = EDGE_SUCC (true_edge->dest, 0);
  if (false_edge->dest == middle_bb)
    false_edge = EDGE_SUCC (false_edge->dest, 0);

  if (true_edge == e0)
    {
      gcc_assert (false_edge == e1);
      arg_true = arg0;
      arg_false = arg1;
    }
  else
    {
      gcc_assert (false_edge == e0);
      gcc_assert (true_edge == e1);
      arg_true = arg1;
      arg_false = arg0;
    }

  if (empty_block_p (middle_bb))
    {
      if ((operand_equal_for_phi_arg_p (arg_true, smaller)
	   || (alt_smaller
	       && operand_equal_for_phi_arg_p (arg_true, alt_smaller)))
	  && (operand_equal_for_phi_arg_p (arg_false, larger)
	      || (alt_larger
		  && operand_equal_for_phi_arg_p (arg_false, alt_larger))))
	{
	  /* Case

	     if (smaller < larger)
	       rslt = smaller;
	     else
	       rslt = larger;  */
	  minmax = MIN_EXPR;
	}
      else if ((operand_equal_for_phi_arg_p (arg_false, smaller)
		|| (alt_smaller
		    && operand_equal_for_phi_arg_p (arg_false, alt_smaller)))
	       && (operand_equal_for_phi_arg_p (arg_true, larger)
		   || (alt_larger
		       && operand_equal_for_phi_arg_p (arg_true, alt_larger))))
	minmax = MAX_EXPR;
      else
	return false;
    }
  else
    {
      /* Recognize the following case, assuming d <= u:

	 if (a <= u)
	   b = MAX (a, d);
	 x = PHI <b, u>

	 This is equivalent to

	 b = MAX (a, d);
	 x = MIN (b, u);  */

      gimple *assign = last_and_only_stmt (middle_bb);
      tree lhs, op0, op1, bound;

      if (!assign
	  || gimple_code (assign) != GIMPLE_ASSIGN)
	return false;

      lhs = gimple_assign_lhs (assign);
      ass_code = gimple_assign_rhs_code (assign);
      if (ass_code != MAX_EXPR && ass_code != MIN_EXPR)
	return false;
      op0 = gimple_assign_rhs1 (assign);
      op1 = gimple_assign_rhs2 (assign);

      if (true_edge->src == middle_bb)
	{
	  /* We got here if the condition is true, i.e., SMALLER < LARGER.  */
	  if (!operand_equal_for_phi_arg_p (lhs, arg_true))
	    return false;

	  if (operand_equal_for_phi_arg_p (arg_false, larger)
	      || (alt_larger
		  && operand_equal_for_phi_arg_p (arg_false, alt_larger)))
	    {
	      /* Case

		 if (smaller < larger)
		   {
		     r' = MAX_EXPR (smaller, bound)
		   }
		 r = PHI <r', larger>  --> to be turned to MIN_EXPR.  */
	      if (ass_code != MAX_EXPR)
		return false;

	      minmax = MIN_EXPR;
	      if (operand_equal_for_phi_arg_p (op0, smaller)
		  || (alt_smaller
		      && operand_equal_for_phi_arg_p (op0, alt_smaller)))
		bound = op1;
	      else if (operand_equal_for_phi_arg_p (op1, smaller)
		       || (alt_smaller
			   && operand_equal_for_phi_arg_p (op1, alt_smaller)))
		bound = op0;
	      else
		return false;

	      /* We need BOUND <= LARGER.  */
	      if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
						  bound, larger)))
		return false;
	    }
	  else if (operand_equal_for_phi_arg_p (arg_false, smaller)
		   || (alt_smaller
		       && operand_equal_for_phi_arg_p (arg_false, alt_smaller)))
	    {
	      /* Case

		 if (smaller < larger)
		   {
		     r' = MIN_EXPR (larger, bound)
		   }
		 r = PHI <r', smaller>  --> to be turned to MAX_EXPR.  */
	      if (ass_code != MIN_EXPR)
		return false;

	      minmax = MAX_EXPR;
	      if (operand_equal_for_phi_arg_p (op0, larger)
		  || (alt_larger
		      && operand_equal_for_phi_arg_p (op0, alt_larger)))
		bound = op1;
	      else if (operand_equal_for_phi_arg_p (op1, larger)
		       || (alt_larger
			   && operand_equal_for_phi_arg_p (op1, alt_larger)))
		bound = op0;
	      else
		return false;

	      /* We need BOUND >= SMALLER.  */
	      if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
						  bound, smaller)))
		return false;
	    }
	  else
	    return false;
	}
      else
	{
	  /* We got here if the condition is false, i.e., SMALLER > LARGER.  */
	  if (!operand_equal_for_phi_arg_p (lhs, arg_false))
	    return false;

	  if (operand_equal_for_phi_arg_p (arg_true, larger)
	      || (alt_larger
		  && operand_equal_for_phi_arg_p (arg_true, alt_larger)))
	    {
	      /* Case

		 if (smaller > larger)
		   {
		     r' = MIN_EXPR (smaller, bound)
		   }
		 r = PHI <r', larger>  --> to be turned to MAX_EXPR.  */
	      if (ass_code != MIN_EXPR)
		return false;

	      minmax = MAX_EXPR;
	      if (operand_equal_for_phi_arg_p (op0, smaller)
		  || (alt_smaller
		      && operand_equal_for_phi_arg_p (op0, alt_smaller)))
		bound = op1;
	      else if (operand_equal_for_phi_arg_p (op1, smaller)
		       || (alt_smaller
			   && operand_equal_for_phi_arg_p (op1, alt_smaller)))
		bound = op0;
	      else
		return false;

	      /* We need BOUND >= LARGER.  */
	      if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
						  bound, larger)))
		return false;
	    }
	  else if (operand_equal_for_phi_arg_p (arg_true, smaller)
		   || (alt_smaller
		       && operand_equal_for_phi_arg_p (arg_true, alt_smaller)))
	    {
	      /* Case

		 if (smaller > larger)
		   {
		     r' = MAX_EXPR (larger, bound)
		   }
		 r = PHI <r', smaller>  --> to be turned to MIN_EXPR.  */
	      if (ass_code != MAX_EXPR)
		return false;

	      minmax = MIN_EXPR;
	      if (operand_equal_for_phi_arg_p (op0, larger))
		bound = op1;
	      else if (operand_equal_for_phi_arg_p (op1, larger))
		bound = op0;
	      else
		return false;

	      /* We need BOUND <= SMALLER.  */
	      if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
						  bound, smaller)))
		return false;
	    }
	  else
	    return false;
	}

      /* Move the statement from the middle block.  */
      gsi = gsi_last_bb (cond_bb);
      gsi_from = gsi_last_nondebug_bb (middle_bb);
      reset_flow_sensitive_info (SINGLE_SSA_TREE_OPERAND (gsi_stmt (gsi_from),
							  SSA_OP_DEF));
      gsi_move_before (&gsi_from, &gsi);
    }

  /* Emit the statement to compute min/max.  */
  gimple_seq stmts = NULL;
  tree phi_result = PHI_RESULT (phi);
  result = gimple_build (&stmts, minmax, TREE_TYPE (phi_result), arg0, arg1);
  /* Duplicate range info if we're the only things setting the target PHI.  */
  if (!gimple_seq_empty_p (stmts)
      && EDGE_COUNT (gimple_bb (phi)->preds) == 2
      && !POINTER_TYPE_P (TREE_TYPE (phi_result))
      && SSA_NAME_RANGE_INFO (phi_result))
    duplicate_ssa_name_range_info (result, SSA_NAME_RANGE_TYPE (phi_result),
				   SSA_NAME_RANGE_INFO (phi_result));

  gsi = gsi_last_bb (cond_bb);
  gsi_insert_seq_before (&gsi, stmts, GSI_NEW_STMT);

  replace_phi_edge_with_variable (cond_bb, e1, phi, result);

  return true;
}

/* Convert

   <bb 2>
   if (b_4(D) != 0)
     goto <bb 3>
   else
     goto <bb 4>

   <bb 3>
   _2 = (unsigned long) b_4(D);
   _9 = __builtin_popcountl (_2);

   _9 = __builtin_popcountl (b_4(D));

   <bb 4>
   c_12 = PHI <0(2), _9(3)>

   Into
   <bb 2>
   _2 = (unsigned long) b_4(D);
   _9 = __builtin_popcountl (_2);

   _9 = __builtin_popcountl (b_4(D));

   <bb 4>
   c_12 = PHI <_9(2)>

   Similarly for __builtin_clz or __builtin_ctz if
   C?Z_DEFINED_VALUE_AT_ZERO is 2, optab is present and
   instead of 0 above it uses the value from that macro.  */

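/* In C terms the transformation is roughly

     c = b ? __builtin_popcountl (b) : 0;  -->  c = __builtin_popcountl (b);

   valid because popcount of zero is zero (and for clz/ctz because the
   target-defined value at zero matches the PHI constant).  */
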
1806 static bool
1807 cond_removal_in_popcount_clz_ctz_pattern (basic_block cond_bb,
1808 basic_block middle_bb,
1809 edge e1, edge e2, gimple *phi,
1810 tree arg0, tree arg1)
1812 gimple *cond;
1813 gimple_stmt_iterator gsi, gsi_from;
1814 gimple *call;
1815 gimple *cast = NULL;
1816 tree lhs, arg;
1818 /* Check that
1819 _2 = (unsigned long) b_4(D);
1820 _9 = __builtin_popcountl (_2);
1822 _9 = __builtin_popcountl (b_4(D));
1823 are the only stmts in the middle_bb. */
1825 gsi = gsi_start_nondebug_after_labels_bb (middle_bb);
1826 if (gsi_end_p (gsi))
1827 return false;
1828 cast = gsi_stmt (gsi);
1829 gsi_next_nondebug (&gsi);
1830 if (!gsi_end_p (gsi))
1832 call = gsi_stmt (gsi);
1833 gsi_next_nondebug (&gsi);
1834 if (!gsi_end_p (gsi))
1835 return false;
1837 else
1839 call = cast;
1840 cast = NULL;
1843 /* Check that we have a popcount/clz/ctz builtin. */
1844 if (!is_gimple_call (call) || gimple_call_num_args (call) != 1)
1845 return false;
1847 arg = gimple_call_arg (call, 0);
1848 lhs = gimple_get_lhs (call);
1850 if (lhs == NULL_TREE)
1851 return false;
1853 combined_fn cfn = gimple_call_combined_fn (call);
1854 internal_fn ifn = IFN_LAST;
1855 int val = 0;
1856 switch (cfn)
1858 CASE_CFN_POPCOUNT:
1859 break;
1860 CASE_CFN_CLZ:
1861 if (INTEGRAL_TYPE_P (TREE_TYPE (arg)))
1863 tree type = TREE_TYPE (arg);
1864 if (direct_internal_fn_supported_p (IFN_CLZ, type, OPTIMIZE_FOR_BOTH)
1865 && CLZ_DEFINED_VALUE_AT_ZERO (SCALAR_INT_TYPE_MODE (type),
1866 val) == 2)
1868 ifn = IFN_CLZ;
1869 break;
1872 return false;
1873 CASE_CFN_CTZ:
1874 if (INTEGRAL_TYPE_P (TREE_TYPE (arg)))
1876 tree type = TREE_TYPE (arg);
1877 if (direct_internal_fn_supported_p (IFN_CTZ, type, OPTIMIZE_FOR_BOTH)
1878 && CTZ_DEFINED_VALUE_AT_ZERO (SCALAR_INT_TYPE_MODE (type),
1879 val) == 2)
1881 ifn = IFN_CTZ;
1882 break;
1885 return false;
1886 default:
1887 return false;
1890 if (cast)
1892 /* We have a cast stmt feeding popcount/clz/ctz builtin. */
1893 /* Check that we have a cast prior to that. */
1894 if (gimple_code (cast) != GIMPLE_ASSIGN
1895 || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (cast)))
1896 return false;
1897 /* Result of the cast stmt is the argument to the builtin. */
1898 if (arg != gimple_assign_lhs (cast))
1899 return false;
1900 arg = gimple_assign_rhs1 (cast);
1903 cond = last_stmt (cond_bb);
1905 /* Cond_bb has a check for b_4 [!=|==] 0 before calling the popcount/clz/ctz
1906 builtin. */
1907 if (gimple_code (cond) != GIMPLE_COND
1908 || (gimple_cond_code (cond) != NE_EXPR
1909 && gimple_cond_code (cond) != EQ_EXPR)
1910 || !integer_zerop (gimple_cond_rhs (cond))
1911 || arg != gimple_cond_lhs (cond))
1912 return false;
1914 /* Canonicalize. */
1915 if ((e2->flags & EDGE_TRUE_VALUE
1916 && gimple_cond_code (cond) == NE_EXPR)
1917 || (e1->flags & EDGE_TRUE_VALUE
1918 && gimple_cond_code (cond) == EQ_EXPR))
1920 std::swap (arg0, arg1);
1921 std::swap (e1, e2);
1924 /* Check PHI arguments. */
1925 if (lhs != arg0
1926 || TREE_CODE (arg1) != INTEGER_CST
1927 || wi::to_wide (arg1) != val)
1928 return false;
1930 /* And insert the popcount/clz/ctz builtin and cast stmt before the
1931 condition at the end of cond_bb. */
1932 gsi = gsi_last_bb (cond_bb);
1933 if (cast)
1935 gsi_from = gsi_for_stmt (cast);
1936 gsi_move_before (&gsi_from, &gsi);
1937 reset_flow_sensitive_info (gimple_get_lhs (cast));
1939 gsi_from = gsi_for_stmt (call);
1940 if (ifn == IFN_LAST || gimple_call_internal_p (call))
1941 gsi_move_before (&gsi_from, &gsi);
1942 else
1944 /* For __builtin_c[lt]z* force .C[LT]Z ifn, because only
1945 the latter is well defined at zero. */
1946 call = gimple_build_call_internal (ifn, 1, gimple_call_arg (call, 0));
1947 gimple_call_set_lhs (call, lhs);
1948 gsi_insert_before (&gsi, call, GSI_SAME_STMT);
1949 gsi_remove (&gsi_from, true);
1951 reset_flow_sensitive_info (lhs);
1953 /* Now update the PHI and remove unneeded bbs. */
1954 replace_phi_edge_with_variable (cond_bb, e2, phi, lhs);
1955 return true;
1958 /* The function abs_replacement does the main work of doing the absolute
1959 replacement. Return true if the replacement is done. Otherwise return
1960 false.
1961 bb is the basic block where the replacement is going to be done. arg0
1962 is argument 0 from the phi. Likewise for arg1. */
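/* As an illustrative (hypothetical) source-level example, this matches

     x = a < 0 ? -a : a;       which becomes   x = ABS_EXPR <a>;
     x = a > 0 ? -a : a;       which becomes   x = -ABS_EXPR <a>;

   where the second form additionally negates the ABS_EXPR result,
   as determined from the true/false edges below.  */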
1964 static bool
1965 abs_replacement (basic_block cond_bb, basic_block middle_bb,
1966 edge e0 ATTRIBUTE_UNUSED, edge e1,
1967 gimple *phi, tree arg0, tree arg1)
1969 tree result;
1970 gassign *new_stmt;
1971 gimple *cond;
1972 gimple_stmt_iterator gsi;
1973 edge true_edge, false_edge;
1974 gimple *assign;
1975 edge e;
1976 tree rhs, lhs;
1977 bool negate;
1978 enum tree_code cond_code;
1980 /* If the type says honor signed zeros we cannot do this
1981 optimization. */
1982 if (HONOR_SIGNED_ZEROS (arg1))
1983 return false;
1985 /* OTHER_BLOCK must have only one executable statement which must have the
1986 form arg0 = -arg1 or arg1 = -arg0. */
1988 assign = last_and_only_stmt (middle_bb);
1989 /* If we did not find the proper negation assignment, then we cannot
1990 optimize. */
1991 if (assign == NULL)
1992 return false;
1994 /* If we got here, then we have found the only executable statement
1995 in OTHER_BLOCK. If it is anything other than arg0 = -arg1 or
1996 arg1 = -arg0, then we cannot optimize. */
1997 if (gimple_code (assign) != GIMPLE_ASSIGN)
1998 return false;
2000 lhs = gimple_assign_lhs (assign);
2002 if (gimple_assign_rhs_code (assign) != NEGATE_EXPR)
2003 return false;
2005 rhs = gimple_assign_rhs1 (assign);
2007 /* The assignment has to be arg0 = -arg1 or arg1 = -arg0. */
2008 if (!(lhs == arg0 && rhs == arg1)
2009 && !(lhs == arg1 && rhs == arg0))
2010 return false;
2012 cond = last_stmt (cond_bb);
2013 result = PHI_RESULT (phi);
2015 /* Only relationals comparing arg[01] against zero are interesting. */
2016 cond_code = gimple_cond_code (cond);
2017 if (cond_code != GT_EXPR && cond_code != GE_EXPR
2018 && cond_code != LT_EXPR && cond_code != LE_EXPR)
2019 return false;
2021 /* Make sure the conditional is arg[01] OP y. */
2022 if (gimple_cond_lhs (cond) != rhs)
2023 return false;
2025 if (FLOAT_TYPE_P (TREE_TYPE (gimple_cond_rhs (cond)))
2026 ? real_zerop (gimple_cond_rhs (cond))
2027 : integer_zerop (gimple_cond_rhs (cond)))
2029 else
2030 return false;
2032 /* We need to know which is the true edge and which is the false
2033 edge so that we know whether we have abs or negative abs. */
2034 extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
2036 /* For GT_EXPR/GE_EXPR, if the true edge goes to OTHER_BLOCK, then we
2037 will need to negate the result. Similarly for LT_EXPR/LE_EXPR if
2038 the false edge goes to OTHER_BLOCK. */
2039 if (cond_code == GT_EXPR || cond_code == GE_EXPR)
2040 e = true_edge;
2041 else
2042 e = false_edge;
2044 if (e->dest == middle_bb)
2045 negate = true;
2046 else
2047 negate = false;
2049 /* If we need to negate the result, make sure not to introduce
2050 undefined behavior when negating or computing the absolute value.
2051 ??? We could use range info if present to check for arg1 == INT_MIN. */
2052 if (negate
2053 && (ANY_INTEGRAL_TYPE_P (TREE_TYPE (arg1))
2054 && ! TYPE_OVERFLOW_WRAPS (TREE_TYPE (arg1))))
2055 return false;
2057 result = duplicate_ssa_name (result, NULL);
2059 if (negate)
2060 lhs = make_ssa_name (TREE_TYPE (result));
2061 else
2062 lhs = result;
2064 /* Build the modify expression with abs expression. */
2065 new_stmt = gimple_build_assign (lhs, ABS_EXPR, rhs);
2067 gsi = gsi_last_bb (cond_bb);
2068 gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
2070 if (negate)
2072 /* Get the right GSI. We want to insert after the recently
2073 added ABS_EXPR statement (which we know is the first statement
2074 in the block). */
2075 new_stmt = gimple_build_assign (result, NEGATE_EXPR, lhs);
2077 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
2080 replace_phi_edge_with_variable (cond_bb, e1, phi, result);
2082 /* Note that we optimized this PHI. */
2083 return true;
2086 /* Auxiliary functions to determine the set of memory accesses which
2087 can't trap because they are preceded by accesses to the same memory
2088 portion. We do that for MEM_REFs, so we only need to track
2089 the SSA_NAME of the pointer indirectly referenced. The algorithm
2090 simply is a walk over all instructions in dominator order. When
2091 we see a MEM_REF, we determine if we've already seen the same
2092 ref anywhere up to the root of the dominator tree. If we have, the
2093 current access can't trap. If we don't see any dominating access,
2094 the current access might trap, but might also make later accesses
2095 non-trapping, so we remember it. We need to be careful with loads
2096 or stores: for instance, a load might not trap while a store would,
2097 so if we see a dominating read access this doesn't mean that a later
2098 write access would not trap. Hence we also need to differentiate the
2099 type of access(es) seen.
2101 ??? We currently are very conservative and assume that a load might
2102 trap even if a store doesn't (write-only memory). This probably is
2103 overly conservative.
2105 We currently support a special case: for !TREE_ADDRESSABLE automatic
2106 variables we can ignore whether an access is a load or a store, because
2107 the local stack is always writable. */
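/* As an illustrative (hypothetical) example of the walk described above:

     *p_1 = 0;       <-- might trap; remembered as a seen store
     if (cond_2)
       *p_1 = 1;     <-- can't trap: dominated by the same store

   whereas a merely dominating load of *p_1 would not prove the
   conditional store non-trapping, as noted above.  */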
2109 /* A hash-table of references (MEM_REF/ARRAY_REF/COMPONENT_REF) and the
2110 basic block in which a *_REF through it was seen, which would constitute
2111 a no-trap region for later identical accesses.
2113 Size is needed to support 2 MEM_REFs of different types, like
2114 MEM<double>(s_1) and MEM<long>(s_1), which would compare equal with
2115 OEP_ADDRESS_OF. */
2116 struct ref_to_bb
2118 tree exp;
2119 HOST_WIDE_INT size;
2120 unsigned int phase;
2121 basic_block bb;
2124 /* Hashtable helpers. */
2126 struct refs_hasher : free_ptr_hash<ref_to_bb>
2128 static inline hashval_t hash (const ref_to_bb *);
2129 static inline bool equal (const ref_to_bb *, const ref_to_bb *);
2132 /* Used for quick clearing of the hash-table when we see calls.
2133 Hash entries with phase < nt_call_phase are invalid. */
2134 static unsigned int nt_call_phase;
2136 /* The hash function. */
2138 inline hashval_t
2139 refs_hasher::hash (const ref_to_bb *n)
2141 inchash::hash hstate;
2142 inchash::add_expr (n->exp, hstate, OEP_ADDRESS_OF);
2143 hstate.add_hwi (n->size);
2144 return hstate.end ();
2147 /* The equality function of *P1 and *P2. */
2149 inline bool
2150 refs_hasher::equal (const ref_to_bb *n1, const ref_to_bb *n2)
2152 return operand_equal_p (n1->exp, n2->exp, OEP_ADDRESS_OF)
2153 && n1->size == n2->size;
2156 class nontrapping_dom_walker : public dom_walker
2158 public:
2159 nontrapping_dom_walker (cdi_direction direction, hash_set<tree> *ps)
2160 : dom_walker (direction), m_nontrapping (ps), m_seen_refs (128)
2163 virtual edge before_dom_children (basic_block);
2164 virtual void after_dom_children (basic_block);
2166 private:
2168 /* We see the expression EXP in basic block BB. If it's an interesting
2169 expression (an MEM_REF through an SSA_NAME) possibly insert the
2170 expression into the set NONTRAP or the hash table of seen expressions.
2171 STORE is true if this expression is on the LHS, otherwise it's on
2172 the RHS. */
2173 void add_or_mark_expr (basic_block, tree, bool);
2175 hash_set<tree> *m_nontrapping;
2177 /* The hash table for remembering what we've seen. */
2178 hash_table<refs_hasher> m_seen_refs;
2181 /* Called by walk_dominator_tree, when entering the block BB. */
2182 edge
2183 nontrapping_dom_walker::before_dom_children (basic_block bb)
2185 edge e;
2186 edge_iterator ei;
2187 gimple_stmt_iterator gsi;
2189 /* If we haven't seen all our predecessors, clear the hash-table. */
2190 FOR_EACH_EDGE (e, ei, bb->preds)
2191 if ((((size_t)e->src->aux) & 2) == 0)
2193 nt_call_phase++;
2194 break;
2197 /* Mark this BB as being on the path to dominator root and as visited. */
2198 bb->aux = (void*)(1 | 2);
2200 /* And walk the statements in order. */
2201 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2203 gimple *stmt = gsi_stmt (gsi);
2205 if ((gimple_code (stmt) == GIMPLE_ASM && gimple_vdef (stmt))
2206 || (is_gimple_call (stmt)
2207 && (!nonfreeing_call_p (stmt) || !nonbarrier_call_p (stmt))))
2208 nt_call_phase++;
2209 else if (gimple_assign_single_p (stmt) && !gimple_has_volatile_ops (stmt))
2211 add_or_mark_expr (bb, gimple_assign_lhs (stmt), true);
2212 add_or_mark_expr (bb, gimple_assign_rhs1 (stmt), false);
2215 return NULL;
2218 /* Called by walk_dominator_tree, when basic block BB is exited. */
2219 void
2220 nontrapping_dom_walker::after_dom_children (basic_block bb)
2222 /* This BB isn't on the path to dominator root anymore. */
2223 bb->aux = (void*)2;
2226 /* We see the expression EXP in basic block BB. If it's an interesting
2227 expression, one of:
2228 1) MEM_REF
2229 2) ARRAY_REF
2230 3) COMPONENT_REF
2231 possibly insert the expression into the set NONTRAP or the hash table
2232 of seen expressions. STORE is true if this expression is on the LHS,
2233 otherwise it's on the RHS. */
2234 void
2235 nontrapping_dom_walker::add_or_mark_expr (basic_block bb, tree exp, bool store)
2237 HOST_WIDE_INT size;
2239 if ((TREE_CODE (exp) == MEM_REF || TREE_CODE (exp) == ARRAY_REF
2240 || TREE_CODE (exp) == COMPONENT_REF)
2241 && (size = int_size_in_bytes (TREE_TYPE (exp))) > 0)
2243 struct ref_to_bb map;
2244 ref_to_bb **slot;
2245 struct ref_to_bb *r2bb;
2246 basic_block found_bb = 0;
2248 if (!store)
2250 tree base = get_base_address (exp);
2251 /* Only record a LOAD of a local variable whose address is not taken,
2252 as the local stack is always writable. This allows cselim on a STORE
2253 with a dominating LOAD. */
2254 if (!auto_var_p (base) || TREE_ADDRESSABLE (base))
2255 return;
2258 /* Try to find the last seen *_REF, which can trap. */
2259 map.exp = exp;
2260 map.size = size;
2261 slot = m_seen_refs.find_slot (&map, INSERT);
2262 r2bb = *slot;
2263 if (r2bb && r2bb->phase >= nt_call_phase)
2264 found_bb = r2bb->bb;
2266 /* If we've found a trapping *_REF, _and_ it dominates EXP
2267 (it's in a basic block on the path from us to the dominator root),
2268 then EXP can't trap. */
2269 if (found_bb && (((size_t)found_bb->aux) & 1) == 1)
2271 m_nontrapping->add (exp);
2273 else
2275 /* EXP might trap, so insert it into the hash table. */
2276 if (r2bb)
2278 r2bb->phase = nt_call_phase;
2279 r2bb->bb = bb;
2281 else
2283 r2bb = XNEW (struct ref_to_bb);
2284 r2bb->phase = nt_call_phase;
2285 r2bb->bb = bb;
2286 r2bb->exp = exp;
2287 r2bb->size = size;
2288 *slot = r2bb;
2294 /* This is the entry point of gathering non-trapping memory accesses.
2295 It will do a dominator walk over the whole function, and it will
2296 make use of the bb->aux pointers. It returns a set of trees
2297 (the MEM_REFs themselves) which can't trap. */
2298 static hash_set<tree> *
2299 get_non_trapping (void)
2301 nt_call_phase = 0;
2302 hash_set<tree> *nontrap = new hash_set<tree>;
2303 /* We're going to do a dominator walk, so ensure that we have
2304 dominance information. */
2305 calculate_dominance_info (CDI_DOMINATORS);
2307 nontrapping_dom_walker (CDI_DOMINATORS, nontrap)
2308 .walk (cfun->cfg->x_entry_block_ptr);
2310 clear_aux_for_blocks ();
2311 return nontrap;
2314 /* Do the main work of conditional store replacement. We already know
2315 that the recognized pattern looks like so:
2317 split:
2318 if (cond) goto MIDDLE_BB; else goto JOIN_BB (edge E1)
2319 MIDDLE_BB:
2320 something
2321 fallthrough (edge E0)
2322 JOIN_BB:
2323 some more
2325 We check that MIDDLE_BB contains only one store, that the store
2326 doesn't trap (not via NOTRAP, but via checking if an access to the same
2327 memory location dominates us, or the store is to a local object whose
2328 address is not taken) and that the store has a "simple" RHS. */
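/* For example (hypothetical), given a local aggregate whose address
   is never taken:

     struct S { int f; } s;
     ...
     if (cond)
       s.f = v;

   the store to s.f may be sunk even without a dominating access,
   since a store to the local stack cannot trap and cannot introduce
   a data race observable by another thread.  */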
2330 static bool
2331 cond_store_replacement (basic_block middle_bb, basic_block join_bb,
2332 edge e0, edge e1, hash_set<tree> *nontrap)
2334 gimple *assign = last_and_only_stmt (middle_bb);
2335 tree lhs, rhs, name, name2;
2336 gphi *newphi;
2337 gassign *new_stmt;
2338 gimple_stmt_iterator gsi;
2339 location_t locus;
2341 /* Check if middle_bb contains only one store. */
2342 if (!assign
2343 || !gimple_assign_single_p (assign)
2344 || gimple_has_volatile_ops (assign))
2345 return false;
2347 /* And no PHI nodes so all uses in the single stmt are also
2348 available where we insert to. */
2349 if (!gimple_seq_empty_p (phi_nodes (middle_bb)))
2350 return false;
2352 locus = gimple_location (assign);
2353 lhs = gimple_assign_lhs (assign);
2354 rhs = gimple_assign_rhs1 (assign);
2355 if ((TREE_CODE (lhs) != MEM_REF
2356 && TREE_CODE (lhs) != ARRAY_REF
2357 && TREE_CODE (lhs) != COMPONENT_REF)
2358 || !is_gimple_reg_type (TREE_TYPE (lhs)))
2359 return false;
2361 /* Prove that we can move the store down. We could also check
2362 TREE_THIS_NOTRAP here, but in that case we also could move stores
2363 whose value is not readily available, which we want to avoid. */
2364 if (!nontrap->contains (lhs))
2366 /* If LHS is an access to a local variable without address-taken
2367 (or when we allow data races) and known not to trap, we could
2368 always safely move down the store. */
2369 tree base = get_base_address (lhs);
2370 if (!auto_var_p (base)
2371 || (TREE_ADDRESSABLE (base) && !flag_store_data_races)
2372 || tree_could_trap_p (lhs))
2373 return false;
2376 /* Now we've checked the constraints, so do the transformation:
2377 1) Remove the single store. */
2378 gsi = gsi_for_stmt (assign);
2379 unlink_stmt_vdef (assign);
2380 gsi_remove (&gsi, true);
2381 release_defs (assign);
2383 /* Make both store and load use alias-set zero as we have to
2384 deal with the case of the store being a conditional change
2385 of the dynamic type. */
2386 lhs = unshare_expr (lhs);
2387 tree *basep = &lhs;
2388 while (handled_component_p (*basep))
2389 basep = &TREE_OPERAND (*basep, 0);
2390 if (TREE_CODE (*basep) == MEM_REF
2391 || TREE_CODE (*basep) == TARGET_MEM_REF)
2392 TREE_OPERAND (*basep, 1)
2393 = fold_convert (ptr_type_node, TREE_OPERAND (*basep, 1));
2394 else
2395 *basep = build2 (MEM_REF, TREE_TYPE (*basep),
2396 build_fold_addr_expr (*basep),
2397 build_zero_cst (ptr_type_node));
2399 /* 2) Insert a load from the memory of the store to the temporary
2400 on the edge which did not contain the store. */
2401 name = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
2402 new_stmt = gimple_build_assign (name, lhs);
2403 gimple_set_location (new_stmt, locus);
2404 lhs = unshare_expr (lhs);
2405 /* Set TREE_NO_WARNING on the rhs of the load to avoid uninit
2406 warnings. */
2407 TREE_NO_WARNING (gimple_assign_rhs1 (new_stmt)) = 1;
2408 gsi_insert_on_edge (e1, new_stmt);
2410 /* 3) Create a PHI node at the join block, with one argument
2411 holding the old RHS, and the other holding the temporary
2412 where we stored the old memory contents. */
2413 name2 = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
2414 newphi = create_phi_node (name2, join_bb);
2415 add_phi_arg (newphi, rhs, e0, locus);
2416 add_phi_arg (newphi, name, e1, locus);
2418 new_stmt = gimple_build_assign (lhs, PHI_RESULT (newphi));
2420 /* 4) Insert the store that uses the PHI result. */
2421 gsi = gsi_after_labels (join_bb);
2422 if (gsi_end_p (gsi))
2424 gsi = gsi_last_bb (join_bb);
2425 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
2427 else
2428 gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
2430 if (dump_file && (dump_flags & TDF_DETAILS))
2432 fprintf (dump_file, "\nConditional store replacement happened!");
2433 fprintf (dump_file, "\nReplaced the store with a load.");
2434 fprintf (dump_file, "\nInserted a new PHI statement in join block:\n");
2435 print_gimple_stmt (dump_file, new_stmt, 0, TDF_VOPS|TDF_MEMSYMS);
2438 return true;
2441 /* Do the main work of conditional store replacement. */
2443 static bool
2444 cond_if_else_store_replacement_1 (basic_block then_bb, basic_block else_bb,
2445 basic_block join_bb, gimple *then_assign,
2446 gimple *else_assign)
2448 tree lhs_base, lhs, then_rhs, else_rhs, name;
2449 location_t then_locus, else_locus;
2450 gimple_stmt_iterator gsi;
2451 gphi *newphi;
2452 gassign *new_stmt;
2454 if (then_assign == NULL
2455 || !gimple_assign_single_p (then_assign)
2456 || gimple_clobber_p (then_assign)
2457 || gimple_has_volatile_ops (then_assign)
2458 || else_assign == NULL
2459 || !gimple_assign_single_p (else_assign)
2460 || gimple_clobber_p (else_assign)
2461 || gimple_has_volatile_ops (else_assign))
2462 return false;
2464 lhs = gimple_assign_lhs (then_assign);
2465 if (!is_gimple_reg_type (TREE_TYPE (lhs))
2466 || !operand_equal_p (lhs, gimple_assign_lhs (else_assign), 0))
2467 return false;
2469 lhs_base = get_base_address (lhs);
2470 if (lhs_base == NULL_TREE
2471 || (!DECL_P (lhs_base) && TREE_CODE (lhs_base) != MEM_REF))
2472 return false;
2474 then_rhs = gimple_assign_rhs1 (then_assign);
2475 else_rhs = gimple_assign_rhs1 (else_assign);
2476 then_locus = gimple_location (then_assign);
2477 else_locus = gimple_location (else_assign);
2479 /* Now we've checked the constraints, so do the transformation:
2480 1) Remove the stores. */
2481 gsi = gsi_for_stmt (then_assign);
2482 unlink_stmt_vdef (then_assign);
2483 gsi_remove (&gsi, true);
2484 release_defs (then_assign);
2486 gsi = gsi_for_stmt (else_assign);
2487 unlink_stmt_vdef (else_assign);
2488 gsi_remove (&gsi, true);
2489 release_defs (else_assign);
2491 /* 2) Create a PHI node at the join block, with one argument
2492 holding the RHS of the then-store and the other holding the
2493 RHS of the else-store. */
2494 name = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
2495 newphi = create_phi_node (name, join_bb);
2496 add_phi_arg (newphi, then_rhs, EDGE_SUCC (then_bb, 0), then_locus);
2497 add_phi_arg (newphi, else_rhs, EDGE_SUCC (else_bb, 0), else_locus);
2499 new_stmt = gimple_build_assign (lhs, PHI_RESULT (newphi));
2501 /* 3) Insert the store that uses the PHI result. */
2502 gsi = gsi_after_labels (join_bb);
2503 if (gsi_end_p (gsi))
2505 gsi = gsi_last_bb (join_bb);
2506 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
2508 else
2509 gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
2511 return true;
2514 /* Return the single store in BB whose virtual definition is VDEF, or
2515 NULL if there are other stores in the BB or loads following the store. */
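/* E.g. (illustrative) for a block containing only

     *p_1 = a_2;

   that store is returned, whereas for

     *p_1 = a_2;
     x_3 = *q_4;

   NULL is returned, because a load follows the store.  */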
2517 static gimple *
2518 single_trailing_store_in_bb (basic_block bb, tree vdef)
2520 if (SSA_NAME_IS_DEFAULT_DEF (vdef))
2521 return NULL;
2522 gimple *store = SSA_NAME_DEF_STMT (vdef);
2523 if (gimple_bb (store) != bb
2524 || gimple_code (store) == GIMPLE_PHI)
2525 return NULL;
2527 /* Verify there is no other store in this BB. */
2528 if (!SSA_NAME_IS_DEFAULT_DEF (gimple_vuse (store))
2529 && gimple_bb (SSA_NAME_DEF_STMT (gimple_vuse (store))) == bb
2530 && gimple_code (SSA_NAME_DEF_STMT (gimple_vuse (store))) != GIMPLE_PHI)
2531 return NULL;
2533 /* Verify there is no load or store after the store. */
2534 use_operand_p use_p;
2535 imm_use_iterator imm_iter;
2536 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, gimple_vdef (store))
2537 if (USE_STMT (use_p) != store
2538 && gimple_bb (USE_STMT (use_p)) == bb)
2539 return NULL;
2541 return store;
2544 /* Conditional store replacement. We already know
2545 that the recognized pattern looks like so:
2547 split:
2548 if (cond) goto THEN_BB; else goto ELSE_BB (edge E1)
2549 THEN_BB:
2551 X = Y;
2553 goto JOIN_BB;
2554 ELSE_BB:
2556 X = Z;
2558 fallthrough (edge E0)
2559 JOIN_BB:
2560 some more
2562 We check that it is safe to sink the store to JOIN_BB by verifying that
2563 there are no read-after-write or write-after-write dependencies in
2564 THEN_BB and ELSE_BB. */
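/* As a source-level illustration (hypothetical):

     if (cond)
       *p = a;
     else
       *p = b;

   becomes a PHI in JOIN_BB feeding one unconditional store,
   i.e. tmp = cond ? a : b; *p = tmp;  */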
2566 static bool
2567 cond_if_else_store_replacement (basic_block then_bb, basic_block else_bb,
2568 basic_block join_bb)
2570 vec<data_reference_p> then_datarefs, else_datarefs;
2571 vec<ddr_p> then_ddrs, else_ddrs;
2572 gimple *then_store, *else_store;
2573 bool found, ok = false, res;
2574 struct data_dependence_relation *ddr;
2575 data_reference_p then_dr, else_dr;
2576 int i, j;
2577 tree then_lhs, else_lhs;
2578 basic_block blocks[3];
2580 /* Handle the case of a single store in THEN_BB and ELSE_BB. That is
2581 cheap enough to always handle, as it allows us to elide dependence
2582 checking. */
2583 gphi *vphi = NULL;
2584 for (gphi_iterator si = gsi_start_phis (join_bb); !gsi_end_p (si);
2585 gsi_next (&si))
2586 if (virtual_operand_p (gimple_phi_result (si.phi ())))
2588 vphi = si.phi ();
2589 break;
2591 if (!vphi)
2592 return false;
2593 tree then_vdef = PHI_ARG_DEF_FROM_EDGE (vphi, single_succ_edge (then_bb));
2594 tree else_vdef = PHI_ARG_DEF_FROM_EDGE (vphi, single_succ_edge (else_bb));
2595 gimple *then_assign = single_trailing_store_in_bb (then_bb, then_vdef);
2596 if (then_assign)
2598 gimple *else_assign = single_trailing_store_in_bb (else_bb, else_vdef);
2599 if (else_assign)
2600 return cond_if_else_store_replacement_1 (then_bb, else_bb, join_bb,
2601 then_assign, else_assign);
2604 /* If either vectorization or if-conversion is disabled then do
2605 not sink any stores. */
2606 if (param_max_stores_to_sink == 0
2607 || (!flag_tree_loop_vectorize && !flag_tree_slp_vectorize)
2608 || !flag_tree_loop_if_convert)
2609 return false;
2611 /* Find data references. */
2612 then_datarefs.create (1);
2613 else_datarefs.create (1);
2614 if ((find_data_references_in_bb (NULL, then_bb, &then_datarefs)
2615 == chrec_dont_know)
2616 || !then_datarefs.length ()
2617 || (find_data_references_in_bb (NULL, else_bb, &else_datarefs)
2618 == chrec_dont_know)
2619 || !else_datarefs.length ())
2621 free_data_refs (then_datarefs);
2622 free_data_refs (else_datarefs);
2623 return false;
2626 /* Find pairs of stores with equal LHS. */
2627 auto_vec<gimple *, 1> then_stores, else_stores;
2628 FOR_EACH_VEC_ELT (then_datarefs, i, then_dr)
2630 if (DR_IS_READ (then_dr))
2631 continue;
2633 then_store = DR_STMT (then_dr);
2634 then_lhs = gimple_get_lhs (then_store);
2635 if (then_lhs == NULL_TREE)
2636 continue;
2637 found = false;
2639 FOR_EACH_VEC_ELT (else_datarefs, j, else_dr)
2641 if (DR_IS_READ (else_dr))
2642 continue;
2644 else_store = DR_STMT (else_dr);
2645 else_lhs = gimple_get_lhs (else_store);
2646 if (else_lhs == NULL_TREE)
2647 continue;
2649 if (operand_equal_p (then_lhs, else_lhs, 0))
2651 found = true;
2652 break;
2656 if (!found)
2657 continue;
2659 then_stores.safe_push (then_store);
2660 else_stores.safe_push (else_store);
2663 /* No pairs of stores found, or more than we are willing to sink. */
2664 if (!then_stores.length ()
2665 || then_stores.length () > (unsigned) param_max_stores_to_sink)
2667 free_data_refs (then_datarefs);
2668 free_data_refs (else_datarefs);
2669 return false;
2672 /* Compute and check data dependencies in both basic blocks. */
2673 then_ddrs.create (1);
2674 else_ddrs.create (1);
2675 if (!compute_all_dependences (then_datarefs, &then_ddrs,
2676 vNULL, false)
2677 || !compute_all_dependences (else_datarefs, &else_ddrs,
2678 vNULL, false))
2680 free_dependence_relations (then_ddrs);
2681 free_dependence_relations (else_ddrs);
2682 free_data_refs (then_datarefs);
2683 free_data_refs (else_datarefs);
2684 return false;
2686 blocks[0] = then_bb;
2687 blocks[1] = else_bb;
2688 blocks[2] = join_bb;
2689 renumber_gimple_stmt_uids_in_blocks (blocks, 3);
2691 /* Check that there are no read-after-write or write-after-write dependencies
2692 in THEN_BB. */
2693 FOR_EACH_VEC_ELT (then_ddrs, i, ddr)
2695 struct data_reference *dra = DDR_A (ddr);
2696 struct data_reference *drb = DDR_B (ddr);
2698 if (DDR_ARE_DEPENDENT (ddr) != chrec_known
2699 && ((DR_IS_READ (dra) && DR_IS_WRITE (drb)
2700 && gimple_uid (DR_STMT (dra)) > gimple_uid (DR_STMT (drb)))
2701 || (DR_IS_READ (drb) && DR_IS_WRITE (dra)
2702 && gimple_uid (DR_STMT (drb)) > gimple_uid (DR_STMT (dra)))
2703 || (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))))
2705 free_dependence_relations (then_ddrs);
2706 free_dependence_relations (else_ddrs);
2707 free_data_refs (then_datarefs);
2708 free_data_refs (else_datarefs);
2709 return false;
2713 /* Check that there are no read-after-write or write-after-write dependencies
2714 in ELSE_BB. */
2715 FOR_EACH_VEC_ELT (else_ddrs, i, ddr)
2717 struct data_reference *dra = DDR_A (ddr);
2718 struct data_reference *drb = DDR_B (ddr);
2720 if (DDR_ARE_DEPENDENT (ddr) != chrec_known
2721 && ((DR_IS_READ (dra) && DR_IS_WRITE (drb)
2722 && gimple_uid (DR_STMT (dra)) > gimple_uid (DR_STMT (drb)))
2723 || (DR_IS_READ (drb) && DR_IS_WRITE (dra)
2724 && gimple_uid (DR_STMT (drb)) > gimple_uid (DR_STMT (dra)))
2725 || (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))))
2727 free_dependence_relations (then_ddrs);
2728 free_dependence_relations (else_ddrs);
2729 free_data_refs (then_datarefs);
2730 free_data_refs (else_datarefs);
2731 return false;
2735 /* Sink stores with the same LHS. */
2736 FOR_EACH_VEC_ELT (then_stores, i, then_store)
2738 else_store = else_stores[i];
2739 res = cond_if_else_store_replacement_1 (then_bb, else_bb, join_bb,
2740 then_store, else_store);
2741 ok = ok || res;
2744 free_dependence_relations (then_ddrs);
2745 free_dependence_relations (else_ddrs);
2746 free_data_refs (then_datarefs);
2747 free_data_refs (else_datarefs);
2749 return ok;
2752 /* Return TRUE if STMT has a VUSE whose corresponding VDEF is in BB. */
2754 static bool
2755 local_mem_dependence (gimple *stmt, basic_block bb)
2757 tree vuse = gimple_vuse (stmt);
2758 gimple *def;
2760 if (!vuse)
2761 return false;
2763 def = SSA_NAME_DEF_STMT (vuse);
2764 return (def && gimple_bb (def) == bb);
2767 /* Given a "diamond" control-flow pattern where BB0 tests a condition,
2768 BB1 and BB2 are "then" and "else" blocks dependent on this test,
2769 and BB3 rejoins control flow following BB1 and BB2, look for
2770 opportunities to hoist loads as follows. If BB3 contains a PHI of
2771 two loads, one each occurring in BB1 and BB2, and the loads are
2772 provably of adjacent fields in the same structure, then move both
2773 loads into BB0. Of course this can only be done if there are no
2774 dependencies preventing such motion.
2776 One of the hoisted loads will always be speculative, so the
2777 transformation is currently conservative:
2779 - The fields must be strictly adjacent.
2780 - The two fields must occupy a single memory block that is
2781 guaranteed to not cross a page boundary.
2783 The latter is difficult to prove, as such memory blocks should be
2784 aligned on the minimum of the stack alignment boundary and the
2785 alignment guaranteed by heap allocation interfaces. Thus we rely
2786 on a parameter for the alignment value.
2788 Provided a good value is used for the latter case, the first
2789 restriction could possibly be relaxed. */
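/* As an illustrative (hypothetical) example:

     struct pair { int lo; int hi; };   <-- adjacent fields
     ...
     x = cond ? p->lo : p->hi;

   Both loads can be moved into BB0 when the two fields fit within
   one L1 cache line, allowing the PHI to become a conditional move.  */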
2791 static void
2792 hoist_adjacent_loads (basic_block bb0, basic_block bb1,
2793 basic_block bb2, basic_block bb3)
2795 int param_align = param_l1_cache_line_size;
2796 unsigned param_align_bits = (unsigned) (param_align * BITS_PER_UNIT);
2797 gphi_iterator gsi;
2799 /* Walk the phis in bb3 looking for an opportunity. We are looking
2800 for phis of two SSA names, one each of which is defined in bb1 and
2801 bb2. */
2802 for (gsi = gsi_start_phis (bb3); !gsi_end_p (gsi); gsi_next (&gsi))
2804 gphi *phi_stmt = gsi.phi ();
2805 gimple *def1, *def2;
2806 tree arg1, arg2, ref1, ref2, field1, field2;
2807 tree tree_offset1, tree_offset2, tree_size2, next;
2808 int offset1, offset2, size2;
2809 unsigned align1;
2810 gimple_stmt_iterator gsi2;
2811 basic_block bb_for_def1, bb_for_def2;
2813 if (gimple_phi_num_args (phi_stmt) != 2
2814 || virtual_operand_p (gimple_phi_result (phi_stmt)))
2815 continue;
2817 arg1 = gimple_phi_arg_def (phi_stmt, 0);
2818 arg2 = gimple_phi_arg_def (phi_stmt, 1);
2820 if (TREE_CODE (arg1) != SSA_NAME
2821 || TREE_CODE (arg2) != SSA_NAME
2822 || SSA_NAME_IS_DEFAULT_DEF (arg1)
2823 || SSA_NAME_IS_DEFAULT_DEF (arg2))
2824 continue;
2826 def1 = SSA_NAME_DEF_STMT (arg1);
2827 def2 = SSA_NAME_DEF_STMT (arg2);
2829 if ((gimple_bb (def1) != bb1 || gimple_bb (def2) != bb2)
2830 && (gimple_bb (def2) != bb1 || gimple_bb (def1) != bb2))
2831 continue;
2833 /* Check the mode of the arguments to be sure a conditional move
2834 can be generated for them. */
2835 if (optab_handler (movcc_optab, TYPE_MODE (TREE_TYPE (arg1)))
2836 == CODE_FOR_nothing)
2837 continue;
2839 /* Both statements must be assignments whose RHS is a COMPONENT_REF. */
2840 if (!gimple_assign_single_p (def1)
2841 || !gimple_assign_single_p (def2)
2842 || gimple_has_volatile_ops (def1)
2843 || gimple_has_volatile_ops (def2))
2844 continue;
2846 ref1 = gimple_assign_rhs1 (def1);
2847 ref2 = gimple_assign_rhs1 (def2);
2849 if (TREE_CODE (ref1) != COMPONENT_REF
2850 || TREE_CODE (ref2) != COMPONENT_REF)
2851 continue;
2853 /* The zeroth operand of the two component references must be
2854 identical. It is not sufficient to compare get_base_address of
2855 the two references, because this could allow for different
2856 elements of the same array in the two trees. It is not safe to
2857 assume that the existence of one array element implies the
2858 existence of a different one. */
2859 if (!operand_equal_p (TREE_OPERAND (ref1, 0), TREE_OPERAND (ref2, 0), 0))
2860 continue;
2862 field1 = TREE_OPERAND (ref1, 1);
2863 field2 = TREE_OPERAND (ref2, 1);
2865 /* Check for field adjacency, and ensure field1 comes first. */
2866 for (next = DECL_CHAIN (field1);
2867 next && TREE_CODE (next) != FIELD_DECL;
2868 next = DECL_CHAIN (next))
2871 if (next != field2)
2873 for (next = DECL_CHAIN (field2);
2874 next && TREE_CODE (next) != FIELD_DECL;
2875 next = DECL_CHAIN (next))
2878 if (next != field1)
2879 continue;
2881 std::swap (field1, field2);
2882 std::swap (def1, def2);
2885 bb_for_def1 = gimple_bb (def1);
2886 bb_for_def2 = gimple_bb (def2);
2888 /* Check for proper alignment of the first field. */
2889 tree_offset1 = bit_position (field1);
2890 tree_offset2 = bit_position (field2);
2891 tree_size2 = DECL_SIZE (field2);
2893 if (!tree_fits_uhwi_p (tree_offset1)
2894 || !tree_fits_uhwi_p (tree_offset2)
2895 || !tree_fits_uhwi_p (tree_size2))
2896 continue;
2898 offset1 = tree_to_uhwi (tree_offset1);
2899 offset2 = tree_to_uhwi (tree_offset2);
2900 size2 = tree_to_uhwi (tree_size2);
2901 align1 = DECL_ALIGN (field1) % param_align_bits;
2903 if (offset1 % BITS_PER_UNIT != 0)
2904 continue;
2906 /* For profitability, the two field references should fit within
2907 a single cache line. */
2908 if (align1 + offset2 - offset1 + size2 > param_align_bits)
2909 continue;
2911 /* The two expressions cannot be dependent upon vdefs defined
2912 in bb1/bb2. */
2913 if (local_mem_dependence (def1, bb_for_def1)
2914 || local_mem_dependence (def2, bb_for_def2))
2915 continue;
2917 /* The conditions are satisfied; hoist the loads from bb1 and bb2 into
2918 bb0. We hoist the first one first so that a cache miss is handled
2919 efficiently regardless of hardware cache-fill policy. */
2920 gsi2 = gsi_for_stmt (def1);
2921 gsi_move_to_bb_end (&gsi2, bb0);
2922 gsi2 = gsi_for_stmt (def2);
2923 gsi_move_to_bb_end (&gsi2, bb0);
2925 if (dump_file && (dump_flags & TDF_DETAILS))
2927 fprintf (dump_file,
2928 "\nHoisting adjacent loads from %d and %d into %d: \n",
2929 bb_for_def1->index, bb_for_def2->index, bb0->index);
2930 print_gimple_stmt (dump_file, def1, 0, TDF_VOPS|TDF_MEMSYMS);
2931 print_gimple_stmt (dump_file, def2, 0, TDF_VOPS|TDF_MEMSYMS);
2936 /* Determine whether we should attempt to hoist adjacent loads out of
2937 diamond patterns in pass_phiopt. Always hoist loads if
2938 -fhoist-adjacent-loads is specified and the target machine has
2939 both a conditional move instruction and a defined cache line size. */
2941 static bool
2942 gate_hoist_loads (void)
2944 return (flag_hoist_adjacent_loads == 1
2945 && param_l1_cache_line_size
2946 && HAVE_conditional_move);
2949 /* This pass tries to replace an if-then-else block with an
2950 assignment. We have four kinds of transformations. Some of these
2951 transformations are also performed by the ifcvt RTL optimizer.
2953 Conditional Replacement
2954 -----------------------
2956 This transformation, implemented in conditional_replacement,
2957 replaces
2959 bb0:
2960 if (cond) goto bb2; else goto bb1;
2961 bb1:
2962 bb2:
2963 x = PHI <0 (bb1), 1 (bb0), ...>;
2965 with
2967 bb0:
2968 x' = cond;
2969 goto bb2;
2970 bb2:
2971 x = PHI <x' (bb0), ...>;
2973 We remove bb1 as it becomes unreachable. This occurs often due to
2974 gimplification of conditionals.
2976 Value Replacement
2977 -----------------
2979 This transformation, implemented in value_replacement, replaces
2981 bb0:
2982 if (a != b) goto bb2; else goto bb1;
2983 bb1:
2984 bb2:
2985 x = PHI <a (bb1), b (bb0), ...>;
2987 with
2989 bb0:
2990 bb2:
2991 x = PHI <b (bb0), ...>;
2993 This opportunity can sometimes occur as a result of other
2994 optimizations.
2997 Another case caught by value replacement looks like this:
2999 bb0:
3000 t1 = a == CONST;
3001 t2 = b > c;
3002 t3 = t1 & t2;
3003 if (t3 != 0) goto bb1; else goto bb2;
3004 bb1:
3005 bb2:
3006 x = PHI <CONST, a>
3008 Gets replaced with:
3009 bb0:
3010 bb2:
3011 t1 = a == CONST;
3012 t2 = b > c;
3013 t3 = t1 & t2;
3014 x = a;
3016 ABS Replacement
3017 ---------------
3019 This transformation, implemented in abs_replacement, replaces
3021 bb0:
3022 if (a >= 0) goto bb2; else goto bb1;
3023 bb1:
3024 x = -a;
3025 bb2:
3026 x = PHI <x (bb1), a (bb0), ...>;
3028 with
3030 bb0:
3031 x' = ABS_EXPR< a >;
3032 bb2:
3033 x = PHI <x' (bb0), ...>;
3035 MIN/MAX Replacement
3036 -------------------
3038 This transformation, implemented in minmax_replacement, replaces
3040 bb0:
3041 if (a <= b) goto bb2; else goto bb1;
3042 bb1:
3043 bb2:
3044 x = PHI <b (bb1), a (bb0), ...>;
3046 with
3048 bb0:
3049 x' = MIN_EXPR (a, b)
3050 bb2:
3051 x = PHI <x' (bb0), ...>;
3053 A similar transformation is done for MAX_EXPR.
3056 This pass also performs a fifth transformation of a slightly different
3057 flavor.
3059 Factor conversion in COND_EXPR
3060 ------------------------------
3062 This transformation factors the conversion out of COND_EXPR with
3063 factor_out_conditional_conversion.
3065 For example:
3066 if (a <= CST) goto <bb 3>; else goto <bb 4>;
3067 <bb 3>:
3068 tmp = (int) a;
3069 <bb 4>:
3070 tmp = PHI <tmp, CST>
3072 Into:
3073 if (a <= CST) goto <bb 3>; else goto <bb 4>;
3074 <bb 3>:
3075 <bb 4>:
3076 a = PHI <a, CST>
3077 tmp = (int) a;
3079 Adjacent Load Hoisting
3080 ----------------------
3082 This transformation replaces
3084 bb0:
3085 if (...) goto bb2; else goto bb1;
3086 bb1:
3087 x1 = (<expr>).field1;
3088 goto bb3;
3089 bb2:
3090 x2 = (<expr>).field2;
3091 bb3:
3092 # x = PHI <x1, x2>;
3094 with
3096 bb0:
3097 x1 = (<expr>).field1;
3098 x2 = (<expr>).field2;
3099 if (...) goto bb2; else goto bb1;
3100 bb1:
3101 goto bb3;
3102 bb2:
3103 bb3:
3104 # x = PHI <x1, x2>;
3106 The purpose of this transformation is to enable generation of conditional
3107 move instructions such as Intel CMOVE or PowerPC ISEL. Because one of
3108 the loads is speculative, the transformation is restricted to very
3109 specific cases to avoid introducing a page fault. We are looking for
3110 the common idiom:
3112 if (...)
3113 x = y->left;
3114 else
3115 x = y->right;
3117 where left and right are typically adjacent pointers in a tree structure. */
3119 namespace {
3121 const pass_data pass_data_phiopt =
3123 GIMPLE_PASS, /* type */
3124 "phiopt", /* name */
3125 OPTGROUP_NONE, /* optinfo_flags */
3126 TV_TREE_PHIOPT, /* tv_id */
3127 ( PROP_cfg | PROP_ssa ), /* properties_required */
3128 0, /* properties_provided */
3129 0, /* properties_destroyed */
3130 0, /* todo_flags_start */
3131 0, /* todo_flags_finish */
3134 class pass_phiopt : public gimple_opt_pass
3136 public:
3137 pass_phiopt (gcc::context *ctxt)
3138 : gimple_opt_pass (pass_data_phiopt, ctxt), early_p (false)
3141 /* opt_pass methods: */
3142 opt_pass * clone () { return new pass_phiopt (m_ctxt); }
3143 void set_pass_param (unsigned n, bool param)
3145 gcc_assert (n == 0);
3146 early_p = param;
3148 virtual bool gate (function *) { return flag_ssa_phiopt; }
3149 virtual unsigned int execute (function *)
3151 return tree_ssa_phiopt_worker (false,
3152 !early_p ? gate_hoist_loads () : false,
3153 early_p);
3156 private:
3157 bool early_p;
3158 }; // class pass_phiopt
3160 } // anon namespace
3162 gimple_opt_pass *
3163 make_pass_phiopt (gcc::context *ctxt)
3165 return new pass_phiopt (ctxt);
3168 namespace {
3170 const pass_data pass_data_cselim =
3172 GIMPLE_PASS, /* type */
3173 "cselim", /* name */
3174 OPTGROUP_NONE, /* optinfo_flags */
3175 TV_TREE_PHIOPT, /* tv_id */
3176 ( PROP_cfg | PROP_ssa ), /* properties_required */
3177 0, /* properties_provided */
3178 0, /* properties_destroyed */
3179 0, /* todo_flags_start */
3180 0, /* todo_flags_finish */
3183 class pass_cselim : public gimple_opt_pass
3185 public:
3186 pass_cselim (gcc::context *ctxt)
3187 : gimple_opt_pass (pass_data_cselim, ctxt)
3190 /* opt_pass methods: */
3191 virtual bool gate (function *) { return flag_tree_cselim; }
3192 virtual unsigned int execute (function *) { return tree_ssa_cs_elim (); }
3194 }; // class pass_cselim
3196 } // anon namespace
3198 gimple_opt_pass *
3199 make_pass_cselim (gcc::context *ctxt)
3201 return new pass_cselim (ctxt);