gcc/tree-ssa-phiopt.c
1 /* Optimization of PHI nodes by converting them into straightline code.
2 Copyright (C) 2004-2020 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 3, or (at your option) any
9 later version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "backend.h"
24 #include "insn-codes.h"
25 #include "rtl.h"
26 #include "tree.h"
27 #include "gimple.h"
28 #include "cfghooks.h"
29 #include "tree-pass.h"
30 #include "ssa.h"
31 #include "optabs-tree.h"
32 #include "insn-config.h"
33 #include "gimple-pretty-print.h"
34 #include "fold-const.h"
35 #include "stor-layout.h"
36 #include "cfganal.h"
37 #include "gimplify.h"
38 #include "gimple-iterator.h"
39 #include "gimplify-me.h"
40 #include "tree-cfg.h"
41 #include "tree-dfa.h"
42 #include "domwalk.h"
43 #include "cfgloop.h"
44 #include "tree-data-ref.h"
45 #include "tree-scalar-evolution.h"
46 #include "tree-inline.h"
47 #include "case-cfn-macros.h"
48 #include "tree-eh.h"
49 #include "gimple-fold.h"
51 static unsigned int tree_ssa_phiopt_worker (bool, bool, bool);
52 static bool two_value_replacement (basic_block, basic_block, edge, gphi *,
53 tree, tree);
54 static bool conditional_replacement (basic_block, basic_block,
55 edge, edge, gphi *, tree, tree);
56 static gphi *factor_out_conditional_conversion (edge, edge, gphi *, tree, tree,
57 gimple *);
58 static int value_replacement (basic_block, basic_block,
59 edge, edge, gimple *, tree, tree);
60 static bool minmax_replacement (basic_block, basic_block,
61 edge, edge, gimple *, tree, tree);
62 static bool abs_replacement (basic_block, basic_block,
63 edge, edge, gimple *, tree, tree);
64 static bool cond_removal_in_popcount_pattern (basic_block, basic_block,
65 edge, edge, gimple *, tree, tree);
66 static bool cond_store_replacement (basic_block, basic_block, edge, edge,
67 hash_set<tree> *);
68 static bool cond_if_else_store_replacement (basic_block, basic_block, basic_block);
69 static hash_set<tree> * get_non_trapping ();
70 static void replace_phi_edge_with_variable (basic_block, edge, gimple *, tree);
71 static void hoist_adjacent_loads (basic_block, basic_block,
72 basic_block, basic_block);
73 static bool gate_hoist_loads (void);
75 /* This pass tries to transform conditional stores into unconditional
76 ones, enabling further simplifications with the simpler then and else
77 blocks. In particular it replaces this:
79 bb0:
80 if (cond) goto bb2; else goto bb1;
81 bb1:
82 *p = RHS;
83 bb2:
85 with
87 bb0:
88 if (cond) goto bb1; else goto bb2;
89 bb1:
90 condtmp' = *p;
91 bb2:
92 condtmp = PHI <RHS, condtmp'>
93 *p = condtmp;
95 This transformation can only be done under several constraints,
96 documented below. It also replaces:
98 bb0:
99 if (cond) goto bb2; else goto bb1;
100 bb1:
101 *p = RHS1;
102 goto bb3;
103 bb2:
104 *p = RHS2;
105 bb3:
107 with
109 bb0:
110 if (cond) goto bb3; else goto bb1;
111 bb1:
112 bb3:
113 condtmp = PHI <RHS1, RHS2>
114 *p = condtmp; */
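/* Illustrative sketch only (not part of the pass, names invented): at the
   source level the first transformation corresponds roughly to rewriting

     if (cond)
       *p = rhs;

   into

     condtmp = cond ? rhs : *p;
     *p = condtmp;

   which is only valid when the load of *p cannot trap, and the second one
   to rewriting

     if (cond)
       *p = rhs1;
     else
       *p = rhs2;

   into

     *p = cond ? rhs1 : rhs2;  */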
116 static unsigned int
117 tree_ssa_cs_elim (void)
119 unsigned todo;
120 /* ??? We are not interested in loop related info, but the following
121 will create it, ICEing as we didn't init loops with pre-headers.
122 An interfacing issue of find_data_references_in_bb. */
123 loop_optimizer_init (LOOPS_NORMAL);
124 scev_initialize ();
125 todo = tree_ssa_phiopt_worker (true, false, false);
126 scev_finalize ();
127 loop_optimizer_finalize ();
128 return todo;
131 /* Return the single PHI in SEQ whose arguments for edges E0 and E1 differ, or the sole PHI if SEQ is a singleton; return NULL if more than one PHI has differing arguments. */
133 static gphi *
134 single_non_singleton_phi_for_edges (gimple_seq seq, edge e0, edge e1)
136 gimple_stmt_iterator i;
137 gphi *phi = NULL;
138 if (gimple_seq_singleton_p (seq))
139 return as_a <gphi *> (gsi_stmt (gsi_start (seq)));
140 for (i = gsi_start (seq); !gsi_end_p (i); gsi_next (&i))
142 gphi *p = as_a <gphi *> (gsi_stmt (i));
143 /* If the PHI arguments are equal then we can skip this PHI. */
144 if (operand_equal_for_phi_arg_p (gimple_phi_arg_def (p, e0->dest_idx),
145 gimple_phi_arg_def (p, e1->dest_idx)))
146 continue;
148 /* If we already found a PHI whose two edge arguments differ, then
149 there is no single such PHI for these edges; return NULL. */
150 if (phi)
151 return NULL;
153 phi = p;
155 return phi;
158 /* The core routine of conditional store replacement and normal
159 phi optimizations. Both share much of the infrastructure in how
160 to match applicable basic block patterns. DO_STORE_ELIM is true
161 when we want to do conditional store replacement, false otherwise.
162 DO_HOIST_LOADS is true when we want to hoist adjacent loads out
163 of diamond control flow patterns, false otherwise.  EARLY_P is true
when this is the early invocation of the pass; some of the replacements
below are only attempted when it is false. */
164 static unsigned int
165 tree_ssa_phiopt_worker (bool do_store_elim, bool do_hoist_loads, bool early_p)
167 basic_block bb;
168 basic_block *bb_order;
169 unsigned n, i;
170 bool cfgchanged = false;
171 hash_set<tree> *nontrap = 0;
173 if (do_store_elim)
174 /* Calculate the set of non-trapping memory accesses. */
175 nontrap = get_non_trapping ();
177 /* Search every basic block for COND_EXPRs we may be able to optimize.
179 We walk the blocks in an order that guarantees that a block with
180 a single predecessor is processed before its predecessor.
181 This ensures that we collapse inner ifs before visiting the
182 outer ones, and also that we do not try to visit a removed
183 block. */
184 bb_order = single_pred_before_succ_order ();
185 n = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS;
187 for (i = 0; i < n; i++)
189 gimple *cond_stmt;
190 gphi *phi;
191 basic_block bb1, bb2;
192 edge e1, e2;
193 tree arg0, arg1;
195 bb = bb_order[i];
197 cond_stmt = last_stmt (bb);
198 /* Check to see if the last statement is a GIMPLE_COND. */
199 if (!cond_stmt
200 || gimple_code (cond_stmt) != GIMPLE_COND)
201 continue;
203 e1 = EDGE_SUCC (bb, 0);
204 bb1 = e1->dest;
205 e2 = EDGE_SUCC (bb, 1);
206 bb2 = e2->dest;
208 /* We cannot do the optimization on abnormal edges. */
209 if ((e1->flags & EDGE_ABNORMAL) != 0
210 || (e2->flags & EDGE_ABNORMAL) != 0)
211 continue;
213 /* Skip if bb1 has no successors, bb2 does not exist, or bb2 has no successors. */
214 if (EDGE_COUNT (bb1->succs) == 0
215 || bb2 == NULL
216 || EDGE_COUNT (bb2->succs) == 0)
217 continue;
219 /* Find the bb which falls through to the other. */
220 if (EDGE_SUCC (bb1, 0)->dest == bb2)
222 else if (EDGE_SUCC (bb2, 0)->dest == bb1)
224 std::swap (bb1, bb2);
225 std::swap (e1, e2);
227 else if (do_store_elim
228 && EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest)
230 basic_block bb3 = EDGE_SUCC (bb1, 0)->dest;
232 if (!single_succ_p (bb1)
233 || (EDGE_SUCC (bb1, 0)->flags & EDGE_FALLTHRU) == 0
234 || !single_succ_p (bb2)
235 || (EDGE_SUCC (bb2, 0)->flags & EDGE_FALLTHRU) == 0
236 || EDGE_COUNT (bb3->preds) != 2)
237 continue;
238 if (cond_if_else_store_replacement (bb1, bb2, bb3))
239 cfgchanged = true;
240 continue;
242 else if (do_hoist_loads
243 && EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest)
245 basic_block bb3 = EDGE_SUCC (bb1, 0)->dest;
247 if (!FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (cond_stmt)))
248 && single_succ_p (bb1)
249 && single_succ_p (bb2)
250 && single_pred_p (bb1)
251 && single_pred_p (bb2)
252 && EDGE_COUNT (bb->succs) == 2
253 && EDGE_COUNT (bb3->preds) == 2
254 /* If one edge or the other is dominant, a conditional move
255 is likely to perform worse than the well-predicted branch. */
256 && !predictable_edge_p (EDGE_SUCC (bb, 0))
257 && !predictable_edge_p (EDGE_SUCC (bb, 1)))
258 hoist_adjacent_loads (bb, bb1, bb2, bb3);
259 continue;
261 else
262 continue;
264 e1 = EDGE_SUCC (bb1, 0);
266 /* Make sure that bb1 is just a fall through. */
267 if (!single_succ_p (bb1)
268 || (e1->flags & EDGE_FALLTHRU) == 0)
269 continue;
271 /* Also make sure that bb1 has only one predecessor and that it
272 is bb. */
273 if (!single_pred_p (bb1)
274 || single_pred (bb1) != bb)
275 continue;
277 if (do_store_elim)
279 /* bb1 is the middle block, bb2 the join block, bb the split block,
280 e1 the fallthrough edge from bb1 to bb2. We can't do the
281 optimization if the join block has more than two predecessors. */
282 if (EDGE_COUNT (bb2->preds) > 2)
283 continue;
284 if (cond_store_replacement (bb1, bb2, e1, e2, nontrap))
285 cfgchanged = true;
287 else
289 gimple_seq phis = phi_nodes (bb2);
290 gimple_stmt_iterator gsi;
291 bool candorest = true;
293 /* Value replacement can work with more than one PHI
294 so try that first. */
295 if (!early_p)
296 for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
298 phi = as_a <gphi *> (gsi_stmt (gsi));
299 arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
300 arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
301 if (value_replacement (bb, bb1, e1, e2, phi, arg0, arg1) == 2)
303 candorest = false;
304 cfgchanged = true;
305 break;
309 if (!candorest)
310 continue;
312 phi = single_non_singleton_phi_for_edges (phis, e1, e2);
313 if (!phi)
314 continue;
316 arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
317 arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
319 /* Something is wrong if we cannot find the arguments in the PHI
320 node. */
321 gcc_assert (arg0 != NULL_TREE && arg1 != NULL_TREE);
323 gphi *newphi = factor_out_conditional_conversion (e1, e2, phi,
324 arg0, arg1,
325 cond_stmt);
326 if (newphi != NULL)
328 phi = newphi;
329 /* factor_out_conditional_conversion may create a new PHI in
330 BB2 and eliminate an existing PHI in BB2. Recompute values
331 that may be affected by that change. */
332 arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
333 arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
334 gcc_assert (arg0 != NULL_TREE && arg1 != NULL_TREE);
337 /* Do the replacement of conditional if it can be done. */
338 if (two_value_replacement (bb, bb1, e2, phi, arg0, arg1))
339 cfgchanged = true;
340 else if (!early_p
341 && conditional_replacement (bb, bb1, e1, e2, phi,
342 arg0, arg1))
343 cfgchanged = true;
344 else if (abs_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
345 cfgchanged = true;
346 else if (!early_p
347 && cond_removal_in_popcount_pattern (bb, bb1, e1, e2,
348 phi, arg0, arg1))
349 cfgchanged = true;
350 else if (minmax_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
351 cfgchanged = true;
355 free (bb_order);
357 if (do_store_elim)
358 delete nontrap;
359 /* If the CFG has changed, we should clean up the CFG. */
360 if (cfgchanged && do_store_elim)
362 /* In cond-store replacement we have added some loads on edges
363 and new VOPS (as we moved the store, and created a load). */
364 gsi_commit_edge_inserts ();
365 return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
367 else if (cfgchanged)
368 return TODO_cleanup_cfg;
369 return 0;
372 /* Replace the PHI argument coming in on edge E in block BB with NEW_TREE.
373 Remove the basic block on the side of COND_BLOCK that does not lead to
374 BB (COND_BLOCK is known to have two successors, one of which reaches BB). */
376 static void
377 replace_phi_edge_with_variable (basic_block cond_block,
378 edge e, gimple *phi, tree new_tree)
380 basic_block bb = gimple_bb (phi);
381 basic_block block_to_remove;
382 gimple_stmt_iterator gsi;
384 /* Change the PHI argument to new. */
385 SET_USE (PHI_ARG_DEF_PTR (phi, e->dest_idx), new_tree);
387 /* Remove the empty basic block. */
388 if (EDGE_SUCC (cond_block, 0)->dest == bb)
390 EDGE_SUCC (cond_block, 0)->flags |= EDGE_FALLTHRU;
391 EDGE_SUCC (cond_block, 0)->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
392 EDGE_SUCC (cond_block, 0)->probability = profile_probability::always ();
394 block_to_remove = EDGE_SUCC (cond_block, 1)->dest;
396 else
398 EDGE_SUCC (cond_block, 1)->flags |= EDGE_FALLTHRU;
399 EDGE_SUCC (cond_block, 1)->flags
400 &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
401 EDGE_SUCC (cond_block, 1)->probability = profile_probability::always ();
403 block_to_remove = EDGE_SUCC (cond_block, 0)->dest;
405 delete_basic_block (block_to_remove);
407 /* Eliminate the COND_EXPR at the end of COND_BLOCK. */
408 gsi = gsi_last_bb (cond_block);
409 gsi_remove (&gsi, true);
411 if (dump_file && (dump_flags & TDF_DETAILS))
412 fprintf (dump_file,
413 "COND_EXPR in block %d and PHI in block %d converted to straightline code.\n",
414 cond_block->index,
415 bb->index);
418 /* PR66726: Factor conversions out of COND_EXPR.  If the arguments of the PHI
419 stmt are conversions, factor the conversion out and apply it to the
420 result of the PHI stmt instead.  COND_STMT is the controlling predicate.
421 Return the newly-created PHI, if any. */
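/* For illustration (a hedged sketch with invented names): a PHI such as

     x_3 = PHI <(long) a_1(bb1), (long) b_2(bb2)>

   i.e. the source-level

     x = c ? (long) a : (long) b;

   is rewritten as

     t_4 = PHI <a_1(bb1), b_2(bb2)>
     x_3 = (long) t_4;

   so that later replacements (e.g. minmax) can see through the cast.  */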
423 static gphi *
424 factor_out_conditional_conversion (edge e0, edge e1, gphi *phi,
425 tree arg0, tree arg1, gimple *cond_stmt)
427 gimple *arg0_def_stmt = NULL, *arg1_def_stmt = NULL, *new_stmt;
428 tree new_arg0 = NULL_TREE, new_arg1 = NULL_TREE;
429 tree temp, result;
430 gphi *newphi;
431 gimple_stmt_iterator gsi, gsi_for_def;
432 location_t locus = gimple_location (phi);
433 enum tree_code convert_code;
435 /* Handle only PHI statements with two arguments. TODO: If all
436 other arguments to PHI are INTEGER_CST or if their defining
437 statements have the same unary operation, we can handle more
438 than two arguments too. */
439 if (gimple_phi_num_args (phi) != 2)
440 return NULL;
442 /* First canonicalize to simplify tests. */
443 if (TREE_CODE (arg0) != SSA_NAME)
445 std::swap (arg0, arg1);
446 std::swap (e0, e1);
449 if (TREE_CODE (arg0) != SSA_NAME
450 || (TREE_CODE (arg1) != SSA_NAME
451 && TREE_CODE (arg1) != INTEGER_CST))
452 return NULL;
454 /* Check if arg0 is an SSA_NAME and the stmt which defines arg0 is
455 a conversion. */
456 arg0_def_stmt = SSA_NAME_DEF_STMT (arg0);
457 if (!gimple_assign_cast_p (arg0_def_stmt))
458 return NULL;
460 /* Use the RHS as new_arg0. */
461 convert_code = gimple_assign_rhs_code (arg0_def_stmt);
462 new_arg0 = gimple_assign_rhs1 (arg0_def_stmt);
463 if (convert_code == VIEW_CONVERT_EXPR)
465 new_arg0 = TREE_OPERAND (new_arg0, 0);
466 if (!is_gimple_reg_type (TREE_TYPE (new_arg0)))
467 return NULL;
470 if (TREE_CODE (arg1) == SSA_NAME)
472 /* Check if arg1 is an SSA_NAME and the stmt which defines arg1
473 is a conversion. */
474 arg1_def_stmt = SSA_NAME_DEF_STMT (arg1);
475 if (!is_gimple_assign (arg1_def_stmt)
476 || gimple_assign_rhs_code (arg1_def_stmt) != convert_code)
477 return NULL;
479 /* Use the RHS as new_arg1. */
480 new_arg1 = gimple_assign_rhs1 (arg1_def_stmt);
481 if (convert_code == VIEW_CONVERT_EXPR)
482 new_arg1 = TREE_OPERAND (new_arg1, 0);
484 else
486 /* If arg1 is an INTEGER_CST, fold it to new type. */
487 if (INTEGRAL_TYPE_P (TREE_TYPE (new_arg0))
488 && int_fits_type_p (arg1, TREE_TYPE (new_arg0)))
490 if (gimple_assign_cast_p (arg0_def_stmt))
492 /* For the INTEGER_CST case, we are just moving the
493 conversion from one place to another, which can often
494 hurt as the conversion moves further away from the
495 statement that computes the value. So, perform this
496 only if new_arg0 is an operand of COND_STMT, or
497 if arg0_def_stmt is the only non-debug stmt in
498 its basic block, because then it is possible this
499 could enable further optimizations (minmax replacement
500 etc.). See PR71016. */
501 if (new_arg0 != gimple_cond_lhs (cond_stmt)
502 && new_arg0 != gimple_cond_rhs (cond_stmt)
503 && gimple_bb (arg0_def_stmt) == e0->src)
505 gsi = gsi_for_stmt (arg0_def_stmt);
506 gsi_prev_nondebug (&gsi);
507 if (!gsi_end_p (gsi))
509 if (gassign *assign
510 = dyn_cast <gassign *> (gsi_stmt (gsi)))
512 tree lhs = gimple_assign_lhs (assign);
513 enum tree_code ass_code
514 = gimple_assign_rhs_code (assign);
515 if (ass_code != MAX_EXPR && ass_code != MIN_EXPR)
516 return NULL;
517 if (lhs != gimple_assign_rhs1 (arg0_def_stmt))
518 return NULL;
519 gsi_prev_nondebug (&gsi);
520 if (!gsi_end_p (gsi))
521 return NULL;
523 else
524 return NULL;
526 gsi = gsi_for_stmt (arg0_def_stmt);
527 gsi_next_nondebug (&gsi);
528 if (!gsi_end_p (gsi))
529 return NULL;
531 new_arg1 = fold_convert (TREE_TYPE (new_arg0), arg1);
533 else
534 return NULL;
536 else
537 return NULL;
540 /* If arg0/arg1 have > 1 use, then this transformation actually increases
541 the number of expressions evaluated at runtime. */
542 if (!has_single_use (arg0)
543 || (arg1_def_stmt && !has_single_use (arg1)))
544 return NULL;
546 /* If the types of new_arg0 and new_arg1 are different, bail out. */
547 if (!types_compatible_p (TREE_TYPE (new_arg0), TREE_TYPE (new_arg1)))
548 return NULL;
550 /* Create a new PHI stmt. */
551 result = PHI_RESULT (phi);
552 temp = make_ssa_name (TREE_TYPE (new_arg0), NULL);
553 newphi = create_phi_node (temp, gimple_bb (phi));
555 if (dump_file && (dump_flags & TDF_DETAILS))
557 fprintf (dump_file, "PHI ");
558 print_generic_expr (dump_file, gimple_phi_result (phi));
559 fprintf (dump_file,
560 " changed to factor conversion out from COND_EXPR.\n");
561 fprintf (dump_file, "New stmt with CAST that defines ");
562 print_generic_expr (dump_file, result);
563 fprintf (dump_file, ".\n");
566 /* Remove the old cast(s), which we know have a single use. */
567 gsi_for_def = gsi_for_stmt (arg0_def_stmt);
568 gsi_remove (&gsi_for_def, true);
569 release_defs (arg0_def_stmt);
571 if (arg1_def_stmt)
573 gsi_for_def = gsi_for_stmt (arg1_def_stmt);
574 gsi_remove (&gsi_for_def, true);
575 release_defs (arg1_def_stmt);
578 add_phi_arg (newphi, new_arg0, e0, locus);
579 add_phi_arg (newphi, new_arg1, e1, locus);
581 /* Create the conversion stmt and insert it. */
582 if (convert_code == VIEW_CONVERT_EXPR)
584 temp = fold_build1 (VIEW_CONVERT_EXPR, TREE_TYPE (result), temp);
585 new_stmt = gimple_build_assign (result, temp);
587 else
588 new_stmt = gimple_build_assign (result, convert_code, temp);
589 gsi = gsi_after_labels (gimple_bb (phi));
590 gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
592 /* Remove the original PHI stmt. */
593 gsi = gsi_for_stmt (phi);
594 gsi_remove (&gsi, true);
595 return newphi;
598 /* Optimize
599 # x_5 in range [cst1, cst2] where cst2 = cst1 + 1
600 if (x_5 op cstN) # where op is == or != and N is 1 or 2
601 goto bb3;
602 else
603 goto bb4;
604 bb3:
605 bb4:
606 # r_6 = PHI<cst3(2), cst4(3)> # where cst3 == cst4 + 1 or cst4 == cst3 + 1
608 to r_6 = x_5 + (min (cst3, cst4) - cst1) or
609 r_6 = (max (cst3, cst4) + cst1) - x_5 depending on op, N and which
610 of cst3 and cst4 is smaller. */
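/* A concrete sketch (values invented): with x_5 known to be in [0, 1],

     if (x_5 == 0) goto bb3; else goto bb4;
     bb3:
     bb4:
     r_6 = PHI <2(2), 3(3)>

   yields r_6 == 3 when x_5 == 0 and r_6 == 2 when x_5 == 1, so the PHI can
   be replaced by the arithmetic  r_6 = 3 - x_5;  */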
612 static bool
613 two_value_replacement (basic_block cond_bb, basic_block middle_bb,
614 edge e1, gphi *phi, tree arg0, tree arg1)
616 /* Only look for adjacent integer constants. */
617 if (!INTEGRAL_TYPE_P (TREE_TYPE (arg0))
618 || !INTEGRAL_TYPE_P (TREE_TYPE (arg1))
619 || TREE_CODE (arg0) != INTEGER_CST
620 || TREE_CODE (arg1) != INTEGER_CST
621 || (tree_int_cst_lt (arg0, arg1)
622 ? wi::to_widest (arg0) + 1 != wi::to_widest (arg1)
623 : wi::to_widest (arg1) + 1 != wi::to_widest (arg0)))
624 return false;
626 if (!empty_block_p (middle_bb))
627 return false;
629 gimple *stmt = last_stmt (cond_bb);
630 tree lhs = gimple_cond_lhs (stmt);
631 tree rhs = gimple_cond_rhs (stmt);
633 if (TREE_CODE (lhs) != SSA_NAME
634 || !INTEGRAL_TYPE_P (TREE_TYPE (lhs))
635 || TREE_CODE (TREE_TYPE (lhs)) == BOOLEAN_TYPE
636 || TREE_CODE (rhs) != INTEGER_CST)
637 return false;
639 switch (gimple_cond_code (stmt))
641 case EQ_EXPR:
642 case NE_EXPR:
643 break;
644 default:
645 return false;
648 wide_int min, max;
649 if (get_range_info (lhs, &min, &max) != VR_RANGE
650 || min + 1 != max
651 || (wi::to_wide (rhs) != min
652 && wi::to_wide (rhs) != max))
653 return false;
655 /* We need to know which is the true edge and which is the false
656 edge so that we know when to invert the condition below. */
657 edge true_edge, false_edge;
658 extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
659 if ((gimple_cond_code (stmt) == EQ_EXPR)
660 ^ (wi::to_wide (rhs) == max)
661 ^ (e1 == false_edge))
662 std::swap (arg0, arg1);
664 tree type;
665 if (TYPE_PRECISION (TREE_TYPE (lhs)) == TYPE_PRECISION (TREE_TYPE (arg0)))
667 /* Avoid performing the arithmetic in bool type, which has different
668 semantics; otherwise prefer the unsigned type of the two with
669 the same precision. */
670 if (TREE_CODE (TREE_TYPE (arg0)) == BOOLEAN_TYPE
671 || !TYPE_UNSIGNED (TREE_TYPE (arg0)))
672 type = TREE_TYPE (lhs);
673 else
674 type = TREE_TYPE (arg0);
676 else if (TYPE_PRECISION (TREE_TYPE (lhs)) > TYPE_PRECISION (TREE_TYPE (arg0)))
677 type = TREE_TYPE (lhs);
678 else
679 type = TREE_TYPE (arg0);
681 min = wide_int::from (min, TYPE_PRECISION (type),
682 TYPE_SIGN (TREE_TYPE (lhs)));
683 wide_int a = wide_int::from (wi::to_wide (arg0), TYPE_PRECISION (type),
684 TYPE_SIGN (TREE_TYPE (arg0)));
685 enum tree_code code;
686 wi::overflow_type ovf;
687 if (tree_int_cst_lt (arg0, arg1))
689 code = PLUS_EXPR;
690 a -= min;
691 if (!TYPE_UNSIGNED (type))
693 /* lhs is known to be in range [min, min+1] and we want to add a
694 to it. Check if that operation can overflow for those 2 values
695 and if yes, force unsigned type. */
696 wi::add (min + (wi::neg_p (a) ? 0 : 1), a, SIGNED, &ovf);
697 if (ovf)
698 type = unsigned_type_for (type);
701 else
703 code = MINUS_EXPR;
704 a += min;
705 if (!TYPE_UNSIGNED (type))
707 /* lhs is known to be in range [min, min+1] and we want to subtract
708 it from a. Check if that operation can overflow for those 2
709 values and if yes, force unsigned type. */
710 wi::sub (a, min + (wi::neg_p (min) ? 0 : 1), SIGNED, &ovf);
711 if (ovf)
712 type = unsigned_type_for (type);
716 tree arg = wide_int_to_tree (type, a);
717 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
718 if (!useless_type_conversion_p (type, TREE_TYPE (lhs)))
719 lhs = gimplify_build1 (&gsi, NOP_EXPR, type, lhs);
720 tree new_rhs;
721 if (code == PLUS_EXPR)
722 new_rhs = gimplify_build2 (&gsi, PLUS_EXPR, type, lhs, arg);
723 else
724 new_rhs = gimplify_build2 (&gsi, MINUS_EXPR, type, arg, lhs);
725 if (!useless_type_conversion_p (TREE_TYPE (arg0), type))
726 new_rhs = gimplify_build1 (&gsi, NOP_EXPR, TREE_TYPE (arg0), new_rhs);
728 replace_phi_edge_with_variable (cond_bb, e1, phi, new_rhs);
730 /* Note that we optimized this PHI. */
731 return true;
734 /* The function conditional_replacement does the main work of doing the
735 conditional replacement. Return true if the replacement is done.
736 Otherwise return false.
737 BB is the basic block where the replacement is going to be done. ARG0
738 is argument 0 from PHI. Likewise for ARG1. */
740 static bool
741 conditional_replacement (basic_block cond_bb, basic_block middle_bb,
742 edge e0, edge e1, gphi *phi,
743 tree arg0, tree arg1)
745 tree result;
746 gimple *stmt;
747 gassign *new_stmt;
748 tree cond;
749 gimple_stmt_iterator gsi;
750 edge true_edge, false_edge;
751 tree new_var, new_var2;
752 bool neg;
754 /* FIXME: Gimplification of complex type is too hard for now. */
755 /* We aren't prepared to handle vectors either (and it is a question
756 if it would be worthwhile anyway). */
757 if (!(INTEGRAL_TYPE_P (TREE_TYPE (arg0))
758 || POINTER_TYPE_P (TREE_TYPE (arg0)))
759 || !(INTEGRAL_TYPE_P (TREE_TYPE (arg1))
760 || POINTER_TYPE_P (TREE_TYPE (arg1))))
761 return false;
763 /* If the PHI arguments are the constants 0 and 1, or 0 and -1, then
764 convert the PHI into the condition (possibly negated). */
765 if ((integer_zerop (arg0) && integer_onep (arg1))
766 || (integer_zerop (arg1) && integer_onep (arg0)))
767 neg = false;
768 else if ((integer_zerop (arg0) && integer_all_onesp (arg1))
769 || (integer_zerop (arg1) && integer_all_onesp (arg0)))
770 neg = true;
771 else
772 return false;
774 if (!empty_block_p (middle_bb))
775 return false;
777 /* At this point we know we have a GIMPLE_COND with two successors.
778 One successor is BB, the other successor is an empty block which
779 falls through into BB.
781 There is a single PHI node at the join point (BB) and its arguments
782 are constants (0, 1) or (0, -1).
784 So, given the condition COND, and the two PHI arguments, we can
785 rewrite this PHI into non-branching code:
787 dest = (COND) or dest = COND'
789 We use the condition as-is if the argument associated with the
790 true edge has the value one or the argument associated with the
791 false edge has the value zero. Note that those conditions are not
792 the same since only one of the outgoing edges from the GIMPLE_COND
793 will directly reach BB and thus be associated with an argument. */
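/* For example (a sketch):

     r = cond ? 1 : 0;    becomes   r = cond;
     r = cond ? 0 : 1;    becomes   r = !cond;        (condition inverted)
     r = cond ? -1 : 0;   becomes   r = -(TYPE) cond;  */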
795 stmt = last_stmt (cond_bb);
796 result = PHI_RESULT (phi);
798 /* To handle special cases like floating point comparison, it is easier and
799 less error-prone to build a tree and gimplify it on the fly though it is
800 less efficient. */
801 cond = fold_build2_loc (gimple_location (stmt),
802 gimple_cond_code (stmt), boolean_type_node,
803 gimple_cond_lhs (stmt), gimple_cond_rhs (stmt));
805 /* We need to know which is the true edge and which is the false
806 edge so that we know when to invert the condition below. */
807 extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
808 if ((e0 == true_edge && integer_zerop (arg0))
809 || (e0 == false_edge && !integer_zerop (arg0))
810 || (e1 == true_edge && integer_zerop (arg1))
811 || (e1 == false_edge && !integer_zerop (arg1)))
812 cond = fold_build1_loc (gimple_location (stmt),
813 TRUTH_NOT_EXPR, TREE_TYPE (cond), cond);
815 if (neg)
817 cond = fold_convert_loc (gimple_location (stmt),
818 TREE_TYPE (result), cond);
819 cond = fold_build1_loc (gimple_location (stmt),
820 NEGATE_EXPR, TREE_TYPE (cond), cond);
823 /* Insert our new statements at the end of conditional block before the
824 COND_STMT. */
825 gsi = gsi_for_stmt (stmt);
826 new_var = force_gimple_operand_gsi (&gsi, cond, true, NULL, true,
827 GSI_SAME_STMT);
829 if (!useless_type_conversion_p (TREE_TYPE (result), TREE_TYPE (new_var)))
831 location_t locus_0, locus_1;
833 new_var2 = make_ssa_name (TREE_TYPE (result));
834 new_stmt = gimple_build_assign (new_var2, CONVERT_EXPR, new_var);
835 gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
836 new_var = new_var2;
838 /* Set the locus to the first argument, unless it doesn't have one. */
839 locus_0 = gimple_phi_arg_location (phi, 0);
840 locus_1 = gimple_phi_arg_location (phi, 1);
841 if (locus_0 == UNKNOWN_LOCATION)
842 locus_0 = locus_1;
843 gimple_set_location (new_stmt, locus_0);
846 replace_phi_edge_with_variable (cond_bb, e1, phi, new_var);
848 /* Note that we optimized this PHI. */
849 return true;
852 /* Update *ARG which is defined in STMT so that it contains the
853 computed value if that seems profitable. Return true if the
854 statement is made dead by that rewriting. */
856 static bool
857 jump_function_from_stmt (tree *arg, gimple *stmt)
859 enum tree_code code = gimple_assign_rhs_code (stmt);
860 if (code == ADDR_EXPR)
862 /* For arg = &p->i transform it to p, if possible. */
863 tree rhs1 = gimple_assign_rhs1 (stmt);
864 poly_int64 offset;
865 tree tem = get_addr_base_and_unit_offset (TREE_OPERAND (rhs1, 0),
866 &offset);
867 if (tem
868 && TREE_CODE (tem) == MEM_REF
869 && known_eq (mem_ref_offset (tem) + offset, 0))
871 *arg = TREE_OPERAND (tem, 0);
872 return true;
875 /* TODO: Much like IPA-CP jump-functions we want to handle constant
876 additions symbolically here, and we'd need to update the comparison
877 code that compares the arg + cst tuples in our caller. For now the
878 code above exactly handles the VEC_BASE pattern from vec.h. */
879 return false;
882 /* RHS is a source argument in a BIT_AND_EXPR which feeds a conditional
883 of the form SSA_NAME NE 0.
885 If RHS is fed by a simple EQ_EXPR comparison of two values, see if
886 the two input values of the EQ_EXPR match arg0 and arg1.
888 If so update *code and return TRUE. Otherwise return FALSE. */
890 static bool
891 rhs_is_fed_for_value_replacement (const_tree arg0, const_tree arg1,
892 enum tree_code *code, const_tree rhs)
894 /* Obviously if RHS is not an SSA_NAME, we can't look at the defining
895 statement. */
896 if (TREE_CODE (rhs) == SSA_NAME)
898 gimple *def1 = SSA_NAME_DEF_STMT (rhs);
900 /* Verify the defining statement has an EQ_EXPR on the RHS. */
901 if (is_gimple_assign (def1) && gimple_assign_rhs_code (def1) == EQ_EXPR)
903 /* Finally verify the source operands of the EQ_EXPR are equal
904 to arg0 and arg1. */
905 tree op0 = gimple_assign_rhs1 (def1);
906 tree op1 = gimple_assign_rhs2 (def1);
907 if ((operand_equal_for_phi_arg_p (arg0, op0)
908 && operand_equal_for_phi_arg_p (arg1, op1))
909 || (operand_equal_for_phi_arg_p (arg0, op1)
910 && operand_equal_for_phi_arg_p (arg1, op0)))
912 /* We will perform the optimization. */
913 *code = gimple_assign_rhs_code (def1);
914 return true;
918 return false;
921 /* Return TRUE if arg0/arg1 are equal to the rhs/lhs or lhs/rhs of COND.
923 Also return TRUE if arg0/arg1 are equal to the source arguments of
924 an EQ comparison feeding a BIT_AND_EXPR which feeds COND.
926 Return FALSE otherwise. */
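/* The more complex case looks like this (a sketch, SSA names invented):

     _1 = a_2 == b_3;
     _4 = _1 & c_5;
     if (_4 != 0) goto ...; else goto ...;

   Here ARG0/ARG1 equal to a_2/b_3 also count as a match, and *CODE is
   updated to EQ_EXPR.  */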
928 static bool
929 operand_equal_for_value_replacement (const_tree arg0, const_tree arg1,
930 enum tree_code *code, gimple *cond)
932 gimple *def;
933 tree lhs = gimple_cond_lhs (cond);
934 tree rhs = gimple_cond_rhs (cond);
936 if ((operand_equal_for_phi_arg_p (arg0, lhs)
937 && operand_equal_for_phi_arg_p (arg1, rhs))
938 || (operand_equal_for_phi_arg_p (arg1, lhs)
939 && operand_equal_for_phi_arg_p (arg0, rhs)))
940 return true;
942 /* Now handle more complex case where we have an EQ comparison
943 which feeds a BIT_AND_EXPR which feeds COND.
945 First verify that COND is of the form SSA_NAME NE 0. */
946 if (*code != NE_EXPR || !integer_zerop (rhs)
947 || TREE_CODE (lhs) != SSA_NAME)
948 return false;
950 /* Now ensure that SSA_NAME is set by a BIT_AND_EXPR. */
951 def = SSA_NAME_DEF_STMT (lhs);
952 if (!is_gimple_assign (def) || gimple_assign_rhs_code (def) != BIT_AND_EXPR)
953 return false;
955 /* Now verify arg0/arg1 correspond to the source arguments of an
956 EQ comparison feeding the BIT_AND_EXPR. */
958 tree tmp = gimple_assign_rhs1 (def);
959 if (rhs_is_fed_for_value_replacement (arg0, arg1, code, tmp))
960 return true;
962 tmp = gimple_assign_rhs2 (def);
963 if (rhs_is_fed_for_value_replacement (arg0, arg1, code, tmp))
964 return true;
966 return false;
969 /* Returns true if ARG is a neutral element for operation CODE, where
970 RIGHT says whether ARG appears as the right operand. */
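/* E.g. x + 0, x | 0, x ^ 0, x * 1 and x & -1 all equal x on either side,
   while x - 0, x >> 0 and x / 1 equal x only when the constant is the
   right operand (0 - x and 1 / x are not x in general).  */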
972 static bool
973 neutral_element_p (tree_code code, tree arg, bool right)
975 switch (code)
977 case PLUS_EXPR:
978 case BIT_IOR_EXPR:
979 case BIT_XOR_EXPR:
980 return integer_zerop (arg);
982 case LROTATE_EXPR:
983 case RROTATE_EXPR:
984 case LSHIFT_EXPR:
985 case RSHIFT_EXPR:
986 case MINUS_EXPR:
987 case POINTER_PLUS_EXPR:
988 return right && integer_zerop (arg);
990 case MULT_EXPR:
991 return integer_onep (arg);
993 case TRUNC_DIV_EXPR:
994 case CEIL_DIV_EXPR:
995 case FLOOR_DIV_EXPR:
996 case ROUND_DIV_EXPR:
997 case EXACT_DIV_EXPR:
998 return right && integer_onep (arg);
1000 case BIT_AND_EXPR:
1001 return integer_all_onesp (arg);
1003 default:
1004 return false;
1008 /* Returns true if ARG is an absorbing element for operation CODE. */
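/* E.g. x * 0 and x & 0 are 0 and x | -1 is -1 regardless of x, while
   0 << x is 0 only when the zero is the left operand (x << 0 is x), and
   0 / x or 0 % x is 0 only when x (RVAL) is known to be nonzero, since
   otherwise the division itself would invoke undefined behavior.  */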
1010 static bool
1011 absorbing_element_p (tree_code code, tree arg, bool right, tree rval)
1013 switch (code)
1015 case BIT_IOR_EXPR:
1016 return integer_all_onesp (arg);
1018 case MULT_EXPR:
1019 case BIT_AND_EXPR:
1020 return integer_zerop (arg);
1022 case LSHIFT_EXPR:
1023 case RSHIFT_EXPR:
1024 case LROTATE_EXPR:
1025 case RROTATE_EXPR:
1026 return !right && integer_zerop (arg);
1028 case TRUNC_DIV_EXPR:
1029 case CEIL_DIV_EXPR:
1030 case FLOOR_DIV_EXPR:
1031 case ROUND_DIV_EXPR:
1032 case EXACT_DIV_EXPR:
1033 case TRUNC_MOD_EXPR:
1034 case CEIL_MOD_EXPR:
1035 case FLOOR_MOD_EXPR:
1036 case ROUND_MOD_EXPR:
1037 return (!right
1038 && integer_zerop (arg)
1039 && tree_single_nonzero_warnv_p (rval, NULL));
1041 default:
1042 return false;
1046 /* The function value_replacement does the main work of doing the value
1047 replacement. Return non-zero if the replacement is done. Otherwise return
1048 0. If we remove the middle basic block, return 2.
1049 BB is the basic block where the replacement is going to be done. ARG0
1050 is argument 0 from the PHI. Likewise for ARG1. */
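/* For example (a sketch, ignoring the signed-zero restriction below):

     x = (a != b) ? a : b;

   On the else path a == b holds, so both arms yield the same value and the
   whole expression can be replaced by  x = a;  */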
1052 static int
1053 value_replacement (basic_block cond_bb, basic_block middle_bb,
1054 edge e0, edge e1, gimple *phi,
1055 tree arg0, tree arg1)
1057 gimple_stmt_iterator gsi;
1058 gimple *cond;
1059 edge true_edge, false_edge;
1060 enum tree_code code;
1061 bool empty_or_with_defined_p = true;
1063 /* If the type says honor signed zeros we cannot do this
1064 optimization. */
1065 if (HONOR_SIGNED_ZEROS (arg1))
1066 return 0;
1068 /* If there is a statement in MIDDLE_BB that defines one of the PHI
1069 arguments, then adjust arg0 or arg1. */
1070 gsi = gsi_start_nondebug_after_labels_bb (middle_bb);
1071 while (!gsi_end_p (gsi))
1073 gimple *stmt = gsi_stmt (gsi);
1074 tree lhs;
1075 gsi_next_nondebug (&gsi);
1076 if (!is_gimple_assign (stmt))
1078 if (gimple_code (stmt) != GIMPLE_PREDICT
1079 && gimple_code (stmt) != GIMPLE_NOP)
1080 empty_or_with_defined_p = false;
1081 continue;
1083 /* Now try to adjust arg0 or arg1 according to the computation
1084 in the statement. */
1085 lhs = gimple_assign_lhs (stmt);
1086 if (!(lhs == arg0
1087 && jump_function_from_stmt (&arg0, stmt))
1088 || (lhs == arg1
1089 && jump_function_from_stmt (&arg1, stmt)))
1090 empty_or_with_defined_p = false;
1093 cond = last_stmt (cond_bb);
1094 code = gimple_cond_code (cond);
1096 /* This transformation is only valid for equality comparisons. */
1097 if (code != NE_EXPR && code != EQ_EXPR)
1098 return 0;
1100 /* We need to know which is the true edge and which is the false
1101 edge so that we know which PHI argument corresponds to which edge. */
1102 extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
1104 /* At this point we know we have a COND_EXPR with two successors.
1105 One successor is BB, the other successor is an empty block which
1106 falls through into BB.
1108 The condition for the COND_EXPR is known to be NE_EXPR or EQ_EXPR.
1110 There is a single PHI node at the join point (BB) with two arguments.
1112 We now need to verify that the two arguments in the PHI node match
1113 the two arguments to the equality comparison. */
1115 if (operand_equal_for_value_replacement (arg0, arg1, &code, cond))
1117 edge e;
1118 tree arg;
1120 /* For NE_EXPR, we want to build an assignment result = arg where
1121 arg is the PHI argument associated with the true edge. For
1122 EQ_EXPR we want the PHI argument associated with the false edge. */
1123 e = (code == NE_EXPR ? true_edge : false_edge);
1125 /* Unfortunately, E may not reach BB (it may instead have gone to
1126 OTHER_BLOCK). If that is the case, then we want the single outgoing
1127 edge from OTHER_BLOCK which reaches BB and represents the desired
1128 path from COND_BLOCK. */
1129 if (e->dest == middle_bb)
1130 e = single_succ_edge (e->dest);
1132 /* Now we know the incoming edge to BB that has the argument for the
1133 RHS of our new assignment statement. */
1134 if (e0 == e)
1135 arg = arg0;
1136 else
1137 arg = arg1;
1139 /* If the middle basic block was empty or only defines the
1140 PHI arguments, and this is the single PHI whose args are different
1141 for the edges e0 and e1, then we can remove the middle basic block. */
1142 if (empty_or_with_defined_p
1143 && single_non_singleton_phi_for_edges (phi_nodes (gimple_bb (phi)),
1144 e0, e1) == phi)
1146 replace_phi_edge_with_variable (cond_bb, e1, phi, arg);
1147 /* Note that we optimized this PHI. */
1148 return 2;
1150 else
1152 /* Replace the PHI arguments with arg. */
1153 SET_PHI_ARG_DEF (phi, e0->dest_idx, arg);
1154 SET_PHI_ARG_DEF (phi, e1->dest_idx, arg);
1155 if (dump_file && (dump_flags & TDF_DETAILS))
1157 fprintf (dump_file, "PHI ");
1158 print_generic_expr (dump_file, gimple_phi_result (phi));
1159 fprintf (dump_file, " reduced for COND_EXPR in block %d to ",
1160 cond_bb->index);
1161 print_generic_expr (dump_file, arg);
1162 fprintf (dump_file, ".\n");
1164 return 1;
1169 /* Now optimize (x != 0) ? x + y : y to just x + y. */
1170 gsi = gsi_last_nondebug_bb (middle_bb);
1171 if (gsi_end_p (gsi))
1172 return 0;
1174 gimple *assign = gsi_stmt (gsi);
1175 if (!is_gimple_assign (assign)
1176 || gimple_assign_rhs_class (assign) != GIMPLE_BINARY_RHS
1177 || (!INTEGRAL_TYPE_P (TREE_TYPE (arg0))
1178 && !POINTER_TYPE_P (TREE_TYPE (arg0))))
1179 return 0;
1181 /* Punt if there are (degenerate) PHIs in middle_bb; there should not be any. */
1182 if (!gimple_seq_empty_p (phi_nodes (middle_bb)))
1183 return 0;
1185 /* Allow up to 2 cheap preparation statements that prepare the argument
1186 for assign, e.g.:
1187 if (y_4 != 0)
1188 goto <bb 3>;
1189 else
1190 goto <bb 4>;
1191 <bb 3>:
1192 _1 = (int) y_4;
1193 iftmp.0_6 = x_5(D) r<< _1;
1194 <bb 4>:
1195 # iftmp.0_2 = PHI <iftmp.0_6(3), x_5(D)(2)>
1197 if (y_3(D) == 0)
1198 goto <bb 4>;
1199 else
1200 goto <bb 3>;
1201 <bb 3>:
1202 y_4 = y_3(D) & 31;
1203 _1 = (int) y_4;
1204 _6 = x_5(D) r<< _1;
1205 <bb 4>:
1206 # _2 = PHI <x_5(D)(2), _6(3)> */
1207 gimple *prep_stmt[2] = { NULL, NULL };
1208 int prep_cnt;
1209 for (prep_cnt = 0; ; prep_cnt++)
1211 gsi_prev_nondebug (&gsi);
1212 if (gsi_end_p (gsi))
1213 break;
1215 gimple *g = gsi_stmt (gsi);
1216 if (gimple_code (g) == GIMPLE_LABEL)
1217 break;
1219 if (prep_cnt == 2 || !is_gimple_assign (g))
1220 return 0;
1222 tree lhs = gimple_assign_lhs (g);
1223 tree rhs1 = gimple_assign_rhs1 (g);
1224 use_operand_p use_p;
1225 gimple *use_stmt;
1226 if (TREE_CODE (lhs) != SSA_NAME
1227 || TREE_CODE (rhs1) != SSA_NAME
1228 || !INTEGRAL_TYPE_P (TREE_TYPE (lhs))
1229 || !INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
1230 || !single_imm_use (lhs, &use_p, &use_stmt)
1231 || use_stmt != (prep_cnt ? prep_stmt[prep_cnt - 1] : assign))
1232 return 0;
1233 switch (gimple_assign_rhs_code (g))
1235 CASE_CONVERT:
1236 break;
1237 case PLUS_EXPR:
1238 case BIT_AND_EXPR:
1239 case BIT_IOR_EXPR:
1240 case BIT_XOR_EXPR:
1241 if (TREE_CODE (gimple_assign_rhs2 (g)) != INTEGER_CST)
1242 return 0;
1243 break;
1244 default:
1245 return 0;
1247 prep_stmt[prep_cnt] = g;
1250 /* Only transform if it removes the condition. */
1251 if (!single_non_singleton_phi_for_edges (phi_nodes (gimple_bb (phi)), e0, e1))
1252 return 0;
1254 /* Size-wise, this is always profitable. */
1255 if (optimize_bb_for_speed_p (cond_bb)
1256 /* The special case is useless if it has a low probability. */
1257 && profile_status_for_fn (cfun) != PROFILE_ABSENT
1258 && EDGE_PRED (middle_bb, 0)->probability < profile_probability::even ()
1259 /* If assign is cheap, there is no point avoiding it. */
1260 && estimate_num_insns_seq (bb_seq (middle_bb), &eni_time_weights)
1261 >= 3 * estimate_num_insns (cond, &eni_time_weights))
1262 return 0;
1264 tree lhs = gimple_assign_lhs (assign);
1265 tree rhs1 = gimple_assign_rhs1 (assign);
1266 tree rhs2 = gimple_assign_rhs2 (assign);
1267 enum tree_code code_def = gimple_assign_rhs_code (assign);
1268 tree cond_lhs = gimple_cond_lhs (cond);
1269 tree cond_rhs = gimple_cond_rhs (cond);
1271 /* Propagate the cond_rhs constant through the preparation stmts,
1272 making sure UB isn't invoked while doing that. */
1273 for (int i = prep_cnt - 1; i >= 0; --i)
1275 gimple *g = prep_stmt[i];
1276 tree grhs1 = gimple_assign_rhs1 (g);
1277 if (!operand_equal_for_phi_arg_p (cond_lhs, grhs1))
1278 return 0;
1279 cond_lhs = gimple_assign_lhs (g);
1280 cond_rhs = fold_convert (TREE_TYPE (grhs1), cond_rhs);
1281 if (TREE_CODE (cond_rhs) != INTEGER_CST
1282 || TREE_OVERFLOW (cond_rhs))
1283 return 0;
1284 if (gimple_assign_rhs_class (g) == GIMPLE_BINARY_RHS)
1286 cond_rhs = int_const_binop (gimple_assign_rhs_code (g), cond_rhs,
1287 gimple_assign_rhs2 (g));
1288 if (TREE_OVERFLOW (cond_rhs))
1289 return 0;
1291 cond_rhs = fold_convert (TREE_TYPE (cond_lhs), cond_rhs);
1292 if (TREE_CODE (cond_rhs) != INTEGER_CST
1293 || TREE_OVERFLOW (cond_rhs))
1294 return 0;
1297 if (((code == NE_EXPR && e1 == false_edge)
1298 || (code == EQ_EXPR && e1 == true_edge))
1299 && arg0 == lhs
1300 && ((arg1 == rhs1
1301 && operand_equal_for_phi_arg_p (rhs2, cond_lhs)
1302 && neutral_element_p (code_def, cond_rhs, true))
1303 || (arg1 == rhs2
1304 && operand_equal_for_phi_arg_p (rhs1, cond_lhs)
1305 && neutral_element_p (code_def, cond_rhs, false))
1306 || (operand_equal_for_phi_arg_p (arg1, cond_rhs)
1307 && ((operand_equal_for_phi_arg_p (rhs2, cond_lhs)
1308 && absorbing_element_p (code_def, cond_rhs, true, rhs2))
1309 || (operand_equal_for_phi_arg_p (rhs1, cond_lhs)
1310 && absorbing_element_p (code_def,
1311 cond_rhs, false, rhs2))))))
1313 gsi = gsi_for_stmt (cond);
1314 /* Moving ASSIGN might change VR of lhs, e.g. when moving u_6
1315 def-stmt in:
1316 if (n_5 != 0)
1317 goto <bb 3>;
1318 else
1319 goto <bb 4>;
1321 <bb 3>:
1322 # RANGE [0, 4294967294]
1323 u_6 = n_5 + 4294967295;
1325 <bb 4>:
1326 # u_3 = PHI <u_6(3), 4294967295(2)> */
1327 reset_flow_sensitive_info (lhs);
1328 if (INTEGRAL_TYPE_P (TREE_TYPE (lhs)))
1330 /* If available, we can use VR of phi result at least. */
1331 tree phires = gimple_phi_result (phi);
1332 struct range_info_def *phires_range_info
1333 = SSA_NAME_RANGE_INFO (phires);
1334 if (phires_range_info)
1335 duplicate_ssa_name_range_info (lhs, SSA_NAME_RANGE_TYPE (phires),
1336 phires_range_info);
1338 gimple_stmt_iterator gsi_from;
1339 for (int i = prep_cnt - 1; i >= 0; --i)
1341 tree plhs = gimple_assign_lhs (prep_stmt[i]);
1342 reset_flow_sensitive_info (plhs);
1343 gsi_from = gsi_for_stmt (prep_stmt[i]);
1344 gsi_move_before (&gsi_from, &gsi);
1346 gsi_from = gsi_for_stmt (assign);
1347 gsi_move_before (&gsi_from, &gsi);
1348 replace_phi_edge_with_variable (cond_bb, e1, phi, lhs);
1349 return 2;
1352 return 0;
1355 /* The function minmax_replacement does the main work of doing the minmax
1356 replacement. Return true if the replacement is done. Otherwise return
1357 false.
1358 BB is the basic block where the replacement is going to be done. ARG0
1359 is argument 0 from the PHI. Likewise for ARG1. */
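/* For example (a sketch): with the NaN/signed-zero restrictions checked below,

     x = (a < b) ? a : b;

   is replaced by  x = MIN_EXPR <a, b>;  and the mirrored form by MAX_EXPR.  */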
1361 static bool
1362 minmax_replacement (basic_block cond_bb, basic_block middle_bb,
1363 edge e0, edge e1, gimple *phi,
1364 tree arg0, tree arg1)
1366 tree result;
1367 edge true_edge, false_edge;
1368 enum tree_code minmax, ass_code;
1369 tree smaller, larger, arg_true, arg_false;
1370 gimple_stmt_iterator gsi, gsi_from;
1372 tree type = TREE_TYPE (PHI_RESULT (phi));
1374 /* The optimization may be unsafe due to NaNs. */
1375 if (HONOR_NANS (type) || HONOR_SIGNED_ZEROS (type))
1376 return false;
1378 gcond *cond = as_a <gcond *> (last_stmt (cond_bb));
1379 enum tree_code cmp = gimple_cond_code (cond);
1380 tree rhs = gimple_cond_rhs (cond);
1382 /* Turn EQ/NE of extreme values to order comparisons. */
1383 if ((cmp == NE_EXPR || cmp == EQ_EXPR)
1384 && TREE_CODE (rhs) == INTEGER_CST
1385 && INTEGRAL_TYPE_P (TREE_TYPE (rhs)))
1387 if (wi::eq_p (wi::to_wide (rhs), wi::min_value (TREE_TYPE (rhs))))
1389 cmp = (cmp == EQ_EXPR) ? LT_EXPR : GE_EXPR;
1390 rhs = wide_int_to_tree (TREE_TYPE (rhs),
1391 wi::min_value (TREE_TYPE (rhs)) + 1);
1393 else if (wi::eq_p (wi::to_wide (rhs), wi::max_value (TREE_TYPE (rhs))))
1395 cmp = (cmp == EQ_EXPR) ? GT_EXPR : LE_EXPR;
1396 rhs = wide_int_to_tree (TREE_TYPE (rhs),
1397 wi::max_value (TREE_TYPE (rhs)) - 1);
1401 /* This transformation is only valid for order comparisons. Record which
1402 operand is smaller/larger if the result of the comparison is true. */
1403 tree alt_smaller = NULL_TREE;
1404 tree alt_larger = NULL_TREE;
1405 if (cmp == LT_EXPR || cmp == LE_EXPR)
1407 smaller = gimple_cond_lhs (cond);
1408 larger = rhs;
1409 /* If we have smaller < CST it is equivalent to smaller <= CST-1.
1410 Likewise smaller <= CST is equivalent to smaller < CST+1. */
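/* For instance (a sketch),  r = (x < 5) ? x : 4;  is still MIN (x, 4),
   because x < 5 is the same condition as x <= 4.  */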
1411 if (TREE_CODE (larger) == INTEGER_CST
1412 && INTEGRAL_TYPE_P (TREE_TYPE (larger)))
1414 if (cmp == LT_EXPR)
1416 wi::overflow_type overflow;
1417 wide_int alt = wi::sub (wi::to_wide (larger), 1,
1418 TYPE_SIGN (TREE_TYPE (larger)),
1419 &overflow);
1420 if (! overflow)
1421 alt_larger = wide_int_to_tree (TREE_TYPE (larger), alt);
1423 else
1425 wi::overflow_type overflow;
1426 wide_int alt = wi::add (wi::to_wide (larger), 1,
1427 TYPE_SIGN (TREE_TYPE (larger)),
1428 &overflow);
1429 if (! overflow)
1430 alt_larger = wide_int_to_tree (TREE_TYPE (larger), alt);
1434 else if (cmp == GT_EXPR || cmp == GE_EXPR)
1436 smaller = rhs;
1437 larger = gimple_cond_lhs (cond);
1438 /* If we have larger > CST it is equivalent to larger >= CST+1.
1439 Likewise larger >= CST is equivalent to larger > CST-1. */
1440 if (TREE_CODE (smaller) == INTEGER_CST
1441 && INTEGRAL_TYPE_P (TREE_TYPE (smaller)))
1443 wi::overflow_type overflow;
1444 if (cmp == GT_EXPR)
1446 wide_int alt = wi::add (wi::to_wide (smaller), 1,
1447 TYPE_SIGN (TREE_TYPE (smaller)),
1448 &overflow);
1449 if (! overflow)
1450 alt_smaller = wide_int_to_tree (TREE_TYPE (smaller), alt);
1452 else
1454 wide_int alt = wi::sub (wi::to_wide (smaller), 1,
1455 TYPE_SIGN (TREE_TYPE (smaller)),
1456 &overflow);
1457 if (! overflow)
1458 alt_smaller = wide_int_to_tree (TREE_TYPE (smaller), alt);
1462 else
1463 return false;
1465 /* Handle the special case of (signed_type)x < 0 being equivalent
1466 to x > MAX_VAL(signed_type) and (signed_type)x >= 0 equivalent
1467 to x <= MAX_VAL(signed_type). */
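/* For instance (a sketch), with a 32-bit unsigned u_2 and the cast
   _1 = (int) u_2, the test _1 < 0 is the same as u_2 > 2147483647,
   which the code below expresses via SMALLER/ALT_SMALLER.  */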
1468 if ((cmp == GE_EXPR || cmp == LT_EXPR)
1469 && INTEGRAL_TYPE_P (type)
1470 && TYPE_UNSIGNED (type)
1471 && integer_zerop (rhs))
1473 tree op = gimple_cond_lhs (cond);
1474 if (TREE_CODE (op) == SSA_NAME
1475 && INTEGRAL_TYPE_P (TREE_TYPE (op))
1476 && !TYPE_UNSIGNED (TREE_TYPE (op)))
1478 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
1479 if (gimple_assign_cast_p (def_stmt))
1481 tree op1 = gimple_assign_rhs1 (def_stmt);
1482 if (INTEGRAL_TYPE_P (TREE_TYPE (op1))
1483 && TYPE_UNSIGNED (TREE_TYPE (op1))
1484 && (TYPE_PRECISION (TREE_TYPE (op))
1485 == TYPE_PRECISION (TREE_TYPE (op1)))
1486 && useless_type_conversion_p (type, TREE_TYPE (op1)))
1488 wide_int w1 = wi::max_value (TREE_TYPE (op));
1489 wide_int w2 = wi::add (w1, 1);
1490 if (cmp == LT_EXPR)
1492 larger = op1;
1493 smaller = wide_int_to_tree (TREE_TYPE (op1), w1);
1494 alt_smaller = wide_int_to_tree (TREE_TYPE (op1), w2);
1495 alt_larger = NULL_TREE;
1497 else
1499 smaller = op1;
1500 larger = wide_int_to_tree (TREE_TYPE (op1), w1);
1501 alt_larger = wide_int_to_tree (TREE_TYPE (op1), w2);
1502 alt_smaller = NULL_TREE;
1509 /* We need to know which is the true edge and which is the false
1510 edge so that we know which PHI argument is taken when the condition is true and which when it is false. */
1511 extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
1513 /* Forward the edges over the middle basic block. */
1514 if (true_edge->dest == middle_bb)
1515 true_edge = EDGE_SUCC (true_edge->dest, 0);
1516 if (false_edge->dest == middle_bb)
1517 false_edge = EDGE_SUCC (false_edge->dest, 0);
1519 if (true_edge == e0)
1521 gcc_assert (false_edge == e1);
1522 arg_true = arg0;
1523 arg_false = arg1;
1525 else
1527 gcc_assert (false_edge == e0);
1528 gcc_assert (true_edge == e1);
1529 arg_true = arg1;
1530 arg_false = arg0;
1533 if (empty_block_p (middle_bb))
1535 if ((operand_equal_for_phi_arg_p (arg_true, smaller)
1536 || (alt_smaller
1537 && operand_equal_for_phi_arg_p (arg_true, alt_smaller)))
1538 && (operand_equal_for_phi_arg_p (arg_false, larger)
1539 || (alt_larger
1540 && operand_equal_for_phi_arg_p (arg_false, alt_larger))))
1542 /* Case
1544 if (smaller < larger)
1545 rslt = smaller;
1546 else
1547 rslt = larger; */
1548 minmax = MIN_EXPR;
1550 else if ((operand_equal_for_phi_arg_p (arg_false, smaller)
1551 || (alt_smaller
1552 && operand_equal_for_phi_arg_p (arg_false, alt_smaller)))
1553 && (operand_equal_for_phi_arg_p (arg_true, larger)
1554 || (alt_larger
1555 && operand_equal_for_phi_arg_p (arg_true, alt_larger))))
1556 minmax = MAX_EXPR;
1557 else
1558 return false;
1560 else
1562 /* Recognize the following case, assuming d <= u:
1564 if (a <= u)
1565 b = MAX (a, d);
1566 x = PHI <b, u>
1568 This is equivalent to
1570 b = MAX (a, d);
1571 x = MIN (b, u); */
1573 gimple *assign = last_and_only_stmt (middle_bb);
1574 tree lhs, op0, op1, bound;
1576 if (!assign
1577 || gimple_code (assign) != GIMPLE_ASSIGN)
1578 return false;
1580 lhs = gimple_assign_lhs (assign);
1581 ass_code = gimple_assign_rhs_code (assign);
1582 if (ass_code != MAX_EXPR && ass_code != MIN_EXPR)
1583 return false;
1584 op0 = gimple_assign_rhs1 (assign);
1585 op1 = gimple_assign_rhs2 (assign);
1587 if (true_edge->src == middle_bb)
1589 /* We got here if the condition is true, i.e., SMALLER < LARGER. */
1590 if (!operand_equal_for_phi_arg_p (lhs, arg_true))
1591 return false;
1593 if (operand_equal_for_phi_arg_p (arg_false, larger)
1594 || (alt_larger
1595 && operand_equal_for_phi_arg_p (arg_false, alt_larger)))
1597 /* Case
1599 if (smaller < larger)
1601 r' = MAX_EXPR (smaller, bound)
1603 r = PHI <r', larger> --> to be turned to MIN_EXPR. */
1604 if (ass_code != MAX_EXPR)
1605 return false;
1607 minmax = MIN_EXPR;
1608 if (operand_equal_for_phi_arg_p (op0, smaller)
1609 || (alt_smaller
1610 && operand_equal_for_phi_arg_p (op0, alt_smaller)))
1611 bound = op1;
1612 else if (operand_equal_for_phi_arg_p (op1, smaller)
1613 || (alt_smaller
1614 && operand_equal_for_phi_arg_p (op1, alt_smaller)))
1615 bound = op0;
1616 else
1617 return false;
1619 /* We need BOUND <= LARGER. */
1620 if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
1621 bound, larger)))
1622 return false;
1624 else if (operand_equal_for_phi_arg_p (arg_false, smaller)
1625 || (alt_smaller
1626 && operand_equal_for_phi_arg_p (arg_false, alt_smaller)))
1628 /* Case
1630 if (smaller < larger)
1632 r' = MIN_EXPR (larger, bound)
1634 r = PHI <r', smaller> --> to be turned to MAX_EXPR. */
1635 if (ass_code != MIN_EXPR)
1636 return false;
1638 minmax = MAX_EXPR;
1639 if (operand_equal_for_phi_arg_p (op0, larger)
1640 || (alt_larger
1641 && operand_equal_for_phi_arg_p (op0, alt_larger)))
1642 bound = op1;
1643 else if (operand_equal_for_phi_arg_p (op1, larger)
1644 || (alt_larger
1645 && operand_equal_for_phi_arg_p (op1, alt_larger)))
1646 bound = op0;
1647 else
1648 return false;
1650 /* We need BOUND >= SMALLER. */
1651 if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
1652 bound, smaller)))
1653 return false;
1655 else
1656 return false;
1658 else
1660 /* We got here if the condition is false, i.e., SMALLER > LARGER. */
1661 if (!operand_equal_for_phi_arg_p (lhs, arg_false))
1662 return false;
1664 if (operand_equal_for_phi_arg_p (arg_true, larger)
1665 || (alt_larger
1666 && operand_equal_for_phi_arg_p (arg_true, alt_larger)))
1668 /* Case
1670 if (smaller > larger)
1672 r' = MIN_EXPR (smaller, bound)
1674 r = PHI <r', larger> --> to be turned to MAX_EXPR. */
1675 if (ass_code != MIN_EXPR)
1676 return false;
1678 minmax = MAX_EXPR;
1679 if (operand_equal_for_phi_arg_p (op0, smaller)
1680 || (alt_smaller
1681 && operand_equal_for_phi_arg_p (op0, alt_smaller)))
1682 bound = op1;
1683 else if (operand_equal_for_phi_arg_p (op1, smaller)
1684 || (alt_smaller
1685 && operand_equal_for_phi_arg_p (op1, alt_smaller)))
1686 bound = op0;
1687 else
1688 return false;
1690 /* We need BOUND >= LARGER. */
1691 if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
1692 bound, larger)))
1693 return false;
1695 else if (operand_equal_for_phi_arg_p (arg_true, smaller)
1696 || (alt_smaller
1697 && operand_equal_for_phi_arg_p (arg_true, alt_smaller)))
1699 /* Case
1701 if (smaller > larger)
1703 r' = MAX_EXPR (larger, bound)
1705 r = PHI <r', smaller> --> to be turned to MIN_EXPR. */
1706 if (ass_code != MAX_EXPR)
1707 return false;
1709 minmax = MIN_EXPR;
1710 if (operand_equal_for_phi_arg_p (op0, larger))
1711 bound = op1;
1712 else if (operand_equal_for_phi_arg_p (op1, larger))
1713 bound = op0;
1714 else
1715 return false;
1717 /* We need BOUND <= SMALLER. */
1718 if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
1719 bound, smaller)))
1720 return false;
1722 else
1723 return false;
1726 /* Move the statement from the middle block. */
1727 gsi = gsi_last_bb (cond_bb);
1728 gsi_from = gsi_last_nondebug_bb (middle_bb);
1729 reset_flow_sensitive_info (SINGLE_SSA_TREE_OPERAND (gsi_stmt (gsi_from),
1730 SSA_OP_DEF));
1731 gsi_move_before (&gsi_from, &gsi);
1734 /* Emit the statement to compute min/max. */
1735 gimple_seq stmts = NULL;
1736 tree phi_result = PHI_RESULT (phi);
1737 result = gimple_build (&stmts, minmax, TREE_TYPE (phi_result), arg0, arg1);
1738 /* Duplicate range info if we're the only thing setting the target PHI. */
1739 if (!gimple_seq_empty_p (stmts)
1740 && EDGE_COUNT (gimple_bb (phi)->preds) == 2
1741 && !POINTER_TYPE_P (TREE_TYPE (phi_result))
1742 && SSA_NAME_RANGE_INFO (phi_result))
1743 duplicate_ssa_name_range_info (result, SSA_NAME_RANGE_TYPE (phi_result),
1744 SSA_NAME_RANGE_INFO (phi_result));
1746 gsi = gsi_last_bb (cond_bb);
1747 gsi_insert_seq_before (&gsi, stmts, GSI_NEW_STMT);
1749 replace_phi_edge_with_variable (cond_bb, e1, phi, result);
1751 return true;
1754 /* Convert
1756 <bb 2>
1757 if (b_4(D) != 0)
1758 goto <bb 3>
1759 else
1760 goto <bb 4>
1762 <bb 3>
1763 _2 = (unsigned long) b_4(D);
1764 _9 = __builtin_popcountl (_2);
1766 _9 = __builtin_popcountl (b_4(D));
1768 <bb 4>
1769 c_12 = PHI <0(2), _9(3)>
1771 Into
1772 <bb 2>
1773 _2 = (unsigned long) b_4(D);
1774 _9 = __builtin_popcountl (_2);
1776 _9 = __builtin_popcountl (b_4(D));
1778 <bb 4>
1779 c_12 = PHI <_9(2)>
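/* In source terms (a sketch):  c = b ? __builtin_popcountl (b) : 0;
   can be simplified to  c = __builtin_popcountl (b);  because the popcount
   of zero is zero, so the guard is redundant.  */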
1782 static bool
1783 cond_removal_in_popcount_pattern (basic_block cond_bb, basic_block middle_bb,
1784 edge e1, edge e2,
1785 gimple *phi, tree arg0, tree arg1)
1787 gimple *cond;
1788 gimple_stmt_iterator gsi, gsi_from;
1789 gimple *popcount;
1790 gimple *cast = NULL;
1791 tree lhs, arg;
1793 /* Check that
1794 _2 = (unsigned long) b_4(D);
1795 _9 = __builtin_popcountl (_2);
1797 _9 = __builtin_popcountl (b_4(D));
1798 are the only stmts in the middle_bb. */
1800 gsi = gsi_start_nondebug_after_labels_bb (middle_bb);
1801 if (gsi_end_p (gsi))
1802 return false;
1803 cast = gsi_stmt (gsi);
1804 gsi_next_nondebug (&gsi);
1805 if (!gsi_end_p (gsi))
1807 popcount = gsi_stmt (gsi);
1808 gsi_next_nondebug (&gsi);
1809 if (!gsi_end_p (gsi))
1810 return false;
1812 else
1814 popcount = cast;
1815 cast = NULL;
1818 /* Check that we have a popcount builtin. */
1819 if (!is_gimple_call (popcount))
1820 return false;
1821 combined_fn cfn = gimple_call_combined_fn (popcount);
1822 switch (cfn)
1824 CASE_CFN_POPCOUNT:
1825 break;
1826 default:
1827 return false;
1830 arg = gimple_call_arg (popcount, 0);
1831 lhs = gimple_get_lhs (popcount);
1833 if (cast)
1835 /* We have a cast stmt feeding popcount builtin. */
1836 /* Check that we have a cast prior to that. */
1837 if (gimple_code (cast) != GIMPLE_ASSIGN
1838 || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (cast)))
1839 return false;
1840 /* Result of the cast stmt is the argument to the builtin. */
1841 if (arg != gimple_assign_lhs (cast))
1842 return false;
1843 arg = gimple_assign_rhs1 (cast);
1846 cond = last_stmt (cond_bb);
1848 /* Cond_bb has a check for b_4 [!=|==] 0 before calling the popcount
1849 builtin. */
1850 if (gimple_code (cond) != GIMPLE_COND
1851 || (gimple_cond_code (cond) != NE_EXPR
1852 && gimple_cond_code (cond) != EQ_EXPR)
1853 || !integer_zerop (gimple_cond_rhs (cond))
1854 || arg != gimple_cond_lhs (cond))
1855 return false;
1857 /* Canonicalize. */
1858 if ((e2->flags & EDGE_TRUE_VALUE
1859 && gimple_cond_code (cond) == NE_EXPR)
1860 || (e1->flags & EDGE_TRUE_VALUE
1861 && gimple_cond_code (cond) == EQ_EXPR))
1863 std::swap (arg0, arg1);
1864 std::swap (e1, e2);
1867 /* Check PHI arguments. */
1868 if (lhs != arg0 || !integer_zerop (arg1))
1869 return false;
1871 /* And insert the popcount builtin and cast stmt before the cond_bb. */
1872 gsi = gsi_last_bb (cond_bb);
1873 if (cast)
1875 gsi_from = gsi_for_stmt (cast);
1876 gsi_move_before (&gsi_from, &gsi);
1877 reset_flow_sensitive_info (gimple_get_lhs (cast));
1879 gsi_from = gsi_for_stmt (popcount);
1880 gsi_move_before (&gsi_from, &gsi);
1881 reset_flow_sensitive_info (gimple_get_lhs (popcount));
1883 /* Now update the PHI and remove unneeded bbs. */
1884 replace_phi_edge_with_variable (cond_bb, e2, phi, lhs);
1885 return true;
1888 /* The function abs_replacement does the main work of the absolute value
1889 replacement. Return true if the replacement is done, otherwise return
1890 false.
1891 COND_BB is the basic block where the replacement is going to be done.
1892 ARG0 is argument 0 from the phi; likewise for ARG1. */
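/* As a source-level sketch (a hypothetical example), this turns

x = a >= 0 ? a : -a;

into a single ABS_EXPR <a>, provided signed zeros need not be honored
and the negation cannot overflow (see the checks below). */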
1894 static bool
1895 abs_replacement (basic_block cond_bb, basic_block middle_bb,
1896 edge e0 ATTRIBUTE_UNUSED, edge e1,
1897 gimple *phi, tree arg0, tree arg1)
1899 tree result;
1900 gassign *new_stmt;
1901 gimple *cond;
1902 gimple_stmt_iterator gsi;
1903 edge true_edge, false_edge;
1904 gimple *assign;
1905 edge e;
1906 tree rhs, lhs;
1907 bool negate;
1908 enum tree_code cond_code;
1910 /* If the type says honor signed zeros we cannot do this
1911 optimization. */
1912 if (HONOR_SIGNED_ZEROS (arg1))
1913 return false;
1915 /* OTHER_BLOCK must have only one executable statement which must have the
1916 form arg0 = -arg1 or arg1 = -arg0. */
1918 assign = last_and_only_stmt (middle_bb);
1919 /* If we did not find the proper negation assignment, then we cannot
1920 optimize. */
1921 if (assign == NULL)
1922 return false;
1924 /* If we got here, then we have found the only executable statement
1925 in OTHER_BLOCK. If it is anything other than arg0 = -arg1 or
1926 arg1 = -arg0, then we cannot optimize. */
1927 if (gimple_code (assign) != GIMPLE_ASSIGN)
1928 return false;
1930 lhs = gimple_assign_lhs (assign);
1932 if (gimple_assign_rhs_code (assign) != NEGATE_EXPR)
1933 return false;
1935 rhs = gimple_assign_rhs1 (assign);
1937 /* The assignment has to be arg0 = -arg1 or arg1 = -arg0. */
1938 if (!(lhs == arg0 && rhs == arg1)
1939 && !(lhs == arg1 && rhs == arg0))
1940 return false;
1942 cond = last_stmt (cond_bb);
1943 result = PHI_RESULT (phi);
1945 /* Only relationals comparing arg[01] against zero are interesting. */
1946 cond_code = gimple_cond_code (cond);
1947 if (cond_code != GT_EXPR && cond_code != GE_EXPR
1948 && cond_code != LT_EXPR && cond_code != LE_EXPR)
1949 return false;
1951 /* Make sure the conditional is arg[01] OP y. */
1952 if (gimple_cond_lhs (cond) != rhs)
1953 return false;
1955 if (FLOAT_TYPE_P (TREE_TYPE (gimple_cond_rhs (cond)))
1956 ? real_zerop (gimple_cond_rhs (cond))
1957 : integer_zerop (gimple_cond_rhs (cond)))
1958 ;
1959 else
1960 return false;
1962 /* We need to know which is the true edge and which is the false
1963 edge so that we know whether we have abs or negative abs. */
1964 extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
1966 /* For GT_EXPR/GE_EXPR, if the true edge goes to OTHER_BLOCK, then we
1967 will need to negate the result. Similarly for LT_EXPR/LE_EXPR if
1968 the false edge goes to OTHER_BLOCK. */
1969 if (cond_code == GT_EXPR || cond_code == GE_EXPR)
1970 e = true_edge;
1971 else
1972 e = false_edge;
1974 if (e->dest == middle_bb)
1975 negate = true;
1976 else
1977 negate = false;
1979 /* If the code negates only when the value is positive, make sure not to
1980 introduce undefined behavior when negating or computing the absolute.
1981 ??? We could use range info if present to check for arg1 == INT_MIN. */
1982 if (negate
1983 && (ANY_INTEGRAL_TYPE_P (TREE_TYPE (arg1))
1984 && ! TYPE_OVERFLOW_WRAPS (TREE_TYPE (arg1))))
1985 return false;
1987 result = duplicate_ssa_name (result, NULL);
1989 if (negate)
1990 lhs = make_ssa_name (TREE_TYPE (result));
1991 else
1992 lhs = result;
1994 /* Build the modify expression with abs expression. */
1995 new_stmt = gimple_build_assign (lhs, ABS_EXPR, rhs);
1997 gsi = gsi_last_bb (cond_bb);
1998 gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
2000 if (negate)
2002 /* Get the right GSI. We want to insert after the recently
2003 added ABS_EXPR statement (which we know is the first statement
2004 in the block). */
2005 new_stmt = gimple_build_assign (result, NEGATE_EXPR, lhs);
2007 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
2010 replace_phi_edge_with_variable (cond_bb, e1, phi, result);
2012 /* Note that we optimized this PHI. */
2013 return true;
2016 /* Auxiliary functions to determine the set of memory accesses which
2017 can't trap because they are preceded by accesses to the same memory
2018 portion. We do that for MEM_REFs, so we only need to track
2019 the SSA_NAME of the pointer indirectly referenced. The algorithm
2020 simply is a walk over all instructions in dominator order. When
2021 we see a MEM_REF we determine whether we've already seen the same
2022 ref anywhere up to the root of the dominator tree. If we have, the
2023 current access can't trap. If we don't see any dominating access,
2024 the current access might trap, but might also make later accesses
2025 non-trapping, so we remember it. We need to be careful with loads
2026 and stores: for instance a load might not trap, while a store would,
2027 so if we see a dominating read access this doesn't mean that a later
2028 write access would not trap. Hence we also need to differentiate the
2029 type of access(es) seen.
2031 ??? We currently are very conservative and assume that a load might
2032 trap even if a store doesn't (write-only memory). This probably is
2033 overly conservative.
2035 As a special case, for !TREE_ADDRESSABLE automatic variables we ignore
2036 whether an access is a load or a store, because the local stack is
2037 always writable. */
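/* For instance (a hypothetical source fragment), in

*p = 0;
if (flag)
*p = 1;

the conditional store cannot trap, because a store to the same location
already dominates it; this is what later allows cselim to sink it. */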
2039 /* A hash-table of references (MEM_REF/ARRAY_REF/COMPONENT_REF), and in which
2040 basic block an *_REF through it was seen, which would constitute a
2041 no-trap region for same accesses.
2043 Size is needed to support 2 MEM_REFs of different types, like
2044 MEM<double>(s_1) and MEM<long>(s_1), which would compare equal with
2045 OEP_ADDRESS_OF. */
2046 struct ref_to_bb
2048 tree exp;
2049 HOST_WIDE_INT size;
2050 unsigned int phase;
2051 basic_block bb;
2054 /* Hashtable helpers. */
2056 struct refs_hasher : free_ptr_hash<ref_to_bb>
2058 static inline hashval_t hash (const ref_to_bb *);
2059 static inline bool equal (const ref_to_bb *, const ref_to_bb *);
2062 /* Used for quick clearing of the hash-table when we see calls.
2063 Hash entries with phase < nt_call_phase are invalid. */
2064 static unsigned int nt_call_phase;
2066 /* The hash function. */
2068 inline hashval_t
2069 refs_hasher::hash (const ref_to_bb *n)
2071 inchash::hash hstate;
2072 inchash::add_expr (n->exp, hstate, OEP_ADDRESS_OF);
2073 hstate.add_hwi (n->size);
2074 return hstate.end ();
2077 /* The equality function of *P1 and *P2. */
2079 inline bool
2080 refs_hasher::equal (const ref_to_bb *n1, const ref_to_bb *n2)
2082 return operand_equal_p (n1->exp, n2->exp, OEP_ADDRESS_OF)
2083 && n1->size == n2->size;
2086 class nontrapping_dom_walker : public dom_walker
2088 public:
2089 nontrapping_dom_walker (cdi_direction direction, hash_set<tree> *ps)
2090 : dom_walker (direction), m_nontrapping (ps), m_seen_refs (128)
2093 virtual edge before_dom_children (basic_block);
2094 virtual void after_dom_children (basic_block);
2096 private:
2098 /* We see the expression EXP in basic block BB. If it's an interesting
2099 expression (a MEM_REF, ARRAY_REF or COMPONENT_REF), possibly insert the
2100 expression into the set NONTRAP or the hash table of seen expressions.
2101 STORE is true if this expression is on the LHS, otherwise it's on
2102 the RHS. */
2103 void add_or_mark_expr (basic_block, tree, bool);
2105 hash_set<tree> *m_nontrapping;
2107 /* The hash table for remembering what we've seen. */
2108 hash_table<refs_hasher> m_seen_refs;
2111 /* Called by walk_dominator_tree, when entering the block BB. */
2112 edge
2113 nontrapping_dom_walker::before_dom_children (basic_block bb)
2115 edge e;
2116 edge_iterator ei;
2117 gimple_stmt_iterator gsi;
2119 /* If we haven't seen all our predecessors, clear the hash-table. */
2120 FOR_EACH_EDGE (e, ei, bb->preds)
2121 if ((((size_t)e->src->aux) & 2) == 0)
2123 nt_call_phase++;
2124 break;
2127 /* Mark this BB as being on the path to dominator root and as visited. */
2128 bb->aux = (void*)(1 | 2);
2130 /* And walk the statements in order. */
2131 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2133 gimple *stmt = gsi_stmt (gsi);
2135 if ((gimple_code (stmt) == GIMPLE_ASM && gimple_vdef (stmt))
2136 || (is_gimple_call (stmt)
2137 && (!nonfreeing_call_p (stmt) || !nonbarrier_call_p (stmt))))
2138 nt_call_phase++;
2139 else if (gimple_assign_single_p (stmt) && !gimple_has_volatile_ops (stmt))
2141 add_or_mark_expr (bb, gimple_assign_lhs (stmt), true);
2142 add_or_mark_expr (bb, gimple_assign_rhs1 (stmt), false);
2145 return NULL;
2148 /* Called by walk_dominator_tree, when basic block BB is exited. */
2149 void
2150 nontrapping_dom_walker::after_dom_children (basic_block bb)
2152 /* This BB isn't on the path to dominator root anymore. */
2153 bb->aux = (void*)2;
2156 /* We see the expression EXP in basic block BB. If it's an interesting
2157 expression of:
2158 1) MEM_REF
2159 2) ARRAY_REF
2160 3) COMPONENT_REF
2161 possibly insert the expression into the set NONTRAP or the hash table
2162 of seen expressions. STORE is true if this expression is on the LHS,
2163 otherwise it's on the RHS. */
2164 void
2165 nontrapping_dom_walker::add_or_mark_expr (basic_block bb, tree exp, bool store)
2167 HOST_WIDE_INT size;
2169 if ((TREE_CODE (exp) == MEM_REF || TREE_CODE (exp) == ARRAY_REF
2170 || TREE_CODE (exp) == COMPONENT_REF)
2171 && (size = int_size_in_bytes (TREE_TYPE (exp))) > 0)
2173 struct ref_to_bb map;
2174 ref_to_bb **slot;
2175 struct ref_to_bb *r2bb;
2176 basic_block found_bb = 0;
2178 if (!store)
2180 tree base = get_base_address (exp);
2181 /* Only record a LOAD of a local variable whose address is not taken,
2182 as the local stack is always writable. This allows cselim on a STORE
2183 with a dominating LOAD. */
2184 if (!auto_var_p (base) || TREE_ADDRESSABLE (base))
2185 return;
2188 /* Try to find the last seen *_REF, which can trap. */
2189 map.exp = exp;
2190 map.size = size;
2191 slot = m_seen_refs.find_slot (&map, INSERT);
2192 r2bb = *slot;
2193 if (r2bb && r2bb->phase >= nt_call_phase)
2194 found_bb = r2bb->bb;
2196 /* If we've found a trapping *_REF, _and_ it dominates EXP
2197 (it's in a basic block on the path from us to the dominator root)
2198 then we can't trap. */
2199 if (found_bb && (((size_t)found_bb->aux) & 1) == 1)
2201 m_nontrapping->add (exp);
2203 else
2205 /* EXP might trap, so insert it into the hash table. */
2206 if (r2bb)
2208 r2bb->phase = nt_call_phase;
2209 r2bb->bb = bb;
2211 else
2213 r2bb = XNEW (struct ref_to_bb);
2214 r2bb->phase = nt_call_phase;
2215 r2bb->bb = bb;
2216 r2bb->exp = exp;
2217 r2bb->size = size;
2218 *slot = r2bb;
2224 /* This is the entry point for gathering non-trapping memory accesses.
2225 It will do a dominator walk over the whole function, and it will
2226 make use of the bb->aux pointers. It returns a set of trees
2227 (the MEM_REFs themselves) which can't trap. */
2228 static hash_set<tree> *
2229 get_non_trapping (void)
2231 nt_call_phase = 0;
2232 hash_set<tree> *nontrap = new hash_set<tree>;
2233 /* We're going to do a dominator walk, so ensure that we have
2234 dominance information. */
2235 calculate_dominance_info (CDI_DOMINATORS);
2237 nontrapping_dom_walker (CDI_DOMINATORS, nontrap)
2238 .walk (cfun->cfg->x_entry_block_ptr);
2240 clear_aux_for_blocks ();
2241 return nontrap;
2244 /* Do the main work of conditional store replacement. We already know
2245 that the recognized pattern looks like so:
2247 split:
2248 if (cond) goto MIDDLE_BB; else goto JOIN_BB (edge E1)
2249 MIDDLE_BB:
2250 something
2251 fallthrough (edge E0)
2252 JOIN_BB:
2253 some more
2255 We check that MIDDLE_BB contains only one store, that that store
2256 doesn't trap (not via NOTRAP, but via checking if an access to the same
2257 memory location dominates us, or the store is to a local addressable
2258 object) and that the store has a "simple" RHS. */
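/* A minimal source-level sketch (hypothetical example): sinking the store in

if (cond)
*p = v;

yields

tmp = cond ? v : *p;
*p = tmp;

which is only valid when the load from *p cannot trap and the now
unconditional store does not introduce a data race (see the checks
below). */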
2260 static bool
2261 cond_store_replacement (basic_block middle_bb, basic_block join_bb,
2262 edge e0, edge e1, hash_set<tree> *nontrap)
2264 gimple *assign = last_and_only_stmt (middle_bb);
2265 tree lhs, rhs, name, name2;
2266 gphi *newphi;
2267 gassign *new_stmt;
2268 gimple_stmt_iterator gsi;
2269 location_t locus;
2271 /* Check that middle_bb contains only one store. */
2272 if (!assign
2273 || !gimple_assign_single_p (assign)
2274 || gimple_has_volatile_ops (assign))
2275 return false;
2277 /* And no PHI nodes so all uses in the single stmt are also
2278 available where we insert to. */
2279 if (!gimple_seq_empty_p (phi_nodes (middle_bb)))
2280 return false;
2282 locus = gimple_location (assign);
2283 lhs = gimple_assign_lhs (assign);
2284 rhs = gimple_assign_rhs1 (assign);
2285 if ((TREE_CODE (lhs) != MEM_REF
2286 && TREE_CODE (lhs) != ARRAY_REF
2287 && TREE_CODE (lhs) != COMPONENT_REF)
2288 || !is_gimple_reg_type (TREE_TYPE (lhs)))
2289 return false;
2291 /* Prove that we can move the store down. We could also check
2292 TREE_THIS_NOTRAP here, but in that case we also could move stores,
2293 whose value is not available readily, which we want to avoid. */
2294 if (!nontrap->contains (lhs))
2296 /* If LHS is an access to a local variable whose address is not taken
2297 (or to one that is, when we allow store data races) and is known not
2298 to trap, we can always safely move the store down. */
2299 tree base = get_base_address (lhs);
2300 if (!auto_var_p (base)
2301 || (TREE_ADDRESSABLE (base) && !flag_store_data_races)
2302 || tree_could_trap_p (lhs))
2303 return false;
2306 /* Now we've checked the constraints, so do the transformation:
2307 1) Remove the single store. */
2308 gsi = gsi_for_stmt (assign);
2309 unlink_stmt_vdef (assign);
2310 gsi_remove (&gsi, true);
2311 release_defs (assign);
2313 /* Make both store and load use alias-set zero as we have to
2314 deal with the case of the store being a conditional change
2315 of the dynamic type. */
2316 lhs = unshare_expr (lhs);
2317 tree *basep = &lhs;
2318 while (handled_component_p (*basep))
2319 basep = &TREE_OPERAND (*basep, 0);
2320 if (TREE_CODE (*basep) == MEM_REF
2321 || TREE_CODE (*basep) == TARGET_MEM_REF)
2322 TREE_OPERAND (*basep, 1)
2323 = fold_convert (ptr_type_node, TREE_OPERAND (*basep, 1));
2324 else
2325 *basep = build2 (MEM_REF, TREE_TYPE (*basep),
2326 build_fold_addr_expr (*basep),
2327 build_zero_cst (ptr_type_node));
2329 /* 2) Insert a load from the memory of the store to the temporary
2330 on the edge which did not contain the store. */
2331 name = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
2332 new_stmt = gimple_build_assign (name, lhs);
2333 gimple_set_location (new_stmt, locus);
2334 lhs = unshare_expr (lhs);
2335 /* Set TREE_NO_WARNING on the rhs of the load to avoid uninit
2336 warnings. */
2337 TREE_NO_WARNING (gimple_assign_rhs1 (new_stmt)) = 1;
2338 gsi_insert_on_edge (e1, new_stmt);
2340 /* 3) Create a PHI node at the join block, with one argument
2341 holding the old RHS, and the other holding the temporary
2342 where we stored the old memory contents. */
2343 name2 = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
2344 newphi = create_phi_node (name2, join_bb);
2345 add_phi_arg (newphi, rhs, e0, locus);
2346 add_phi_arg (newphi, name, e1, locus);
2348 new_stmt = gimple_build_assign (lhs, PHI_RESULT (newphi));
2350 /* 4) Insert that PHI node. */
2351 gsi = gsi_after_labels (join_bb);
2352 if (gsi_end_p (gsi))
2354 gsi = gsi_last_bb (join_bb);
2355 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
2357 else
2358 gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
2360 if (dump_file && (dump_flags & TDF_DETAILS))
2362 fprintf (dump_file, "\nConditional store replacement happened!");
2363 fprintf (dump_file, "\nReplaced the store with a load.");
2364 fprintf (dump_file, "\nInserted a new PHI statement in joint block:\n");
2365 print_gimple_stmt (dump_file, new_stmt, 0, TDF_VOPS|TDF_MEMSYMS);
2368 return true;
2371 /* Do the main work of conditional store replacement. */
2373 static bool
2374 cond_if_else_store_replacement_1 (basic_block then_bb, basic_block else_bb,
2375 basic_block join_bb, gimple *then_assign,
2376 gimple *else_assign)
2378 tree lhs_base, lhs, then_rhs, else_rhs, name;
2379 location_t then_locus, else_locus;
2380 gimple_stmt_iterator gsi;
2381 gphi *newphi;
2382 gassign *new_stmt;
2384 if (then_assign == NULL
2385 || !gimple_assign_single_p (then_assign)
2386 || gimple_clobber_p (then_assign)
2387 || gimple_has_volatile_ops (then_assign)
2388 || else_assign == NULL
2389 || !gimple_assign_single_p (else_assign)
2390 || gimple_clobber_p (else_assign)
2391 || gimple_has_volatile_ops (else_assign))
2392 return false;
2394 lhs = gimple_assign_lhs (then_assign);
2395 if (!is_gimple_reg_type (TREE_TYPE (lhs))
2396 || !operand_equal_p (lhs, gimple_assign_lhs (else_assign), 0))
2397 return false;
2399 lhs_base = get_base_address (lhs);
2400 if (lhs_base == NULL_TREE
2401 || (!DECL_P (lhs_base) && TREE_CODE (lhs_base) != MEM_REF))
2402 return false;
2404 then_rhs = gimple_assign_rhs1 (then_assign);
2405 else_rhs = gimple_assign_rhs1 (else_assign);
2406 then_locus = gimple_location (then_assign);
2407 else_locus = gimple_location (else_assign);
2409 /* Now we've checked the constraints, so do the transformation:
2410 1) Remove the stores. */
2411 gsi = gsi_for_stmt (then_assign);
2412 unlink_stmt_vdef (then_assign);
2413 gsi_remove (&gsi, true);
2414 release_defs (then_assign);
2416 gsi = gsi_for_stmt (else_assign);
2417 unlink_stmt_vdef (else_assign);
2418 gsi_remove (&gsi, true);
2419 release_defs (else_assign);
2421 /* 2) Create a PHI node at the join block, with one argument
2422 holding the RHS of the THEN store and the other holding the
2423 RHS of the ELSE store. */
2424 name = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
2425 newphi = create_phi_node (name, join_bb);
2426 add_phi_arg (newphi, then_rhs, EDGE_SUCC (then_bb, 0), then_locus);
2427 add_phi_arg (newphi, else_rhs, EDGE_SUCC (else_bb, 0), else_locus);
2429 new_stmt = gimple_build_assign (lhs, PHI_RESULT (newphi));
2431 /* 3) Insert that PHI node. */
2432 gsi = gsi_after_labels (join_bb);
2433 if (gsi_end_p (gsi))
2435 gsi = gsi_last_bb (join_bb);
2436 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
2438 else
2439 gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
2441 return true;
2444 /* Return the single store in BB with VDEF or NULL if there are
2445 other stores in the BB or loads following the store. */
2447 static gimple *
2448 single_trailing_store_in_bb (basic_block bb, tree vdef)
2450 if (SSA_NAME_IS_DEFAULT_DEF (vdef))
2451 return NULL;
2452 gimple *store = SSA_NAME_DEF_STMT (vdef);
2453 if (gimple_bb (store) != bb
2454 || gimple_code (store) == GIMPLE_PHI)
2455 return NULL;
2457 /* Verify there is no other store in this BB. */
2458 if (!SSA_NAME_IS_DEFAULT_DEF (gimple_vuse (store))
2459 && gimple_bb (SSA_NAME_DEF_STMT (gimple_vuse (store))) == bb
2460 && gimple_code (SSA_NAME_DEF_STMT (gimple_vuse (store))) != GIMPLE_PHI)
2461 return NULL;
2463 /* Verify there is no load or store after the store. */
2464 use_operand_p use_p;
2465 imm_use_iterator imm_iter;
2466 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, gimple_vdef (store))
2467 if (USE_STMT (use_p) != store
2468 && gimple_bb (USE_STMT (use_p)) == bb)
2469 return NULL;
2471 return store;
2474 /* Conditional store replacement. We already know
2475 that the recognized pattern looks like so:
2477 split:
2478 if (cond) goto THEN_BB; else goto ELSE_BB (edge E1)
2479 THEN_BB:
2481 X = Y;
2483 goto JOIN_BB;
2484 ELSE_BB:
2486 X = Z;
2488 fallthrough (edge E0)
2489 JOIN_BB:
2490 some more
2492 We check that it is safe to sink the store to JOIN_BB by verifying that
2493 there are no read-after-write or write-after-write dependencies in
2494 THEN_BB and ELSE_BB. */
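/* As a source-level sketch (hypothetical example), the two stores in

if (cond)
*p = a;
else
*p = b;

are sunk into the join block as the single store

*p = cond ? a : b;

assuming the dependence checks below succeed. */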
2496 static bool
2497 cond_if_else_store_replacement (basic_block then_bb, basic_block else_bb,
2498 basic_block join_bb)
2500 vec<data_reference_p> then_datarefs, else_datarefs;
2501 vec<ddr_p> then_ddrs, else_ddrs;
2502 gimple *then_store, *else_store;
2503 bool found, ok = false, res;
2504 struct data_dependence_relation *ddr;
2505 data_reference_p then_dr, else_dr;
2506 int i, j;
2507 tree then_lhs, else_lhs;
2508 basic_block blocks[3];
2510 /* Handle the case with single store in THEN_BB and ELSE_BB. That is
2511 cheap enough to always handle as it allows us to elide dependence
2512 checking. */
2513 gphi *vphi = NULL;
2514 for (gphi_iterator si = gsi_start_phis (join_bb); !gsi_end_p (si);
2515 gsi_next (&si))
2516 if (virtual_operand_p (gimple_phi_result (si.phi ())))
2518 vphi = si.phi ();
2519 break;
2521 if (!vphi)
2522 return false;
2523 tree then_vdef = PHI_ARG_DEF_FROM_EDGE (vphi, single_succ_edge (then_bb));
2524 tree else_vdef = PHI_ARG_DEF_FROM_EDGE (vphi, single_succ_edge (else_bb));
2525 gimple *then_assign = single_trailing_store_in_bb (then_bb, then_vdef);
2526 if (then_assign)
2528 gimple *else_assign = single_trailing_store_in_bb (else_bb, else_vdef);
2529 if (else_assign)
2530 return cond_if_else_store_replacement_1 (then_bb, else_bb, join_bb,
2531 then_assign, else_assign);
2534 /* If either vectorization or if-conversion is disabled then do
2535 not sink any stores. */
2536 if (param_max_stores_to_sink == 0
2537 || (!flag_tree_loop_vectorize && !flag_tree_slp_vectorize)
2538 || !flag_tree_loop_if_convert)
2539 return false;
2541 /* Find data references. */
2542 then_datarefs.create (1);
2543 else_datarefs.create (1);
2544 if ((find_data_references_in_bb (NULL, then_bb, &then_datarefs)
2545 == chrec_dont_know)
2546 || !then_datarefs.length ()
2547 || (find_data_references_in_bb (NULL, else_bb, &else_datarefs)
2548 == chrec_dont_know)
2549 || !else_datarefs.length ())
2551 free_data_refs (then_datarefs);
2552 free_data_refs (else_datarefs);
2553 return false;
2556 /* Find pairs of stores with equal LHS. */
2557 auto_vec<gimple *, 1> then_stores, else_stores;
2558 FOR_EACH_VEC_ELT (then_datarefs, i, then_dr)
2560 if (DR_IS_READ (then_dr))
2561 continue;
2563 then_store = DR_STMT (then_dr);
2564 then_lhs = gimple_get_lhs (then_store);
2565 if (then_lhs == NULL_TREE)
2566 continue;
2567 found = false;
2569 FOR_EACH_VEC_ELT (else_datarefs, j, else_dr)
2571 if (DR_IS_READ (else_dr))
2572 continue;
2574 else_store = DR_STMT (else_dr);
2575 else_lhs = gimple_get_lhs (else_store);
2576 if (else_lhs == NULL_TREE)
2577 continue;
2579 if (operand_equal_p (then_lhs, else_lhs, 0))
2581 found = true;
2582 break;
2586 if (!found)
2587 continue;
2589 then_stores.safe_push (then_store);
2590 else_stores.safe_push (else_store);
2593 /* No pairs of stores found. */
2594 if (!then_stores.length ()
2595 || then_stores.length () > (unsigned) param_max_stores_to_sink)
2597 free_data_refs (then_datarefs);
2598 free_data_refs (else_datarefs);
2599 return false;
2602 /* Compute and check data dependencies in both basic blocks. */
2603 then_ddrs.create (1);
2604 else_ddrs.create (1);
2605 if (!compute_all_dependences (then_datarefs, &then_ddrs,
2606 vNULL, false)
2607 || !compute_all_dependences (else_datarefs, &else_ddrs,
2608 vNULL, false))
2610 free_dependence_relations (then_ddrs);
2611 free_dependence_relations (else_ddrs);
2612 free_data_refs (then_datarefs);
2613 free_data_refs (else_datarefs);
2614 return false;
2616 blocks[0] = then_bb;
2617 blocks[1] = else_bb;
2618 blocks[2] = join_bb;
2619 renumber_gimple_stmt_uids_in_blocks (blocks, 3);
2621 /* Check that there are no read-after-write or write-after-write dependencies
2622 in THEN_BB. */
2623 FOR_EACH_VEC_ELT (then_ddrs, i, ddr)
2625 struct data_reference *dra = DDR_A (ddr);
2626 struct data_reference *drb = DDR_B (ddr);
2628 if (DDR_ARE_DEPENDENT (ddr) != chrec_known
2629 && ((DR_IS_READ (dra) && DR_IS_WRITE (drb)
2630 && gimple_uid (DR_STMT (dra)) > gimple_uid (DR_STMT (drb)))
2631 || (DR_IS_READ (drb) && DR_IS_WRITE (dra)
2632 && gimple_uid (DR_STMT (drb)) > gimple_uid (DR_STMT (dra)))
2633 || (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))))
2635 free_dependence_relations (then_ddrs);
2636 free_dependence_relations (else_ddrs);
2637 free_data_refs (then_datarefs);
2638 free_data_refs (else_datarefs);
2639 return false;
2643 /* Check that there are no read-after-write or write-after-write dependencies
2644 in ELSE_BB. */
2645 FOR_EACH_VEC_ELT (else_ddrs, i, ddr)
2647 struct data_reference *dra = DDR_A (ddr);
2648 struct data_reference *drb = DDR_B (ddr);
2650 if (DDR_ARE_DEPENDENT (ddr) != chrec_known
2651 && ((DR_IS_READ (dra) && DR_IS_WRITE (drb)
2652 && gimple_uid (DR_STMT (dra)) > gimple_uid (DR_STMT (drb)))
2653 || (DR_IS_READ (drb) && DR_IS_WRITE (dra)
2654 && gimple_uid (DR_STMT (drb)) > gimple_uid (DR_STMT (dra)))
2655 || (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))))
2657 free_dependence_relations (then_ddrs);
2658 free_dependence_relations (else_ddrs);
2659 free_data_refs (then_datarefs);
2660 free_data_refs (else_datarefs);
2661 return false;
2665 /* Sink stores with same LHS. */
2666 FOR_EACH_VEC_ELT (then_stores, i, then_store)
2668 else_store = else_stores[i];
2669 res = cond_if_else_store_replacement_1 (then_bb, else_bb, join_bb,
2670 then_store, else_store);
2671 ok = ok || res;
2674 free_dependence_relations (then_ddrs);
2675 free_dependence_relations (else_ddrs);
2676 free_data_refs (then_datarefs);
2677 free_data_refs (else_datarefs);
2679 return ok;
2682 /* Return TRUE if STMT has a VUSE whose corresponding VDEF is in BB. */
2684 static bool
2685 local_mem_dependence (gimple *stmt, basic_block bb)
2687 tree vuse = gimple_vuse (stmt);
2688 gimple *def;
2690 if (!vuse)
2691 return false;
2693 def = SSA_NAME_DEF_STMT (vuse);
2694 return (def && gimple_bb (def) == bb);
2697 /* Given a "diamond" control-flow pattern where BB0 tests a condition,
2698 BB1 and BB2 are "then" and "else" blocks dependent on this test,
2699 and BB3 rejoins control flow following BB1 and BB2, look for
2700 opportunities to hoist loads as follows. If BB3 contains a PHI of
2701 two loads, one each occurring in BB1 and BB2, and the loads are
2702 provably of adjacent fields in the same structure, then move both
2703 loads into BB0. Of course this can only be done if there are no
2704 dependencies preventing such motion.
2706 One of the hoisted loads will always be speculative, so the
2707 transformation is currently conservative:
2709 - The fields must be strictly adjacent.
2710 - The two fields must occupy a single memory block that is
2711 guaranteed to not cross a page boundary.
2713 The last is difficult to prove, as such memory blocks should be
2714 aligned on the minimum of the stack alignment boundary and the
2715 alignment guaranteed by heap allocation interfaces. Thus we rely
2716 on a parameter for the alignment value.
2718 Provided a good value is used for the last case, the first
2719 restriction could possibly be relaxed. */
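/* A hypothetical example of the idiom this targets:

struct node { struct node *left, *right; };
...
x = cond ? n->left : n->right;

left and right are adjacent fields of the same object, so both loads can
be hoisted above the branch when they fit within one cache line. */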
2721 static void
2722 hoist_adjacent_loads (basic_block bb0, basic_block bb1,
2723 basic_block bb2, basic_block bb3)
2725 int param_align = param_l1_cache_line_size;
2726 unsigned param_align_bits = (unsigned) (param_align * BITS_PER_UNIT);
2727 gphi_iterator gsi;
2729 /* Walk the phis in bb3 looking for an opportunity. We are looking
2730 for phis of two SSA names, one each of which is defined in bb1 and
2731 bb2. */
2732 for (gsi = gsi_start_phis (bb3); !gsi_end_p (gsi); gsi_next (&gsi))
2734 gphi *phi_stmt = gsi.phi ();
2735 gimple *def1, *def2;
2736 tree arg1, arg2, ref1, ref2, field1, field2;
2737 tree tree_offset1, tree_offset2, tree_size2, next;
2738 int offset1, offset2, size2;
2739 unsigned align1;
2740 gimple_stmt_iterator gsi2;
2741 basic_block bb_for_def1, bb_for_def2;
2743 if (gimple_phi_num_args (phi_stmt) != 2
2744 || virtual_operand_p (gimple_phi_result (phi_stmt)))
2745 continue;
2747 arg1 = gimple_phi_arg_def (phi_stmt, 0);
2748 arg2 = gimple_phi_arg_def (phi_stmt, 1);
2750 if (TREE_CODE (arg1) != SSA_NAME
2751 || TREE_CODE (arg2) != SSA_NAME
2752 || SSA_NAME_IS_DEFAULT_DEF (arg1)
2753 || SSA_NAME_IS_DEFAULT_DEF (arg2))
2754 continue;
2756 def1 = SSA_NAME_DEF_STMT (arg1);
2757 def2 = SSA_NAME_DEF_STMT (arg2);
2759 if ((gimple_bb (def1) != bb1 || gimple_bb (def2) != bb2)
2760 && (gimple_bb (def2) != bb1 || gimple_bb (def1) != bb2))
2761 continue;
2763 /* Check the mode of the arguments to be sure a conditional move
2764 can be generated for it. */
2765 if (optab_handler (movcc_optab, TYPE_MODE (TREE_TYPE (arg1)))
2766 == CODE_FOR_nothing)
2767 continue;
2769 /* Both statements must be assignments whose RHS is a COMPONENT_REF. */
2770 if (!gimple_assign_single_p (def1)
2771 || !gimple_assign_single_p (def2)
2772 || gimple_has_volatile_ops (def1)
2773 || gimple_has_volatile_ops (def2))
2774 continue;
2776 ref1 = gimple_assign_rhs1 (def1);
2777 ref2 = gimple_assign_rhs1 (def2);
2779 if (TREE_CODE (ref1) != COMPONENT_REF
2780 || TREE_CODE (ref2) != COMPONENT_REF)
2781 continue;
2783 /* The zeroth operand of the two component references must be
2784 identical. It is not sufficient to compare get_base_address of
2785 the two references, because this could allow for different
2786 elements of the same array in the two trees. It is not safe to
2787 assume that the existence of one array element implies the
2788 existence of a different one. */
2789 if (!operand_equal_p (TREE_OPERAND (ref1, 0), TREE_OPERAND (ref2, 0), 0))
2790 continue;
2792 field1 = TREE_OPERAND (ref1, 1);
2793 field2 = TREE_OPERAND (ref2, 1);
2795 /* Check for field adjacency, and ensure field1 comes first. */
2796 for (next = DECL_CHAIN (field1);
2797 next && TREE_CODE (next) != FIELD_DECL;
2798 next = DECL_CHAIN (next))
2801 if (next != field2)
2803 for (next = DECL_CHAIN (field2);
2804 next && TREE_CODE (next) != FIELD_DECL;
2805 next = DECL_CHAIN (next))
2808 if (next != field1)
2809 continue;
2811 std::swap (field1, field2);
2812 std::swap (def1, def2);
2815 bb_for_def1 = gimple_bb (def1);
2816 bb_for_def2 = gimple_bb (def2);
2818 /* Check for proper alignment of the first field. */
2819 tree_offset1 = bit_position (field1);
2820 tree_offset2 = bit_position (field2);
2821 tree_size2 = DECL_SIZE (field2);
2823 if (!tree_fits_uhwi_p (tree_offset1)
2824 || !tree_fits_uhwi_p (tree_offset2)
2825 || !tree_fits_uhwi_p (tree_size2))
2826 continue;
2828 offset1 = tree_to_uhwi (tree_offset1);
2829 offset2 = tree_to_uhwi (tree_offset2);
2830 size2 = tree_to_uhwi (tree_size2);
2831 align1 = DECL_ALIGN (field1) % param_align_bits;
2833 if (offset1 % BITS_PER_UNIT != 0)
2834 continue;
2836 /* For profitability, the two field references should fit within
2837 a single cache line. */
2838 if (align1 + offset2 - offset1 + size2 > param_align_bits)
2839 continue;
2841 /* The two expressions cannot be dependent upon vdefs defined
2842 in bb1/bb2. */
2843 if (local_mem_dependence (def1, bb_for_def1)
2844 || local_mem_dependence (def2, bb_for_def2))
2845 continue;
2847 /* The conditions are satisfied; hoist the loads from bb1 and bb2 into
2848 bb0. We hoist the first one first so that a cache miss is handled
2849 efficiently regardless of hardware cache-fill policy. */
2850 gsi2 = gsi_for_stmt (def1);
2851 gsi_move_to_bb_end (&gsi2, bb0);
2852 gsi2 = gsi_for_stmt (def2);
2853 gsi_move_to_bb_end (&gsi2, bb0);
2855 if (dump_file && (dump_flags & TDF_DETAILS))
2857 fprintf (dump_file,
2858 "\nHoisting adjacent loads from %d and %d into %d: \n",
2859 bb_for_def1->index, bb_for_def2->index, bb0->index);
2860 print_gimple_stmt (dump_file, def1, 0, TDF_VOPS|TDF_MEMSYMS);
2861 print_gimple_stmt (dump_file, def2, 0, TDF_VOPS|TDF_MEMSYMS);
2866 /* Determine whether we should attempt to hoist adjacent loads out of
2867 diamond patterns in pass_phiopt. Always hoist loads if
2868 -fhoist-adjacent-loads is specified and the target machine has
2869 both a conditional move instruction and a defined cache line size. */
2871 static bool
2872 gate_hoist_loads (void)
2874 return (flag_hoist_adjacent_loads == 1
2875 && param_l1_cache_line_size
2876 && HAVE_conditional_move);
2879 /* This pass tries to replace an if-then-else block with an
2880 assignment. We have four kinds of transformations. Some of these
2881 transformations are also performed by the ifcvt RTL optimizer.
2883 Conditional Replacement
2884 -----------------------
2886 This transformation, implemented in conditional_replacement,
2887 replaces
2889 bb0:
2890 if (cond) goto bb2; else goto bb1;
2891 bb1:
2892 bb2:
2893 x = PHI <0 (bb1), 1 (bb0), ...>;
2895 with
2897 bb0:
2898 x' = cond;
2899 goto bb2;
2900 bb2:
2901 x = PHI <x' (bb0), ...>;
2903 We remove bb1 as it becomes unreachable. This occurs often due to
2904 gimplification of conditionals.
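At the source level this roughly corresponds (hypothetical example) to
rewriting

x = cond ? 1 : 0;

as

x = (int) cond;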
2906 Value Replacement
2907 -----------------
2909 This transformation, implemented in value_replacement, replaces
2911 bb0:
2912 if (a != b) goto bb2; else goto bb1;
2913 bb1:
2914 bb2:
2915 x = PHI <a (bb1), b (bb0), ...>;
2917 with
2919 bb0:
2920 bb2:
2921 x = PHI <b (bb0), ...>;
2923 This opportunity can sometimes occur as a result of other
2924 optimizations.
2927 Another case caught by value replacement looks like this:
2929 bb0:
2930 t1 = a == CONST;
2931 t2 = b > c;
2932 t3 = t1 & t2;
2933 if (t3 != 0) goto bb1; else goto bb2;
2934 bb1:
2935 bb2:
2936 x = PHI (CONST, a)
2938 Gets replaced with:
2939 bb0:
2940 bb2:
2941 t1 = a == CONST;
2942 t2 = b > c;
2943 t3 = t1 & t2;
2944 x = a;
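At the source level this roughly corresponds (hypothetical example) to
simplifying

x = (a == CONST && b > c) ? CONST : a;

to plain x = a, since on the path that yields CONST we already know
that a == CONST.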
2946 ABS Replacement
2947 ---------------
2949 This transformation, implemented in abs_replacement, replaces
2951 bb0:
2952 if (a >= 0) goto bb2; else goto bb1;
2953 bb1:
2954 x = -a;
2955 bb2:
2956 x = PHI <x (bb1), a (bb0), ...>;
2958 with
2960 bb0:
2961 x' = ABS_EXPR< a >;
2962 bb2:
2963 x = PHI <x' (bb0), ...>;
2965 MIN/MAX Replacement
2966 -------------------
2968 This transformation, minmax_replacement replaces
2970 bb0:
2971 if (a <= b) goto bb2; else goto bb1;
2972 bb1:
2973 bb2:
2974 x = PHI <b (bb1), a (bb0), ...>;
2976 with
2978 bb0:
2979 x' = MIN_EXPR (a, b)
2980 bb2:
2981 x = PHI <x' (bb0), ...>;
2983 A similar transformation is done for MAX_EXPR.
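At the source level this roughly corresponds (hypothetical example) to
rewriting

x = (a <= b) ? a : b;

as a single x = MIN_EXPR <a, b>, and likewise for MAX_EXPR.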
2986 This pass also performs a fifth transformation of a slightly different
2987 flavor.
2989 Factor conversion in COND_EXPR
2990 ------------------------------
2992 This transformation factors the conversion out of COND_EXPR with
2993 factor_out_conditional_conversion.
2995 For example:
2996 if (a <= CST) goto <bb 3>; else goto <bb 4>;
2997 <bb 3>:
2998 tmp = (int) a;
2999 <bb 4>:
3000 tmp = PHI <tmp, CST>
3002 Into:
3003 if (a <= CST) goto <bb 3>; else goto <bb 4>;
3004 <bb 3>:
3005 <bb 4>:
3006 a = PHI <a, CST>
3007 tmp = (int) a;
3009 Adjacent Load Hoisting
3010 ----------------------
3012 This transformation replaces
3014 bb0:
3015 if (...) goto bb2; else goto bb1;
3016 bb1:
3017 x1 = (<expr>).field1;
3018 goto bb3;
3019 bb2:
3020 x2 = (<expr>).field2;
3021 bb3:
3022 # x = PHI <x1, x2>;
3024 with
3026 bb0:
3027 x1 = (<expr>).field1;
3028 x2 = (<expr>).field2;
3029 if (...) goto bb2; else goto bb1;
3030 bb1:
3031 goto bb3;
3032 bb2:
3033 bb3:
3034 # x = PHI <x1, x2>;
3036 The purpose of this transformation is to enable generation of conditional
3037 move instructions such as Intel CMOV or PowerPC ISEL. Because one of
3038 the loads is speculative, the transformation is restricted to very
3039 specific cases to avoid introducing a page fault. We are looking for
3040 the common idiom:
3042 if (...)
3043 x = y->left;
3044 else
3045 x = y->right;
3047 where left and right are typically adjacent pointers in a tree structure. */
3049 namespace {
3051 const pass_data pass_data_phiopt =
3053 GIMPLE_PASS, /* type */
3054 "phiopt", /* name */
3055 OPTGROUP_NONE, /* optinfo_flags */
3056 TV_TREE_PHIOPT, /* tv_id */
3057 ( PROP_cfg | PROP_ssa ), /* properties_required */
3058 0, /* properties_provided */
3059 0, /* properties_destroyed */
3060 0, /* todo_flags_start */
3061 0, /* todo_flags_finish */
3064 class pass_phiopt : public gimple_opt_pass
3066 public:
3067 pass_phiopt (gcc::context *ctxt)
3068 : gimple_opt_pass (pass_data_phiopt, ctxt), early_p (false)
3071 /* opt_pass methods: */
3072 opt_pass * clone () { return new pass_phiopt (m_ctxt); }
3073 void set_pass_param (unsigned n, bool param)
3075 gcc_assert (n == 0);
3076 early_p = param;
3078 virtual bool gate (function *) { return flag_ssa_phiopt; }
3079 virtual unsigned int execute (function *)
3081 return tree_ssa_phiopt_worker (false,
3082 !early_p ? gate_hoist_loads () : false,
3083 early_p);
3086 private:
3087 bool early_p;
3088 }; // class pass_phiopt
3090 } // anon namespace
3092 gimple_opt_pass *
3093 make_pass_phiopt (gcc::context *ctxt)
3095 return new pass_phiopt (ctxt);
3098 namespace {
3100 const pass_data pass_data_cselim =
3102 GIMPLE_PASS, /* type */
3103 "cselim", /* name */
3104 OPTGROUP_NONE, /* optinfo_flags */
3105 TV_TREE_PHIOPT, /* tv_id */
3106 ( PROP_cfg | PROP_ssa ), /* properties_required */
3107 0, /* properties_provided */
3108 0, /* properties_destroyed */
3109 0, /* todo_flags_start */
3110 0, /* todo_flags_finish */
3113 class pass_cselim : public gimple_opt_pass
3115 public:
3116 pass_cselim (gcc::context *ctxt)
3117 : gimple_opt_pass (pass_data_cselim, ctxt)
3120 /* opt_pass methods: */
3121 virtual bool gate (function *) { return flag_tree_cselim; }
3122 virtual unsigned int execute (function *) { return tree_ssa_cs_elim (); }
3124 }; // class pass_cselim
3126 } // anon namespace
3128 gimple_opt_pass *
3129 make_pass_cselim (gcc::context *ctxt)
3131 return new pass_cselim (ctxt);