/* Optimization of PHI nodes by converting them into straightline code.
   Copyright (C) 2004-2016 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "backend.h"
24 #include "insn-codes.h"
25 #include "rtl.h"
26 #include "tree.h"
27 #include "gimple.h"
28 #include "cfghooks.h"
29 #include "tree-pass.h"
30 #include "ssa.h"
31 #include "optabs-tree.h"
32 #include "insn-config.h"
33 #include "gimple-pretty-print.h"
34 #include "fold-const.h"
35 #include "stor-layout.h"
36 #include "cfganal.h"
37 #include "gimplify.h"
38 #include "gimple-iterator.h"
39 #include "gimplify-me.h"
40 #include "tree-cfg.h"
41 #include "tree-dfa.h"
42 #include "domwalk.h"
43 #include "cfgloop.h"
44 #include "tree-data-ref.h"
45 #include "tree-scalar-evolution.h"
46 #include "tree-inline.h"
47 #include "params.h"
static unsigned int tree_ssa_phiopt_worker (bool, bool);
static bool conditional_replacement (basic_block, basic_block,
				     edge, edge, gphi *, tree, tree);
static gphi *factor_out_conditional_conversion (edge, edge, gphi *, tree, tree);
static int value_replacement (basic_block, basic_block,
			      edge, edge, gimple *, tree, tree);
static bool minmax_replacement (basic_block, basic_block,
				edge, edge, gimple *, tree, tree);
static bool abs_replacement (basic_block, basic_block,
			     edge, edge, gimple *, tree, tree);
static bool cond_store_replacement (basic_block, basic_block, edge, edge,
				    hash_set<tree> *);
static bool cond_if_else_store_replacement (basic_block, basic_block, basic_block);
static hash_set<tree> * get_non_trapping ();
static void replace_phi_edge_with_variable (basic_block, edge, gimple *, tree);
static void hoist_adjacent_loads (basic_block, basic_block,
				  basic_block, basic_block);
static bool gate_hoist_loads (void);
/* This pass tries to transform conditional stores into unconditional
   ones, enabling further simplifications with the simpler then and else
   blocks.  In particular it replaces this:

     bb0:
       if (cond) goto bb2; else goto bb1;
     bb1:
       *p = RHS;
     bb2:

   with

     bb0:
       if (cond) goto bb1; else goto bb2;
     bb1:
       condtmp' = *p;
     bb2:
       condtmp = PHI <RHS, condtmp'>
       *p = condtmp;

   This transformation can only be done under several constraints,
   documented below.  It also replaces:

     bb0:
       if (cond) goto bb2; else goto bb1;
     bb1:
       *p = RHS1;
       goto bb3;
     bb2:
       *p = RHS2;
     bb3:

   with

     bb0:
       if (cond) goto bb3; else goto bb1;
     bb1:
     bb3:
       condtmp = PHI <RHS1, RHS2>
       *p = condtmp;  */
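/* As an illustrative sketch (not part of the original sources), the
   first pattern corresponds to source code like

     void
     f (int *p, int cond, int rhs)
     {
       if (cond)
	 *p = rhs;
     }

   which becomes the equivalent of

     void
     f (int *p, int cond, int rhs)
     {
       int condtmp = cond ? rhs : *p;
       *p = condtmp;
     }

   The unconditional load of *p is only legal when *p is proven
   non-trapping (see get_non_trapping below), e.g. because a dominating
   access to the same location exists.  */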
static unsigned int
tree_ssa_cs_elim (void)
{
  unsigned todo;
  /* ??? We are not interested in loop related info, but the following
     will create it, ICEing as we didn't init loops with pre-headers.
     An interfacing issue of find_data_references_in_bb.  */
  loop_optimizer_init (LOOPS_NORMAL);
  scev_initialize ();
  todo = tree_ssa_phiopt_worker (true, false);
  scev_finalize ();
  loop_optimizer_finalize ();
  return todo;
}
/* Return the singleton PHI in the SEQ of PHIs for edges E0 and E1.  */

static gphi *
single_non_singleton_phi_for_edges (gimple_seq seq, edge e0, edge e1)
{
  gimple_stmt_iterator i;
  gphi *phi = NULL;
  if (gimple_seq_singleton_p (seq))
    return as_a <gphi *> (gsi_stmt (gsi_start (seq)));
  for (i = gsi_start (seq); !gsi_end_p (i); gsi_next (&i))
    {
      gphi *p = as_a <gphi *> (gsi_stmt (i));
      /* If the PHI arguments are equal then we can skip this PHI.  */
      if (operand_equal_for_phi_arg_p (gimple_phi_arg_def (p, e0->dest_idx),
				       gimple_phi_arg_def (p, e1->dest_idx)))
	continue;

      /* If we already found a PHI whose two edge arguments differ, then
	 there is more than one such PHI and hence no singleton; return
	 NULL.  */
      if (phi)
	return NULL;

      phi = p;
    }
  return phi;
}
/* The core routine of conditional store replacement and normal
   phi optimizations.  Both share much of the infrastructure in how
   to match applicable basic block patterns.  DO_STORE_ELIM is true
   when we want to do conditional store replacement, false otherwise.
   DO_HOIST_LOADS is true when we want to hoist adjacent loads out
   of diamond control flow patterns, false otherwise.  */

static unsigned int
tree_ssa_phiopt_worker (bool do_store_elim, bool do_hoist_loads)
{
  basic_block bb;
  basic_block *bb_order;
  unsigned n, i;
  bool cfgchanged = false;
  hash_set<tree> *nontrap = 0;

  if (do_store_elim)
    /* Calculate the set of non-trapping memory accesses.  */
    nontrap = get_non_trapping ();

  /* Search every basic block for a COND_EXPR we may be able to optimize.

     We walk the blocks in an order that guarantees that a block with
     a single predecessor is processed before the predecessor.
     This ensures that we collapse inner ifs before visiting the
     outer ones, and also that we do not try to visit a removed
     block.  */
  bb_order = single_pred_before_succ_order ();
  n = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS;

  for (i = 0; i < n; i++)
    {
      gimple *cond_stmt;
      gphi *phi;
      basic_block bb1, bb2;
      edge e1, e2;
      tree arg0, arg1;

      bb = bb_order[i];

      cond_stmt = last_stmt (bb);
      /* Check to see if the last statement is a GIMPLE_COND.  */
      if (!cond_stmt
	  || gimple_code (cond_stmt) != GIMPLE_COND)
	continue;

      e1 = EDGE_SUCC (bb, 0);
      bb1 = e1->dest;
      e2 = EDGE_SUCC (bb, 1);
      bb2 = e2->dest;

      /* We cannot do the optimization on abnormal edges.  */
      if ((e1->flags & EDGE_ABNORMAL) != 0
	  || (e2->flags & EDGE_ABNORMAL) != 0)
	continue;

      /* If bb1 has no successors, or bb2 is missing or has no
	 successors, there is nothing we can do.  */
      if (EDGE_COUNT (bb1->succs) == 0
	  || bb2 == NULL
	  || EDGE_COUNT (bb2->succs) == 0)
	continue;

      /* Find the bb which is the fall through to the other.  */
      if (EDGE_SUCC (bb1, 0)->dest == bb2)
	;
      else if (EDGE_SUCC (bb2, 0)->dest == bb1)
	{
	  std::swap (bb1, bb2);
	  std::swap (e1, e2);
	}
      else if (do_store_elim
	       && EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest)
	{
	  basic_block bb3 = EDGE_SUCC (bb1, 0)->dest;

	  if (!single_succ_p (bb1)
	      || (EDGE_SUCC (bb1, 0)->flags & EDGE_FALLTHRU) == 0
	      || !single_succ_p (bb2)
	      || (EDGE_SUCC (bb2, 0)->flags & EDGE_FALLTHRU) == 0
	      || EDGE_COUNT (bb3->preds) != 2)
	    continue;
	  if (cond_if_else_store_replacement (bb1, bb2, bb3))
	    cfgchanged = true;
	  continue;
	}
      else if (do_hoist_loads
	       && EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest)
	{
	  basic_block bb3 = EDGE_SUCC (bb1, 0)->dest;

	  if (!FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (cond_stmt)))
	      && single_succ_p (bb1)
	      && single_succ_p (bb2)
	      && single_pred_p (bb1)
	      && single_pred_p (bb2)
	      && EDGE_COUNT (bb->succs) == 2
	      && EDGE_COUNT (bb3->preds) == 2
	      /* If one edge or the other is dominant, a conditional move
		 is likely to perform worse than the well-predicted branch.  */
	      && !predictable_edge_p (EDGE_SUCC (bb, 0))
	      && !predictable_edge_p (EDGE_SUCC (bb, 1)))
	    hoist_adjacent_loads (bb, bb1, bb2, bb3);
	  continue;
	}
      else
	continue;

      e1 = EDGE_SUCC (bb1, 0);

      /* Make sure that bb1 is just a fall through.  */
      if (!single_succ_p (bb1)
	  || (e1->flags & EDGE_FALLTHRU) == 0)
	continue;

      /* Also make sure that bb1 has only one predecessor and that it
	 is bb.  */
      if (!single_pred_p (bb1)
	  || single_pred (bb1) != bb)
	continue;

      if (do_store_elim)
	{
	  /* bb1 is the middle block, bb2 the join block, bb the split block,
	     e1 the fallthrough edge from bb1 to bb2.  We can't do the
	     optimization if the join block has more than two predecessors.  */
	  if (EDGE_COUNT (bb2->preds) > 2)
	    continue;
	  if (cond_store_replacement (bb1, bb2, e1, e2, nontrap))
	    cfgchanged = true;
	}
      else
	{
	  gimple_seq phis = phi_nodes (bb2);
	  gimple_stmt_iterator gsi;
	  bool candorest = true;

	  /* Value replacement can work with more than one PHI
	     so try that first.  */
	  for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
	    {
	      phi = as_a <gphi *> (gsi_stmt (gsi));
	      arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
	      arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
	      if (value_replacement (bb, bb1, e1, e2, phi, arg0, arg1) == 2)
		{
		  candorest = false;
		  cfgchanged = true;
		  break;
		}
	    }

	  if (!candorest)
	    continue;

	  phi = single_non_singleton_phi_for_edges (phis, e1, e2);
	  if (!phi)
	    continue;

	  arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
	  arg1 = gimple_phi_arg_def (phi, e2->dest_idx);

	  /* Something is wrong if we cannot find the arguments in the PHI
	     node.  */
	  gcc_assert (arg0 != NULL_TREE && arg1 != NULL_TREE);

	  gphi *newphi = factor_out_conditional_conversion (e1, e2, phi,
							    arg0, arg1);
	  if (newphi != NULL)
	    {
	      phi = newphi;
	      /* factor_out_conditional_conversion may create a new PHI in
		 BB2 and eliminate an existing PHI in BB2.  Recompute values
		 that may be affected by that change.  */
	      arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
	      arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
	      gcc_assert (arg0 != NULL_TREE && arg1 != NULL_TREE);
	    }

	  /* Do the replacement of conditional if it can be done.  */
	  if (conditional_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
	    cfgchanged = true;
	  else if (abs_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
	    cfgchanged = true;
	  else if (minmax_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
	    cfgchanged = true;
	}
    }

  free (bb_order);

  if (do_store_elim)
    delete nontrap;
  /* If the CFG has changed, we should cleanup the CFG.  */
  if (cfgchanged && do_store_elim)
    {
      /* In cond-store replacement we have added some loads on edges
	 and new VOPS (as we moved the store, and created a load).  */
      gsi_commit_edge_inserts ();
      return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
    }
  else if (cfgchanged)
    return TODO_cleanup_cfg;
  return 0;
}
/* Replace PHI node element whose edge is E in block BB with variable NEW.
   Remove the edge from COND_BLOCK which does not lead to BB (COND_BLOCK
   is known to have two edges, one of which must reach BB).  */

static void
replace_phi_edge_with_variable (basic_block cond_block,
				edge e, gimple *phi, tree new_tree)
{
  basic_block bb = gimple_bb (phi);
  basic_block block_to_remove;
  gimple_stmt_iterator gsi;

  /* Change the PHI argument to new.  */
  SET_USE (PHI_ARG_DEF_PTR (phi, e->dest_idx), new_tree);

  /* Remove the empty basic block.  */
  if (EDGE_SUCC (cond_block, 0)->dest == bb)
    {
      EDGE_SUCC (cond_block, 0)->flags |= EDGE_FALLTHRU;
      EDGE_SUCC (cond_block, 0)->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
      EDGE_SUCC (cond_block, 0)->probability = REG_BR_PROB_BASE;
      EDGE_SUCC (cond_block, 0)->count += EDGE_SUCC (cond_block, 1)->count;

      block_to_remove = EDGE_SUCC (cond_block, 1)->dest;
    }
  else
    {
      EDGE_SUCC (cond_block, 1)->flags |= EDGE_FALLTHRU;
      EDGE_SUCC (cond_block, 1)->flags
	&= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
      EDGE_SUCC (cond_block, 1)->probability = REG_BR_PROB_BASE;
      EDGE_SUCC (cond_block, 1)->count += EDGE_SUCC (cond_block, 0)->count;

      block_to_remove = EDGE_SUCC (cond_block, 0)->dest;
    }
  delete_basic_block (block_to_remove);

  /* Eliminate the COND_EXPR at the end of COND_BLOCK.  */
  gsi = gsi_last_bb (cond_block);
  gsi_remove (&gsi, true);

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file,
	     "COND_EXPR in block %d and PHI in block %d converted to straightline code.\n",
	     cond_block->index,
	     bb->index);
}
/* PR66726: Factor conversion out of COND_EXPR.  If the arguments of the PHI
   stmt are conversions, factor the conversion out and perform it on the
   result of the PHI stmt instead.  Return the newly-created PHI, if any.  */
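/* An illustrative sketch of the transformation (SSA names made up):

     <bb 3>:
       t1_5 = (int) a_1;
       ...
     <bb 4>:
       t2_6 = (int) b_2;
       ...
     # r_3 = PHI <t1_5 (3), t2_6 (4)>

   becomes

     # _7 = PHI <a_1 (3), b_2 (4)>
     r_3 = (int) _7;

   so that the remaining phi-opt patterns can match the narrower PHI.  */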
static gphi *
factor_out_conditional_conversion (edge e0, edge e1, gphi *phi,
				   tree arg0, tree arg1)
{
  gimple *arg0_def_stmt = NULL, *arg1_def_stmt = NULL, *new_stmt;
  tree new_arg0 = NULL_TREE, new_arg1 = NULL_TREE;
  tree temp, result;
  gphi *newphi;
  gimple_stmt_iterator gsi, gsi_for_def;
  source_location locus = gimple_location (phi);
  enum tree_code convert_code;

  /* Handle only PHI statements with two arguments.  TODO: If all
     other arguments to PHI are INTEGER_CST or if their defining
     statement have the same unary operation, we can handle more
     than two arguments too.  */
  if (gimple_phi_num_args (phi) != 2)
    return NULL;

  /* First canonicalize to simplify tests.  */
  if (TREE_CODE (arg0) != SSA_NAME)
    {
      std::swap (arg0, arg1);
      std::swap (e0, e1);
    }

  if (TREE_CODE (arg0) != SSA_NAME
      || (TREE_CODE (arg1) != SSA_NAME
	  && TREE_CODE (arg1) != INTEGER_CST))
    return NULL;

  /* Check if arg0 is an SSA_NAME and the stmt which defines arg0 is
     a conversion.  */
  arg0_def_stmt = SSA_NAME_DEF_STMT (arg0);
  if (!gimple_assign_cast_p (arg0_def_stmt))
    return NULL;

  /* Use the RHS as new_arg0.  */
  convert_code = gimple_assign_rhs_code (arg0_def_stmt);
  new_arg0 = gimple_assign_rhs1 (arg0_def_stmt);
  if (convert_code == VIEW_CONVERT_EXPR)
    {
      new_arg0 = TREE_OPERAND (new_arg0, 0);
      if (!is_gimple_reg_type (TREE_TYPE (new_arg0)))
	return NULL;
    }

  if (TREE_CODE (arg1) == SSA_NAME)
    {
      /* Check if arg1 is an SSA_NAME and the stmt which defines arg1
	 is a conversion.  */
      arg1_def_stmt = SSA_NAME_DEF_STMT (arg1);
      if (!is_gimple_assign (arg1_def_stmt)
	  || gimple_assign_rhs_code (arg1_def_stmt) != convert_code)
	return NULL;

      /* Use the RHS as new_arg1.  */
      new_arg1 = gimple_assign_rhs1 (arg1_def_stmt);
      if (convert_code == VIEW_CONVERT_EXPR)
	new_arg1 = TREE_OPERAND (new_arg1, 0);
    }
  else
    {
      /* If arg1 is an INTEGER_CST, fold it to new type.  */
      if (INTEGRAL_TYPE_P (TREE_TYPE (new_arg0))
	  && int_fits_type_p (arg1, TREE_TYPE (new_arg0)))
	{
	  if (gimple_assign_cast_p (arg0_def_stmt))
	    new_arg1 = fold_convert (TREE_TYPE (new_arg0), arg1);
	  else
	    return NULL;
	}
      else
	return NULL;
    }

  /* If arg0/arg1 have > 1 use, then this transformation actually increases
     the number of expressions evaluated at runtime.  */
  if (!has_single_use (arg0)
      || (arg1_def_stmt && !has_single_use (arg1)))
    return NULL;

  /* If types of new_arg0 and new_arg1 are different bailout.  */
  if (!types_compatible_p (TREE_TYPE (new_arg0), TREE_TYPE (new_arg1)))
    return NULL;

  /* Create a new PHI stmt.  */
  result = PHI_RESULT (phi);
  temp = make_ssa_name (TREE_TYPE (new_arg0), NULL);
  newphi = create_phi_node (temp, gimple_bb (phi));

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "PHI ");
      print_generic_expr (dump_file, gimple_phi_result (phi), 0);
      fprintf (dump_file,
	       " changed to factor conversion out from COND_EXPR.\n");
      fprintf (dump_file, "New stmt with CAST that defines ");
      print_generic_expr (dump_file, result, 0);
      fprintf (dump_file, ".\n");
    }

  /* Remove the old cast(s) that has single use.  */
  gsi_for_def = gsi_for_stmt (arg0_def_stmt);
  gsi_remove (&gsi_for_def, true);
  release_defs (arg0_def_stmt);

  if (arg1_def_stmt)
    {
      gsi_for_def = gsi_for_stmt (arg1_def_stmt);
      gsi_remove (&gsi_for_def, true);
      release_defs (arg1_def_stmt);
    }

  add_phi_arg (newphi, new_arg0, e0, locus);
  add_phi_arg (newphi, new_arg1, e1, locus);

  /* Create the conversion stmt and insert it.  */
  if (convert_code == VIEW_CONVERT_EXPR)
    temp = fold_build1 (VIEW_CONVERT_EXPR, TREE_TYPE (result), temp);
  new_stmt = gimple_build_assign (result, convert_code, temp);
  gsi = gsi_after_labels (gimple_bb (phi));
  gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);

  /* Remove the original PHI stmt.  */
  gsi = gsi_for_stmt (phi);
  gsi_remove (&gsi, true);
  return newphi;
}
/* The function conditional_replacement does the main work of doing the
   conditional replacement.  Return true if the replacement is done.
   Otherwise return false.
   BB is the basic block where the replacement is going to be done.  ARG0
   is argument 0 from PHI.  Likewise for ARG1.  */
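/* An illustrative sketch (SSA names made up):

     if (a_1 < b_2) goto <bb 3>; else goto <bb 4>;
     ...
     # r_5 = PHI <1 (3), 0 (4)>

   becomes

     t_6 = a_1 < b_2;
     r_5 = (TYPE) t_6;

   and for PHI arguments 0 and -1 an additional NEGATE_EXPR is
   emitted.  */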
static bool
conditional_replacement (basic_block cond_bb, basic_block middle_bb,
			 edge e0, edge e1, gphi *phi,
			 tree arg0, tree arg1)
{
  tree result;
  gimple *stmt;
  gassign *new_stmt;
  tree cond;
  gimple_stmt_iterator gsi;
  edge true_edge, false_edge;
  tree new_var, new_var2;
  bool neg;

  /* FIXME: Gimplification of complex type is too hard for now.  */
  /* We aren't prepared to handle vectors either (and it is a question
     if it would be worthwhile anyway).  */
  if (!(INTEGRAL_TYPE_P (TREE_TYPE (arg0))
	|| POINTER_TYPE_P (TREE_TYPE (arg0)))
      || !(INTEGRAL_TYPE_P (TREE_TYPE (arg1))
	   || POINTER_TYPE_P (TREE_TYPE (arg1))))
    return false;

  /* If the PHI arguments have the constants 0 and 1, or 0 and -1, then
     convert it to the conditional.  */
  if ((integer_zerop (arg0) && integer_onep (arg1))
      || (integer_zerop (arg1) && integer_onep (arg0)))
    neg = false;
  else if ((integer_zerop (arg0) && integer_all_onesp (arg1))
	   || (integer_zerop (arg1) && integer_all_onesp (arg0)))
    neg = true;
  else
    return false;

  if (!empty_block_p (middle_bb))
    return false;

  /* At this point we know we have a GIMPLE_COND with two successors.
     One successor is BB, the other successor is an empty block which
     falls through into BB.

     There is a single PHI node at the join point (BB) and its arguments
     are constants (0, 1) or (0, -1).

     So, given the condition COND, and the two PHI arguments, we can
     rewrite this PHI into non-branching code:

       dest = (COND) or dest = COND'

     We use the condition as-is if the argument associated with the
     true edge has the value one or the argument associated with the
     false edge has the value zero.  Note that those conditions are not
     the same since only one of the outgoing edges from the GIMPLE_COND
     will directly reach BB and thus be associated with an argument.  */

  stmt = last_stmt (cond_bb);
  result = PHI_RESULT (phi);

  /* To handle special cases like floating point comparison, it is easier and
     less error-prone to build a tree and gimplify it on the fly though it is
     less efficient.  */
  cond = fold_build2_loc (gimple_location (stmt),
			  gimple_cond_code (stmt), boolean_type_node,
			  gimple_cond_lhs (stmt), gimple_cond_rhs (stmt));

  /* We need to know which is the true edge and which is the false
     edge so that we know when to invert the condition below.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
  if ((e0 == true_edge && integer_zerop (arg0))
      || (e0 == false_edge && !integer_zerop (arg0))
      || (e1 == true_edge && integer_zerop (arg1))
      || (e1 == false_edge && !integer_zerop (arg1)))
    cond = fold_build1_loc (gimple_location (stmt),
			    TRUTH_NOT_EXPR, TREE_TYPE (cond), cond);

  if (neg)
    {
      cond = fold_convert_loc (gimple_location (stmt),
			       TREE_TYPE (result), cond);
      cond = fold_build1_loc (gimple_location (stmt),
			      NEGATE_EXPR, TREE_TYPE (cond), cond);
    }

  /* Insert our new statements at the end of conditional block before the
     COND_STMT.  */
  gsi = gsi_for_stmt (stmt);
  new_var = force_gimple_operand_gsi (&gsi, cond, true, NULL, true,
				      GSI_SAME_STMT);

  if (!useless_type_conversion_p (TREE_TYPE (result), TREE_TYPE (new_var)))
    {
      source_location locus_0, locus_1;

      new_var2 = make_ssa_name (TREE_TYPE (result));
      new_stmt = gimple_build_assign (new_var2, CONVERT_EXPR, new_var);
      gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
      new_var = new_var2;

      /* Set the locus to the first argument, unless it doesn't have one.  */
      locus_0 = gimple_phi_arg_location (phi, 0);
      locus_1 = gimple_phi_arg_location (phi, 1);
      if (locus_0 == UNKNOWN_LOCATION)
	locus_0 = locus_1;
      gimple_set_location (new_stmt, locus_0);
    }

  replace_phi_edge_with_variable (cond_bb, e1, phi, new_var);
  reset_flow_sensitive_info_in_bb (cond_bb);

  /* Note that we optimized this PHI.  */
  return true;
}
/* Update *ARG which is defined in STMT so that it contains the
   computed value if that seems profitable.  Return true if the
   statement is made dead by that rewriting.  */

static bool
jump_function_from_stmt (tree *arg, gimple *stmt)
{
  enum tree_code code = gimple_assign_rhs_code (stmt);
  if (code == ADDR_EXPR)
    {
      /* For arg = &p->i transform it to p, if possible.  */
      tree rhs1 = gimple_assign_rhs1 (stmt);
      HOST_WIDE_INT offset;
      tree tem = get_addr_base_and_unit_offset (TREE_OPERAND (rhs1, 0),
						&offset);
      if (tem
	  && TREE_CODE (tem) == MEM_REF
	  && (mem_ref_offset (tem) + offset) == 0)
	{
	  *arg = TREE_OPERAND (tem, 0);
	  return true;
	}
    }
  /* TODO: Much like IPA-CP jump-functions we want to handle constant
     additions symbolically here, and we'd need to update the comparison
     code that compares the arg + cst tuples in our caller.  For now the
     code above exactly handles the VEC_BASE pattern from vec.h.  */
  return false;
}
/* RHS is a source argument in a BIT_AND_EXPR which feeds a conditional
   of the form SSA_NAME NE 0.

   If RHS is fed by a simple EQ_EXPR comparison of two values, see if
   the two input values of the EQ_EXPR match arg0 and arg1.

   If so update *code and return TRUE.  Otherwise return FALSE.  */
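/* An illustrative sketch of the matched IL (SSA names made up):

     t1_4 = a_1 == b_2;
     t2_5 = c_3 == d_6;
     t3_7 = t1_4 & t2_5;
     if (t3_7 != 0) ...

   Here RHS would be t1_4 or t2_5, and the operands of the EQ_EXPR
   feeding it are compared against arg0/arg1.  */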
static bool
rhs_is_fed_for_value_replacement (const_tree arg0, const_tree arg1,
				  enum tree_code *code, const_tree rhs)
{
  /* Obviously if RHS is not an SSA_NAME, we can't look at the defining
     statement.  */
  if (TREE_CODE (rhs) == SSA_NAME)
    {
      gimple *def1 = SSA_NAME_DEF_STMT (rhs);

      /* Verify the defining statement has an EQ_EXPR on the RHS.  */
      if (is_gimple_assign (def1) && gimple_assign_rhs_code (def1) == EQ_EXPR)
	{
	  /* Finally verify the source operands of the EQ_EXPR are equal
	     to arg0 and arg1.  */
	  tree op0 = gimple_assign_rhs1 (def1);
	  tree op1 = gimple_assign_rhs2 (def1);
	  if ((operand_equal_for_phi_arg_p (arg0, op0)
	       && operand_equal_for_phi_arg_p (arg1, op1))
	      || (operand_equal_for_phi_arg_p (arg0, op1)
		  && operand_equal_for_phi_arg_p (arg1, op0)))
	    {
	      /* We will perform the optimization.  */
	      *code = gimple_assign_rhs_code (def1);
	      return true;
	    }
	}
    }
  return false;
}
/* Return TRUE if arg0/arg1 are equal to the rhs/lhs or lhs/rhs of COND.

   Also return TRUE if arg0/arg1 are equal to the source arguments of
   an EQ comparison feeding a BIT_AND_EXPR which feeds COND.

   Return FALSE otherwise.  */
static bool
operand_equal_for_value_replacement (const_tree arg0, const_tree arg1,
				     enum tree_code *code, gimple *cond)
{
  gimple *def;
  tree lhs = gimple_cond_lhs (cond);
  tree rhs = gimple_cond_rhs (cond);

  if ((operand_equal_for_phi_arg_p (arg0, lhs)
       && operand_equal_for_phi_arg_p (arg1, rhs))
      || (operand_equal_for_phi_arg_p (arg1, lhs)
	  && operand_equal_for_phi_arg_p (arg0, rhs)))
    return true;

  /* Now handle more complex case where we have an EQ comparison
     which feeds a BIT_AND_EXPR which feeds COND.

     First verify that COND is of the form SSA_NAME NE 0.  */
  if (*code != NE_EXPR || !integer_zerop (rhs)
      || TREE_CODE (lhs) != SSA_NAME)
    return false;

  /* Now ensure that SSA_NAME is set by a BIT_AND_EXPR.  */
  def = SSA_NAME_DEF_STMT (lhs);
  if (!is_gimple_assign (def) || gimple_assign_rhs_code (def) != BIT_AND_EXPR)
    return false;

  /* Now verify arg0/arg1 correspond to the source arguments of an
     EQ comparison feeding the BIT_AND_EXPR.  */

  tree tmp = gimple_assign_rhs1 (def);
  if (rhs_is_fed_for_value_replacement (arg0, arg1, code, tmp))
    return true;

  tmp = gimple_assign_rhs2 (def);
  if (rhs_is_fed_for_value_replacement (arg0, arg1, code, tmp))
    return true;

  return false;
}
/* Returns true if ARG is a neutral element for operation CODE
   on the RIGHT side.  */
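/* For illustration: 0 is neutral for x + 0 and 0 + x, so PLUS_EXPR
   accepts it on either side, whereas x - 0 == x but 0 - x != x, so
   MINUS_EXPR only accepts a zero on the RIGHT.  */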
static bool
neutral_element_p (tree_code code, tree arg, bool right)
{
  switch (code)
    {
    case PLUS_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
      return integer_zerop (arg);

    case LROTATE_EXPR:
    case RROTATE_EXPR:
    case LSHIFT_EXPR:
    case RSHIFT_EXPR:
    case MINUS_EXPR:
    case POINTER_PLUS_EXPR:
      return right && integer_zerop (arg);

    case MULT_EXPR:
      return integer_onep (arg);

    case TRUNC_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case EXACT_DIV_EXPR:
      return right && integer_onep (arg);

    case BIT_AND_EXPR:
      return integer_all_onesp (arg);

    default:
      return false;
    }
}
/* Returns true if ARG is an absorbing element for operation CODE.  */
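/* For illustration: x | -1 == -1, x * 0 == 0 and x & 0 == 0 for any x,
   which is what makes -1 and 0 absorbing for those codes.  */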
static bool
absorbing_element_p (tree_code code, tree arg)
{
  switch (code)
    {
    case BIT_IOR_EXPR:
      return integer_all_onesp (arg);

    case MULT_EXPR:
    case BIT_AND_EXPR:
      return integer_zerop (arg);

    default:
      return false;
    }
}
/* The function value_replacement does the main work of doing the value
   replacement.  Return non-zero if the replacement is done.  Otherwise return
   0.  If we remove the middle basic block, return 2.
   BB is the basic block where the replacement is going to be done.  ARG0
   is argument 0 from the PHI.  Likewise for ARG1.  */
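/* An illustrative sketch (SSA names made up):

     if (x_1 == 7) goto <bb 4>; else goto <bb 3>;
     ...
     # r_2 = PHI <x_1 (3), 7 (4)>

   On the edge where the constant is used, x_1 == 7 holds, so both PHI
   arguments have the value x_1 and the PHI reduces to r_2 = x_1.  */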
static int
value_replacement (basic_block cond_bb, basic_block middle_bb,
		   edge e0, edge e1, gimple *phi,
		   tree arg0, tree arg1)
{
  gimple_stmt_iterator gsi;
  gimple *cond;
  edge true_edge, false_edge;
  enum tree_code code;
  bool empty_or_with_defined_p = true;

  /* If the type says honor signed zeros we cannot do this
     optimization.  */
  if (HONOR_SIGNED_ZEROS (arg1))
    return 0;

  /* If there is a statement in MIDDLE_BB that defines one of the PHI
     arguments, then adjust arg0 or arg1.  */
  gsi = gsi_start_nondebug_after_labels_bb (middle_bb);
  while (!gsi_end_p (gsi))
    {
      gimple *stmt = gsi_stmt (gsi);
      tree lhs;
      gsi_next_nondebug (&gsi);
      if (!is_gimple_assign (stmt))
	{
	  empty_or_with_defined_p = false;
	  continue;
	}
      /* Now try to adjust arg0 or arg1 according to the computation
	 in the statement.  */
      lhs = gimple_assign_lhs (stmt);
      if (!(lhs == arg0
	    && jump_function_from_stmt (&arg0, stmt))
	  || (lhs == arg1
	      && jump_function_from_stmt (&arg1, stmt)))
	empty_or_with_defined_p = false;
    }

  cond = last_stmt (cond_bb);
  code = gimple_cond_code (cond);

  /* This transformation is only valid for equality comparisons.  */
  if (code != NE_EXPR && code != EQ_EXPR)
    return 0;

  /* We need to know which is the true edge and which is the false
     edge so that we know if we have abs or negative abs.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);

  /* At this point we know we have a COND_EXPR with two successors.
     One successor is BB, the other successor is an empty block which
     falls through into BB.

     The condition for the COND_EXPR is known to be NE_EXPR or EQ_EXPR.

     There is a single PHI node at the join point (BB) with two arguments.

     We now need to verify that the two arguments in the PHI node match
     the two arguments to the equality comparison.  */

  if (operand_equal_for_value_replacement (arg0, arg1, &code, cond))
    {
      edge e;
      tree arg;

      /* For NE_EXPR, we want to build an assignment result = arg where
	 arg is the PHI argument associated with the true edge.  For
	 EQ_EXPR we want the PHI argument associated with the false edge.  */
      e = (code == NE_EXPR ? true_edge : false_edge);

      /* Unfortunately, E may not reach BB (it may instead have gone to
	 OTHER_BLOCK).  If that is the case, then we want the single outgoing
	 edge from OTHER_BLOCK which reaches BB and represents the desired
	 path from COND_BLOCK.  */
      if (e->dest == middle_bb)
	e = single_succ_edge (e->dest);

      /* Now we know the incoming edge to BB that has the argument for the
	 RHS of our new assignment statement.  */
      if (e0 == e)
	arg = arg0;
      else
	arg = arg1;

      /* If the middle basic block was empty or is defining the
	 PHI arguments and this is a single phi where the args are different
	 for the edges e0 and e1 then we can remove the middle basic block.  */
      if (empty_or_with_defined_p
	  && single_non_singleton_phi_for_edges (phi_nodes (gimple_bb (phi)),
						 e0, e1) == phi)
	{
	  replace_phi_edge_with_variable (cond_bb, e1, phi, arg);
	  /* Note that we optimized this PHI.  */
	  return 2;
	}
      else
	{
	  /* Replace the PHI arguments with arg.  */
	  SET_PHI_ARG_DEF (phi, e0->dest_idx, arg);
	  SET_PHI_ARG_DEF (phi, e1->dest_idx, arg);
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "PHI ");
	      print_generic_expr (dump_file, gimple_phi_result (phi), 0);
	      fprintf (dump_file, " reduced for COND_EXPR in block %d to ",
		       cond_bb->index);
	      print_generic_expr (dump_file, arg, 0);
	      fprintf (dump_file, ".\n");
	    }
	  return 1;
	}
    }

  /* Now optimize (x != 0) ? x + y : y to just x + y.
     The following condition is too restrictive, there can easily be another
     stmt in middle_bb, for instance a CONVERT_EXPR for the second argument.  */
  gimple *assign = last_and_only_stmt (middle_bb);
  if (!assign || gimple_code (assign) != GIMPLE_ASSIGN
      || gimple_assign_rhs_class (assign) != GIMPLE_BINARY_RHS
      || (!INTEGRAL_TYPE_P (TREE_TYPE (arg0))
	  && !POINTER_TYPE_P (TREE_TYPE (arg0))))
    return 0;

  /* Punt if there are (degenerate) PHIs in middle_bb, there should not be.  */
  if (!gimple_seq_empty_p (phi_nodes (middle_bb)))
    return 0;

  /* Only transform if it removes the condition.  */
  if (!single_non_singleton_phi_for_edges (phi_nodes (gimple_bb (phi)), e0, e1))
    return 0;

  /* Size-wise, this is always profitable.  */
  if (optimize_bb_for_speed_p (cond_bb)
      /* The special case is useless if it has a low probability.  */
      && profile_status_for_fn (cfun) != PROFILE_ABSENT
      && EDGE_PRED (middle_bb, 0)->probability < PROB_EVEN
      /* If assign is cheap, there is no point avoiding it.  */
      && estimate_num_insns (assign, &eni_time_weights)
	 >= 3 * estimate_num_insns (cond, &eni_time_weights))
    return 0;

  tree lhs = gimple_assign_lhs (assign);
  tree rhs1 = gimple_assign_rhs1 (assign);
  tree rhs2 = gimple_assign_rhs2 (assign);
  enum tree_code code_def = gimple_assign_rhs_code (assign);
  tree cond_lhs = gimple_cond_lhs (cond);
  tree cond_rhs = gimple_cond_rhs (cond);

  if (((code == NE_EXPR && e1 == false_edge)
       || (code == EQ_EXPR && e1 == true_edge))
      && arg0 == lhs
      && ((arg1 == rhs1
	   && operand_equal_for_phi_arg_p (rhs2, cond_lhs)
	   && neutral_element_p (code_def, cond_rhs, true))
	  || (arg1 == rhs2
	      && operand_equal_for_phi_arg_p (rhs1, cond_lhs)
	      && neutral_element_p (code_def, cond_rhs, false))
	  || (operand_equal_for_phi_arg_p (arg1, cond_rhs)
	      && (operand_equal_for_phi_arg_p (rhs2, cond_lhs)
		  || operand_equal_for_phi_arg_p (rhs1, cond_lhs))
	      && absorbing_element_p (code_def, cond_rhs))))
    {
      gsi = gsi_for_stmt (cond);
      if (INTEGRAL_TYPE_P (TREE_TYPE (lhs)))
	{
	  /* Moving ASSIGN might change VR of lhs, e.g. when moving u_6
	     def-stmt in:
	     if (n_5 != 0)
	       goto <bb 3>;
	     else
	       goto <bb 4>;

	     <bb 3>:
	     # RANGE [0, 4294967294]
	     u_6 = n_5 + 4294967295;

	     <bb 4>:
	     # u_3 = PHI <u_6(3), 4294967295(2)>  */
	  SSA_NAME_RANGE_INFO (lhs) = NULL;
	  /* If available, we can use VR of phi result at least.  */
	  tree phires = gimple_phi_result (phi);
	  struct range_info_def *phires_range_info
	    = SSA_NAME_RANGE_INFO (phires);
	  if (phires_range_info)
	    duplicate_ssa_name_range_info (lhs, SSA_NAME_RANGE_TYPE (phires),
					   phires_range_info);
	}
      gimple_stmt_iterator gsi_from = gsi_for_stmt (assign);
      gsi_move_before (&gsi_from, &gsi);
      replace_phi_edge_with_variable (cond_bb, e1, phi, lhs);
      return 2;
    }

  return 0;
}
/* The function minmax_replacement does the main work of doing the minmax
   replacement.  Return true if the replacement is done.  Otherwise return
   false.
   BB is the basic block where the replacement is going to be done.  ARG0
   is argument 0 from the PHI.  Likewise for ARG1.  */
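/* An illustrative sketch (SSA names made up):

     if (a_1 < b_2) goto <bb 3>; else goto <bb 4>;
     ...
     # r_5 = PHI <a_1 (3), b_2 (4)>

   becomes

     r_5 = MIN_EXPR <a_1, b_2>

   with the branch and the middle block removed.  */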
static bool
minmax_replacement (basic_block cond_bb, basic_block middle_bb,
		    edge e0, edge e1, gimple *phi,
		    tree arg0, tree arg1)
{
  tree result, type;
  gcond *cond;
  gassign *new_stmt;
  edge true_edge, false_edge;
  enum tree_code cmp, minmax, ass_code;
  tree smaller, alt_smaller, larger, alt_larger, arg_true, arg_false;
  gimple_stmt_iterator gsi, gsi_from;

  type = TREE_TYPE (PHI_RESULT (phi));

  /* The optimization may be unsafe due to NaNs.  */
  if (HONOR_NANS (type))
    return false;

  cond = as_a <gcond *> (last_stmt (cond_bb));
  cmp = gimple_cond_code (cond);

  /* This transformation is only valid for order comparisons.  Record which
     operand is smaller/larger if the result of the comparison is true.  */
  alt_smaller = NULL_TREE;
  alt_larger = NULL_TREE;
  if (cmp == LT_EXPR || cmp == LE_EXPR)
    {
      smaller = gimple_cond_lhs (cond);
      larger = gimple_cond_rhs (cond);
      /* If we have smaller < CST it is equivalent to smaller <= CST-1.
	 Likewise smaller <= CST is equivalent to smaller < CST+1.  */
      if (TREE_CODE (larger) == INTEGER_CST)
	{
	  if (cmp == LT_EXPR)
	    {
	      bool overflow;
	      wide_int alt = wi::sub (larger, 1, TYPE_SIGN (TREE_TYPE (larger)),
				      &overflow);
	      if (! overflow)
		alt_larger = wide_int_to_tree (TREE_TYPE (larger), alt);
	    }
	  else
	    {
	      bool overflow;
	      wide_int alt = wi::add (larger, 1, TYPE_SIGN (TREE_TYPE (larger)),
				      &overflow);
	      if (! overflow)
		alt_larger = wide_int_to_tree (TREE_TYPE (larger), alt);
	    }
	}
    }
  else if (cmp == GT_EXPR || cmp == GE_EXPR)
    {
      smaller = gimple_cond_rhs (cond);
      larger = gimple_cond_lhs (cond);
      /* If we have larger > CST it is equivalent to larger >= CST+1.
	 Likewise larger >= CST is equivalent to larger > CST-1.  */
      if (TREE_CODE (smaller) == INTEGER_CST)
	{
	  if (cmp == GT_EXPR)
	    {
	      bool overflow;
	      wide_int alt = wi::add (smaller, 1, TYPE_SIGN (TREE_TYPE (smaller)),
				      &overflow);
	      if (! overflow)
		alt_smaller = wide_int_to_tree (TREE_TYPE (smaller), alt);
	    }
	  else
	    {
	      bool overflow;
	      wide_int alt = wi::sub (smaller, 1, TYPE_SIGN (TREE_TYPE (smaller)),
				      &overflow);
	      if (! overflow)
		alt_smaller = wide_int_to_tree (TREE_TYPE (smaller), alt);
	    }
	}
    }
  else
    return false;

  /* We need to know which is the true edge and which is the false
     edge so that we know if we have abs or negative abs.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);

  /* Forward the edges over the middle basic block.  */
  if (true_edge->dest == middle_bb)
    true_edge = EDGE_SUCC (true_edge->dest, 0);
  if (false_edge->dest == middle_bb)
    false_edge = EDGE_SUCC (false_edge->dest, 0);

  if (true_edge == e0)
    {
      gcc_assert (false_edge == e1);
      arg_true = arg0;
      arg_false = arg1;
    }
  else
    {
      gcc_assert (false_edge == e0);
      gcc_assert (true_edge == e1);
      arg_true = arg1;
      arg_false = arg0;
    }

  if (empty_block_p (middle_bb))
    {
      if ((operand_equal_for_phi_arg_p (arg_true, smaller)
	   || (alt_smaller
	       && operand_equal_for_phi_arg_p (arg_true, alt_smaller)))
	  && (operand_equal_for_phi_arg_p (arg_false, larger)
	      || (alt_larger
		  && operand_equal_for_phi_arg_p (arg_false, alt_larger))))
	{
	  /* Case

	     if (smaller < larger)
	       rslt = smaller;
	     else
	       rslt = larger;  */
	  minmax = MIN_EXPR;
	}
      else if ((operand_equal_for_phi_arg_p (arg_false, smaller)
		|| (alt_smaller
		    && operand_equal_for_phi_arg_p (arg_false, alt_smaller)))
	       && (operand_equal_for_phi_arg_p (arg_true, larger)
		   || (alt_larger
		       && operand_equal_for_phi_arg_p (arg_true, alt_larger))))
	minmax = MAX_EXPR;
      else
	return false;
    }
  else
    {
      /* Recognize the following case, assuming d <= u:

	 if (a <= u)
	   b = MAX (a, d);
	 x = PHI <b, u>

	 This is equivalent to

	 b = MAX (a, d);
	 x = MIN (b, u);  */

      gimple *assign = last_and_only_stmt (middle_bb);
      tree lhs, op0, op1, bound;

      if (!assign
	  || gimple_code (assign) != GIMPLE_ASSIGN)
	return false;

      lhs = gimple_assign_lhs (assign);
      ass_code = gimple_assign_rhs_code (assign);
      if (ass_code != MAX_EXPR && ass_code != MIN_EXPR)
	return false;
      op0 = gimple_assign_rhs1 (assign);
      op1 = gimple_assign_rhs2 (assign);

      if (true_edge->src == middle_bb)
	{
	  /* We got here if the condition is true, i.e., SMALLER < LARGER.  */
	  if (!operand_equal_for_phi_arg_p (lhs, arg_true))
	    return false;

	  if (operand_equal_for_phi_arg_p (arg_false, larger)
	      || (alt_larger
		  && operand_equal_for_phi_arg_p (arg_false, alt_larger)))
	    {
	      /* Case

		 if (smaller < larger)
		   {
		     r' = MAX_EXPR (smaller, bound)
		   }
		 r = PHI <r', larger>  --> to be turned to MIN_EXPR.  */
	      if (ass_code != MAX_EXPR)
		return false;

	      minmax = MIN_EXPR;
	      if (operand_equal_for_phi_arg_p (op0, smaller)
		  || (alt_smaller
		      && operand_equal_for_phi_arg_p (op0, alt_smaller)))
		bound = op1;
	      else if (operand_equal_for_phi_arg_p (op1, smaller)
		       || (alt_smaller
			   && operand_equal_for_phi_arg_p (op1, alt_smaller)))
		bound = op0;
	      else
		return false;

	      /* We need BOUND <= LARGER.  */
	      if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
						  bound, larger)))
		return false;
	    }
	  else if (operand_equal_for_phi_arg_p (arg_false, smaller)
		   || (alt_smaller
		       && operand_equal_for_phi_arg_p (arg_false, alt_smaller)))
	    {
	      /* Case

		 if (smaller < larger)
		   {
		     r' = MIN_EXPR (larger, bound)
		   }
		 r = PHI <r', smaller>  --> to be turned to MAX_EXPR.  */
	      if (ass_code != MIN_EXPR)
		return false;

	      minmax = MAX_EXPR;
	      if (operand_equal_for_phi_arg_p (op0, larger)
		  || (alt_larger
		      && operand_equal_for_phi_arg_p (op0, alt_larger)))
		bound = op1;
	      else if (operand_equal_for_phi_arg_p (op1, larger)
		       || (alt_larger
			   && operand_equal_for_phi_arg_p (op1, alt_larger)))
		bound = op0;
	      else
		return false;

	      /* We need BOUND >= SMALLER.  */
	      if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
						  bound, smaller)))
		return false;
	    }
	  else
	    return false;
	}
      else
	{
	  /* We got here if the condition is false, i.e., SMALLER > LARGER.  */
	  if (!operand_equal_for_phi_arg_p (lhs, arg_false))
	    return false;

	  if (operand_equal_for_phi_arg_p (arg_true, larger)
	      || (alt_larger
		  && operand_equal_for_phi_arg_p (arg_true, alt_larger)))
	    {
	      /* Case

		 if (smaller > larger)
		   {
		     r' = MIN_EXPR (smaller, bound)
		   }
		 r = PHI <r', larger>  --> to be turned to MAX_EXPR.  */
	      if (ass_code != MIN_EXPR)
		return false;

	      minmax = MAX_EXPR;
	      if (operand_equal_for_phi_arg_p (op0, smaller)
		  || (alt_smaller
		      && operand_equal_for_phi_arg_p (op0, alt_smaller)))
		bound = op1;
	      else if (operand_equal_for_phi_arg_p (op1, smaller)
		       || (alt_smaller
			   && operand_equal_for_phi_arg_p (op1, alt_smaller)))
		bound = op0;
	      else
		return false;

	      /* We need BOUND >= LARGER.  */
	      if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
						  bound, larger)))
		return false;
	    }
	  else if (operand_equal_for_phi_arg_p (arg_true, smaller)
		   || (alt_smaller
		       && operand_equal_for_phi_arg_p (arg_true, alt_smaller)))
	    {
	      /* Case

		 if (smaller > larger)
		   {
		     r' = MAX_EXPR (larger, bound)
		   }
		 r = PHI <r', smaller>  --> to be turned to MIN_EXPR.  */
	      if (ass_code != MAX_EXPR)
		return false;

	      minmax = MIN_EXPR;
	      if (operand_equal_for_phi_arg_p (op0, larger))
		bound = op1;
	      else if (operand_equal_for_phi_arg_p (op1, larger))
		bound = op0;
	      else
		return false;

	      /* We need BOUND <= SMALLER.  */
	      if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
						  bound, smaller)))
		return false;
	    }
	  else
	    return false;
	}

      /* Move the statement from the middle block.  */
      gsi = gsi_last_bb (cond_bb);
      gsi_from = gsi_last_nondebug_bb (middle_bb);
      gsi_move_before (&gsi_from, &gsi);
    }

  /* Create an SSA var to hold the min/max result.  If we're the only
     things setting the target PHI, then we can clone the PHI
     variable.  Otherwise we must create a new one.  */
  result = PHI_RESULT (phi);
  if (EDGE_COUNT (gimple_bb (phi)->preds) == 2)
    result = duplicate_ssa_name (result, NULL);
  else
    result = make_ssa_name (TREE_TYPE (result));

  /* Emit the statement to compute min/max.  */
  new_stmt = gimple_build_assign (result, minmax, arg0, arg1);
  gsi = gsi_last_bb (cond_bb);
  gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);

  replace_phi_edge_with_variable (cond_bb, e1, phi, result);
  reset_flow_sensitive_info_in_bb (cond_bb);

  return true;
}
/* The function abs_replacement does the main work of doing the absolute
   replacement.  Return true if the replacement is done.  Otherwise return
   false.
   BB is the basic block where the replacement is going to be done.  ARG0
   is argument 0 from the PHI.  Likewise for ARG1.  */
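/* An illustrative sketch (SSA names made up):

     if (x_1 < 0) goto <bb 3>; else goto <bb 4>;
     <bb 3>:
       y_2 = -x_1;
     <bb 4>:
       # y_3 = PHI <y_2 (3), x_1 (2)>

   becomes y_3 = ABS_EXPR <x_1>, with a NEGATE_EXPR appended instead
   when the negated arm is the one taken for positive values.  */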
static bool
abs_replacement (basic_block cond_bb, basic_block middle_bb,
		 edge e0 ATTRIBUTE_UNUSED, edge e1,
		 gimple *phi, tree arg0, tree arg1)
{
  tree result;
  gassign *new_stmt;
  gimple *cond;
  gimple_stmt_iterator gsi;
  edge true_edge, false_edge;
  gimple *assign;
  edge e;
  tree rhs, lhs;
  bool negate;
  enum tree_code cond_code;

  /* If the type says honor signed zeros we cannot do this
     optimization.  */
  if (HONOR_SIGNED_ZEROS (arg1))
    return false;

  /* OTHER_BLOCK must have only one executable statement which must have the
     form arg0 = -arg1 or arg1 = -arg0.  */

  assign = last_and_only_stmt (middle_bb);
  /* If we did not find the proper negation assignment, then we can not
     optimize.  */
  if (assign == NULL)
    return false;

  /* If we got here, then we have found the only executable statement
     in OTHER_BLOCK.  If it is anything other than arg0 = -arg1 or
     arg1 = -arg0, then we can not optimize.  */
  if (gimple_code (assign) != GIMPLE_ASSIGN)
    return false;

  lhs = gimple_assign_lhs (assign);

  if (gimple_assign_rhs_code (assign) != NEGATE_EXPR)
    return false;

  rhs = gimple_assign_rhs1 (assign);

  /* The assignment has to be arg0 = -arg1 or arg1 = -arg0.  */
  if (!(lhs == arg0 && rhs == arg1)
      && !(lhs == arg1 && rhs == arg0))
    return false;

  cond = last_stmt (cond_bb);
  result = PHI_RESULT (phi);

  /* Only relationals comparing arg[01] against zero are interesting.  */
  cond_code = gimple_cond_code (cond);
  if (cond_code != GT_EXPR && cond_code != GE_EXPR
      && cond_code != LT_EXPR && cond_code != LE_EXPR)
    return false;

  /* Make sure the conditional is arg[01] OP y.  */
  if (gimple_cond_lhs (cond) != rhs)
    return false;

  if (FLOAT_TYPE_P (TREE_TYPE (gimple_cond_rhs (cond)))
      ? real_zerop (gimple_cond_rhs (cond))
      : integer_zerop (gimple_cond_rhs (cond)))
    ;
  else
    return false;

  /* We need to know which is the true edge and which is the false
     edge so that we know if we have abs or negative abs.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);

  /* For GT_EXPR/GE_EXPR, if the true edge goes to OTHER_BLOCK, then we
     will need to negate the result.  Similarly for LT_EXPR/LE_EXPR if
     the false edge goes to OTHER_BLOCK.  */
  if (cond_code == GT_EXPR || cond_code == GE_EXPR)
    e = true_edge;
  else
    e = false_edge;

  if (e->dest == middle_bb)
    negate = true;
  else
    negate = false;

  /* If the code negates only iff positive then make sure to not
     introduce undefined behavior when negating or computing the absolute.
     ??? We could use range info if present to check for arg1 == INT_MIN.  */
  if (negate
      && (ANY_INTEGRAL_TYPE_P (TREE_TYPE (arg1))
	  && ! TYPE_OVERFLOW_WRAPS (TREE_TYPE (arg1))))
    return false;

  result = duplicate_ssa_name (result, NULL);

  if (negate)
    lhs = make_ssa_name (TREE_TYPE (result));
  else
    lhs = result;

  /* Build the modify expression with abs expression.  */
  new_stmt = gimple_build_assign (lhs, ABS_EXPR, rhs);

  gsi = gsi_last_bb (cond_bb);
  gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);

  if (negate)
    {
      /* Get the right GSI.  We want to insert after the recently
	 added ABS_EXPR statement (which we know is the first statement
	 in the block).  */
      new_stmt = gimple_build_assign (result, NEGATE_EXPR, lhs);

      gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
    }

  replace_phi_edge_with_variable (cond_bb, e1, phi, result);
  reset_flow_sensitive_info_in_bb (cond_bb);

  /* Note that we optimized this PHI.  */
  return true;
}
/* Auxiliary functions to determine the set of memory accesses which
   can't trap because they are preceded by accesses to the same memory
   portion.  We do that for MEM_REFs, so we only need to track
   the SSA_NAME of the pointer indirectly referenced.  The algorithm
   simply is a walk over all instructions in dominator order.  When
   we see a MEM_REF we determine if we've already seen a same
   ref anywhere up to the root of the dominator tree.  If we do, the
   current access can't trap.  If we don't see any dominating access
   the current access might trap, but might also make later accesses
   non-trapping, so we remember it.  We need to be careful with loads
   or stores, for instance a load might not trap, while a store would,
   so if we see a dominating read access this doesn't mean that a later
   write access would not trap.  Hence we also need to differentiate the
   type of access(es) seen.

   ??? We currently are very conservative and assume that a load might
   trap even if a store doesn't (write-only memory).  This probably is
   overly conservative.  */
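/* An illustrative sketch (SSA names made up): in

     *p_1 = 0;
     ...
     if (cond_2)
       *p_1 = x_3;

   the conditional store cannot trap, because the dominating store to
   the same location would have trapped first, so it is safe for
   cond_store_replacement to sink and unconditionalize it.  */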
/* A hash-table of SSA_NAMEs, and in which basic block a MEM_REF
   through it was seen, which would constitute a no-trap region for
   same accesses.  */
struct name_to_bb
{
  unsigned int ssa_name_ver;
  unsigned int phase;
  bool store;
  HOST_WIDE_INT offset, size;
  basic_block bb;
};
/* Hashtable helpers.  */

struct ssa_names_hasher : free_ptr_hash <name_to_bb>
{
  static inline hashval_t hash (const name_to_bb *);
  static inline bool equal (const name_to_bb *, const name_to_bb *);
};
/* Used for quick clearing of the hash-table when we see calls.
   Hash entries with phase < nt_call_phase are invalid.  */
static unsigned int nt_call_phase;

/* The hash function.  */

inline hashval_t
ssa_names_hasher::hash (const name_to_bb *n)
{
  return n->ssa_name_ver ^ (((hashval_t) n->store) << 31)
	 ^ (n->offset << 6) ^ (n->size << 3);
}

/* The equality function of *P1 and *P2.  */

inline bool
ssa_names_hasher::equal (const name_to_bb *n1, const name_to_bb *n2)
{
  return n1->ssa_name_ver == n2->ssa_name_ver
	 && n1->store == n2->store
	 && n1->offset == n2->offset
	 && n1->size == n2->size;
}
class nontrapping_dom_walker : public dom_walker
{
public:
  nontrapping_dom_walker (cdi_direction direction, hash_set<tree> *ps)
    : dom_walker (direction), m_nontrapping (ps), m_seen_ssa_names (128) {}

  virtual edge before_dom_children (basic_block);
  virtual void after_dom_children (basic_block);

private:

  /* We see the expression EXP in basic block BB.  If it's an interesting
     expression (a MEM_REF through an SSA_NAME) possibly insert the
     expression into the set NONTRAP or the hash table of seen expressions.
     STORE is true if this expression is on the LHS, otherwise it's on
     the RHS.  */
  void add_or_mark_expr (basic_block, tree, bool);

  hash_set<tree> *m_nontrapping;

  /* The hash table for remembering what we've seen.  */
  hash_table<ssa_names_hasher> m_seen_ssa_names;
};
/* Called by walk_dominator_tree, when entering the block BB.  */

edge
nontrapping_dom_walker::before_dom_children (basic_block bb)
{
  edge e;
  edge_iterator ei;
  gimple_stmt_iterator gsi;

  /* If we haven't seen all our predecessors, clear the hash-table.  */
  FOR_EACH_EDGE (e, ei, bb->preds)
    if ((((size_t)e->src->aux) & 2) == 0)
      {
	nt_call_phase++;
	break;
      }

  /* Mark this BB as being on the path to dominator root and as visited.  */
  bb->aux = (void*)(1 | 2);

  /* And walk the statements in order.  */
  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple *stmt = gsi_stmt (gsi);

      if ((gimple_code (stmt) == GIMPLE_ASM && gimple_vdef (stmt))
	  || (is_gimple_call (stmt)
	      && (!nonfreeing_call_p (stmt) || !nonbarrier_call_p (stmt))))
	nt_call_phase++;
      else if (gimple_assign_single_p (stmt) && !gimple_has_volatile_ops (stmt))
	{
	  add_or_mark_expr (bb, gimple_assign_lhs (stmt), true);
	  add_or_mark_expr (bb, gimple_assign_rhs1 (stmt), false);
	}
    }
  return NULL;
}
/* Called by walk_dominator_tree, when basic block BB is exited.  */

void
nontrapping_dom_walker::after_dom_children (basic_block bb)
{
  /* This BB isn't on the path to dominator root anymore.  */
  bb->aux = (void*)2;
}
/* We see the expression EXP in basic block BB.  If it's an interesting
   expression (a MEM_REF through an SSA_NAME) possibly insert the
   expression into the set NONTRAP or the hash table of seen expressions.
   STORE is true if this expression is on the LHS, otherwise it's on
   the RHS.  */
void
nontrapping_dom_walker::add_or_mark_expr (basic_block bb, tree exp, bool store)
{
  HOST_WIDE_INT size;

  if (TREE_CODE (exp) == MEM_REF
      && TREE_CODE (TREE_OPERAND (exp, 0)) == SSA_NAME
      && tree_fits_shwi_p (TREE_OPERAND (exp, 1))
      && (size = int_size_in_bytes (TREE_TYPE (exp))) > 0)
    {
      tree name = TREE_OPERAND (exp, 0);
      struct name_to_bb map;
      name_to_bb **slot;
      struct name_to_bb *n2bb;
      basic_block found_bb = 0;

      /* Try to find the last seen MEM_REF through the same
         SSA_NAME, which can trap.  */
      map.ssa_name_ver = SSA_NAME_VERSION (name);
      map.phase = 0;
      map.bb = 0;
      map.store = store;
      map.offset = tree_to_shwi (TREE_OPERAND (exp, 1));
      map.size = size;

      slot = m_seen_ssa_names.find_slot (&map, INSERT);
      n2bb = *slot;
      if (n2bb && n2bb->phase >= nt_call_phase)
	found_bb = n2bb->bb;

      /* If we've found a trapping MEM_REF, _and_ it dominates EXP
	 (it's in a basic block on the path from us to the dominator root)
	 then we can't trap.  */
      if (found_bb && (((size_t)found_bb->aux) & 1) == 1)
	{
	  m_nontrapping->add (exp);
	}
      else
	{
	  /* EXP might trap, so insert it into the hash table.  */
	  if (n2bb)
	    {
	      n2bb->phase = nt_call_phase;
	      n2bb->bb = bb;
	    }
	  else
	    {
	      n2bb = XNEW (struct name_to_bb);
	      n2bb->ssa_name_ver = SSA_NAME_VERSION (name);
	      n2bb->phase = nt_call_phase;
	      n2bb->bb = bb;
	      n2bb->store = store;
	      n2bb->offset = map.offset;
	      n2bb->size = size;
	      *slot = n2bb;
	    }
	}
    }
}
/* This is the entry point of gathering non-trapping memory accesses.
   It will do a dominator walk over the whole function, and it will
   make use of the bb->aux pointers.  It returns a set of trees
   (the MEM_REFs themselves) which can't trap.  */
static hash_set<tree> *
get_non_trapping (void)
{
  nt_call_phase = 0;
  hash_set<tree> *nontrap = new hash_set<tree>;
  /* We're going to do a dominator walk, so ensure that we have
     dominance information.  */
  calculate_dominance_info (CDI_DOMINATORS);

  nontrapping_dom_walker (CDI_DOMINATORS, nontrap)
    .walk (cfun->cfg->x_entry_block_ptr);

  clear_aux_for_blocks ();
  return nontrap;
}
/* Do the main work of conditional store replacement.  We already know
   that the recognized pattern looks like so:

   split:
     if (cond) goto MIDDLE_BB; else goto JOIN_BB (edge E1)
   MIDDLE_BB:
     something
     fallthrough (edge E0)
   JOIN_BB:
     some more

   We check that MIDDLE_BB contains only one store, that that store
   doesn't trap (not via NOTRAP, but via checking if an access to the same
   memory location dominates us), and that the store has a "simple" RHS.  */
static bool
cond_store_replacement (basic_block middle_bb, basic_block join_bb,
			edge e0, edge e1, hash_set<tree> *nontrap)
{
  gimple *assign = last_and_only_stmt (middle_bb);
  tree lhs, rhs, name, name2;
  gphi *newphi;
  gassign *new_stmt;
  gimple_stmt_iterator gsi;
  source_location locus;

  /* Check if middle_bb contains only one store.  */
  if (!assign
      || !gimple_assign_single_p (assign)
      || gimple_has_volatile_ops (assign))
    return false;

  locus = gimple_location (assign);
  lhs = gimple_assign_lhs (assign);
  rhs = gimple_assign_rhs1 (assign);
  if (TREE_CODE (lhs) != MEM_REF
      || TREE_CODE (TREE_OPERAND (lhs, 0)) != SSA_NAME
      || !is_gimple_reg_type (TREE_TYPE (lhs)))
    return false;

  /* Prove that we can move the store down.  We could also check
     TREE_THIS_NOTRAP here, but in that case we also could move stores,
     whose value is not available readily, which we want to avoid.  */
  if (!nontrap->contains (lhs))
    return false;

  /* Now we've checked the constraints, so do the transformation:
     1) Remove the single store.  */
  gsi = gsi_for_stmt (assign);
  unlink_stmt_vdef (assign);
  gsi_remove (&gsi, true);
  release_defs (assign);

  /* 2) Insert a load from the memory of the store to the temporary
        on the edge which did not contain the store.  */
  lhs = unshare_expr (lhs);
  name = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
  new_stmt = gimple_build_assign (name, lhs);
  gimple_set_location (new_stmt, locus);
  gsi_insert_on_edge (e1, new_stmt);

  /* 3) Create a PHI node at the join block, with one argument
        holding the old RHS, and the other holding the temporary
        where we stored the old memory contents.  */
  name2 = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
  newphi = create_phi_node (name2, join_bb);
  add_phi_arg (newphi, rhs, e0, locus);
  add_phi_arg (newphi, name, e1, locus);

  lhs = unshare_expr (lhs);
  new_stmt = gimple_build_assign (lhs, PHI_RESULT (newphi));

  /* 4) Insert that PHI node.  */
  gsi = gsi_after_labels (join_bb);
  if (gsi_end_p (gsi))
    {
      gsi = gsi_last_bb (join_bb);
      gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
    }
  else
    gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);

  return true;
}
1796 /* Do the main work of conditional store replacement. */
1798 static bool
1799 cond_if_else_store_replacement_1 (basic_block then_bb, basic_block else_bb,
1800 basic_block join_bb, gimple *then_assign,
1801 gimple *else_assign)
1803 tree lhs_base, lhs, then_rhs, else_rhs, name;
1804 source_location then_locus, else_locus;
1805 gimple_stmt_iterator gsi;
1806 gphi *newphi;
1807 gassign *new_stmt;
1809 if (then_assign == NULL
1810 || !gimple_assign_single_p (then_assign)
1811 || gimple_clobber_p (then_assign)
1812 || gimple_has_volatile_ops (then_assign)
1813 || else_assign == NULL
1814 || !gimple_assign_single_p (else_assign)
1815 || gimple_clobber_p (else_assign)
1816 || gimple_has_volatile_ops (else_assign))
1817 return false;
1819 lhs = gimple_assign_lhs (then_assign);
1820 if (!is_gimple_reg_type (TREE_TYPE (lhs))
1821 || !operand_equal_p (lhs, gimple_assign_lhs (else_assign), 0))
1822 return false;
1824 lhs_base = get_base_address (lhs);
1825 if (lhs_base == NULL_TREE
1826 || (!DECL_P (lhs_base) && TREE_CODE (lhs_base) != MEM_REF))
1827 return false;
1829 then_rhs = gimple_assign_rhs1 (then_assign);
1830 else_rhs = gimple_assign_rhs1 (else_assign);
1831 then_locus = gimple_location (then_assign);
1832 else_locus = gimple_location (else_assign);
1834 /* Now we've checked the constraints, so do the transformation:
1835 1) Remove the stores. */
1836 gsi = gsi_for_stmt (then_assign);
1837 unlink_stmt_vdef (then_assign);
1838 gsi_remove (&gsi, true);
1839 release_defs (then_assign);
1841 gsi = gsi_for_stmt (else_assign);
1842 unlink_stmt_vdef (else_assign);
1843 gsi_remove (&gsi, true);
1844 release_defs (else_assign);
1846 /* 2) Create a PHI node at the join block, with one argument
1847 holding the RHS of the THEN store and the other holding
1848 the RHS of the ELSE store. */
1849 name = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
1850 newphi = create_phi_node (name, join_bb);
1851 add_phi_arg (newphi, then_rhs, EDGE_SUCC (then_bb, 0), then_locus);
1852 add_phi_arg (newphi, else_rhs, EDGE_SUCC (else_bb, 0), else_locus);
1854 new_stmt = gimple_build_assign (lhs, PHI_RESULT (newphi));
1856 /* 3) Insert the new store of the PHI result into the join block. */
1857 gsi = gsi_after_labels (join_bb);
1858 if (gsi_end_p (gsi))
1859 {
1860 gsi = gsi_last_bb (join_bb);
1861 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
1862 }
1863 else
1864 gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
1866 return true;
1867 }
1869 /* Conditional store replacement. We already know
1870 that the recognized pattern looks like so:
1872 split:
1873 if (cond) goto THEN_BB; else goto ELSE_BB (edge E1)
1874 THEN_BB:
1875 ...
1876 X = Y;
1877 ...
1878 goto JOIN_BB;
1879 ELSE_BB:
1880 ...
1881 X = Z;
1882 ...
1883 fallthrough (edge E0)
1884 JOIN_BB:
1885 some more
1887 We check that it is safe to sink the store to JOIN_BB by verifying that
1888 there are no read-after-write or write-after-write dependencies in
1889 THEN_BB and ELSE_BB. */
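/* An illustrative C-level instance of this pattern (a sketch; p, y
   and z are hypothetical names):
     if (cond)
       *p = y;
     else
       *p = z;
   which, once the dependence checks below succeed, becomes a single
   store sunk into JOIN_BB:
     tmp = cond ? y : z;
     *p = tmp;  */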
1891 static bool
1892 cond_if_else_store_replacement (basic_block then_bb, basic_block else_bb,
1893 basic_block join_bb)
1894 {
1895 gimple *then_assign = last_and_only_stmt (then_bb);
1896 gimple *else_assign = last_and_only_stmt (else_bb);
1897 vec<data_reference_p> then_datarefs, else_datarefs;
1898 vec<ddr_p> then_ddrs, else_ddrs;
1899 gimple *then_store, *else_store;
1900 bool found, ok = false, res;
1901 struct data_dependence_relation *ddr;
1902 data_reference_p then_dr, else_dr;
1903 int i, j;
1904 tree then_lhs, else_lhs;
1905 basic_block blocks[3];
1907 if (MAX_STORES_TO_SINK == 0)
1908 return false;
1910 /* Handle the case with a single statement in each of THEN_BB and ELSE_BB. */
1911 if (then_assign && else_assign)
1912 return cond_if_else_store_replacement_1 (then_bb, else_bb, join_bb,
1913 then_assign, else_assign);
1915 /* Find data references. */
1916 then_datarefs.create (1);
1917 else_datarefs.create (1);
1918 if ((find_data_references_in_bb (NULL, then_bb, &then_datarefs)
1919 == chrec_dont_know)
1920 || !then_datarefs.length ()
1921 || (find_data_references_in_bb (NULL, else_bb, &else_datarefs)
1922 == chrec_dont_know)
1923 || !else_datarefs.length ())
1924 {
1925 free_data_refs (then_datarefs);
1926 free_data_refs (else_datarefs);
1927 return false;
1928 }
1930 /* Find pairs of stores with equal LHS. */
1931 auto_vec<gimple *, 1> then_stores, else_stores;
1932 FOR_EACH_VEC_ELT (then_datarefs, i, then_dr)
1933 {
1934 if (DR_IS_READ (then_dr))
1935 continue;
1937 then_store = DR_STMT (then_dr);
1938 then_lhs = gimple_get_lhs (then_store);
1939 if (then_lhs == NULL_TREE)
1940 continue;
1941 found = false;
1943 FOR_EACH_VEC_ELT (else_datarefs, j, else_dr)
1944 {
1945 if (DR_IS_READ (else_dr))
1946 continue;
1948 else_store = DR_STMT (else_dr);
1949 else_lhs = gimple_get_lhs (else_store);
1950 if (else_lhs == NULL_TREE)
1951 continue;
1953 if (operand_equal_p (then_lhs, else_lhs, 0))
1954 {
1955 found = true;
1956 break;
1957 }
1958 }
1960 if (!found)
1961 continue;
1963 then_stores.safe_push (then_store);
1964 else_stores.safe_push (else_store);
1965 }
1967 /* No pairs of stores found, or more pairs than MAX_STORES_TO_SINK allows. */
1968 if (!then_stores.length ()
1969 || then_stores.length () > (unsigned) MAX_STORES_TO_SINK)
1970 {
1971 free_data_refs (then_datarefs);
1972 free_data_refs (else_datarefs);
1973 return false;
1974 }
1976 /* Compute and check data dependencies in both basic blocks. */
1977 then_ddrs.create (1);
1978 else_ddrs.create (1);
1979 if (!compute_all_dependences (then_datarefs, &then_ddrs,
1980 vNULL, false)
1981 || !compute_all_dependences (else_datarefs, &else_ddrs,
1982 vNULL, false))
1983 {
1984 free_dependence_relations (then_ddrs);
1985 free_dependence_relations (else_ddrs);
1986 free_data_refs (then_datarefs);
1987 free_data_refs (else_datarefs);
1988 return false;
1989 }
1990 blocks[0] = then_bb;
1991 blocks[1] = else_bb;
1992 blocks[2] = join_bb;
1993 renumber_gimple_stmt_uids_in_blocks (blocks, 3);
1995 /* Check that there are no read-after-write or write-after-write dependencies
1996 in THEN_BB. */
1997 FOR_EACH_VEC_ELT (then_ddrs, i, ddr)
1998 {
1999 struct data_reference *dra = DDR_A (ddr);
2000 struct data_reference *drb = DDR_B (ddr);
2002 if (DDR_ARE_DEPENDENT (ddr) != chrec_known
2003 && ((DR_IS_READ (dra) && DR_IS_WRITE (drb)
2004 && gimple_uid (DR_STMT (dra)) > gimple_uid (DR_STMT (drb)))
2005 || (DR_IS_READ (drb) && DR_IS_WRITE (dra)
2006 && gimple_uid (DR_STMT (drb)) > gimple_uid (DR_STMT (dra)))
2007 || (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))))
2008 {
2009 free_dependence_relations (then_ddrs);
2010 free_dependence_relations (else_ddrs);
2011 free_data_refs (then_datarefs);
2012 free_data_refs (else_datarefs);
2013 return false;
2014 }
2015 }
2017 /* Check that there are no read-after-write or write-after-write dependencies
2018 in ELSE_BB. */
2019 FOR_EACH_VEC_ELT (else_ddrs, i, ddr)
2020 {
2021 struct data_reference *dra = DDR_A (ddr);
2022 struct data_reference *drb = DDR_B (ddr);
2024 if (DDR_ARE_DEPENDENT (ddr) != chrec_known
2025 && ((DR_IS_READ (dra) && DR_IS_WRITE (drb)
2026 && gimple_uid (DR_STMT (dra)) > gimple_uid (DR_STMT (drb)))
2027 || (DR_IS_READ (drb) && DR_IS_WRITE (dra)
2028 && gimple_uid (DR_STMT (drb)) > gimple_uid (DR_STMT (dra)))
2029 || (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))))
2030 {
2031 free_dependence_relations (then_ddrs);
2032 free_dependence_relations (else_ddrs);
2033 free_data_refs (then_datarefs);
2034 free_data_refs (else_datarefs);
2035 return false;
2036 }
2037 }
2039 /* Sink stores with same LHS. */
2040 FOR_EACH_VEC_ELT (then_stores, i, then_store)
2041 {
2042 else_store = else_stores[i];
2043 res = cond_if_else_store_replacement_1 (then_bb, else_bb, join_bb,
2044 then_store, else_store);
2045 ok = ok || res;
2046 }
2048 free_dependence_relations (then_ddrs);
2049 free_dependence_relations (else_ddrs);
2050 free_data_refs (then_datarefs);
2051 free_data_refs (else_datarefs);
2053 return ok;
2054 }
2056 /* Return TRUE if STMT has a VUSE whose corresponding VDEF is in BB. */
2058 static bool
2059 local_mem_dependence (gimple *stmt, basic_block bb)
2060 {
2061 tree vuse = gimple_vuse (stmt);
2062 gimple *def;
2064 if (!vuse)
2065 return false;
2067 def = SSA_NAME_DEF_STMT (vuse);
2068 return (def && gimple_bb (def) == bb);
2069 }
2071 /* Given a "diamond" control-flow pattern where BB0 tests a condition,
2072 BB1 and BB2 are "then" and "else" blocks dependent on this test,
2073 and BB3 rejoins control flow following BB1 and BB2, look for
2074 opportunities to hoist loads as follows. If BB3 contains a PHI of
2075 two loads, one each occurring in BB1 and BB2, and the loads are
2076 provably of adjacent fields in the same structure, then move both
2077 loads into BB0. Of course this can only be done if there are no
2078 dependencies preventing such motion.
2080 One of the hoisted loads will always be speculative, so the
2081 transformation is currently conservative:
2083 - The fields must be strictly adjacent.
2084 - The two fields must occupy a single memory block that is
2085 guaranteed to not cross a page boundary.
2087 The latter is difficult to prove, as such memory blocks should be
2088 aligned on the minimum of the stack alignment boundary and the
2089 alignment guaranteed by heap allocation interfaces. Thus we rely
2090 on a parameter for the alignment value.
2092 Provided a good value is used for the latter case, the first
2093 restriction could possibly be relaxed. */
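/* An illustrative C-level shape this function targets (a sketch; the
   struct and field names are hypothetical):
     struct node { struct node *left, *right; };
     x = cond ? n->left : n->right;
   LEFT and RIGHT are strictly adjacent fields of *n, so both loads
   can be hoisted above the branch; the speculative one stays within
   the same aligned block.  */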
2095 static void
2096 hoist_adjacent_loads (basic_block bb0, basic_block bb1,
2097 basic_block bb2, basic_block bb3)
2098 {
2099 int param_align = PARAM_VALUE (PARAM_L1_CACHE_LINE_SIZE);
2100 unsigned param_align_bits = (unsigned) (param_align * BITS_PER_UNIT);
2101 gphi_iterator gsi;
2103 /* Walk the phis in bb3 looking for an opportunity. We are looking
2104 for phis of two SSA names, one defined in bb1 and the other
2105 defined in bb2. */
2106 for (gsi = gsi_start_phis (bb3); !gsi_end_p (gsi); gsi_next (&gsi))
2107 {
2108 gphi *phi_stmt = gsi.phi ();
2109 gimple *def1, *def2;
2110 tree arg1, arg2, ref1, ref2, field1, field2;
2111 tree tree_offset1, tree_offset2, tree_size2, next;
2112 int offset1, offset2, size2;
2113 unsigned align1;
2114 gimple_stmt_iterator gsi2;
2115 basic_block bb_for_def1, bb_for_def2;
2117 if (gimple_phi_num_args (phi_stmt) != 2
2118 || virtual_operand_p (gimple_phi_result (phi_stmt)))
2119 continue;
2121 arg1 = gimple_phi_arg_def (phi_stmt, 0);
2122 arg2 = gimple_phi_arg_def (phi_stmt, 1);
2124 if (TREE_CODE (arg1) != SSA_NAME
2125 || TREE_CODE (arg2) != SSA_NAME
2126 || SSA_NAME_IS_DEFAULT_DEF (arg1)
2127 || SSA_NAME_IS_DEFAULT_DEF (arg2))
2128 continue;
2130 def1 = SSA_NAME_DEF_STMT (arg1);
2131 def2 = SSA_NAME_DEF_STMT (arg2);
2133 if ((gimple_bb (def1) != bb1 || gimple_bb (def2) != bb2)
2134 && (gimple_bb (def2) != bb1 || gimple_bb (def1) != bb2))
2135 continue;
2137 /* Check the mode of the arguments to be sure a conditional move
2138 can be generated for them. */
2139 if (optab_handler (movcc_optab, TYPE_MODE (TREE_TYPE (arg1)))
2140 == CODE_FOR_nothing)
2141 continue;
2143 /* Both statements must be assignments whose RHS is a COMPONENT_REF. */
2144 if (!gimple_assign_single_p (def1)
2145 || !gimple_assign_single_p (def2)
2146 || gimple_has_volatile_ops (def1)
2147 || gimple_has_volatile_ops (def2))
2148 continue;
2150 ref1 = gimple_assign_rhs1 (def1);
2151 ref2 = gimple_assign_rhs1 (def2);
2153 if (TREE_CODE (ref1) != COMPONENT_REF
2154 || TREE_CODE (ref2) != COMPONENT_REF)
2155 continue;
2157 /* The zeroth operand of the two component references must be
2158 identical. It is not sufficient to compare get_base_address of
2159 the two references, because this could allow for different
2160 elements of the same array in the two trees. It is not safe to
2161 assume that the existence of one array element implies the
2162 existence of a different one. */
2163 if (!operand_equal_p (TREE_OPERAND (ref1, 0), TREE_OPERAND (ref2, 0), 0))
2164 continue;
2166 field1 = TREE_OPERAND (ref1, 1);
2167 field2 = TREE_OPERAND (ref2, 1);
2169 /* Check for field adjacency, and ensure field1 comes first. */
2170 for (next = DECL_CHAIN (field1);
2171 next && TREE_CODE (next) != FIELD_DECL;
2172 next = DECL_CHAIN (next))
2173 ;
2175 if (next != field2)
2176 {
2177 for (next = DECL_CHAIN (field2);
2178 next && TREE_CODE (next) != FIELD_DECL;
2179 next = DECL_CHAIN (next))
2180 ;
2182 if (next != field1)
2183 continue;
2185 std::swap (field1, field2);
2186 std::swap (def1, def2);
2187 }
2189 bb_for_def1 = gimple_bb (def1);
2190 bb_for_def2 = gimple_bb (def2);
2192 /* Check for proper alignment of the first field. */
2193 tree_offset1 = bit_position (field1);
2194 tree_offset2 = bit_position (field2);
2195 tree_size2 = DECL_SIZE (field2);
2197 if (!tree_fits_uhwi_p (tree_offset1)
2198 || !tree_fits_uhwi_p (tree_offset2)
2199 || !tree_fits_uhwi_p (tree_size2))
2200 continue;
2202 offset1 = tree_to_uhwi (tree_offset1);
2203 offset2 = tree_to_uhwi (tree_offset2);
2204 size2 = tree_to_uhwi (tree_size2);
2205 align1 = DECL_ALIGN (field1) % param_align_bits;
2207 if (offset1 % BITS_PER_UNIT != 0)
2208 continue;
2210 /* For profitability, the two field references should fit within
2211 a single cache line. */
2212 if (align1 + offset2 - offset1 + size2 > param_align_bits)
2213 continue;
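/* A worked instance of the check above, with assumed numbers (not
   derived from any real target): for a 64-byte line,
   param_align_bits is 512; two adjacent 64-bit fields at bit
   offsets 0 and 64 with DECL_ALIGN (field1) == 64 give
   64 + 64 - 0 + 64 == 192 <= 512, so such a pair is kept. */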
2215 /* The two expressions cannot be dependent upon vdefs defined
2216 in bb1/bb2. */
2217 if (local_mem_dependence (def1, bb_for_def1)
2218 || local_mem_dependence (def2, bb_for_def2))
2219 continue;
2221 /* The conditions are satisfied; hoist the loads from bb1 and bb2 into
2222 bb0. We hoist the first one first so that a cache miss is handled
2223 efficiently regardless of hardware cache-fill policy. */
2224 gsi2 = gsi_for_stmt (def1);
2225 gsi_move_to_bb_end (&gsi2, bb0);
2226 gsi2 = gsi_for_stmt (def2);
2227 gsi_move_to_bb_end (&gsi2, bb0);
2229 if (dump_file && (dump_flags & TDF_DETAILS))
2230 {
2231 fprintf (dump_file,
2232 "\nHoisting adjacent loads from %d and %d into %d: \n",
2233 bb_for_def1->index, bb_for_def2->index, bb0->index);
2234 print_gimple_stmt (dump_file, def1, 0, TDF_VOPS|TDF_MEMSYMS);
2235 print_gimple_stmt (dump_file, def2, 0, TDF_VOPS|TDF_MEMSYMS);
2236 }
2237 }
2238 }
2240 /* Determine whether we should attempt to hoist adjacent loads out of
2241 diamond patterns in pass_phiopt. Always hoist loads if
2242 -fhoist-adjacent-loads is specified and the target machine has
2243 both a conditional move instruction and a defined cache line size. */
2245 static bool
2246 gate_hoist_loads (void)
2247 {
2248 return (flag_hoist_adjacent_loads == 1
2249 && PARAM_VALUE (PARAM_L1_CACHE_LINE_SIZE)
2250 && HAVE_conditional_move);
2251 }
2253 /* This pass tries to replace an if-then-else block with an
2254 assignment. We have four kinds of transformations. Some of these
2255 transformations are also performed by the ifcvt RTL optimizer.
2257 Conditional Replacement
2258 -----------------------
2260 This transformation, implemented in conditional_replacement,
2261 replaces
2263 bb0:
2264 if (cond) goto bb2; else goto bb1;
2265 bb1:
2266 bb2:
2267 x = PHI <0 (bb1), 1 (bb0), ...>;
2269 with
2271 bb0:
2272 x' = cond;
2273 goto bb2;
2274 bb2:
2275 x = PHI <x' (bb0), ...>;
2277 We remove bb1 as it becomes unreachable. This occurs often due to
2278 gimplification of conditionals.
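At the C level an illustrative source for this shape (a sketch, not
taken from this file) is:
x = cond ? 1 : 0;
which becomes a plain copy of the condition value, with no branch.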
2280 Value Replacement
2281 -----------------
2283 This transformation, implemented in value_replacement, replaces
2285 bb0:
2286 if (a != b) goto bb2; else goto bb1;
2287 bb1:
2288 bb2:
2289 x = PHI <a (bb1), b (bb0), ...>;
2291 with
2293 bb0:
2294 bb2:
2295 x = PHI <b (bb0), ...>;
2297 This opportunity can sometimes occur as a result of other
2298 optimizations.
2301 Another case caught by value replacement looks like this:
2303 bb0:
2304 t1 = a == CONST;
2305 t2 = b > c;
2306 t3 = t1 & t2;
2307 if (t3 != 0) goto bb1; else goto bb2;
2308 bb1:
2309 bb2:
2310 x = PHI <CONST, a>
2312 Gets replaced with:
2313 bb0:
2314 bb2:
2315 t1 = a == CONST;
2316 t2 = b > c;
2317 t3 = t1 & t2;
2318 x = a;
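At the C level an illustrative source for this second case (a
sketch, assuming the conjunction is evaluated as a bitwise AND of
the two tests) is:
x = (a == 9 && b > c) ? 9 : a;
on the taken path a equals 9, so both arms yield a and the result
is x = a regardless of the condition.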
2320 ABS Replacement
2321 ---------------
2323 This transformation, implemented in abs_replacement, replaces
2325 bb0:
2326 if (a >= 0) goto bb2; else goto bb1;
2327 bb1:
2328 x = -a;
2329 bb2:
2330 x = PHI <x (bb1), a (bb0), ...>;
2332 with
2334 bb0:
2335 x' = ABS_EXPR< a >;
2336 bb2:
2337 x = PHI <x' (bb0), ...>;
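At the C level this is the familiar idiom (an illustrative sketch):
x = (a >= 0) ? a : -a;
which collapses to a single ABS_EXPR with no control flow.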
2339 MIN/MAX Replacement
2340 -------------------
2342 This transformation, implemented in minmax_replacement, replaces
2344 bb0:
2345 if (a <= b) goto bb2; else goto bb1;
2346 bb1:
2347 bb2:
2348 x = PHI <b (bb1), a (bb0), ...>;
2350 with
2352 bb0:
2353 x' = MIN_EXPR (a, b)
2354 bb2:
2355 x = PHI <x' (bb0), ...>;
2357 A similar transformation is done for MAX_EXPR.
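At the C level these are the usual idioms (illustrative sketches):
min = (a <= b) ? a : b;
max = (a >= b) ? a : b;
each collapsing to a single MIN_EXPR or MAX_EXPR.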
2360 This pass also performs a fifth transformation of a slightly different
2361 flavor.
2363 Factor conversion in COND_EXPR
2364 ------------------------------
2366 This transformation factors the conversion out of COND_EXPR with
2367 factor_out_conditional_conversion.
2369 For example:
2370 if (a <= CST) goto <bb 3>; else goto <bb 4>;
2371 <bb 3>:
2372 tmp = (int) a;
2373 <bb 4>:
2374 tmp = PHI <tmp, CST>
2376 Into:
2377 if (a <= CST) goto <bb 3>; else goto <bb 4>;
2378 <bb 3>:
2379 <bb 4>:
2380 a = PHI <a, CST>
2381 tmp = (int) a;
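An illustrative C-level source for this shape (a sketch, assuming a
has a type narrower than int such as short, and the constant fits
that narrower type):
tmp = (a <= 42) ? (int) a : 42;
The cast is factored out of the arm so the PHI works on the narrow
type, followed by a single conversion.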
2383 Adjacent Load Hoisting
2384 ----------------------
2386 This transformation replaces
2388 bb0:
2389 if (...) goto bb2; else goto bb1;
2390 bb1:
2391 x1 = (<expr>).field1;
2392 goto bb3;
2393 bb2:
2394 x2 = (<expr>).field2;
2395 bb3:
2396 # x = PHI <x1, x2>;
2398 with
2400 bb0:
2401 x1 = (<expr>).field1;
2402 x2 = (<expr>).field2;
2403 if (...) goto bb2; else goto bb1;
2404 bb1:
2405 goto bb3;
2406 bb2:
2407 bb3:
2408 # x = PHI <x1, x2>;
2410 The purpose of this transformation is to enable generation of conditional
2411 move instructions such as Intel CMOV or PowerPC ISEL. Because one of
2412 the loads is speculative, the transformation is restricted to very
2413 specific cases to avoid introducing a page fault. We are looking for
2414 the common idiom:
2416 if (...)
2417 x = y->left;
2418 else
2419 x = y->right;
2421 where left and right are typically adjacent pointers in a tree structure. */
2423 namespace {
2425 const pass_data pass_data_phiopt =
2426 {
2427 GIMPLE_PASS, /* type */
2428 "phiopt", /* name */
2429 OPTGROUP_NONE, /* optinfo_flags */
2430 TV_TREE_PHIOPT, /* tv_id */
2431 ( PROP_cfg | PROP_ssa ), /* properties_required */
2432 0, /* properties_provided */
2433 0, /* properties_destroyed */
2434 0, /* todo_flags_start */
2435 0, /* todo_flags_finish */
2436 };
2438 class pass_phiopt : public gimple_opt_pass
2439 {
2440 public:
2441 pass_phiopt (gcc::context *ctxt)
2442 : gimple_opt_pass (pass_data_phiopt, ctxt)
2443 {}
2445 /* opt_pass methods: */
2446 opt_pass * clone () { return new pass_phiopt (m_ctxt); }
2447 virtual bool gate (function *) { return flag_ssa_phiopt; }
2448 virtual unsigned int execute (function *)
2449 {
2450 return tree_ssa_phiopt_worker (false, gate_hoist_loads ());
2451 }
2453 }; // class pass_phiopt
2455 } // anon namespace
2457 gimple_opt_pass *
2458 make_pass_phiopt (gcc::context *ctxt)
2459 {
2460 return new pass_phiopt (ctxt);
2461 }
2463 namespace {
2465 const pass_data pass_data_cselim =
2466 {
2467 GIMPLE_PASS, /* type */
2468 "cselim", /* name */
2469 OPTGROUP_NONE, /* optinfo_flags */
2470 TV_TREE_PHIOPT, /* tv_id */
2471 ( PROP_cfg | PROP_ssa ), /* properties_required */
2472 0, /* properties_provided */
2473 0, /* properties_destroyed */
2474 0, /* todo_flags_start */
2475 0, /* todo_flags_finish */
2476 };
2478 class pass_cselim : public gimple_opt_pass
2479 {
2480 public:
2481 pass_cselim (gcc::context *ctxt)
2482 : gimple_opt_pass (pass_data_cselim, ctxt)
2483 {}
2485 /* opt_pass methods: */
2486 virtual bool gate (function *) { return flag_tree_cselim; }
2487 virtual unsigned int execute (function *) { return tree_ssa_cs_elim (); }
2489 }; // class pass_cselim
2491 } // anon namespace
2493 gimple_opt_pass *
2494 make_pass_cselim (gcc::context *ctxt)
2495 {
2496 return new pass_cselim (ctxt);
2497 }