/* Optimization of PHI nodes by converting them into straightline code.
   Copyright (C) 2004-2016 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "insn-codes.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "tree-pass.h"
#include "ssa.h"
#include "optabs-tree.h"
#include "insn-config.h"
#include "gimple-pretty-print.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "cfganal.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "tree-cfg.h"
#include "tree-dfa.h"
#include "domwalk.h"
#include "cfgloop.h"
#include "tree-data-ref.h"
#include "tree-scalar-evolution.h"
#include "tree-inline.h"
#include "params.h"
static unsigned int tree_ssa_phiopt_worker (bool, bool);
static bool conditional_replacement (basic_block, basic_block,
				     edge, edge, gphi *, tree, tree);
static gphi *factor_out_conditional_conversion (edge, edge, gphi *, tree, tree);
static int value_replacement (basic_block, basic_block,
			      edge, edge, gimple *, tree, tree);
static bool minmax_replacement (basic_block, basic_block,
				edge, edge, gimple *, tree, tree);
static bool abs_replacement (basic_block, basic_block,
			     edge, edge, gimple *, tree, tree);
static bool cond_store_replacement (basic_block, basic_block, edge, edge,
				    hash_set<tree> *);
static bool cond_if_else_store_replacement (basic_block, basic_block, basic_block);
static hash_set<tree> * get_non_trapping ();
static void replace_phi_edge_with_variable (basic_block, edge, gimple *, tree);
static void hoist_adjacent_loads (basic_block, basic_block,
				  basic_block, basic_block);
static bool gate_hoist_loads (void);
/* This pass tries to transform conditional stores into unconditional
   ones, enabling further simplifications with the simpler then and else
   blocks.  In particular it replaces this:

     bb0:
       if (cond) goto bb2; else goto bb1;
     bb1:
       *p = RHS;
     bb2:

   with

     bb0:
       if (cond) goto bb1; else goto bb2;
     bb1:
       condtmp' = *p;
     bb2:
       condtmp = PHI <RHS, condtmp'>
       *p = condtmp;

   This transformation can only be done under several constraints,
   documented below.  It also replaces:

     bb0:
       if (cond) goto bb2; else goto bb1;
     bb1:
       *p = RHS1;
       goto bb3;
     bb2:
       *p = RHS2;
     bb3:

   with

     bb0:
       if (cond) goto bb3; else goto bb1;
     bb1:
     bb3:
       condtmp = PHI <RHS1, RHS2>
       *p = condtmp;  */
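/* As an illustration (a sketch, not part of the pass itself): at the C
   source level the first shape above corresponds roughly to rewriting

     if (!cond)
       *p = rhs;

   as

     tmp = cond ? *p : rhs;
     *p = tmp;

   which makes the store unconditional; this is only valid when *p is
   known not to trap, hence the non-trapping analysis below.  */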
static unsigned int
tree_ssa_cs_elim (void)
{
  unsigned todo;
  /* ??? We are not interested in loop related info, but the following
     will create it, ICEing as we didn't init loops with pre-headers.
     An interfacing issue of find_data_references_in_bb.  */
  loop_optimizer_init (LOOPS_NORMAL);
  scev_initialize ();
  todo = tree_ssa_phiopt_worker (true, false);
  scev_finalize ();
  loop_optimizer_finalize ();
  return todo;
}
/* Return the singleton PHI in the SEQ of PHIs for edges E0 and E1.  */
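/* For example (a hypothetical GIMPLE sketch): given the PHIs

     a_3 = PHI <x_1(e0), x_1(e1)>   (both edges pass the same value)
     b_4 = PHI <y_1(e0), z_2(e1)>   (arguments differ)

   b_4 is returned; were there a second PHI whose arguments also
   differ, NULL would be returned instead.  */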
static gphi *
single_non_singleton_phi_for_edges (gimple_seq seq, edge e0, edge e1)
{
  gimple_stmt_iterator i;
  gphi *phi = NULL;
  if (gimple_seq_singleton_p (seq))
    return as_a <gphi *> (gsi_stmt (gsi_start (seq)));
  for (i = gsi_start (seq); !gsi_end_p (i); gsi_next (&i))
    {
      gphi *p = as_a <gphi *> (gsi_stmt (i));
      /* If the PHI arguments are equal then we can skip this PHI. */
      if (operand_equal_for_phi_arg_p (gimple_phi_arg_def (p, e0->dest_idx),
				       gimple_phi_arg_def (p, e1->dest_idx)))
	continue;

      /* If we already have a PHI whose two edge arguments differ, then
	 there is no singleton for these PHIs; return NULL.  */
      if (phi)
	return NULL;

      phi = p;
    }
  return phi;
}
/* The core routine of conditional store replacement and normal
   phi optimizations.  Both share much of the infrastructure in how
   to match applicable basic block patterns.  DO_STORE_ELIM is true
   when we want to do conditional store replacement, false otherwise.
   DO_HOIST_LOADS is true when we want to hoist adjacent loads out
   of diamond control flow patterns, false otherwise.  */
static unsigned int
tree_ssa_phiopt_worker (bool do_store_elim, bool do_hoist_loads)
{
  basic_block bb;
  basic_block *bb_order;
  unsigned n, i;
  bool cfgchanged = false;
  hash_set<tree> *nontrap = 0;

  if (do_store_elim)
    /* Calculate the set of non-trapping memory accesses.  */
    nontrap = get_non_trapping ();

  /* Search every basic block for COND_EXPR we may be able to optimize.

     We walk the blocks in order that guarantees that a block with
     a single predecessor is processed before the predecessor.
     This ensures that we collapse inner ifs before visiting the
     outer ones, and also that we do not try to visit a removed
     block.  */
  bb_order = single_pred_before_succ_order ();
  n = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS;

  for (i = 0; i < n; i++)
    {
      gimple *cond_stmt;
      gphi *phi;
      basic_block bb1, bb2;
      edge e1, e2;
      tree arg0, arg1;

      bb = bb_order[i];

      cond_stmt = last_stmt (bb);
      /* Check to see if the last statement is a GIMPLE_COND.  */
      if (!cond_stmt
	  || gimple_code (cond_stmt) != GIMPLE_COND)
	continue;

      e1 = EDGE_SUCC (bb, 0);
      bb1 = e1->dest;
      e2 = EDGE_SUCC (bb, 1);
      bb2 = e2->dest;

      /* We cannot do the optimization on abnormal edges.  */
      if ((e1->flags & EDGE_ABNORMAL) != 0
	  || (e2->flags & EDGE_ABNORMAL) != 0)
	continue;

      /* Give up if bb2 is NULL or if either bb1 or bb2 has no successors.  */
      if (EDGE_COUNT (bb1->succs) == 0
	  || bb2 == NULL
	  || EDGE_COUNT (bb2->succs) == 0)
	continue;

      /* Find the bb which is the fall through to the other.  */
      if (EDGE_SUCC (bb1, 0)->dest == bb2)
	;
      else if (EDGE_SUCC (bb2, 0)->dest == bb1)
	{
	  std::swap (bb1, bb2);
	  std::swap (e1, e2);
	}
      else if (do_store_elim
	       && EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest)
	{
	  basic_block bb3 = EDGE_SUCC (bb1, 0)->dest;

	  if (!single_succ_p (bb1)
	      || (EDGE_SUCC (bb1, 0)->flags & EDGE_FALLTHRU) == 0
	      || !single_succ_p (bb2)
	      || (EDGE_SUCC (bb2, 0)->flags & EDGE_FALLTHRU) == 0
	      || EDGE_COUNT (bb3->preds) != 2)
	    continue;
	  if (cond_if_else_store_replacement (bb1, bb2, bb3))
	    cfgchanged = true;
	  continue;
	}
      else if (do_hoist_loads
	       && EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest)
	{
	  basic_block bb3 = EDGE_SUCC (bb1, 0)->dest;

	  if (!FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (cond_stmt)))
	      && single_succ_p (bb1)
	      && single_succ_p (bb2)
	      && single_pred_p (bb1)
	      && single_pred_p (bb2)
	      && EDGE_COUNT (bb->succs) == 2
	      && EDGE_COUNT (bb3->preds) == 2
	      /* If one edge or the other is dominant, a conditional move
		 is likely to perform worse than the well-predicted branch.  */
	      && !predictable_edge_p (EDGE_SUCC (bb, 0))
	      && !predictable_edge_p (EDGE_SUCC (bb, 1)))
	    hoist_adjacent_loads (bb, bb1, bb2, bb3);
	  continue;
	}
      else
	continue;

      e1 = EDGE_SUCC (bb1, 0);

      /* Make sure that bb1 is just a fall through.  */
      if (!single_succ_p (bb1)
	  || (e1->flags & EDGE_FALLTHRU) == 0)
	continue;

      /* Also make sure that bb1 only has one predecessor and that it
	 is bb.  */
      if (!single_pred_p (bb1)
	  || single_pred (bb1) != bb)
	continue;

      if (do_store_elim)
	{
	  /* bb1 is the middle block, bb2 the join block, bb the split block,
	     e1 the fallthrough edge from bb1 to bb2.  We can't do the
	     optimization if the join block has more than two predecessors.  */
	  if (EDGE_COUNT (bb2->preds) > 2)
	    continue;
	  if (cond_store_replacement (bb1, bb2, e1, e2, nontrap))
	    cfgchanged = true;
	}
      else
	{
	  gimple_seq phis = phi_nodes (bb2);
	  gimple_stmt_iterator gsi;
	  bool candorest = true;

	  /* Value replacement can work with more than one PHI
	     so try that first. */
	  for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
	    {
	      phi = as_a <gphi *> (gsi_stmt (gsi));
	      arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
	      arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
	      if (value_replacement (bb, bb1, e1, e2, phi, arg0, arg1) == 2)
		{
		  candorest = false;
		  cfgchanged = true;
		  break;
		}
	    }

	  if (!candorest)
	    continue;

	  phi = single_non_singleton_phi_for_edges (phis, e1, e2);
	  if (!phi)
	    continue;

	  arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
	  arg1 = gimple_phi_arg_def (phi, e2->dest_idx);

	  /* Something is wrong if we cannot find the arguments in the PHI
	     node.  */
	  gcc_assert (arg0 != NULL_TREE && arg1 != NULL_TREE);

	  gphi *newphi = factor_out_conditional_conversion (e1, e2, phi,
							    arg0, arg1);
	  if (newphi != NULL)
	    {
	      phi = newphi;
	      /* factor_out_conditional_conversion may create a new PHI in
		 BB2 and eliminate an existing PHI in BB2.  Recompute values
		 that may be affected by that change.  */
	      arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
	      arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
	      gcc_assert (arg0 != NULL_TREE && arg1 != NULL_TREE);
	    }

	  /* Do the replacement of conditional if it can be done.  */
	  if (conditional_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
	    cfgchanged = true;
	  else if (abs_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
	    cfgchanged = true;
	  else if (minmax_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
	    cfgchanged = true;
	}
    }

  free (bb_order);

  if (do_store_elim)
    delete nontrap;
  /* If the CFG has changed, we should cleanup the CFG.  */
  if (cfgchanged && do_store_elim)
    {
      /* In cond-store replacement we have added some loads on edges
	 and new VOPS (as we moved the store, and created a load).  */
      gsi_commit_edge_inserts ();
      return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
    }
  else if (cfgchanged)
    return TODO_cleanup_cfg;
  return 0;
}
/* Replace PHI node element whose edge is E in block BB with variable NEW.
   Remove the edge from COND_BLOCK which does not lead to BB (COND_BLOCK
   is known to have two edges, one of which must reach BB).  */
static void
replace_phi_edge_with_variable (basic_block cond_block,
				edge e, gimple *phi, tree new_tree)
{
  basic_block bb = gimple_bb (phi);
  basic_block block_to_remove;
  gimple_stmt_iterator gsi;

  /* Change the PHI argument to new.  */
  SET_USE (PHI_ARG_DEF_PTR (phi, e->dest_idx), new_tree);

  /* Remove the empty basic block.  */
  if (EDGE_SUCC (cond_block, 0)->dest == bb)
    {
      EDGE_SUCC (cond_block, 0)->flags |= EDGE_FALLTHRU;
      EDGE_SUCC (cond_block, 0)->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
      EDGE_SUCC (cond_block, 0)->probability = REG_BR_PROB_BASE;
      EDGE_SUCC (cond_block, 0)->count += EDGE_SUCC (cond_block, 1)->count;

      block_to_remove = EDGE_SUCC (cond_block, 1)->dest;
    }
  else
    {
      EDGE_SUCC (cond_block, 1)->flags |= EDGE_FALLTHRU;
      EDGE_SUCC (cond_block, 1)->flags
	&= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
      EDGE_SUCC (cond_block, 1)->probability = REG_BR_PROB_BASE;
      EDGE_SUCC (cond_block, 1)->count += EDGE_SUCC (cond_block, 0)->count;

      block_to_remove = EDGE_SUCC (cond_block, 0)->dest;
    }
  delete_basic_block (block_to_remove);

  /* Eliminate the COND_EXPR at the end of COND_BLOCK.  */
  gsi = gsi_last_bb (cond_block);
  gsi_remove (&gsi, true);

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file,
	     "COND_EXPR in block %d and PHI in block %d converted to straightline code.\n",
	     cond_block->index,
	     bb->index);
}
/* PR66726: Factor conversion out of COND_EXPR.  If the arguments of the PHI
   stmt are conversions, factor the conversion out and perform the conversion
   on the result of the PHI stmt instead.  Return the newly-created PHI, if
   any.  */
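/* A hypothetical source-level sketch of that transformation:

     x = cond ? (int) a : (int) b;

   becomes

     tmp = cond ? a : b;
     x = (int) tmp;

   so only one conversion remains.  */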
static gphi *
factor_out_conditional_conversion (edge e0, edge e1, gphi *phi,
				   tree arg0, tree arg1)
{
  gimple *arg0_def_stmt = NULL, *arg1_def_stmt = NULL, *new_stmt;
  tree new_arg0 = NULL_TREE, new_arg1 = NULL_TREE;
  tree temp, result;
  gphi *newphi;
  gimple_stmt_iterator gsi, gsi_for_def;
  source_location locus = gimple_location (phi);
  enum tree_code convert_code;

  /* Handle only PHI statements with two arguments.  TODO: If all
     other arguments to PHI are INTEGER_CST or if their defining
     statements have the same unary operation, we can handle more
     than two arguments too.  */
  if (gimple_phi_num_args (phi) != 2)
    return NULL;

  /* First canonicalize to simplify tests.  */
  if (TREE_CODE (arg0) != SSA_NAME)
    {
      std::swap (arg0, arg1);
      std::swap (e0, e1);
    }

  if (TREE_CODE (arg0) != SSA_NAME
      || (TREE_CODE (arg1) != SSA_NAME
	  && TREE_CODE (arg1) != INTEGER_CST))
    return NULL;

  /* Check if arg0 is an SSA_NAME and the stmt which defines arg0 is
     a conversion.  */
  arg0_def_stmt = SSA_NAME_DEF_STMT (arg0);
  if (!is_gimple_assign (arg0_def_stmt)
      || !gimple_assign_cast_p (arg0_def_stmt))
    return NULL;

  /* Use the RHS as new_arg0.  */
  convert_code = gimple_assign_rhs_code (arg0_def_stmt);
  new_arg0 = gimple_assign_rhs1 (arg0_def_stmt);
  if (convert_code == VIEW_CONVERT_EXPR)
    new_arg0 = TREE_OPERAND (new_arg0, 0);

  if (TREE_CODE (arg1) == SSA_NAME)
    {
      /* Check if arg1 is an SSA_NAME and the stmt which defines arg1
	 is a conversion.  */
      arg1_def_stmt = SSA_NAME_DEF_STMT (arg1);
      if (!is_gimple_assign (arg1_def_stmt)
	  || gimple_assign_rhs_code (arg1_def_stmt) != convert_code)
	return NULL;

      /* Use the RHS as new_arg1.  */
      new_arg1 = gimple_assign_rhs1 (arg1_def_stmt);
      if (convert_code == VIEW_CONVERT_EXPR)
	new_arg1 = TREE_OPERAND (new_arg1, 0);
    }
  else
    {
      /* If arg1 is an INTEGER_CST, fold it to new type.  */
      if (INTEGRAL_TYPE_P (TREE_TYPE (new_arg0))
	  && int_fits_type_p (arg1, TREE_TYPE (new_arg0)))
	{
	  if (gimple_assign_cast_p (arg0_def_stmt))
	    new_arg1 = fold_convert (TREE_TYPE (new_arg0), arg1);
	  else
	    return NULL;
	}
      else
	return NULL;
    }

  /* If arg0/arg1 have > 1 use, then this transformation actually increases
     the number of expressions evaluated at runtime.  */
  if (!has_single_use (arg0)
      || (arg1_def_stmt && !has_single_use (arg1)))
    return NULL;

  /* If types of new_arg0 and new_arg1 are different bailout.  */
  if (!types_compatible_p (TREE_TYPE (new_arg0), TREE_TYPE (new_arg1)))
    return NULL;

  /* Create a new PHI stmt.  */
  result = PHI_RESULT (phi);
  temp = make_ssa_name (TREE_TYPE (new_arg0), NULL);
  newphi = create_phi_node (temp, gimple_bb (phi));

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "PHI ");
      print_generic_expr (dump_file, gimple_phi_result (phi), 0);
      fprintf (dump_file,
	       " changed to factor conversion out from COND_EXPR.\n");
      fprintf (dump_file, "New stmt with CAST that defines ");
      print_generic_expr (dump_file, result, 0);
      fprintf (dump_file, ".\n");
    }

  /* Remove the old cast(s) that have a single use.  */
  gsi_for_def = gsi_for_stmt (arg0_def_stmt);
  gsi_remove (&gsi_for_def, true);
  release_defs (arg0_def_stmt);

  if (arg1_def_stmt)
    {
      gsi_for_def = gsi_for_stmt (arg1_def_stmt);
      gsi_remove (&gsi_for_def, true);
      release_defs (arg1_def_stmt);
    }

  add_phi_arg (newphi, new_arg0, e0, locus);
  add_phi_arg (newphi, new_arg1, e1, locus);

  /* Create the conversion stmt and insert it.  */
  if (convert_code == VIEW_CONVERT_EXPR)
    temp = fold_build1 (VIEW_CONVERT_EXPR, TREE_TYPE (result), temp);
  new_stmt = gimple_build_assign (result, convert_code, temp);
  gsi = gsi_after_labels (gimple_bb (phi));
  gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);

  /* Remove the original PHI stmt.  */
  gsi = gsi_for_stmt (phi);
  gsi_remove (&gsi, true);
  return newphi;
}
/* The function conditional_replacement does the main work of doing the
   conditional replacement.  Return true if the replacement is done.
   Otherwise return false.
   BB is the basic block where the replacement is going to be done.  ARG0
   is argument 0 from PHI.  Likewise for ARG1.  */
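/* For instance (a sketch at the source level):

     x = (a == b) ? 1 : 0;    becomes    x = a == b;
     x = (a == b) ? 0 : 1;    becomes    x = a != b;   (condition inverted)
     x = (a == b) ? -1 : 0;   becomes    x = -(a == b);

   modulo the conversions needed to reach the type of x.  */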
static bool
conditional_replacement (basic_block cond_bb, basic_block middle_bb,
			 edge e0, edge e1, gphi *phi,
			 tree arg0, tree arg1)
{
  tree result;
  gimple *stmt;
  gassign *new_stmt;
  tree cond;
  gimple_stmt_iterator gsi;
  edge true_edge, false_edge;
  tree new_var, new_var2;
  bool neg;

  /* FIXME: Gimplification of complex type is too hard for now.  */
  /* We aren't prepared to handle vectors either (and it is a question
     if it would be worthwhile anyway).  */
  if (!(INTEGRAL_TYPE_P (TREE_TYPE (arg0))
	|| POINTER_TYPE_P (TREE_TYPE (arg0)))
      || !(INTEGRAL_TYPE_P (TREE_TYPE (arg1))
	   || POINTER_TYPE_P (TREE_TYPE (arg1))))
    return false;

  /* If the PHI arguments have the constants 0 and 1, or 0 and -1, then
     convert it to the conditional.  */
  if ((integer_zerop (arg0) && integer_onep (arg1))
      || (integer_zerop (arg1) && integer_onep (arg0)))
    neg = false;
  else if ((integer_zerop (arg0) && integer_all_onesp (arg1))
	   || (integer_zerop (arg1) && integer_all_onesp (arg0)))
    neg = true;
  else
    return false;

  if (!empty_block_p (middle_bb))
    return false;

  /* At this point we know we have a GIMPLE_COND with two successors.
     One successor is BB, the other successor is an empty block which
     falls through into BB.

     There is a single PHI node at the join point (BB) and its arguments
     are constants (0, 1) or (0, -1).

     So, given the condition COND, and the two PHI arguments, we can
     rewrite this PHI into non-branching code:

       dest = (COND) or dest = COND'

     We use the condition as-is if the argument associated with the
     true edge has the value one or the argument associated with the
     false edge has the value zero.  Note that those conditions are not
     the same since only one of the outgoing edges from the GIMPLE_COND
     will directly reach BB and thus be associated with an argument.  */

  stmt = last_stmt (cond_bb);
  result = PHI_RESULT (phi);

  /* To handle special cases like floating point comparison, it is easier and
     less error-prone to build a tree and gimplify it on the fly though it is
     less efficient.  */
  cond = fold_build2_loc (gimple_location (stmt),
			  gimple_cond_code (stmt), boolean_type_node,
			  gimple_cond_lhs (stmt), gimple_cond_rhs (stmt));

  /* We need to know which is the true edge and which is the false
     edge so that we know when to invert the condition below.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
  if ((e0 == true_edge && integer_zerop (arg0))
      || (e0 == false_edge && !integer_zerop (arg0))
      || (e1 == true_edge && integer_zerop (arg1))
      || (e1 == false_edge && !integer_zerop (arg1)))
    cond = fold_build1_loc (gimple_location (stmt),
			    TRUTH_NOT_EXPR, TREE_TYPE (cond), cond);

  if (neg)
    {
      cond = fold_convert_loc (gimple_location (stmt),
			       TREE_TYPE (result), cond);
      cond = fold_build1_loc (gimple_location (stmt),
			      NEGATE_EXPR, TREE_TYPE (cond), cond);
    }

  /* Insert our new statements at the end of conditional block before the
     COND_STMT.  */
  gsi = gsi_for_stmt (stmt);
  new_var = force_gimple_operand_gsi (&gsi, cond, true, NULL, true,
				      GSI_SAME_STMT);

  if (!useless_type_conversion_p (TREE_TYPE (result), TREE_TYPE (new_var)))
    {
      source_location locus_0, locus_1;

      new_var2 = make_ssa_name (TREE_TYPE (result));
      new_stmt = gimple_build_assign (new_var2, CONVERT_EXPR, new_var);
      gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
      new_var = new_var2;

      /* Set the locus to the first argument, unless it doesn't have one.  */
      locus_0 = gimple_phi_arg_location (phi, 0);
      locus_1 = gimple_phi_arg_location (phi, 1);
      if (locus_0 == UNKNOWN_LOCATION)
	locus_0 = locus_1;
      gimple_set_location (new_stmt, locus_0);
    }

  replace_phi_edge_with_variable (cond_bb, e1, phi, new_var);
  reset_flow_sensitive_info_in_bb (cond_bb);

  /* Note that we optimized this PHI.  */
  return true;
}
/* Update *ARG which is defined in STMT so that it contains the
   computed value if that seems profitable.  Return true if the
   statement is made dead by that rewriting.  */
static bool
jump_function_from_stmt (tree *arg, gimple *stmt)
{
  enum tree_code code = gimple_assign_rhs_code (stmt);
  if (code == ADDR_EXPR)
    {
      /* For arg = &p->i transform it to p, if possible.  */
      tree rhs1 = gimple_assign_rhs1 (stmt);
      HOST_WIDE_INT offset;
      tree tem = get_addr_base_and_unit_offset (TREE_OPERAND (rhs1, 0),
						&offset);
      if (tem
	  && TREE_CODE (tem) == MEM_REF
	  && (mem_ref_offset (tem) + offset) == 0)
	{
	  *arg = TREE_OPERAND (tem, 0);
	  return true;
	}
    }
  /* TODO: Much like IPA-CP jump-functions we want to handle constant
     additions symbolically here, and we'd need to update the comparison
     code that compares the arg + cst tuples in our caller.  For now the
     code above exactly handles the VEC_BASE pattern from vec.h.  */
  return false;
}
/* RHS is a source argument in a BIT_AND_EXPR which feeds a conditional
   of the form SSA_NAME NE 0.

   If RHS is fed by a simple EQ_EXPR comparison of two values, see if
   the two input values of the EQ_EXPR match arg0 and arg1.

   If so update *code and return TRUE.  Otherwise return FALSE.  */
static bool
rhs_is_fed_for_value_replacement (const_tree arg0, const_tree arg1,
				  enum tree_code *code, const_tree rhs)
{
  /* Obviously if RHS is not an SSA_NAME, we can't look at the defining
     statement.  */
  if (TREE_CODE (rhs) == SSA_NAME)
    {
      gimple *def1 = SSA_NAME_DEF_STMT (rhs);

      /* Verify the defining statement has an EQ_EXPR on the RHS.  */
      if (is_gimple_assign (def1) && gimple_assign_rhs_code (def1) == EQ_EXPR)
	{
	  /* Finally verify the source operands of the EQ_EXPR are equal
	     to arg0 and arg1.  */
	  tree op0 = gimple_assign_rhs1 (def1);
	  tree op1 = gimple_assign_rhs2 (def1);
	  if ((operand_equal_for_phi_arg_p (arg0, op0)
	       && operand_equal_for_phi_arg_p (arg1, op1))
	      || (operand_equal_for_phi_arg_p (arg0, op1)
		  && operand_equal_for_phi_arg_p (arg1, op0)))
	    {
	      /* We will perform the optimization.  */
	      *code = gimple_assign_rhs_code (def1);
	      return true;
	    }
	}
    }
  return false;
}
/* Return TRUE if arg0/arg1 are equal to the rhs/lhs or lhs/rhs of COND.

   Also return TRUE if arg0/arg1 are equal to the source arguments of an
   EQ comparison feeding a BIT_AND_EXPR which feeds COND.

   Return FALSE otherwise.  */
static bool
operand_equal_for_value_replacement (const_tree arg0, const_tree arg1,
				     enum tree_code *code, gimple *cond)
{
  gimple *def;
  tree lhs = gimple_cond_lhs (cond);
  tree rhs = gimple_cond_rhs (cond);

  if ((operand_equal_for_phi_arg_p (arg0, lhs)
       && operand_equal_for_phi_arg_p (arg1, rhs))
      || (operand_equal_for_phi_arg_p (arg1, lhs)
	  && operand_equal_for_phi_arg_p (arg0, rhs)))
    return true;

  /* Now handle more complex case where we have an EQ comparison
     which feeds a BIT_AND_EXPR which feeds COND.

     First verify that COND is of the form SSA_NAME NE 0.  */
  if (*code != NE_EXPR || !integer_zerop (rhs)
      || TREE_CODE (lhs) != SSA_NAME)
    return false;

  /* Now ensure that SSA_NAME is set by a BIT_AND_EXPR.  */
  def = SSA_NAME_DEF_STMT (lhs);
  if (!is_gimple_assign (def) || gimple_assign_rhs_code (def) != BIT_AND_EXPR)
    return false;

  /* Now verify arg0/arg1 correspond to the source arguments of an
     EQ comparison feeding the BIT_AND_EXPR.  */

  tree tmp = gimple_assign_rhs1 (def);
  if (rhs_is_fed_for_value_replacement (arg0, arg1, code, tmp))
    return true;

  tmp = gimple_assign_rhs2 (def);
  if (rhs_is_fed_for_value_replacement (arg0, arg1, code, tmp))
    return true;

  return false;
}
/* Returns true if ARG is a neutral element for operation CODE
   on the RIGHT side.  */
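/* E.g. x + 0, x | 0, x ^ 0, x * 1 and x & -1 all equal x on either
   side; x - 0, x >> 0 and x / 1 equal x only when the neutral element
   is the right operand (0 - x, 0 >> x and 1 / x are different), hence
   the RIGHT flag.  */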
static bool
neutral_element_p (tree_code code, tree arg, bool right)
{
  switch (code)
    {
    case PLUS_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
      return integer_zerop (arg);

    case LROTATE_EXPR:
    case RROTATE_EXPR:
    case LSHIFT_EXPR:
    case RSHIFT_EXPR:
    case MINUS_EXPR:
    case POINTER_PLUS_EXPR:
      return right && integer_zerop (arg);

    case MULT_EXPR:
      return integer_onep (arg);

    case TRUNC_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case EXACT_DIV_EXPR:
      return right && integer_onep (arg);

    case BIT_AND_EXPR:
      return integer_all_onesp (arg);

    default:
      return false;
    }
}
/* Returns true if ARG is an absorbing element for operation CODE.  */
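/* E.g. x * 0 and x & 0 are always 0, and x | -1 is always -1,
   regardless of x.  */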
static bool
absorbing_element_p (tree_code code, tree arg)
{
  switch (code)
    {
    case BIT_IOR_EXPR:
      return integer_all_onesp (arg);

    case MULT_EXPR:
    case BIT_AND_EXPR:
      return integer_zerop (arg);

    default:
      return false;
    }
}
/* The function value_replacement does the main work of doing the value
   replacement.  Return non-zero if the replacement is done.  Otherwise return
   0.  If we remove the middle basic block, return 2.
   BB is the basic block where the replacement is going to be done.  ARG0
   is argument 0 from the PHI.  Likewise for ARG1.  */
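/* Two sketches of what this catches at the source level:

     x = (a != b) ? a : b;        simplifies to    x = a;
     y = (a != 0) ? a + b : b;    simplifies to    y = a + b;

   the second one relying on 0 being a neutral element of '+'.  */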
static int
value_replacement (basic_block cond_bb, basic_block middle_bb,
		   edge e0, edge e1, gimple *phi,
		   tree arg0, tree arg1)
{
  gimple_stmt_iterator gsi;
  gimple *cond;
  edge true_edge, false_edge;
  enum tree_code code;
  bool empty_or_with_defined_p = true;

  /* If the type says honor signed zeros we cannot do this
     optimization.  */
  if (HONOR_SIGNED_ZEROS (arg1))
    return 0;

  /* If there is a statement in MIDDLE_BB that defines one of the PHI
     arguments, then adjust arg0 or arg1.  */
  gsi = gsi_start_nondebug_after_labels_bb (middle_bb);
  while (!gsi_end_p (gsi))
    {
      gimple *stmt = gsi_stmt (gsi);
      tree lhs;
      gsi_next_nondebug (&gsi);
      if (!is_gimple_assign (stmt))
	{
	  empty_or_with_defined_p = false;
	  continue;
	}
      /* Now try to adjust arg0 or arg1 according to the computation
	 in the statement.  */
      lhs = gimple_assign_lhs (stmt);
      if (!(lhs == arg0
	    && jump_function_from_stmt (&arg0, stmt))
	  || (lhs == arg1
	      && jump_function_from_stmt (&arg1, stmt)))
	empty_or_with_defined_p = false;
    }

  cond = last_stmt (cond_bb);
  code = gimple_cond_code (cond);

  /* This transformation is only valid for equality comparisons.  */
  if (code != NE_EXPR && code != EQ_EXPR)
    return 0;

  /* We need to know which is the true edge and which is the false
     edge so that we know if we have abs or negative abs.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);

  /* At this point we know we have a COND_EXPR with two successors.
     One successor is BB, the other successor is an empty block which
     falls through into BB.

     The condition for the COND_EXPR is known to be NE_EXPR or EQ_EXPR.

     There is a single PHI node at the join point (BB) with two arguments.

     We now need to verify that the two arguments in the PHI node match
     the two arguments to the equality comparison.  */

  if (operand_equal_for_value_replacement (arg0, arg1, &code, cond))
    {
      edge e;
      tree arg;

      /* For NE_EXPR, we want to build an assignment result = arg where
	 arg is the PHI argument associated with the true edge.  For
	 EQ_EXPR we want the PHI argument associated with the false edge.  */
      e = (code == NE_EXPR ? true_edge : false_edge);

      /* Unfortunately, E may not reach BB (it may instead have gone to
	 OTHER_BLOCK).  If that is the case, then we want the single outgoing
	 edge from OTHER_BLOCK which reaches BB and represents the desired
	 path from COND_BLOCK.  */
      if (e->dest == middle_bb)
	e = single_succ_edge (e->dest);

      /* Now we know the incoming edge to BB that has the argument for the
	 RHS of our new assignment statement.  */
      if (e0 == e)
	arg = arg0;
      else
	arg = arg1;

      /* If the middle basic block was empty or is defining the
	 PHI arguments and this is a single phi where the args are different
	 for the edges e0 and e1 then we can remove the middle basic block.  */
      if (empty_or_with_defined_p
	  && single_non_singleton_phi_for_edges (phi_nodes (gimple_bb (phi)),
						 e0, e1) == phi)
	{
	  replace_phi_edge_with_variable (cond_bb, e1, phi, arg);
	  /* Note that we optimized this PHI.  */
	  return 2;
	}
      else
	{
	  /* Replace the PHI arguments with arg.  */
	  SET_PHI_ARG_DEF (phi, e0->dest_idx, arg);
	  SET_PHI_ARG_DEF (phi, e1->dest_idx, arg);
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "PHI ");
	      print_generic_expr (dump_file, gimple_phi_result (phi), 0);
	      fprintf (dump_file, " reduced for COND_EXPR in block %d to ",
		       cond_bb->index);
	      print_generic_expr (dump_file, arg, 0);
	      fprintf (dump_file, ".\n");
	    }
	  return 1;
	}
    }

  /* Now optimize (x != 0) ? x + y : y to just x + y.
     The following condition is too restrictive, there can easily be another
     stmt in middle_bb, for instance a CONVERT_EXPR for the second argument.  */
  gimple *assign = last_and_only_stmt (middle_bb);
  if (!assign || gimple_code (assign) != GIMPLE_ASSIGN
      || gimple_assign_rhs_class (assign) != GIMPLE_BINARY_RHS
      || (!INTEGRAL_TYPE_P (TREE_TYPE (arg0))
	  && !POINTER_TYPE_P (TREE_TYPE (arg0))))
    return 0;

  /* Punt if there are (degenerate) PHIs in middle_bb, there should not be.  */
  if (!gimple_seq_empty_p (phi_nodes (middle_bb)))
    return 0;

  /* Only transform if it removes the condition.  */
  if (!single_non_singleton_phi_for_edges (phi_nodes (gimple_bb (phi)), e0, e1))
    return 0;

  /* Size-wise, this is always profitable.  */
  if (optimize_bb_for_speed_p (cond_bb)
      /* The special case is useless if it has a low probability.  */
      && profile_status_for_fn (cfun) != PROFILE_ABSENT
      && EDGE_PRED (middle_bb, 0)->probability < PROB_EVEN
      /* If assign is cheap, there is no point avoiding it.  */
      && estimate_num_insns (assign, &eni_time_weights)
	 >= 3 * estimate_num_insns (cond, &eni_time_weights))
    return 0;

  tree lhs = gimple_assign_lhs (assign);
  tree rhs1 = gimple_assign_rhs1 (assign);
  tree rhs2 = gimple_assign_rhs2 (assign);
  enum tree_code code_def = gimple_assign_rhs_code (assign);
  tree cond_lhs = gimple_cond_lhs (cond);
  tree cond_rhs = gimple_cond_rhs (cond);

  if (((code == NE_EXPR && e1 == false_edge)
       || (code == EQ_EXPR && e1 == true_edge))
      && arg0 == lhs
      && ((arg1 == rhs1
	   && operand_equal_for_phi_arg_p (rhs2, cond_lhs)
	   && neutral_element_p (code_def, cond_rhs, true))
	  || (arg1 == rhs2
	      && operand_equal_for_phi_arg_p (rhs1, cond_lhs)
	      && neutral_element_p (code_def, cond_rhs, false))
	  || (operand_equal_for_phi_arg_p (arg1, cond_rhs)
	      && (operand_equal_for_phi_arg_p (rhs2, cond_lhs)
		  || operand_equal_for_phi_arg_p (rhs1, cond_lhs))
	      && absorbing_element_p (code_def, cond_rhs))))
    {
      gsi = gsi_for_stmt (cond);
      if (INTEGRAL_TYPE_P (TREE_TYPE (lhs)))
	{
	  /* Moving ASSIGN might change VR of lhs, e.g. when moving u_6
	     def-stmt in:
	       if (n_5 != 0)
		 goto <bb 3>;
	       else
		 goto <bb 4>;

	       <bb 3>:
	       # RANGE [0, 4294967294]
	       u_6 = n_5 + 4294967295;

	       <bb 4>:
	       # u_3 = PHI <u_6(3), 4294967295(2)>  */
	  SSA_NAME_RANGE_INFO (lhs) = NULL;
	  /* If available, we can use VR of phi result at least.  */
	  tree phires = gimple_phi_result (phi);
	  struct range_info_def *phires_range_info
	    = SSA_NAME_RANGE_INFO (phires);
	  if (phires_range_info)
	    duplicate_ssa_name_range_info (lhs, SSA_NAME_RANGE_TYPE (phires),
					   phires_range_info);
	}
      gimple_stmt_iterator gsi_from = gsi_for_stmt (assign);
      gsi_move_before (&gsi_from, &gsi);
      replace_phi_edge_with_variable (cond_bb, e1, phi, lhs);
      return 2;
    }

  return 0;
}
/* The function minmax_replacement does the main work of doing the minmax
   replacement.  Return true if the replacement is done.  Otherwise return
   false.
   BB is the basic block where the replacement is going to be done.  ARG0
   is argument 0 from the PHI.  Likewise for ARG1.  */
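/* The simplest instance, as a sketch:

     x = (a < b) ? a : b;    becomes    x = MIN_EXPR <a, b>;

   and with an extra MIN/MAX in the middle block the routine also
   recognizes clamping patterns such as MIN (MAX (a, lo), hi).  */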
static bool
minmax_replacement (basic_block cond_bb, basic_block middle_bb,
		    edge e0, edge e1, gimple *phi,
		    tree arg0, tree arg1)
{
  tree result, type;
  gcond *cond;
  gassign *new_stmt;
  edge true_edge, false_edge;
  enum tree_code cmp, minmax, ass_code;
  tree smaller, alt_smaller, larger, alt_larger, arg_true, arg_false;
  gimple_stmt_iterator gsi, gsi_from;

  type = TREE_TYPE (PHI_RESULT (phi));

  /* The optimization may be unsafe due to NaNs.  */
  if (HONOR_NANS (type))
    return false;

  cond = as_a <gcond *> (last_stmt (cond_bb));
  cmp = gimple_cond_code (cond);

  /* This transformation is only valid for order comparisons.  Record which
     operand is smaller/larger if the result of the comparison is true.  */
  alt_smaller = NULL_TREE;
  alt_larger = NULL_TREE;
  if (cmp == LT_EXPR || cmp == LE_EXPR)
    {
      smaller = gimple_cond_lhs (cond);
      larger = gimple_cond_rhs (cond);
      /* If we have smaller < CST it is equivalent to smaller <= CST-1.
	 Likewise smaller <= CST is equivalent to smaller < CST+1.  */
      if (TREE_CODE (larger) == INTEGER_CST)
	{
	  if (cmp == LT_EXPR)
	    {
	      bool overflow;
	      wide_int alt = wi::sub (larger, 1, TYPE_SIGN (TREE_TYPE (larger)),
				      &overflow);
	      if (! overflow)
		alt_larger = wide_int_to_tree (TREE_TYPE (larger), alt);
	    }
	  else
	    {
	      bool overflow;
	      wide_int alt = wi::add (larger, 1, TYPE_SIGN (TREE_TYPE (larger)),
				      &overflow);
	      if (! overflow)
		alt_larger = wide_int_to_tree (TREE_TYPE (larger), alt);
	    }
	}
    }
  else if (cmp == GT_EXPR || cmp == GE_EXPR)
    {
      smaller = gimple_cond_rhs (cond);
      larger = gimple_cond_lhs (cond);
      /* If we have larger > CST it is equivalent to larger >= CST+1.
	 Likewise larger >= CST is equivalent to larger > CST-1.  */
      if (TREE_CODE (smaller) == INTEGER_CST)
	{
	  if (cmp == GT_EXPR)
	    {
	      bool overflow;
	      wide_int alt = wi::add (smaller, 1, TYPE_SIGN (TREE_TYPE (smaller)),
				      &overflow);
	      if (! overflow)
		alt_smaller = wide_int_to_tree (TREE_TYPE (smaller), alt);
	    }
	  else
	    {
	      bool overflow;
	      wide_int alt = wi::sub (smaller, 1, TYPE_SIGN (TREE_TYPE (smaller)),
				      &overflow);
	      if (! overflow)
		alt_smaller = wide_int_to_tree (TREE_TYPE (smaller), alt);
	    }
	}
    }
  else
    return false;

  /* We need to know which is the true edge and which is the false
     edge so that we know if we have abs or negative abs.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);

  /* Forward the edges over the middle basic block.  */
  if (true_edge->dest == middle_bb)
    true_edge = EDGE_SUCC (true_edge->dest, 0);
  if (false_edge->dest == middle_bb)
    false_edge = EDGE_SUCC (false_edge->dest, 0);

  if (true_edge == e0)
    {
      gcc_assert (false_edge == e1);
      arg_true = arg0;
      arg_false = arg1;
    }
  else
    {
      gcc_assert (false_edge == e0);
      gcc_assert (true_edge == e1);
      arg_true = arg1;
      arg_false = arg0;
    }
  if (empty_block_p (middle_bb))
    {
      if ((operand_equal_for_phi_arg_p (arg_true, smaller)
	   || (alt_smaller
	       && operand_equal_for_phi_arg_p (arg_true, alt_smaller)))
	  && (operand_equal_for_phi_arg_p (arg_false, larger)
	      || (alt_larger
		  && operand_equal_for_phi_arg_p (arg_false, alt_larger))))
	{
	  /* Case

	     if (smaller < larger)
	       rslt = smaller;
	     else
	       rslt = larger;  */
	  minmax = MIN_EXPR;
	}
      else if ((operand_equal_for_phi_arg_p (arg_false, smaller)
		|| (alt_smaller
		    && operand_equal_for_phi_arg_p (arg_false, alt_smaller)))
	       && (operand_equal_for_phi_arg_p (arg_true, larger)
		   || (alt_larger
		       && operand_equal_for_phi_arg_p (arg_true, alt_larger))))
	minmax = MAX_EXPR;
      else
	return false;
    }
  else
    {
      /* Recognize the following case, assuming d <= u:

	 if (a <= u)
	   b = MAX (a, d);
	 x = PHI <b, u>

	 This is equivalent to

	 b = MAX (a, d);
	 x = MIN (b, u);  */

      gimple *assign = last_and_only_stmt (middle_bb);
      tree lhs, op0, op1, bound;

      if (!assign
	  || gimple_code (assign) != GIMPLE_ASSIGN)
	return false;

      lhs = gimple_assign_lhs (assign);
      ass_code = gimple_assign_rhs_code (assign);
      if (ass_code != MAX_EXPR && ass_code != MIN_EXPR)
	return false;
      op0 = gimple_assign_rhs1 (assign);
      op1 = gimple_assign_rhs2 (assign);

      if (true_edge->src == middle_bb)
	{
	  /* We got here if the condition is true, i.e., SMALLER < LARGER.  */
	  if (!operand_equal_for_phi_arg_p (lhs, arg_true))
	    return false;

	  if (operand_equal_for_phi_arg_p (arg_false, larger)
	      || (alt_larger
		  && operand_equal_for_phi_arg_p (arg_false, alt_larger)))
	    {
	      /* Case

		 if (smaller < larger)
		   {
		     r' = MAX_EXPR (smaller, bound)
		   }
		 r = PHI <r', larger>  --> to be turned to MIN_EXPR.  */
	      if (ass_code != MAX_EXPR)
		return false;

	      minmax = MIN_EXPR;
	      if (operand_equal_for_phi_arg_p (op0, smaller)
		  || (alt_smaller
		      && operand_equal_for_phi_arg_p (op0, alt_smaller)))
		bound = op1;
	      else if (operand_equal_for_phi_arg_p (op1, smaller)
		       || (alt_smaller
			   && operand_equal_for_phi_arg_p (op1, alt_smaller)))
		bound = op0;
	      else
		return false;

	      /* We need BOUND <= LARGER.  */
	      if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
						  bound, larger)))
		return false;
	    }
	  else if (operand_equal_for_phi_arg_p (arg_false, smaller)
		   || (alt_smaller
		       && operand_equal_for_phi_arg_p (arg_false, alt_smaller)))
	    {
	      /* Case

		 if (smaller < larger)
		   {
		     r' = MIN_EXPR (larger, bound)
		   }
		 r = PHI <r', smaller>  --> to be turned to MAX_EXPR.  */
	      if (ass_code != MIN_EXPR)
		return false;

	      minmax = MAX_EXPR;
	      if (operand_equal_for_phi_arg_p (op0, larger)
		  || (alt_larger
		      && operand_equal_for_phi_arg_p (op0, alt_larger)))
		bound = op1;
	      else if (operand_equal_for_phi_arg_p (op1, larger)
		       || (alt_larger
			   && operand_equal_for_phi_arg_p (op1, alt_larger)))
		bound = op0;
	      else
		return false;

	      /* We need BOUND >= SMALLER.  */
	      if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
						  bound, smaller)))
		return false;
	    }
	  else
	    return false;
	}
      else
	{
	  /* We got here if the condition is false, i.e., SMALLER > LARGER.  */
	  if (!operand_equal_for_phi_arg_p (lhs, arg_false))
	    return false;

	  if (operand_equal_for_phi_arg_p (arg_true, larger)
	      || (alt_larger
		  && operand_equal_for_phi_arg_p (arg_true, alt_larger)))
	    {
	      /* Case

		 if (smaller > larger)
		   {
		     r' = MIN_EXPR (smaller, bound)
		   }
		 r = PHI <r', larger>  --> to be turned to MAX_EXPR.  */
	      if (ass_code != MIN_EXPR)
		return false;

	      minmax = MAX_EXPR;
	      if (operand_equal_for_phi_arg_p (op0, smaller)
		  || (alt_smaller
		      && operand_equal_for_phi_arg_p (op0, alt_smaller)))
		bound = op1;
	      else if (operand_equal_for_phi_arg_p (op1, smaller)
		       || (alt_smaller
			   && operand_equal_for_phi_arg_p (op1, alt_smaller)))
		bound = op0;
	      else
		return false;

	      /* We need BOUND >= LARGER.  */
	      if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
						  bound, larger)))
		return false;
	    }
	  else if (operand_equal_for_phi_arg_p (arg_true, smaller)
		   || (alt_smaller
		       && operand_equal_for_phi_arg_p (arg_true, alt_smaller)))
	    {
	      /* Case

		 if (smaller > larger)
		   {
		     r' = MAX_EXPR (larger, bound)
		   }
		 r = PHI <r', smaller>  --> to be turned to MIN_EXPR.  */
	      if (ass_code != MAX_EXPR)
		return false;

	      minmax = MIN_EXPR;
	      if (operand_equal_for_phi_arg_p (op0, larger))
		bound = op1;
	      else if (operand_equal_for_phi_arg_p (op1, larger))
		bound = op0;
	      else
		return false;

	      /* We need BOUND <= SMALLER.  */
	      if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
						  bound, smaller)))
		return false;
	    }
	  else
	    return false;
	}
    }
  /* Move the statement from the middle block.  */
  gsi = gsi_last_bb (cond_bb);
  gsi_from = gsi_last_nondebug_bb (middle_bb);
  gsi_move_before (&gsi_from, &gsi);

  /* Create an SSA var to hold the min/max result.  If we're the only
     thing setting the target PHI, then we can clone the PHI
     variable.  Otherwise we must create a new one.  */
  result = PHI_RESULT (phi);
  if (EDGE_COUNT (gimple_bb (phi)->preds) == 2)
    result = duplicate_ssa_name (result, NULL);
  else
    result = make_ssa_name (TREE_TYPE (result));

  /* Emit the statement to compute min/max.  */
  new_stmt = gimple_build_assign (result, minmax, arg0, arg1);
  gsi = gsi_last_bb (cond_bb);
  gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);

  replace_phi_edge_with_variable (cond_bb, e1, phi, result);
  reset_flow_sensitive_info_in_bb (cond_bb);

  return true;
}
/* The function abs_replacement does the main work of doing the absolute
   replacement.  Return true if the replacement is done.  Otherwise return
   false.
   BB is the basic block where the replacement is going to be done.  ARG0
   is argument 0 from the PHI.  Likewise for ARG1.  */
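/* At the source level this catches, e.g.:

     x = (a < 0) ? -a : a;    becomes    x = ABS_EXPR <a>;
     x = (a > 0) ? -a : a;    becomes    x = -ABS_EXPR <a>;

   (the second form needing the extra negation emitted below).  */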
static bool
abs_replacement (basic_block cond_bb, basic_block middle_bb,
		 edge e0 ATTRIBUTE_UNUSED, edge e1,
		 gimple *phi, tree arg0, tree arg1)
{
  tree result;
  gassign *new_stmt;
  gimple *cond;
  gimple_stmt_iterator gsi;
  edge true_edge, false_edge;
  gimple *assign;
  edge e;
  tree rhs, lhs;
  bool negate;
  enum tree_code cond_code;

  /* If the type says honor signed zeros we cannot do this
     optimization.  */
  if (HONOR_SIGNED_ZEROS (arg1))
    return false;

  /* OTHER_BLOCK must have only one executable statement which must have the
     form arg0 = -arg1 or arg1 = -arg0.  */

  assign = last_and_only_stmt (middle_bb);
  /* If we did not find the proper negation assignment, then we can not
     optimize.  */
  if (assign == NULL)
    return false;

  /* If we got here, then we have found the only executable statement
     in OTHER_BLOCK.  If it is anything other than arg0 = -arg1 or
     arg1 = -arg0, then we can not optimize.  */
  if (gimple_code (assign) != GIMPLE_ASSIGN)
    return false;

  lhs = gimple_assign_lhs (assign);

  if (gimple_assign_rhs_code (assign) != NEGATE_EXPR)
    return false;

  rhs = gimple_assign_rhs1 (assign);

  /* The assignment has to be arg0 = -arg1 or arg1 = -arg0.  */
  if (!(lhs == arg0 && rhs == arg1)
      && !(lhs == arg1 && rhs == arg0))
    return false;

  cond = last_stmt (cond_bb);
  result = PHI_RESULT (phi);

  /* Only relationals comparing arg[01] against zero are interesting.  */
  cond_code = gimple_cond_code (cond);
  if (cond_code != GT_EXPR && cond_code != GE_EXPR
      && cond_code != LT_EXPR && cond_code != LE_EXPR)
    return false;

  /* Make sure the conditional is arg[01] OP y.  */
  if (gimple_cond_lhs (cond) != rhs)
    return false;

  if (FLOAT_TYPE_P (TREE_TYPE (gimple_cond_rhs (cond)))
      ? real_zerop (gimple_cond_rhs (cond))
      : integer_zerop (gimple_cond_rhs (cond)))
    ;
  else
    return false;

  /* We need to know which is the true edge and which is the false
     edge so that we know if we have abs or negative abs.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);

  /* For GT_EXPR/GE_EXPR, if the true edge goes to OTHER_BLOCK, then we
     will need to negate the result.  Similarly for LT_EXPR/LE_EXPR if
     the false edge goes to OTHER_BLOCK.  */
  if (cond_code == GT_EXPR || cond_code == GE_EXPR)
    e = true_edge;
  else
    e = false_edge;

  if (e->dest == middle_bb)
    negate = true;
  else
    negate = false;

  result = duplicate_ssa_name (result, NULL);

  if (negate)
    lhs = make_ssa_name (TREE_TYPE (result));
  else
    lhs = result;

  /* Build the modify expression with abs expression.  */
  new_stmt = gimple_build_assign (lhs, ABS_EXPR, rhs);

  gsi = gsi_last_bb (cond_bb);
  gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);

  if (negate)
    {
      /* Get the right GSI.  We want to insert after the recently
	 added ABS_EXPR statement (which we know is the first statement
	 in the block).  */
      new_stmt = gimple_build_assign (result, NEGATE_EXPR, lhs);

      gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
    }

  replace_phi_edge_with_variable (cond_bb, e1, phi, result);
  reset_flow_sensitive_info_in_bb (cond_bb);

  /* Note that we optimized this PHI.  */
  return true;
}
/* Auxiliary functions to determine the set of memory accesses which
   can't trap because they are preceded by accesses to the same memory
   portion.  We do that for MEM_REFs, so we only need to track
   the SSA_NAME of the pointer indirectly referenced.  The algorithm
   simply is a walk over all instructions in dominator order.  When
   we see an MEM_REF we determine if we've already seen the same
   ref anywhere up to the root of the dominator tree.  If we have, the
   current access can't trap.  If we don't see any dominating access
   the current access might trap, but might also make later accesses
   non-trapping, so we remember it.  We need to be careful with loads
   or stores, for instance a load might not trap, while a store would,
   so if we see a dominating read access this doesn't mean that a later
   write access would not trap.  Hence we also need to differentiate the
   type of access(es) seen.

   ??? We currently are very conservative and assume that a load might
   trap even if a store doesn't (write-only memory).  This probably is
   overly conservative.  */
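/* As a sketch of what the walk establishes: in

     *p_1 = x;        // seen first, might trap; remembered
     if (cond)
       y = *p_1;      // dominated by the store above, so it cannot trap

   the load is added to the non-trapping set because a store through the
   same SSA_NAME, with the same offset and size, dominates it.  */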
/* A hash-table of SSA_NAMEs, and in which basic block an MEM_REF
   through it was seen, which would constitute a no-trap region for
   same accesses.  */
struct name_to_bb
{
  unsigned int ssa_name_ver;
  unsigned int phase;
  bool store;
  HOST_WIDE_INT offset, size;
  basic_block bb;
};
/* Hashtable helpers.  */

struct ssa_names_hasher : free_ptr_hash <name_to_bb>
{
  static inline hashval_t hash (const name_to_bb *);
  static inline bool equal (const name_to_bb *, const name_to_bb *);
};
/* Used for quick clearing of the hash-table when we see calls.
   Hash entries with phase < nt_call_phase are invalid.  */
static unsigned int nt_call_phase;
/* The hash function.  */

inline hashval_t
ssa_names_hasher::hash (const name_to_bb *n)
{
  return n->ssa_name_ver ^ (((hashval_t) n->store) << 31)
	 ^ (n->offset << 6) ^ (n->size << 3);
}
/* The equality function of *P1 and *P2.  */

inline bool
ssa_names_hasher::equal (const name_to_bb *n1, const name_to_bb *n2)
{
  return n1->ssa_name_ver == n2->ssa_name_ver
	 && n1->store == n2->store
	 && n1->offset == n2->offset
	 && n1->size == n2->size;
}
class nontrapping_dom_walker : public dom_walker
{
public:
  nontrapping_dom_walker (cdi_direction direction, hash_set<tree> *ps)
    : dom_walker (direction), m_nontrapping (ps), m_seen_ssa_names (128) {}

  virtual edge before_dom_children (basic_block);
  virtual void after_dom_children (basic_block);

private:

  /* We see the expression EXP in basic block BB.  If it's an interesting
     expression (an MEM_REF through an SSA_NAME) possibly insert the
     expression into the set NONTRAP or the hash table of seen expressions.
     STORE is true if this expression is on the LHS, otherwise it's on
     the RHS.  */
  void add_or_mark_expr (basic_block, tree, bool);

  hash_set<tree> *m_nontrapping;

  /* The hash table for remembering what we've seen.  */
  hash_table<ssa_names_hasher> m_seen_ssa_names;
};
/* Called by walk_dominator_tree, when entering the block BB.  */

edge
nontrapping_dom_walker::before_dom_children (basic_block bb)
{
  edge e;
  edge_iterator ei;
  gimple_stmt_iterator gsi;

  /* If we haven't seen all our predecessors, clear the hash-table.  */
  FOR_EACH_EDGE (e, ei, bb->preds)
    if ((((size_t)e->src->aux) & 2) == 0)
      {
	nt_call_phase++;
	break;
      }

  /* Mark this BB as being on the path to dominator root and as visited.  */
  bb->aux = (void*)(1 | 2);

  /* And walk the statements in order.  */
  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple *stmt = gsi_stmt (gsi);

      if ((gimple_code (stmt) == GIMPLE_ASM && gimple_vdef (stmt))
	  || (is_gimple_call (stmt)
	      && (!nonfreeing_call_p (stmt) || !nonbarrier_call_p (stmt))))
	nt_call_phase++;
      else if (gimple_assign_single_p (stmt) && !gimple_has_volatile_ops (stmt))
	{
	  add_or_mark_expr (bb, gimple_assign_lhs (stmt), true);
	  add_or_mark_expr (bb, gimple_assign_rhs1 (stmt), false);
	}
    }
  return NULL;
}
/* Called by walk_dominator_tree, when basic block BB is exited.  */

void
nontrapping_dom_walker::after_dom_children (basic_block bb)
{
  /* This BB isn't on the path to dominator root anymore.  */
  bb->aux = (void*)2;
}
/* We see the expression EXP in basic block BB.  If it's an interesting
   expression (an MEM_REF through an SSA_NAME) possibly insert the
   expression into the set NONTRAP or the hash table of seen expressions.
   STORE is true if this expression is on the LHS, otherwise it's on
   the RHS.  */
void
nontrapping_dom_walker::add_or_mark_expr (basic_block bb, tree exp, bool store)
{
  HOST_WIDE_INT size;

  if (TREE_CODE (exp) == MEM_REF
      && TREE_CODE (TREE_OPERAND (exp, 0)) == SSA_NAME
      && tree_fits_shwi_p (TREE_OPERAND (exp, 1))
      && (size = int_size_in_bytes (TREE_TYPE (exp))) > 0)
    {
      tree name = TREE_OPERAND (exp, 0);
      struct name_to_bb map;
      name_to_bb **slot;
      struct name_to_bb *n2bb;
      basic_block found_bb = 0;

      /* Try to find the last seen MEM_REF through the same
	 SSA_NAME, which can trap.  */
      map.ssa_name_ver = SSA_NAME_VERSION (name);
      map.phase = 0;
      map.bb = 0;
      map.store = store;
      map.offset = tree_to_shwi (TREE_OPERAND (exp, 1));
      map.size = size;

      slot = m_seen_ssa_names.find_slot (&map, INSERT);
      n2bb = *slot;
      if (n2bb && n2bb->phase >= nt_call_phase)
	found_bb = n2bb->bb;

      /* If we've found a trapping MEM_REF, _and_ it dominates EXP
	 (it's in a basic block on the path from us to the dominator root)
	 then we can't trap.  */
      if (found_bb && (((size_t)found_bb->aux) & 1) == 1)
	{
	  m_nontrapping->add (exp);
	}
      else
	{
	  /* EXP might trap, so insert it into the hash table.  */
	  if (n2bb)
	    {
	      n2bb->phase = nt_call_phase;
	      n2bb->bb = bb;
	    }
	  else
	    {
	      n2bb = XNEW (struct name_to_bb);
	      n2bb->ssa_name_ver = SSA_NAME_VERSION (name);
	      n2bb->phase = nt_call_phase;
	      n2bb->bb = bb;
	      n2bb->store = store;
	      n2bb->offset = map.offset;
	      n2bb->size = size;
	      *slot = n2bb;
	    }
	}
    }
}
/* This is the entry point of gathering non trapping memory accesses.
   It will do a dominator walk over the whole function, and it will
   make use of the bb->aux pointers.  It returns a set of trees
   (the MEM_REFs themselves) which can't trap.  */
static hash_set<tree> *
get_non_trapping (void)
{
  nt_call_phase = 0;
  hash_set<tree> *nontrap = new hash_set<tree>;
  /* We're going to do a dominator walk, so ensure that we have
     dominance information.  */
  calculate_dominance_info (CDI_DOMINATORS);

  nontrapping_dom_walker (CDI_DOMINATORS, nontrap)
    .walk (cfun->cfg->x_entry_block_ptr);

  clear_aux_for_blocks ();
  return nontrap;
}
/* Do the main work of conditional store replacement.  We already know
   that the recognized pattern looks like so:

   split:
     if (cond) goto MIDDLE_BB; else goto JOIN_BB (edge E1)
   MIDDLE_BB:
     something
     fallthrough (edge E0)
   JOIN_BB:
     some more

   We check that MIDDLE_BB contains only one store, that that store
   doesn't trap (not via NOTRAP, but via checking if an access to the same
   memory location dominates us) and that the store has a "simple" RHS.  */
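/* As a sketch, for a pointer p that is already known to be
   dereferenceable at this point:

     if (cond)
       *p = rhs;

   becomes (with the load inserted on the edge that skipped the store)

     tmp = cond ? rhs : *p;
     *p = tmp;  */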
static bool
cond_store_replacement (basic_block middle_bb, basic_block join_bb,
			edge e0, edge e1, hash_set<tree> *nontrap)
{
  gimple *assign = last_and_only_stmt (middle_bb);
  tree lhs, rhs, name, name2;
  gphi *newphi;
  gassign *new_stmt;
  gimple_stmt_iterator gsi;
  source_location locus;

  /* Check if middle_bb contains only one store.  */
  if (!assign
      || !gimple_assign_single_p (assign)
      || gimple_has_volatile_ops (assign))
    return false;

  locus = gimple_location (assign);
  lhs = gimple_assign_lhs (assign);
  rhs = gimple_assign_rhs1 (assign);
  if (TREE_CODE (lhs) != MEM_REF
      || TREE_CODE (TREE_OPERAND (lhs, 0)) != SSA_NAME
      || !is_gimple_reg_type (TREE_TYPE (lhs)))
    return false;

  /* Prove that we can move the store down.  We could also check
     TREE_THIS_NOTRAP here, but in that case we also could move stores,
     whose value is not available readily, which we want to avoid.  */
  if (!nontrap->contains (lhs))
    return false;

  /* Now we've checked the constraints, so do the transformation:
     1) Remove the single store.  */
  gsi = gsi_for_stmt (assign);
  unlink_stmt_vdef (assign);
  gsi_remove (&gsi, true);
  release_defs (assign);

  /* 2) Insert a load from the memory of the store to the temporary
	on the edge which did not contain the store.  */
  lhs = unshare_expr (lhs);
  name = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
  new_stmt = gimple_build_assign (name, lhs);
  gimple_set_location (new_stmt, locus);
  gsi_insert_on_edge (e1, new_stmt);

  /* 3) Create a PHI node at the join block, with one argument
	holding the old RHS, and the other holding the temporary
	where we stored the old memory contents.  */
  name2 = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
  newphi = create_phi_node (name2, join_bb);
  add_phi_arg (newphi, rhs, e0, locus);
  add_phi_arg (newphi, name, e1, locus);

  lhs = unshare_expr (lhs);
  new_stmt = gimple_build_assign (lhs, PHI_RESULT (newphi));

  /* 4) Insert that PHI node.  */
  gsi = gsi_after_labels (join_bb);
  if (gsi_end_p (gsi))
    {
      gsi = gsi_last_bb (join_bb);
      gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
    }
  else
    gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);

  return true;
}
/* Do the main work of conditional store replacement.  */
1787 static bool
1788 cond_if_else_store_replacement_1 (basic_block then_bb, basic_block else_bb,
1789 basic_block join_bb, gimple *then_assign,
1790 gimple *else_assign)
1792 tree lhs_base, lhs, then_rhs, else_rhs, name;
1793 source_location then_locus, else_locus;
1794 gimple_stmt_iterator gsi;
1795 gphi *newphi;
1796 gassign *new_stmt;
1798 if (then_assign == NULL
1799 || !gimple_assign_single_p (then_assign)
1800 || gimple_clobber_p (then_assign)
1801 || gimple_has_volatile_ops (then_assign)
1802 || else_assign == NULL
1803 || !gimple_assign_single_p (else_assign)
1804 || gimple_clobber_p (else_assign)
1805 || gimple_has_volatile_ops (else_assign))
1806 return false;
1808 lhs = gimple_assign_lhs (then_assign);
1809 if (!is_gimple_reg_type (TREE_TYPE (lhs))
1810 || !operand_equal_p (lhs, gimple_assign_lhs (else_assign), 0))
1811 return false;
1813 lhs_base = get_base_address (lhs);
1814 if (lhs_base == NULL_TREE
1815 || (!DECL_P (lhs_base) && TREE_CODE (lhs_base) != MEM_REF))
1816 return false;
1818 then_rhs = gimple_assign_rhs1 (then_assign);
1819 else_rhs = gimple_assign_rhs1 (else_assign);
1820 then_locus = gimple_location (then_assign);
1821 else_locus = gimple_location (else_assign);
1823 /* Now we've checked the constraints, so do the transformation:
1824 1) Remove the stores. */
1825 gsi = gsi_for_stmt (then_assign);
1826 unlink_stmt_vdef (then_assign);
1827 gsi_remove (&gsi, true);
1828 release_defs (then_assign);
1830 gsi = gsi_for_stmt (else_assign);
1831 unlink_stmt_vdef (else_assign);
1832 gsi_remove (&gsi, true);
1833 release_defs (else_assign);
1835 /* 2) Create a PHI node at the join block, with one argument
1836 holding the RHS of the THEN store and the other holding
1837 the RHS of the ELSE store. */
1838 name = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
1839 newphi = create_phi_node (name, join_bb);
1840 add_phi_arg (newphi, then_rhs, EDGE_SUCC (then_bb, 0), then_locus);
1841 add_phi_arg (newphi, else_rhs, EDGE_SUCC (else_bb, 0), else_locus);
1843 new_stmt = gimple_build_assign (lhs, PHI_RESULT (newphi));
1845 /* 3) Insert the store of the PHI result back to memory. */
1846 gsi = gsi_after_labels (join_bb);
1847 if (gsi_end_p (gsi))
1848 {
1849 gsi = gsi_last_bb (join_bb);
1850 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
1851 }
1852 else
1853 gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
1855 return true;
1856 }
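/* Editor's illustrative sketch (not from the original sources): at the C
   level the transformation above rewrites

     if (cond)
       *p = a;
     else
       *p = b;

   into the equivalent of

     tmp = cond ? a : b;
     *p = tmp;

   deleting both conditional stores and emitting one unconditional store
   of a PHI of the two right-hand sides in the join block.  */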
1858 /* Conditional store replacement. We already know
1859 that the recognized pattern looks like so:
1861 split:
1862 if (cond) goto THEN_BB; else goto ELSE_BB (edge E1)
1863 THEN_BB:
1864 ...
1865 X = Y;
1866 ...
1867 goto JOIN_BB;
1868 ELSE_BB:
1869 ...
1870 X = Z;
1871 ...
1872 fallthrough (edge E0)
1873 JOIN_BB:
1874 some more statements
1876 We check that it is safe to sink the store to JOIN_BB by verifying that
1877 there are no read-after-write or write-after-write dependencies in
1878 THEN_BB and ELSE_BB. */
1880 static bool
1881 cond_if_else_store_replacement (basic_block then_bb, basic_block else_bb,
1882 basic_block join_bb)
1883 {
1884 gimple *then_assign = last_and_only_stmt (then_bb);
1885 gimple *else_assign = last_and_only_stmt (else_bb);
1886 vec<data_reference_p> then_datarefs, else_datarefs;
1887 vec<ddr_p> then_ddrs, else_ddrs;
1888 gimple *then_store, *else_store;
1889 bool found, ok = false, res;
1890 struct data_dependence_relation *ddr;
1891 data_reference_p then_dr, else_dr;
1892 int i, j;
1893 tree then_lhs, else_lhs;
1894 basic_block blocks[3];
1896 if (MAX_STORES_TO_SINK == 0)
1897 return false;
1899 /* Handle the case with a single statement in each of THEN_BB and ELSE_BB. */
1900 if (then_assign && else_assign)
1901 return cond_if_else_store_replacement_1 (then_bb, else_bb, join_bb,
1902 then_assign, else_assign);
1904 /* Find data references. */
1905 then_datarefs.create (1);
1906 else_datarefs.create (1);
1907 if ((find_data_references_in_bb (NULL, then_bb, &then_datarefs)
1908 == chrec_dont_know)
1909 || !then_datarefs.length ()
1910 || (find_data_references_in_bb (NULL, else_bb, &else_datarefs)
1911 == chrec_dont_know)
1912 || !else_datarefs.length ())
1913 {
1914 free_data_refs (then_datarefs);
1915 free_data_refs (else_datarefs);
1916 return false;
1917 }
1919 /* Find pairs of stores with equal LHS. */
1920 auto_vec<gimple *, 1> then_stores, else_stores;
1921 FOR_EACH_VEC_ELT (then_datarefs, i, then_dr)
1922 {
1923 if (DR_IS_READ (then_dr))
1924 continue;
1926 then_store = DR_STMT (then_dr);
1927 then_lhs = gimple_get_lhs (then_store);
1928 if (then_lhs == NULL_TREE)
1929 continue;
1930 found = false;
1932 FOR_EACH_VEC_ELT (else_datarefs, j, else_dr)
1933 {
1934 if (DR_IS_READ (else_dr))
1935 continue;
1937 else_store = DR_STMT (else_dr);
1938 else_lhs = gimple_get_lhs (else_store);
1939 if (else_lhs == NULL_TREE)
1940 continue;
1942 if (operand_equal_p (then_lhs, else_lhs, 0))
1943 {
1944 found = true;
1945 break;
1946 }
1947 }
1949 if (!found)
1950 continue;
1952 then_stores.safe_push (then_store);
1953 else_stores.safe_push (else_store);
1954 }
1956 /* Bail out if no pairs of stores were found, or if there are too many to sink. */
1957 if (!then_stores.length ()
1958 || then_stores.length () > (unsigned) MAX_STORES_TO_SINK)
1959 {
1960 free_data_refs (then_datarefs);
1961 free_data_refs (else_datarefs);
1962 return false;
1963 }
1965 /* Compute and check data dependencies in both basic blocks. */
1966 then_ddrs.create (1);
1967 else_ddrs.create (1);
1968 if (!compute_all_dependences (then_datarefs, &then_ddrs,
1969 vNULL, false)
1970 || !compute_all_dependences (else_datarefs, &else_ddrs,
1971 vNULL, false))
1972 {
1973 free_dependence_relations (then_ddrs);
1974 free_dependence_relations (else_ddrs);
1975 free_data_refs (then_datarefs);
1976 free_data_refs (else_datarefs);
1977 return false;
1978 }
1979 blocks[0] = then_bb;
1980 blocks[1] = else_bb;
1981 blocks[2] = join_bb;
1982 renumber_gimple_stmt_uids_in_blocks (blocks, 3);
1984 /* Check that there are no read-after-write or write-after-write dependencies
1985 in THEN_BB. */
1986 FOR_EACH_VEC_ELT (then_ddrs, i, ddr)
1987 {
1988 struct data_reference *dra = DDR_A (ddr);
1989 struct data_reference *drb = DDR_B (ddr);
1991 if (DDR_ARE_DEPENDENT (ddr) != chrec_known
1992 && ((DR_IS_READ (dra) && DR_IS_WRITE (drb)
1993 && gimple_uid (DR_STMT (dra)) > gimple_uid (DR_STMT (drb)))
1994 || (DR_IS_READ (drb) && DR_IS_WRITE (dra)
1995 && gimple_uid (DR_STMT (drb)) > gimple_uid (DR_STMT (dra)))
1996 || (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))))
1997 {
1998 free_dependence_relations (then_ddrs);
1999 free_dependence_relations (else_ddrs);
2000 free_data_refs (then_datarefs);
2001 free_data_refs (else_datarefs);
2002 return false;
2003 }
2004 }
2006 /* Check that there are no read-after-write or write-after-write dependencies
2007 in ELSE_BB. */
2008 FOR_EACH_VEC_ELT (else_ddrs, i, ddr)
2009 {
2010 struct data_reference *dra = DDR_A (ddr);
2011 struct data_reference *drb = DDR_B (ddr);
2013 if (DDR_ARE_DEPENDENT (ddr) != chrec_known
2014 && ((DR_IS_READ (dra) && DR_IS_WRITE (drb)
2015 && gimple_uid (DR_STMT (dra)) > gimple_uid (DR_STMT (drb)))
2016 || (DR_IS_READ (drb) && DR_IS_WRITE (dra)
2017 && gimple_uid (DR_STMT (drb)) > gimple_uid (DR_STMT (dra)))
2018 || (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))))
2019 {
2020 free_dependence_relations (then_ddrs);
2021 free_dependence_relations (else_ddrs);
2022 free_data_refs (then_datarefs);
2023 free_data_refs (else_datarefs);
2024 return false;
2025 }
2026 }
2028 /* Sink stores with the same LHS. */
2029 FOR_EACH_VEC_ELT (then_stores, i, then_store)
2030 {
2031 else_store = else_stores[i];
2032 res = cond_if_else_store_replacement_1 (then_bb, else_bb, join_bb,
2033 then_store, else_store);
2034 ok = ok || res;
2035 }
2037 free_dependence_relations (then_ddrs);
2038 free_dependence_relations (else_ddrs);
2039 free_data_refs (then_datarefs);
2040 free_data_refs (else_datarefs);
2042 return ok;
2043 }
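/* Editor's note with a hypothetical example: the dependence checks above
   reject sinking when it would move a store past a dependent access, e.g.

     if (cond)
       {
         p->x = 1;
         t = p->x;      (a read-after-write on p->x)
         p->y = t;
       }
     else
       p->x = 2;

   Moving the store to p->x down to the join block would change the value
   loaded in the THEN block, so this pair is not sunk.  */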
2045 /* Return TRUE if STMT has a VUSE whose corresponding VDEF is in BB. */
2047 static bool
2048 local_mem_dependence (gimple *stmt, basic_block bb)
2049 {
2050 tree vuse = gimple_vuse (stmt);
2051 gimple *def;
2053 if (!vuse)
2054 return false;
2056 def = SSA_NAME_DEF_STMT (vuse);
2057 return (def && gimple_bb (def) == bb);
2058 }
2060 /* Given a "diamond" control-flow pattern where BB0 tests a condition,
2061 BB1 and BB2 are "then" and "else" blocks dependent on this test,
2062 and BB3 rejoins control flow following BB1 and BB2, look for
2063 opportunities to hoist loads as follows. If BB3 contains a PHI of
2064 two loads, one each occurring in BB1 and BB2, and the loads are
2065 provably of adjacent fields in the same structure, then move both
2066 loads into BB0. Of course this can only be done if there are no
2067 dependencies preventing such motion.
2069 One of the hoisted loads will always be speculative, so the
2070 transformation is currently conservative:
2072 - The fields must be strictly adjacent.
2073 - The two fields must occupy a single memory block that is
2074 guaranteed not to cross a page boundary.
2076 The latter condition is difficult to prove, as such memory blocks
2077 should be aligned on the minimum of the stack alignment boundary and
2078 the alignment guaranteed by heap allocation interfaces. Thus we rely
2079 on a parameter for the alignment value.
2081 Provided a good value is used for the alignment parameter, the first
2082 restriction could possibly be relaxed. */
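/* Editor's illustrative sketch (not from the original sources): the
   typical candidate is a pair of adjacent pointer fields, e.g.

     struct node { struct node *left, *right; };

     struct node *
     child (struct node *n, int cond)
     {
       return cond ? n->left : n->right;
     }

   Both loads can be hoisted above the branch when the two fields fit in
   one cache line, turning the diamond into straightline code suitable
   for a conditional move.  */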
2084 static void
2085 hoist_adjacent_loads (basic_block bb0, basic_block bb1,
2086 basic_block bb2, basic_block bb3)
2087 {
2088 int param_align = PARAM_VALUE (PARAM_L1_CACHE_LINE_SIZE);
2089 unsigned param_align_bits = (unsigned) (param_align * BITS_PER_UNIT);
2090 gphi_iterator gsi;
2092 /* Walk the phis in bb3 looking for an opportunity. We are looking
2093 for phis of two SSA names, one each of which is defined in bb1 and
2094 bb2. */
2095 for (gsi = gsi_start_phis (bb3); !gsi_end_p (gsi); gsi_next (&gsi))
2096 {
2097 gphi *phi_stmt = gsi.phi ();
2098 gimple *def1, *def2;
2099 tree arg1, arg2, ref1, ref2, field1, field2;
2100 tree tree_offset1, tree_offset2, tree_size2, next;
2101 int offset1, offset2, size2;
2102 unsigned align1;
2103 gimple_stmt_iterator gsi2;
2104 basic_block bb_for_def1, bb_for_def2;
2106 if (gimple_phi_num_args (phi_stmt) != 2
2107 || virtual_operand_p (gimple_phi_result (phi_stmt)))
2108 continue;
2110 arg1 = gimple_phi_arg_def (phi_stmt, 0);
2111 arg2 = gimple_phi_arg_def (phi_stmt, 1);
2113 if (TREE_CODE (arg1) != SSA_NAME
2114 || TREE_CODE (arg2) != SSA_NAME
2115 || SSA_NAME_IS_DEFAULT_DEF (arg1)
2116 || SSA_NAME_IS_DEFAULT_DEF (arg2))
2117 continue;
2119 def1 = SSA_NAME_DEF_STMT (arg1);
2120 def2 = SSA_NAME_DEF_STMT (arg2);
2122 if ((gimple_bb (def1) != bb1 || gimple_bb (def2) != bb2)
2123 && (gimple_bb (def2) != bb1 || gimple_bb (def1) != bb2))
2124 continue;
2126 /* Check the mode of the arguments to be sure a conditional move
2127 can be generated for them. */
2128 if (optab_handler (movcc_optab, TYPE_MODE (TREE_TYPE (arg1)))
2129 == CODE_FOR_nothing)
2130 continue;
2132 /* Both statements must be assignments whose RHS is a COMPONENT_REF. */
2133 if (!gimple_assign_single_p (def1)
2134 || !gimple_assign_single_p (def2)
2135 || gimple_has_volatile_ops (def1)
2136 || gimple_has_volatile_ops (def2))
2137 continue;
2139 ref1 = gimple_assign_rhs1 (def1);
2140 ref2 = gimple_assign_rhs1 (def2);
2142 if (TREE_CODE (ref1) != COMPONENT_REF
2143 || TREE_CODE (ref2) != COMPONENT_REF)
2144 continue;
2146 /* The zeroth operand of the two component references must be
2147 identical. It is not sufficient to compare get_base_address of
2148 the two references, because this could allow for different
2149 elements of the same array in the two trees. It is not safe to
2150 assume that the existence of one array element implies the
2151 existence of a different one. */
2152 if (!operand_equal_p (TREE_OPERAND (ref1, 0), TREE_OPERAND (ref2, 0), 0))
2153 continue;
2155 field1 = TREE_OPERAND (ref1, 1);
2156 field2 = TREE_OPERAND (ref2, 1);
2158 /* Check for field adjacency, and ensure field1 comes first. */
2159 for (next = DECL_CHAIN (field1);
2160 next && TREE_CODE (next) != FIELD_DECL;
2161 next = DECL_CHAIN (next))
2162 ;
2164 if (next != field2)
2165 {
2166 for (next = DECL_CHAIN (field2);
2167 next && TREE_CODE (next) != FIELD_DECL;
2168 next = DECL_CHAIN (next))
2169 ;
2171 if (next != field1)
2172 continue;
2174 std::swap (field1, field2);
2175 std::swap (def1, def2);
2176 }
2178 bb_for_def1 = gimple_bb (def1);
2179 bb_for_def2 = gimple_bb (def2);
2181 /* Check for proper alignment of the first field. */
2182 tree_offset1 = bit_position (field1);
2183 tree_offset2 = bit_position (field2);
2184 tree_size2 = DECL_SIZE (field2);
2186 if (!tree_fits_uhwi_p (tree_offset1)
2187 || !tree_fits_uhwi_p (tree_offset2)
2188 || !tree_fits_uhwi_p (tree_size2))
2189 continue;
2191 offset1 = tree_to_uhwi (tree_offset1);
2192 offset2 = tree_to_uhwi (tree_offset2);
2193 size2 = tree_to_uhwi (tree_size2);
2194 align1 = DECL_ALIGN (field1) % param_align_bits;
2196 if (offset1 % BITS_PER_UNIT != 0)
2197 continue;
2199 /* For profitability, the two field references should fit within
2200 a single cache line. */
2201 if (align1 + offset2 - offset1 + size2 > param_align_bits)
2202 continue;
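/* Editor's worked example: with 64-byte cache lines (param_align_bits
   == 512), a field1 at bit offset 0 whose DECL_ALIGN is 512 (so align1
   == 0) and a 32-bit field2 at bit offset 32 give
   0 + 32 - 0 + 32 = 64 <= 512, so the pair passes this profitability
   test.  */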
2204 /* The two expressions cannot be dependent upon vdefs defined
2205 in bb1/bb2. */
2206 if (local_mem_dependence (def1, bb_for_def1)
2207 || local_mem_dependence (def2, bb_for_def2))
2208 continue;
2210 /* The conditions are satisfied; hoist the loads from bb1 and bb2 into
2211 bb0. We hoist the first one first so that a cache miss is handled
2212 efficiently regardless of hardware cache-fill policy. */
2213 gsi2 = gsi_for_stmt (def1);
2214 gsi_move_to_bb_end (&gsi2, bb0);
2215 gsi2 = gsi_for_stmt (def2);
2216 gsi_move_to_bb_end (&gsi2, bb0);
2218 if (dump_file && (dump_flags & TDF_DETAILS))
2219 {
2220 fprintf (dump_file,
2221 "\nHoisting adjacent loads from %d and %d into %d: \n",
2222 bb_for_def1->index, bb_for_def2->index, bb0->index);
2223 print_gimple_stmt (dump_file, def1, 0, TDF_VOPS|TDF_MEMSYMS);
2224 print_gimple_stmt (dump_file, def2, 0, TDF_VOPS|TDF_MEMSYMS);
2225 }
2226 }
2227 }
2229 /* Determine whether we should attempt to hoist adjacent loads out of
2230 diamond patterns in pass_phiopt. Always hoist loads if
2231 -fhoist-adjacent-loads is specified and the target machine has
2232 both a conditional move instruction and a defined cache line size. */
2234 static bool
2235 gate_hoist_loads (void)
2236 {
2237 return (flag_hoist_adjacent_loads == 1
2238 && PARAM_VALUE (PARAM_L1_CACHE_LINE_SIZE)
2239 && HAVE_conditional_move);
2240 }
2242 /* This pass tries to replace an if-then-else block with an
2243 assignment. We have four kinds of transformations. Some of these
2244 transformations are also performed by the ifcvt RTL optimizer.
2246 Conditional Replacement
2247 -----------------------
2249 This transformation, implemented in conditional_replacement,
2250 replaces
2252 bb0:
2253 if (cond) goto bb2; else goto bb1;
2254 bb1:
2255 bb2:
2256 x = PHI <0 (bb1), 1 (bb0), ...>;
2258 with
2260 bb0:
2261 x' = cond;
2262 goto bb2;
2263 bb2:
2264 x = PHI <x' (bb0), ...>;
2266 We remove bb1 as it becomes unreachable. This occurs often due to
2267 gimplification of conditionals.
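
   As an editor's illustration, assuming a boolean-typed condition, at
   the source level this corresponds to rewriting

     x = cond ? 1 : 0;

   into

     x = cond;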
2269 Value Replacement
2270 -----------------
2272 This transformation, implemented in value_replacement, replaces
2274 bb0:
2275 if (a != b) goto bb2; else goto bb1;
2276 bb1:
2277 bb2:
2278 x = PHI <a (bb1), b (bb0), ...>;
2280 with
2282 bb0:
2283 bb2:
2284 x = PHI <b (bb0), ...>;
2286 This opportunity can sometimes occur as a result of other
2287 optimizations.
2290 Another case caught by value replacement looks like this:
2292 bb0:
2293 t1 = a == CONST;
2294 t2 = b > c;
2295 t3 = t1 & t2;
2296 if (t3 != 0) goto bb1; else goto bb2;
2297 bb1:
2298 bb2:
2299 x = PHI <CONST, a>
2301 Gets replaced with:
2302 bb0:
2303 bb2:
2304 t1 = a == CONST;
2305 t2 = b > c;
2306 t3 = t1 & t2;
2307 x = a;
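
   As an editor's illustration of the first form, at the source level

     x = (a != b) ? b : a;

   simplifies to

     x = b;

   since on the path where the condition is false, a equals b.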
2309 ABS Replacement
2310 ---------------
2312 This transformation, implemented in abs_replacement, replaces
2314 bb0:
2315 if (a >= 0) goto bb2; else goto bb1;
2316 bb1:
2317 x = -a;
2318 bb2:
2319 x = PHI <x (bb1), a (bb0), ...>;
2321 with
2323 bb0:
2324 x' = ABS_EXPR< a >;
2325 bb2:
2326 x = PHI <x' (bb0), ...>;
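
   As an editor's illustration, at the source level

     x = (a >= 0) ? a : -a;

   becomes a single ABS_EXPR with no control flow.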
2328 MIN/MAX Replacement
2329 -------------------
2331 This transformation, implemented in minmax_replacement, replaces
2333 bb0:
2334 if (a <= b) goto bb2; else goto bb1;
2335 bb1:
2336 bb2:
2337 x = PHI <b (bb1), a (bb0), ...>;
2339 with
2341 bb0:
2342 x' = MIN_EXPR (a, b)
2343 bb2:
2344 x = PHI <x' (bb0), ...>;
2346 A similar transformation is done for MAX_EXPR.
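
   As an editor's illustration, at the source level

     x = (a <= b) ? a : b;

   becomes a single MIN_EXPR with no control flow.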
2349 This pass also performs a fifth transformation of a slightly different
2350 flavor.
2352 Factor conversion in COND_EXPR
2353 ------------------------------
2355 This transformation factors the conversion out of COND_EXPR with
2356 factor_out_conditional_conversion.
2358 For example:
2359 if (a <= CST) goto <bb 3>; else goto <bb 4>;
2360 <bb 3>:
2361 tmp = (int) a;
2362 <bb 4>:
2363 tmp = PHI <tmp, CST>
2365 Into:
2366 if (a <= CST) goto <bb 3>; else goto <bb 4>;
2367 <bb 3>:
2368 <bb 4>:
2369 a = PHI <a, CST>
2370 tmp = (int) a;
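
   As an editor's illustration, for a variable a of a narrow type such
   as short, the source expression

     tmp = (a <= CST) ? (int) a : CST;

   is handled by performing the (int) conversion once, after a PHI of
   the narrow values, instead of on the THEN arm.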
2372 Adjacent Load Hoisting
2373 ----------------------
2375 This transformation replaces
2377 bb0:
2378 if (...) goto bb2; else goto bb1;
2379 bb1:
2380 x1 = (<expr>).field1;
2381 goto bb3;
2382 bb2:
2383 x2 = (<expr>).field2;
2384 bb3:
2385 # x = PHI <x1, x2>;
2387 with
2389 bb0:
2390 x1 = (<expr>).field1;
2391 x2 = (<expr>).field2;
2392 if (...) goto bb2; else goto bb1;
2393 bb1:
2394 goto bb3;
2395 bb2:
2396 bb3:
2397 # x = PHI <x1, x2>;
2399 The purpose of this transformation is to enable generation of conditional
2400 move instructions such as Intel CMOV or PowerPC ISEL. Because one of
2401 the loads is speculative, the transformation is restricted to very
2402 specific cases to avoid introducing a page fault. We are looking for
2403 the common idiom:
2405 if (...)
2406 x = y->left;
2407 else
2408 x = y->right;
2410 where left and right are typically adjacent pointers in a tree structure. */
2412 namespace {
2414 const pass_data pass_data_phiopt =
2415 {
2416 GIMPLE_PASS, /* type */
2417 "phiopt", /* name */
2418 OPTGROUP_NONE, /* optinfo_flags */
2419 TV_TREE_PHIOPT, /* tv_id */
2420 ( PROP_cfg | PROP_ssa ), /* properties_required */
2421 0, /* properties_provided */
2422 0, /* properties_destroyed */
2423 0, /* todo_flags_start */
2424 0, /* todo_flags_finish */
2425 };
2427 class pass_phiopt : public gimple_opt_pass
2428 {
2429 public:
2430 pass_phiopt (gcc::context *ctxt)
2431 : gimple_opt_pass (pass_data_phiopt, ctxt)
2432 {}
2434 /* opt_pass methods: */
2435 opt_pass * clone () { return new pass_phiopt (m_ctxt); }
2436 virtual bool gate (function *) { return flag_ssa_phiopt; }
2437 virtual unsigned int execute (function *)
2438 {
2439 return tree_ssa_phiopt_worker (false, gate_hoist_loads ());
2440 }
2442 }; // class pass_phiopt
2444 } // anon namespace
2446 gimple_opt_pass *
2447 make_pass_phiopt (gcc::context *ctxt)
2448 {
2449 return new pass_phiopt (ctxt);
2450 }
2452 namespace {
2454 const pass_data pass_data_cselim =
2455 {
2456 GIMPLE_PASS, /* type */
2457 "cselim", /* name */
2458 OPTGROUP_NONE, /* optinfo_flags */
2459 TV_TREE_PHIOPT, /* tv_id */
2460 ( PROP_cfg | PROP_ssa ), /* properties_required */
2461 0, /* properties_provided */
2462 0, /* properties_destroyed */
2463 0, /* todo_flags_start */
2464 0, /* todo_flags_finish */
2465 };
2467 class pass_cselim : public gimple_opt_pass
2468 {
2469 public:
2470 pass_cselim (gcc::context *ctxt)
2471 : gimple_opt_pass (pass_data_cselim, ctxt)
2472 {}
2474 /* opt_pass methods: */
2475 virtual bool gate (function *) { return flag_tree_cselim; }
2476 virtual unsigned int execute (function *) { return tree_ssa_cs_elim (); }
2478 }; // class pass_cselim
2480 } // anon namespace
2482 gimple_opt_pass *
2483 make_pass_cselim (gcc::context *ctxt)
2484 {
2485 return new pass_cselim (ctxt);
2486 }