Clean up some minor white space issues in trans-decl.c and trans-expr.c
[official-gcc.git] / gcc / tree-ssa-phiopt.c
blob8da7a5c7f59637c592d1b76335d512f50021b449
1 /* Optimization of PHI nodes by converting them into straightline code.
2 Copyright (C) 2004-2016 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 3, or (at your option) any
9 later version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "backend.h"
24 #include "insn-codes.h"
25 #include "rtl.h"
26 #include "tree.h"
27 #include "gimple.h"
28 #include "cfghooks.h"
29 #include "tree-pass.h"
30 #include "ssa.h"
31 #include "optabs-tree.h"
32 #include "insn-config.h"
33 #include "gimple-pretty-print.h"
34 #include "fold-const.h"
35 #include "stor-layout.h"
36 #include "cfganal.h"
37 #include "gimplify.h"
38 #include "gimple-iterator.h"
39 #include "gimplify-me.h"
40 #include "tree-cfg.h"
41 #include "tree-dfa.h"
42 #include "domwalk.h"
43 #include "cfgloop.h"
44 #include "tree-data-ref.h"
45 #include "tree-scalar-evolution.h"
46 #include "tree-inline.h"
47 #include "params.h"
49 static unsigned int tree_ssa_phiopt_worker (bool, bool);
50 static bool conditional_replacement (basic_block, basic_block,
51 edge, edge, gphi *, tree, tree);
52 static gphi *factor_out_conditional_conversion (edge, edge, gphi *, tree, tree);
53 static int value_replacement (basic_block, basic_block,
54 edge, edge, gimple *, tree, tree);
55 static bool minmax_replacement (basic_block, basic_block,
56 edge, edge, gimple *, tree, tree);
57 static bool abs_replacement (basic_block, basic_block,
58 edge, edge, gimple *, tree, tree);
59 static bool cond_store_replacement (basic_block, basic_block, edge, edge,
60 hash_set<tree> *);
61 static bool cond_if_else_store_replacement (basic_block, basic_block, basic_block);
62 static hash_set<tree> * get_non_trapping ();
63 static void replace_phi_edge_with_variable (basic_block, edge, gimple *, tree);
64 static void hoist_adjacent_loads (basic_block, basic_block,
65 basic_block, basic_block);
66 static bool gate_hoist_loads (void);
68 /* This pass tries to transform conditional stores into unconditional
69 ones, enabling further simplifications with the simpler then and else
70 blocks. In particular it replaces this:
72 bb0:
73 if (cond) goto bb2; else goto bb1;
74 bb1:
75 *p = RHS;
76 bb2:
78 with
80 bb0:
81 if (cond) goto bb1; else goto bb2;
82 bb1:
83 condtmp' = *p;
84 bb2:
85 condtmp = PHI <RHS, condtmp'>
86 *p = condtmp;
88 This transformation can only be done under several constraints,
89 documented below. It also replaces:
91 bb0:
92 if (cond) goto bb2; else goto bb1;
93 bb1:
94 *p = RHS1;
95 goto bb3;
96 bb2:
97 *p = RHS2;
98 bb3:
100 with
102 bb0:
103 if (cond) goto bb3; else goto bb1;
104 bb1:
105 bb3:
106 condtmp = PHI <RHS1, RHS2>
107 *p = condtmp; */
109 static unsigned int
110 tree_ssa_cs_elim (void)
112 unsigned todo;
113 /* ??? We are not interested in loop related info, but the following
114 will create it, ICEing as we didn't init loops with pre-headers.
115 An interfacing issue of find_data_references_in_bb. */
116 loop_optimizer_init (LOOPS_NORMAL);
117 scev_initialize ();
118 todo = tree_ssa_phiopt_worker (true, false);
119 scev_finalize ();
120 loop_optimizer_finalize ();
121 return todo;
124 /* Return the singleton PHI in the SEQ of PHIs for edges E0 and E1. */
126 static gphi *
127 single_non_singleton_phi_for_edges (gimple_seq seq, edge e0, edge e1)
129 gimple_stmt_iterator i;
130 gphi *phi = NULL;
131 if (gimple_seq_singleton_p (seq))
132 return as_a <gphi *> (gsi_stmt (gsi_start (seq)));
133 for (i = gsi_start (seq); !gsi_end_p (i); gsi_next (&i))
135 gphi *p = as_a <gphi *> (gsi_stmt (i));
136 /* If the PHI arguments are equal then we can skip this PHI. */
137 if (operand_equal_for_phi_arg_p (gimple_phi_arg_def (p, e0->dest_idx),
138 gimple_phi_arg_def (p, e1->dest_idx)))
139 continue;
141 /* If we already have a PHI that has the two edge arguments are
142 different, then return it is not a singleton for these PHIs. */
143 if (phi)
144 return NULL;
146 phi = p;
148 return phi;
151 /* The core routine of conditional store replacement and normal
152 phi optimizations. Both share much of the infrastructure in how
153 to match applicable basic block patterns. DO_STORE_ELIM is true
154 when we want to do conditional store replacement, false otherwise.
155 DO_HOIST_LOADS is true when we want to hoist adjacent loads out
156 of diamond control flow patterns, false otherwise. */
157 static unsigned int
158 tree_ssa_phiopt_worker (bool do_store_elim, bool do_hoist_loads)
160 basic_block bb;
161 basic_block *bb_order;
162 unsigned n, i;
163 bool cfgchanged = false;
164 hash_set<tree> *nontrap = 0;
166 if (do_store_elim)
167 /* Calculate the set of non-trapping memory accesses. */
168 nontrap = get_non_trapping ();
170 /* Search every basic block for COND_EXPR we may be able to optimize.
172 We walk the blocks in order that guarantees that a block with
173 a single predecessor is processed before the predecessor.
174 This ensures that we collapse inner ifs before visiting the
175 outer ones, and also that we do not try to visit a removed
176 block. */
177 bb_order = single_pred_before_succ_order ();
178 n = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS;
180 for (i = 0; i < n; i++)
182 gimple *cond_stmt;
183 gphi *phi;
184 basic_block bb1, bb2;
185 edge e1, e2;
186 tree arg0, arg1;
188 bb = bb_order[i];
190 cond_stmt = last_stmt (bb);
191 /* Check to see if the last statement is a GIMPLE_COND. */
192 if (!cond_stmt
193 || gimple_code (cond_stmt) != GIMPLE_COND)
194 continue;
196 e1 = EDGE_SUCC (bb, 0);
197 bb1 = e1->dest;
198 e2 = EDGE_SUCC (bb, 1);
199 bb2 = e2->dest;
201 /* We cannot do the optimization on abnormal edges. */
202 if ((e1->flags & EDGE_ABNORMAL) != 0
203 || (e2->flags & EDGE_ABNORMAL) != 0)
204 continue;
206 /* If either bb1's succ or bb2 or bb2's succ is non NULL. */
207 if (EDGE_COUNT (bb1->succs) == 0
208 || bb2 == NULL
209 || EDGE_COUNT (bb2->succs) == 0)
210 continue;
212 /* Find the bb which is the fall through to the other. */
213 if (EDGE_SUCC (bb1, 0)->dest == bb2)
215 else if (EDGE_SUCC (bb2, 0)->dest == bb1)
217 std::swap (bb1, bb2);
218 std::swap (e1, e2);
220 else if (do_store_elim
221 && EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest)
223 basic_block bb3 = EDGE_SUCC (bb1, 0)->dest;
225 if (!single_succ_p (bb1)
226 || (EDGE_SUCC (bb1, 0)->flags & EDGE_FALLTHRU) == 0
227 || !single_succ_p (bb2)
228 || (EDGE_SUCC (bb2, 0)->flags & EDGE_FALLTHRU) == 0
229 || EDGE_COUNT (bb3->preds) != 2)
230 continue;
231 if (cond_if_else_store_replacement (bb1, bb2, bb3))
232 cfgchanged = true;
233 continue;
235 else if (do_hoist_loads
236 && EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest)
238 basic_block bb3 = EDGE_SUCC (bb1, 0)->dest;
240 if (!FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (cond_stmt)))
241 && single_succ_p (bb1)
242 && single_succ_p (bb2)
243 && single_pred_p (bb1)
244 && single_pred_p (bb2)
245 && EDGE_COUNT (bb->succs) == 2
246 && EDGE_COUNT (bb3->preds) == 2
247 /* If one edge or the other is dominant, a conditional move
248 is likely to perform worse than the well-predicted branch. */
249 && !predictable_edge_p (EDGE_SUCC (bb, 0))
250 && !predictable_edge_p (EDGE_SUCC (bb, 1)))
251 hoist_adjacent_loads (bb, bb1, bb2, bb3);
252 continue;
254 else
255 continue;
257 e1 = EDGE_SUCC (bb1, 0);
259 /* Make sure that bb1 is just a fall through. */
260 if (!single_succ_p (bb1)
261 || (e1->flags & EDGE_FALLTHRU) == 0)
262 continue;
264 /* Also make sure that bb1 only have one predecessor and that it
265 is bb. */
266 if (!single_pred_p (bb1)
267 || single_pred (bb1) != bb)
268 continue;
270 if (do_store_elim)
272 /* bb1 is the middle block, bb2 the join block, bb the split block,
273 e1 the fallthrough edge from bb1 to bb2. We can't do the
274 optimization if the join block has more than two predecessors. */
275 if (EDGE_COUNT (bb2->preds) > 2)
276 continue;
277 if (cond_store_replacement (bb1, bb2, e1, e2, nontrap))
278 cfgchanged = true;
280 else
282 gimple_seq phis = phi_nodes (bb2);
283 gimple_stmt_iterator gsi;
284 bool candorest = true;
286 /* Value replacement can work with more than one PHI
287 so try that first. */
288 for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
290 phi = as_a <gphi *> (gsi_stmt (gsi));
291 arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
292 arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
293 if (value_replacement (bb, bb1, e1, e2, phi, arg0, arg1) == 2)
295 candorest = false;
296 cfgchanged = true;
297 break;
301 if (!candorest)
302 continue;
304 phi = single_non_singleton_phi_for_edges (phis, e1, e2);
305 if (!phi)
306 continue;
308 arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
309 arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
311 /* Something is wrong if we cannot find the arguments in the PHI
312 node. */
313 gcc_assert (arg0 != NULL_TREE && arg1 != NULL_TREE);
315 gphi *newphi = factor_out_conditional_conversion (e1, e2, phi,
316 arg0, arg1);
317 if (newphi != NULL)
319 phi = newphi;
320 /* factor_out_conditional_conversion may create a new PHI in
321 BB2 and eliminate an existing PHI in BB2. Recompute values
322 that may be affected by that change. */
323 arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
324 arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
325 gcc_assert (arg0 != NULL_TREE && arg1 != NULL_TREE);
328 /* Do the replacement of conditional if it can be done. */
329 if (conditional_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
330 cfgchanged = true;
331 else if (abs_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
332 cfgchanged = true;
333 else if (minmax_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
334 cfgchanged = true;
338 free (bb_order);
340 if (do_store_elim)
341 delete nontrap;
342 /* If the CFG has changed, we should cleanup the CFG. */
343 if (cfgchanged && do_store_elim)
345 /* In cond-store replacement we have added some loads on edges
346 and new VOPS (as we moved the store, and created a load). */
347 gsi_commit_edge_inserts ();
348 return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
350 else if (cfgchanged)
351 return TODO_cleanup_cfg;
352 return 0;
355 /* Replace PHI node element whose edge is E in block BB with variable NEW.
356 Remove the edge from COND_BLOCK which does not lead to BB (COND_BLOCK
357 is known to have two edges, one of which must reach BB). */
359 static void
360 replace_phi_edge_with_variable (basic_block cond_block,
361 edge e, gimple *phi, tree new_tree)
363 basic_block bb = gimple_bb (phi);
364 basic_block block_to_remove;
365 gimple_stmt_iterator gsi;
367 /* Change the PHI argument to new. */
368 SET_USE (PHI_ARG_DEF_PTR (phi, e->dest_idx), new_tree);
370 /* Remove the empty basic block. */
371 if (EDGE_SUCC (cond_block, 0)->dest == bb)
373 EDGE_SUCC (cond_block, 0)->flags |= EDGE_FALLTHRU;
374 EDGE_SUCC (cond_block, 0)->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
375 EDGE_SUCC (cond_block, 0)->probability = REG_BR_PROB_BASE;
376 EDGE_SUCC (cond_block, 0)->count += EDGE_SUCC (cond_block, 1)->count;
378 block_to_remove = EDGE_SUCC (cond_block, 1)->dest;
380 else
382 EDGE_SUCC (cond_block, 1)->flags |= EDGE_FALLTHRU;
383 EDGE_SUCC (cond_block, 1)->flags
384 &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
385 EDGE_SUCC (cond_block, 1)->probability = REG_BR_PROB_BASE;
386 EDGE_SUCC (cond_block, 1)->count += EDGE_SUCC (cond_block, 0)->count;
388 block_to_remove = EDGE_SUCC (cond_block, 0)->dest;
390 delete_basic_block (block_to_remove);
392 /* Eliminate the COND_EXPR at the end of COND_BLOCK. */
393 gsi = gsi_last_bb (cond_block);
394 gsi_remove (&gsi, true);
396 if (dump_file && (dump_flags & TDF_DETAILS))
397 fprintf (dump_file,
398 "COND_EXPR in block %d and PHI in block %d converted to straightline code.\n",
399 cond_block->index,
400 bb->index);
403 /* PR66726: Factor conversion out of COND_EXPR. If the arguments of the PHI
404 stmt are CONVERT_STMT, factor out the conversion and perform the conversion
405 to the result of PHI stmt. Return the newly-created PHI, if any. */
407 static gphi *
408 factor_out_conditional_conversion (edge e0, edge e1, gphi *phi,
409 tree arg0, tree arg1)
411 gimple *arg0_def_stmt = NULL, *arg1_def_stmt = NULL, *new_stmt;
412 tree new_arg0 = NULL_TREE, new_arg1 = NULL_TREE;
413 tree temp, result;
414 gphi *newphi;
415 gimple_stmt_iterator gsi, gsi_for_def;
416 source_location locus = gimple_location (phi);
417 enum tree_code convert_code;
419 /* Handle only PHI statements with two arguments. TODO: If all
420 other arguments to PHI are INTEGER_CST or if their defining
421 statement have the same unary operation, we can handle more
422 than two arguments too. */
423 if (gimple_phi_num_args (phi) != 2)
424 return NULL;
426 /* First canonicalize to simplify tests. */
427 if (TREE_CODE (arg0) != SSA_NAME)
429 std::swap (arg0, arg1);
430 std::swap (e0, e1);
433 if (TREE_CODE (arg0) != SSA_NAME
434 || (TREE_CODE (arg1) != SSA_NAME
435 && TREE_CODE (arg1) != INTEGER_CST))
436 return NULL;
438 /* Check if arg0 is an SSA_NAME and the stmt which defines arg0 is
439 a conversion. */
440 arg0_def_stmt = SSA_NAME_DEF_STMT (arg0);
441 if (!is_gimple_assign (arg0_def_stmt)
442 || !gimple_assign_cast_p (arg0_def_stmt))
443 return NULL;
445 /* Use the RHS as new_arg0. */
446 convert_code = gimple_assign_rhs_code (arg0_def_stmt);
447 new_arg0 = gimple_assign_rhs1 (arg0_def_stmt);
448 if (convert_code == VIEW_CONVERT_EXPR)
449 new_arg0 = TREE_OPERAND (new_arg0, 0);
451 if (TREE_CODE (arg1) == SSA_NAME)
453 /* Check if arg1 is an SSA_NAME and the stmt which defines arg1
454 is a conversion. */
455 arg1_def_stmt = SSA_NAME_DEF_STMT (arg1);
456 if (!is_gimple_assign (arg1_def_stmt)
457 || gimple_assign_rhs_code (arg1_def_stmt) != convert_code)
458 return NULL;
460 /* Use the RHS as new_arg1. */
461 new_arg1 = gimple_assign_rhs1 (arg1_def_stmt);
462 if (convert_code == VIEW_CONVERT_EXPR)
463 new_arg1 = TREE_OPERAND (new_arg1, 0);
465 else
467 /* If arg1 is an INTEGER_CST, fold it to new type. */
468 if (INTEGRAL_TYPE_P (TREE_TYPE (new_arg0))
469 && int_fits_type_p (arg1, TREE_TYPE (new_arg0)))
471 if (gimple_assign_cast_p (arg0_def_stmt))
472 new_arg1 = fold_convert (TREE_TYPE (new_arg0), arg1);
473 else
474 return NULL;
476 else
477 return NULL;
480 /* If arg0/arg1 have > 1 use, then this transformation actually increases
481 the number of expressions evaluated at runtime. */
482 if (!has_single_use (arg0)
483 || (arg1_def_stmt && !has_single_use (arg1)))
484 return NULL;
486 /* If types of new_arg0 and new_arg1 are different bailout. */
487 if (!types_compatible_p (TREE_TYPE (new_arg0), TREE_TYPE (new_arg1)))
488 return NULL;
490 /* Create a new PHI stmt. */
491 result = PHI_RESULT (phi);
492 temp = make_ssa_name (TREE_TYPE (new_arg0), NULL);
493 newphi = create_phi_node (temp, gimple_bb (phi));
495 if (dump_file && (dump_flags & TDF_DETAILS))
497 fprintf (dump_file, "PHI ");
498 print_generic_expr (dump_file, gimple_phi_result (phi), 0);
499 fprintf (dump_file,
500 " changed to factor conversion out from COND_EXPR.\n");
501 fprintf (dump_file, "New stmt with CAST that defines ");
502 print_generic_expr (dump_file, result, 0);
503 fprintf (dump_file, ".\n");
506 /* Remove the old cast(s) that has single use. */
507 gsi_for_def = gsi_for_stmt (arg0_def_stmt);
508 gsi_remove (&gsi_for_def, true);
509 release_defs (arg0_def_stmt);
511 if (arg1_def_stmt)
513 gsi_for_def = gsi_for_stmt (arg1_def_stmt);
514 gsi_remove (&gsi_for_def, true);
515 release_defs (arg1_def_stmt);
518 add_phi_arg (newphi, new_arg0, e0, locus);
519 add_phi_arg (newphi, new_arg1, e1, locus);
521 /* Create the conversion stmt and insert it. */
522 if (convert_code == VIEW_CONVERT_EXPR)
523 temp = fold_build1 (VIEW_CONVERT_EXPR, TREE_TYPE (result), temp);
524 new_stmt = gimple_build_assign (result, convert_code, temp);
525 gsi = gsi_after_labels (gimple_bb (phi));
526 gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
528 /* Remove the original PHI stmt. */
529 gsi = gsi_for_stmt (phi);
530 gsi_remove (&gsi, true);
531 return newphi;
534 /* The function conditional_replacement does the main work of doing the
535 conditional replacement. Return true if the replacement is done.
536 Otherwise return false.
537 BB is the basic block where the replacement is going to be done on. ARG0
538 is argument 0 from PHI. Likewise for ARG1. */
540 static bool
541 conditional_replacement (basic_block cond_bb, basic_block middle_bb,
542 edge e0, edge e1, gphi *phi,
543 tree arg0, tree arg1)
545 tree result;
546 gimple *stmt;
547 gassign *new_stmt;
548 tree cond;
549 gimple_stmt_iterator gsi;
550 edge true_edge, false_edge;
551 tree new_var, new_var2;
552 bool neg;
554 /* FIXME: Gimplification of complex type is too hard for now. */
555 /* We aren't prepared to handle vectors either (and it is a question
556 if it would be worthwhile anyway). */
557 if (!(INTEGRAL_TYPE_P (TREE_TYPE (arg0))
558 || POINTER_TYPE_P (TREE_TYPE (arg0)))
559 || !(INTEGRAL_TYPE_P (TREE_TYPE (arg1))
560 || POINTER_TYPE_P (TREE_TYPE (arg1))))
561 return false;
563 /* The PHI arguments have the constants 0 and 1, or 0 and -1, then
564 convert it to the conditional. */
565 if ((integer_zerop (arg0) && integer_onep (arg1))
566 || (integer_zerop (arg1) && integer_onep (arg0)))
567 neg = false;
568 else if ((integer_zerop (arg0) && integer_all_onesp (arg1))
569 || (integer_zerop (arg1) && integer_all_onesp (arg0)))
570 neg = true;
571 else
572 return false;
574 if (!empty_block_p (middle_bb))
575 return false;
577 /* At this point we know we have a GIMPLE_COND with two successors.
578 One successor is BB, the other successor is an empty block which
579 falls through into BB.
581 There is a single PHI node at the join point (BB) and its arguments
582 are constants (0, 1) or (0, -1).
584 So, given the condition COND, and the two PHI arguments, we can
585 rewrite this PHI into non-branching code:
587 dest = (COND) or dest = COND'
589 We use the condition as-is if the argument associated with the
590 true edge has the value one or the argument associated with the
591 false edge as the value zero. Note that those conditions are not
592 the same since only one of the outgoing edges from the GIMPLE_COND
593 will directly reach BB and thus be associated with an argument. */
595 stmt = last_stmt (cond_bb);
596 result = PHI_RESULT (phi);
598 /* To handle special cases like floating point comparison, it is easier and
599 less error-prone to build a tree and gimplify it on the fly though it is
600 less efficient. */
601 cond = fold_build2_loc (gimple_location (stmt),
602 gimple_cond_code (stmt), boolean_type_node,
603 gimple_cond_lhs (stmt), gimple_cond_rhs (stmt));
605 /* We need to know which is the true edge and which is the false
606 edge so that we know when to invert the condition below. */
607 extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
608 if ((e0 == true_edge && integer_zerop (arg0))
609 || (e0 == false_edge && !integer_zerop (arg0))
610 || (e1 == true_edge && integer_zerop (arg1))
611 || (e1 == false_edge && !integer_zerop (arg1)))
612 cond = fold_build1_loc (gimple_location (stmt),
613 TRUTH_NOT_EXPR, TREE_TYPE (cond), cond);
615 if (neg)
617 cond = fold_convert_loc (gimple_location (stmt),
618 TREE_TYPE (result), cond);
619 cond = fold_build1_loc (gimple_location (stmt),
620 NEGATE_EXPR, TREE_TYPE (cond), cond);
623 /* Insert our new statements at the end of conditional block before the
624 COND_STMT. */
625 gsi = gsi_for_stmt (stmt);
626 new_var = force_gimple_operand_gsi (&gsi, cond, true, NULL, true,
627 GSI_SAME_STMT);
629 if (!useless_type_conversion_p (TREE_TYPE (result), TREE_TYPE (new_var)))
631 source_location locus_0, locus_1;
633 new_var2 = make_ssa_name (TREE_TYPE (result));
634 new_stmt = gimple_build_assign (new_var2, CONVERT_EXPR, new_var);
635 gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
636 new_var = new_var2;
638 /* Set the locus to the first argument, unless is doesn't have one. */
639 locus_0 = gimple_phi_arg_location (phi, 0);
640 locus_1 = gimple_phi_arg_location (phi, 1);
641 if (locus_0 == UNKNOWN_LOCATION)
642 locus_0 = locus_1;
643 gimple_set_location (new_stmt, locus_0);
646 replace_phi_edge_with_variable (cond_bb, e1, phi, new_var);
647 reset_flow_sensitive_info_in_bb (cond_bb);
649 /* Note that we optimized this PHI. */
650 return true;
653 /* Update *ARG which is defined in STMT so that it contains the
654 computed value if that seems profitable. Return true if the
655 statement is made dead by that rewriting. */
657 static bool
658 jump_function_from_stmt (tree *arg, gimple *stmt)
660 enum tree_code code = gimple_assign_rhs_code (stmt);
661 if (code == ADDR_EXPR)
663 /* For arg = &p->i transform it to p, if possible. */
664 tree rhs1 = gimple_assign_rhs1 (stmt);
665 HOST_WIDE_INT offset;
666 tree tem = get_addr_base_and_unit_offset (TREE_OPERAND (rhs1, 0),
667 &offset);
668 if (tem
669 && TREE_CODE (tem) == MEM_REF
670 && (mem_ref_offset (tem) + offset) == 0)
672 *arg = TREE_OPERAND (tem, 0);
673 return true;
676 /* TODO: Much like IPA-CP jump-functions we want to handle constant
677 additions symbolically here, and we'd need to update the comparison
678 code that compares the arg + cst tuples in our caller. For now the
679 code above exactly handles the VEC_BASE pattern from vec.h. */
680 return false;
683 /* RHS is a source argument in a BIT_AND_EXPR which feeds a conditional
684 of the form SSA_NAME NE 0.
686 If RHS is fed by a simple EQ_EXPR comparison of two values, see if
687 the two input values of the EQ_EXPR match arg0 and arg1.
689 If so update *code and return TRUE. Otherwise return FALSE. */
691 static bool
692 rhs_is_fed_for_value_replacement (const_tree arg0, const_tree arg1,
693 enum tree_code *code, const_tree rhs)
695 /* Obviously if RHS is not an SSA_NAME, we can't look at the defining
696 statement. */
697 if (TREE_CODE (rhs) == SSA_NAME)
699 gimple *def1 = SSA_NAME_DEF_STMT (rhs);
701 /* Verify the defining statement has an EQ_EXPR on the RHS. */
702 if (is_gimple_assign (def1) && gimple_assign_rhs_code (def1) == EQ_EXPR)
704 /* Finally verify the source operands of the EQ_EXPR are equal
705 to arg0 and arg1. */
706 tree op0 = gimple_assign_rhs1 (def1);
707 tree op1 = gimple_assign_rhs2 (def1);
708 if ((operand_equal_for_phi_arg_p (arg0, op0)
709 && operand_equal_for_phi_arg_p (arg1, op1))
710 || (operand_equal_for_phi_arg_p (arg0, op1)
711 && operand_equal_for_phi_arg_p (arg1, op0)))
713 /* We will perform the optimization. */
714 *code = gimple_assign_rhs_code (def1);
715 return true;
719 return false;
722 /* Return TRUE if arg0/arg1 are equal to the rhs/lhs or lhs/rhs of COND.
724 Also return TRUE if arg0/arg1 are equal to the source arguments of a
725 an EQ comparison feeding a BIT_AND_EXPR which feeds COND.
727 Return FALSE otherwise. */
729 static bool
730 operand_equal_for_value_replacement (const_tree arg0, const_tree arg1,
731 enum tree_code *code, gimple *cond)
733 gimple *def;
734 tree lhs = gimple_cond_lhs (cond);
735 tree rhs = gimple_cond_rhs (cond);
737 if ((operand_equal_for_phi_arg_p (arg0, lhs)
738 && operand_equal_for_phi_arg_p (arg1, rhs))
739 || (operand_equal_for_phi_arg_p (arg1, lhs)
740 && operand_equal_for_phi_arg_p (arg0, rhs)))
741 return true;
743 /* Now handle more complex case where we have an EQ comparison
744 which feeds a BIT_AND_EXPR which feeds COND.
746 First verify that COND is of the form SSA_NAME NE 0. */
747 if (*code != NE_EXPR || !integer_zerop (rhs)
748 || TREE_CODE (lhs) != SSA_NAME)
749 return false;
751 /* Now ensure that SSA_NAME is set by a BIT_AND_EXPR. */
752 def = SSA_NAME_DEF_STMT (lhs);
753 if (!is_gimple_assign (def) || gimple_assign_rhs_code (def) != BIT_AND_EXPR)
754 return false;
756 /* Now verify arg0/arg1 correspond to the source arguments of an
757 EQ comparison feeding the BIT_AND_EXPR. */
759 tree tmp = gimple_assign_rhs1 (def);
760 if (rhs_is_fed_for_value_replacement (arg0, arg1, code, tmp))
761 return true;
763 tmp = gimple_assign_rhs2 (def);
764 if (rhs_is_fed_for_value_replacement (arg0, arg1, code, tmp))
765 return true;
767 return false;
770 /* Returns true if ARG is a neutral element for operation CODE
771 on the RIGHT side. */
773 static bool
774 neutral_element_p (tree_code code, tree arg, bool right)
776 switch (code)
778 case PLUS_EXPR:
779 case BIT_IOR_EXPR:
780 case BIT_XOR_EXPR:
781 return integer_zerop (arg);
783 case LROTATE_EXPR:
784 case RROTATE_EXPR:
785 case LSHIFT_EXPR:
786 case RSHIFT_EXPR:
787 case MINUS_EXPR:
788 case POINTER_PLUS_EXPR:
789 return right && integer_zerop (arg);
791 case MULT_EXPR:
792 return integer_onep (arg);
794 case TRUNC_DIV_EXPR:
795 case CEIL_DIV_EXPR:
796 case FLOOR_DIV_EXPR:
797 case ROUND_DIV_EXPR:
798 case EXACT_DIV_EXPR:
799 return right && integer_onep (arg);
801 case BIT_AND_EXPR:
802 return integer_all_onesp (arg);
804 default:
805 return false;
809 /* Returns true if ARG is an absorbing element for operation CODE. */
811 static bool
812 absorbing_element_p (tree_code code, tree arg)
814 switch (code)
816 case BIT_IOR_EXPR:
817 return integer_all_onesp (arg);
819 case MULT_EXPR:
820 case BIT_AND_EXPR:
821 return integer_zerop (arg);
823 default:
824 return false;
828 /* The function value_replacement does the main work of doing the value
829 replacement. Return non-zero if the replacement is done. Otherwise return
830 0. If we remove the middle basic block, return 2.
831 BB is the basic block where the replacement is going to be done on. ARG0
832 is argument 0 from the PHI. Likewise for ARG1. */
834 static int
835 value_replacement (basic_block cond_bb, basic_block middle_bb,
836 edge e0, edge e1, gimple *phi,
837 tree arg0, tree arg1)
839 gimple_stmt_iterator gsi;
840 gimple *cond;
841 edge true_edge, false_edge;
842 enum tree_code code;
843 bool emtpy_or_with_defined_p = true;
845 /* If the type says honor signed zeros we cannot do this
846 optimization. */
847 if (HONOR_SIGNED_ZEROS (arg1))
848 return 0;
850 /* If there is a statement in MIDDLE_BB that defines one of the PHI
851 arguments, then adjust arg0 or arg1. */
852 gsi = gsi_start_nondebug_after_labels_bb (middle_bb);
853 while (!gsi_end_p (gsi))
855 gimple *stmt = gsi_stmt (gsi);
856 tree lhs;
857 gsi_next_nondebug (&gsi);
858 if (!is_gimple_assign (stmt))
860 emtpy_or_with_defined_p = false;
861 continue;
863 /* Now try to adjust arg0 or arg1 according to the computation
864 in the statement. */
865 lhs = gimple_assign_lhs (stmt);
866 if (!(lhs == arg0
867 && jump_function_from_stmt (&arg0, stmt))
868 || (lhs == arg1
869 && jump_function_from_stmt (&arg1, stmt)))
870 emtpy_or_with_defined_p = false;
873 cond = last_stmt (cond_bb);
874 code = gimple_cond_code (cond);
876 /* This transformation is only valid for equality comparisons. */
877 if (code != NE_EXPR && code != EQ_EXPR)
878 return 0;
880 /* We need to know which is the true edge and which is the false
881 edge so that we know if have abs or negative abs. */
882 extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
884 /* At this point we know we have a COND_EXPR with two successors.
885 One successor is BB, the other successor is an empty block which
886 falls through into BB.
888 The condition for the COND_EXPR is known to be NE_EXPR or EQ_EXPR.
890 There is a single PHI node at the join point (BB) with two arguments.
892 We now need to verify that the two arguments in the PHI node match
893 the two arguments to the equality comparison. */
895 if (operand_equal_for_value_replacement (arg0, arg1, &code, cond))
897 edge e;
898 tree arg;
900 /* For NE_EXPR, we want to build an assignment result = arg where
901 arg is the PHI argument associated with the true edge. For
902 EQ_EXPR we want the PHI argument associated with the false edge. */
903 e = (code == NE_EXPR ? true_edge : false_edge);
905 /* Unfortunately, E may not reach BB (it may instead have gone to
906 OTHER_BLOCK). If that is the case, then we want the single outgoing
907 edge from OTHER_BLOCK which reaches BB and represents the desired
908 path from COND_BLOCK. */
909 if (e->dest == middle_bb)
910 e = single_succ_edge (e->dest);
912 /* Now we know the incoming edge to BB that has the argument for the
913 RHS of our new assignment statement. */
914 if (e0 == e)
915 arg = arg0;
916 else
917 arg = arg1;
919 /* If the middle basic block was empty or is defining the
920 PHI arguments and this is a single phi where the args are different
921 for the edges e0 and e1 then we can remove the middle basic block. */
922 if (emtpy_or_with_defined_p
923 && single_non_singleton_phi_for_edges (phi_nodes (gimple_bb (phi)),
924 e0, e1) == phi)
926 replace_phi_edge_with_variable (cond_bb, e1, phi, arg);
927 /* Note that we optimized this PHI. */
928 return 2;
930 else
932 /* Replace the PHI arguments with arg. */
933 SET_PHI_ARG_DEF (phi, e0->dest_idx, arg);
934 SET_PHI_ARG_DEF (phi, e1->dest_idx, arg);
935 if (dump_file && (dump_flags & TDF_DETAILS))
937 fprintf (dump_file, "PHI ");
938 print_generic_expr (dump_file, gimple_phi_result (phi), 0);
939 fprintf (dump_file, " reduced for COND_EXPR in block %d to ",
940 cond_bb->index);
941 print_generic_expr (dump_file, arg, 0);
942 fprintf (dump_file, ".\n");
944 return 1;
949 /* Now optimize (x != 0) ? x + y : y to just y.
950 The following condition is too restrictive, there can easily be another
951 stmt in middle_bb, for instance a CONVERT_EXPR for the second argument. */
952 gimple *assign = last_and_only_stmt (middle_bb);
953 if (!assign || gimple_code (assign) != GIMPLE_ASSIGN
954 || gimple_assign_rhs_class (assign) != GIMPLE_BINARY_RHS
955 || (!INTEGRAL_TYPE_P (TREE_TYPE (arg0))
956 && !POINTER_TYPE_P (TREE_TYPE (arg0))))
957 return 0;
959 /* Punt if there are (degenerate) PHIs in middle_bb, there should not be. */
960 if (!gimple_seq_empty_p (phi_nodes (middle_bb)))
961 return 0;
963 /* Only transform if it removes the condition. */
964 if (!single_non_singleton_phi_for_edges (phi_nodes (gimple_bb (phi)), e0, e1))
965 return 0;
967 /* Size-wise, this is always profitable. */
968 if (optimize_bb_for_speed_p (cond_bb)
969 /* The special case is useless if it has a low probability. */
970 && profile_status_for_fn (cfun) != PROFILE_ABSENT
971 && EDGE_PRED (middle_bb, 0)->probability < PROB_EVEN
972 /* If assign is cheap, there is no point avoiding it. */
973 && estimate_num_insns (assign, &eni_time_weights)
974 >= 3 * estimate_num_insns (cond, &eni_time_weights))
975 return 0;
977 tree lhs = gimple_assign_lhs (assign);
978 tree rhs1 = gimple_assign_rhs1 (assign);
979 tree rhs2 = gimple_assign_rhs2 (assign);
980 enum tree_code code_def = gimple_assign_rhs_code (assign);
981 tree cond_lhs = gimple_cond_lhs (cond);
982 tree cond_rhs = gimple_cond_rhs (cond);
984 if (((code == NE_EXPR && e1 == false_edge)
985 || (code == EQ_EXPR && e1 == true_edge))
986 && arg0 == lhs
987 && ((arg1 == rhs1
988 && operand_equal_for_phi_arg_p (rhs2, cond_lhs)
989 && neutral_element_p (code_def, cond_rhs, true))
990 || (arg1 == rhs2
991 && operand_equal_for_phi_arg_p (rhs1, cond_lhs)
992 && neutral_element_p (code_def, cond_rhs, false))
993 || (operand_equal_for_phi_arg_p (arg1, cond_rhs)
994 && (operand_equal_for_phi_arg_p (rhs2, cond_lhs)
995 || operand_equal_for_phi_arg_p (rhs1, cond_lhs))
996 && absorbing_element_p (code_def, cond_rhs))))
998 gsi = gsi_for_stmt (cond);
999 if (INTEGRAL_TYPE_P (TREE_TYPE (lhs)))
1001 /* Moving ASSIGN might change VR of lhs, e.g. when moving u_6
1002 def-stmt in:
1003 if (n_5 != 0)
1004 goto <bb 3>;
1005 else
1006 goto <bb 4>;
1008 <bb 3>:
1009 # RANGE [0, 4294967294]
1010 u_6 = n_5 + 4294967295;
1012 <bb 4>:
1013 # u_3 = PHI <u_6(3), 4294967295(2)> */
1014 SSA_NAME_RANGE_INFO (lhs) = NULL;
1015 /* If available, we can use VR of phi result at least. */
1016 tree phires = gimple_phi_result (phi);
1017 struct range_info_def *phires_range_info
1018 = SSA_NAME_RANGE_INFO (phires);
1019 if (phires_range_info)
1020 duplicate_ssa_name_range_info (lhs, SSA_NAME_RANGE_TYPE (phires),
1021 phires_range_info);
1023 gimple_stmt_iterator gsi_from = gsi_for_stmt (assign);
1024 gsi_move_before (&gsi_from, &gsi);
1025 replace_phi_edge_with_variable (cond_bb, e1, phi, lhs);
1026 return 2;
1029 return 0;
1032 /* The function minmax_replacement does the main work of doing the minmax
1033 replacement. Return true if the replacement is done. Otherwise return
1034 false.
1035 BB is the basic block where the replacement is going to be done on. ARG0
1036 is argument 0 from the PHI. Likewise for ARG1. */
1038 static bool
1039 minmax_replacement (basic_block cond_bb, basic_block middle_bb,
1040 edge e0, edge e1, gimple *phi,
1041 tree arg0, tree arg1)
1043 tree result, type;
1044 gcond *cond;
1045 gassign *new_stmt;
1046 edge true_edge, false_edge;
1047 enum tree_code cmp, minmax, ass_code;
1048 tree smaller, larger, arg_true, arg_false;
1049 gimple_stmt_iterator gsi, gsi_from;
1051 type = TREE_TYPE (PHI_RESULT (phi));
1053 /* The optimization may be unsafe due to NaNs. */
1054 if (HONOR_NANS (type))
1055 return false;
1057 cond = as_a <gcond *> (last_stmt (cond_bb));
1058 cmp = gimple_cond_code (cond);
1060 /* This transformation is only valid for order comparisons. Record which
1061 operand is smaller/larger if the result of the comparison is true. */
1062 if (cmp == LT_EXPR || cmp == LE_EXPR)
1064 smaller = gimple_cond_lhs (cond);
1065 larger = gimple_cond_rhs (cond);
1067 else if (cmp == GT_EXPR || cmp == GE_EXPR)
1069 smaller = gimple_cond_rhs (cond);
1070 larger = gimple_cond_lhs (cond);
1072 else
1073 return false;
1075 /* We need to know which is the true edge and which is the false
1076 edge so that we know if have abs or negative abs. */
1077 extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
1079 /* Forward the edges over the middle basic block. */
1080 if (true_edge->dest == middle_bb)
1081 true_edge = EDGE_SUCC (true_edge->dest, 0);
1082 if (false_edge->dest == middle_bb)
1083 false_edge = EDGE_SUCC (false_edge->dest, 0);
1085 if (true_edge == e0)
1087 gcc_assert (false_edge == e1);
1088 arg_true = arg0;
1089 arg_false = arg1;
1091 else
1093 gcc_assert (false_edge == e0);
1094 gcc_assert (true_edge == e1);
1095 arg_true = arg1;
1096 arg_false = arg0;
1099 if (empty_block_p (middle_bb))
1101 if (operand_equal_for_phi_arg_p (arg_true, smaller)
1102 && operand_equal_for_phi_arg_p (arg_false, larger))
1104 /* Case
1106 if (smaller < larger)
1107 rslt = smaller;
1108 else
1109 rslt = larger; */
1110 minmax = MIN_EXPR;
1112 else if (operand_equal_for_phi_arg_p (arg_false, smaller)
1113 && operand_equal_for_phi_arg_p (arg_true, larger))
1114 minmax = MAX_EXPR;
1115 else
1116 return false;
1118 else
1120 /* Recognize the following case, assuming d <= u:
1122 if (a <= u)
1123 b = MAX (a, d);
1124 x = PHI <b, u>
1126 This is equivalent to
1128 b = MAX (a, d);
1129 x = MIN (b, u); */
1131 gimple *assign = last_and_only_stmt (middle_bb);
1132 tree lhs, op0, op1, bound;
1134 if (!assign
1135 || gimple_code (assign) != GIMPLE_ASSIGN)
1136 return false;
1138 lhs = gimple_assign_lhs (assign);
1139 ass_code = gimple_assign_rhs_code (assign);
1140 if (ass_code != MAX_EXPR && ass_code != MIN_EXPR)
1141 return false;
1142 op0 = gimple_assign_rhs1 (assign);
1143 op1 = gimple_assign_rhs2 (assign);
1145 if (true_edge->src == middle_bb)
1147 /* We got here if the condition is true, i.e., SMALLER < LARGER. */
1148 if (!operand_equal_for_phi_arg_p (lhs, arg_true))
1149 return false;
1151 if (operand_equal_for_phi_arg_p (arg_false, larger))
1153 /* Case
1155 if (smaller < larger)
1157 r' = MAX_EXPR (smaller, bound)
1159 r = PHI <r', larger> --> to be turned to MIN_EXPR. */
1160 if (ass_code != MAX_EXPR)
1161 return false;
1163 minmax = MIN_EXPR;
1164 if (operand_equal_for_phi_arg_p (op0, smaller))
1165 bound = op1;
1166 else if (operand_equal_for_phi_arg_p (op1, smaller))
1167 bound = op0;
1168 else
1169 return false;
1171 /* We need BOUND <= LARGER. */
1172 if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
1173 bound, larger)))
1174 return false;
1176 else if (operand_equal_for_phi_arg_p (arg_false, smaller))
1178 /* Case
1180 if (smaller < larger)
1182 r' = MIN_EXPR (larger, bound)
1184 r = PHI <r', smaller> --> to be turned to MAX_EXPR. */
1185 if (ass_code != MIN_EXPR)
1186 return false;
1188 minmax = MAX_EXPR;
1189 if (operand_equal_for_phi_arg_p (op0, larger))
1190 bound = op1;
1191 else if (operand_equal_for_phi_arg_p (op1, larger))
1192 bound = op0;
1193 else
1194 return false;
1196 /* We need BOUND >= SMALLER. */
1197 if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
1198 bound, smaller)))
1199 return false;
1201 else
1202 return false;
1204 else
1206 /* We got here if the condition is false, i.e., SMALLER > LARGER. */
1207 if (!operand_equal_for_phi_arg_p (lhs, arg_false))
1208 return false;
1210 if (operand_equal_for_phi_arg_p (arg_true, larger))
1212 /* Case
1214 if (smaller > larger)
1216 r' = MIN_EXPR (smaller, bound)
1218 r = PHI <r', larger> --> to be turned to MAX_EXPR. */
1219 if (ass_code != MIN_EXPR)
1220 return false;
1222 minmax = MAX_EXPR;
1223 if (operand_equal_for_phi_arg_p (op0, smaller))
1224 bound = op1;
1225 else if (operand_equal_for_phi_arg_p (op1, smaller))
1226 bound = op0;
1227 else
1228 return false;
1230 /* We need BOUND >= LARGER. */
1231 if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
1232 bound, larger)))
1233 return false;
1235 else if (operand_equal_for_phi_arg_p (arg_true, smaller))
1237 /* Case
1239 if (smaller > larger)
1241 r' = MAX_EXPR (larger, bound)
1243 r = PHI <r', smaller> --> to be turned to MIN_EXPR. */
1244 if (ass_code != MAX_EXPR)
1245 return false;
1247 minmax = MIN_EXPR;
1248 if (operand_equal_for_phi_arg_p (op0, larger))
1249 bound = op1;
1250 else if (operand_equal_for_phi_arg_p (op1, larger))
1251 bound = op0;
1252 else
1253 return false;
1255 /* We need BOUND <= SMALLER. */
1256 if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
1257 bound, smaller)))
1258 return false;
1260 else
1261 return false;
1264 /* Move the statement from the middle block. */
1265 gsi = gsi_last_bb (cond_bb);
1266 gsi_from = gsi_last_nondebug_bb (middle_bb);
1267 gsi_move_before (&gsi_from, &gsi);
1270 /* Create an SSA var to hold the min/max result. If we're the only
1271 things setting the target PHI, then we can clone the PHI
1272 variable. Otherwise we must create a new one. */
1273 result = PHI_RESULT (phi);
1274 if (EDGE_COUNT (gimple_bb (phi)->preds) == 2)
1275 result = duplicate_ssa_name (result, NULL);
1276 else
1277 result = make_ssa_name (TREE_TYPE (result));
1279 /* Emit the statement to compute min/max. */
1280 new_stmt = gimple_build_assign (result, minmax, arg0, arg1);
1281 gsi = gsi_last_bb (cond_bb);
1282 gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
1284 replace_phi_edge_with_variable (cond_bb, e1, phi, result);
1285 reset_flow_sensitive_info_in_bb (cond_bb);
1287 return true;
1290 /* The function absolute_replacement does the main work of doing the absolute
1291 replacement. Return true if the replacement is done. Otherwise return
1292 false.
1293 bb is the basic block where the replacement is going to be done on. arg0
1294 is argument 0 from the phi. Likewise for arg1. */
1296 static bool
1297 abs_replacement (basic_block cond_bb, basic_block middle_bb,
1298 edge e0 ATTRIBUTE_UNUSED, edge e1,
1299 gimple *phi, tree arg0, tree arg1)
1301 tree result;
1302 gassign *new_stmt;
1303 gimple *cond;
1304 gimple_stmt_iterator gsi;
1305 edge true_edge, false_edge;
1306 gimple *assign;
1307 edge e;
1308 tree rhs, lhs;
1309 bool negate;
1310 enum tree_code cond_code;
1312 /* If the type says honor signed zeros we cannot do this
1313 optimization. */
1314 if (HONOR_SIGNED_ZEROS (arg1))
1315 return false;
1317 /* OTHER_BLOCK must have only one executable statement which must have the
1318 form arg0 = -arg1 or arg1 = -arg0. */
1320 assign = last_and_only_stmt (middle_bb);
1321 /* If we did not find the proper negation assignment, then we can not
1322 optimize. */
1323 if (assign == NULL)
1324 return false;
1326 /* If we got here, then we have found the only executable statement
1327 in OTHER_BLOCK. If it is anything other than arg = -arg1 or
1328 arg1 = -arg0, then we can not optimize. */
1329 if (gimple_code (assign) != GIMPLE_ASSIGN)
1330 return false;
1332 lhs = gimple_assign_lhs (assign);
1334 if (gimple_assign_rhs_code (assign) != NEGATE_EXPR)
1335 return false;
1337 rhs = gimple_assign_rhs1 (assign);
1339 /* The assignment has to be arg0 = -arg1 or arg1 = -arg0. */
1340 if (!(lhs == arg0 && rhs == arg1)
1341 && !(lhs == arg1 && rhs == arg0))
1342 return false;
1344 cond = last_stmt (cond_bb);
1345 result = PHI_RESULT (phi);
1347 /* Only relationals comparing arg[01] against zero are interesting. */
1348 cond_code = gimple_cond_code (cond);
1349 if (cond_code != GT_EXPR && cond_code != GE_EXPR
1350 && cond_code != LT_EXPR && cond_code != LE_EXPR)
1351 return false;
1353 /* Make sure the conditional is arg[01] OP y. */
1354 if (gimple_cond_lhs (cond) != rhs)
1355 return false;
1357 if (FLOAT_TYPE_P (TREE_TYPE (gimple_cond_rhs (cond)))
1358 ? real_zerop (gimple_cond_rhs (cond))
1359 : integer_zerop (gimple_cond_rhs (cond)))
1361 else
1362 return false;
1364 /* We need to know which is the true edge and which is the false
1365 edge so that we know if have abs or negative abs. */
1366 extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
1368 /* For GT_EXPR/GE_EXPR, if the true edge goes to OTHER_BLOCK, then we
1369 will need to negate the result. Similarly for LT_EXPR/LE_EXPR if
1370 the false edge goes to OTHER_BLOCK. */
1371 if (cond_code == GT_EXPR || cond_code == GE_EXPR)
1372 e = true_edge;
1373 else
1374 e = false_edge;
1376 if (e->dest == middle_bb)
1377 negate = true;
1378 else
1379 negate = false;
1381 result = duplicate_ssa_name (result, NULL);
1383 if (negate)
1384 lhs = make_ssa_name (TREE_TYPE (result));
1385 else
1386 lhs = result;
1388 /* Build the modify expression with abs expression. */
1389 new_stmt = gimple_build_assign (lhs, ABS_EXPR, rhs);
1391 gsi = gsi_last_bb (cond_bb);
1392 gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
1394 if (negate)
1396 /* Get the right GSI. We want to insert after the recently
1397 added ABS_EXPR statement (which we know is the first statement
1398 in the block. */
1399 new_stmt = gimple_build_assign (result, NEGATE_EXPR, lhs);
1401 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
1404 replace_phi_edge_with_variable (cond_bb, e1, phi, result);
1405 reset_flow_sensitive_info_in_bb (cond_bb);
1407 /* Note that we optimized this PHI. */
1408 return true;
1411 /* Auxiliary functions to determine the set of memory accesses which
1412 can't trap because they are preceded by accesses to the same memory
1413 portion. We do that for MEM_REFs, so we only need to track
1414 the SSA_NAME of the pointer indirectly referenced. The algorithm
1415 simply is a walk over all instructions in dominator order. When
1416 we see an MEM_REF we determine if we've already seen a same
1417 ref anywhere up to the root of the dominator tree. If we do the
1418 current access can't trap. If we don't see any dominating access
1419 the current access might trap, but might also make later accesses
1420 non-trapping, so we remember it. We need to be careful with loads
1421 or stores, for instance a load might not trap, while a store would,
1422 so if we see a dominating read access this doesn't mean that a later
1423 write access would not trap. Hence we also need to differentiate the
1424 type of access(es) seen.
1426 ??? We currently are very conservative and assume that a load might
1427 trap even if a store doesn't (write-only memory). This probably is
1428 overly conservative. */
1430 /* A hash-table of SSA_NAMEs, and in which basic block an MEM_REF
1431 through it was seen, which would constitute a no-trap region for
1432 same accesses. */
1433 struct name_to_bb
1435 unsigned int ssa_name_ver;
1436 unsigned int phase;
1437 bool store;
1438 HOST_WIDE_INT offset, size;
1439 basic_block bb;
1442 /* Hashtable helpers. */
1444 struct ssa_names_hasher : free_ptr_hash <name_to_bb>
1446 static inline hashval_t hash (const name_to_bb *);
1447 static inline bool equal (const name_to_bb *, const name_to_bb *);
1450 /* Used for quick clearing of the hash-table when we see calls.
1451 Hash entries with phase < nt_call_phase are invalid. */
1452 static unsigned int nt_call_phase;
1454 /* The hash function. */
1456 inline hashval_t
1457 ssa_names_hasher::hash (const name_to_bb *n)
1459 return n->ssa_name_ver ^ (((hashval_t) n->store) << 31)
1460 ^ (n->offset << 6) ^ (n->size << 3);
1463 /* The equality function of *P1 and *P2. */
1465 inline bool
1466 ssa_names_hasher::equal (const name_to_bb *n1, const name_to_bb *n2)
1468 return n1->ssa_name_ver == n2->ssa_name_ver
1469 && n1->store == n2->store
1470 && n1->offset == n2->offset
1471 && n1->size == n2->size;
1474 class nontrapping_dom_walker : public dom_walker
1476 public:
1477 nontrapping_dom_walker (cdi_direction direction, hash_set<tree> *ps)
1478 : dom_walker (direction), m_nontrapping (ps), m_seen_ssa_names (128) {}
1480 virtual edge before_dom_children (basic_block);
1481 virtual void after_dom_children (basic_block);
1483 private:
1485 /* We see the expression EXP in basic block BB. If it's an interesting
1486 expression (an MEM_REF through an SSA_NAME) possibly insert the
1487 expression into the set NONTRAP or the hash table of seen expressions.
1488 STORE is true if this expression is on the LHS, otherwise it's on
1489 the RHS. */
1490 void add_or_mark_expr (basic_block, tree, bool);
1492 hash_set<tree> *m_nontrapping;
1494 /* The hash table for remembering what we've seen. */
1495 hash_table<ssa_names_hasher> m_seen_ssa_names;
1498 /* Called by walk_dominator_tree, when entering the block BB. */
1499 edge
1500 nontrapping_dom_walker::before_dom_children (basic_block bb)
1502 edge e;
1503 edge_iterator ei;
1504 gimple_stmt_iterator gsi;
1506 /* If we haven't seen all our predecessors, clear the hash-table. */
1507 FOR_EACH_EDGE (e, ei, bb->preds)
1508 if ((((size_t)e->src->aux) & 2) == 0)
1510 nt_call_phase++;
1511 break;
1514 /* Mark this BB as being on the path to dominator root and as visited. */
1515 bb->aux = (void*)(1 | 2);
1517 /* And walk the statements in order. */
1518 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1520 gimple *stmt = gsi_stmt (gsi);
1522 if ((gimple_code (stmt) == GIMPLE_ASM && gimple_vdef (stmt))
1523 || (is_gimple_call (stmt)
1524 && (!nonfreeing_call_p (stmt) || !nonbarrier_call_p (stmt))))
1525 nt_call_phase++;
1526 else if (gimple_assign_single_p (stmt) && !gimple_has_volatile_ops (stmt))
1528 add_or_mark_expr (bb, gimple_assign_lhs (stmt), true);
1529 add_or_mark_expr (bb, gimple_assign_rhs1 (stmt), false);
1532 return NULL;
1535 /* Called by walk_dominator_tree, when basic block BB is exited. */
1536 void
1537 nontrapping_dom_walker::after_dom_children (basic_block bb)
1539 /* This BB isn't on the path to dominator root anymore. */
1540 bb->aux = (void*)2;
1543 /* We see the expression EXP in basic block BB. If it's an interesting
1544 expression (an MEM_REF through an SSA_NAME) possibly insert the
1545 expression into the set NONTRAP or the hash table of seen expressions.
1546 STORE is true if this expression is on the LHS, otherwise it's on
1547 the RHS. */
1548 void
1549 nontrapping_dom_walker::add_or_mark_expr (basic_block bb, tree exp, bool store)
1551 HOST_WIDE_INT size;
1553 if (TREE_CODE (exp) == MEM_REF
1554 && TREE_CODE (TREE_OPERAND (exp, 0)) == SSA_NAME
1555 && tree_fits_shwi_p (TREE_OPERAND (exp, 1))
1556 && (size = int_size_in_bytes (TREE_TYPE (exp))) > 0)
1558 tree name = TREE_OPERAND (exp, 0);
1559 struct name_to_bb map;
1560 name_to_bb **slot;
1561 struct name_to_bb *n2bb;
1562 basic_block found_bb = 0;
1564 /* Try to find the last seen MEM_REF through the same
1565 SSA_NAME, which can trap. */
1566 map.ssa_name_ver = SSA_NAME_VERSION (name);
1567 map.phase = 0;
1568 map.bb = 0;
1569 map.store = store;
1570 map.offset = tree_to_shwi (TREE_OPERAND (exp, 1));
1571 map.size = size;
1573 slot = m_seen_ssa_names.find_slot (&map, INSERT);
1574 n2bb = *slot;
1575 if (n2bb && n2bb->phase >= nt_call_phase)
1576 found_bb = n2bb->bb;
1578 /* If we've found a trapping MEM_REF, _and_ it dominates EXP
1579 (it's in a basic block on the path from us to the dominator root)
1580 then we can't trap. */
1581 if (found_bb && (((size_t)found_bb->aux) & 1) == 1)
1583 m_nontrapping->add (exp);
1585 else
1587 /* EXP might trap, so insert it into the hash table. */
1588 if (n2bb)
1590 n2bb->phase = nt_call_phase;
1591 n2bb->bb = bb;
1593 else
1595 n2bb = XNEW (struct name_to_bb);
1596 n2bb->ssa_name_ver = SSA_NAME_VERSION (name);
1597 n2bb->phase = nt_call_phase;
1598 n2bb->bb = bb;
1599 n2bb->store = store;
1600 n2bb->offset = map.offset;
1601 n2bb->size = size;
1602 *slot = n2bb;
1608 /* This is the entry point of gathering non trapping memory accesses.
1609 It will do a dominator walk over the whole function, and it will
1610 make use of the bb->aux pointers. It returns a set of trees
1611 (the MEM_REFs itself) which can't trap. */
1612 static hash_set<tree> *
1613 get_non_trapping (void)
1615 nt_call_phase = 0;
1616 hash_set<tree> *nontrap = new hash_set<tree>;
1617 /* We're going to do a dominator walk, so ensure that we have
1618 dominance information. */
1619 calculate_dominance_info (CDI_DOMINATORS);
1621 nontrapping_dom_walker (CDI_DOMINATORS, nontrap)
1622 .walk (cfun->cfg->x_entry_block_ptr);
1624 clear_aux_for_blocks ();
1625 return nontrap;
1628 /* Do the main work of conditional store replacement. We already know
1629 that the recognized pattern looks like so:
1631 split:
1632 if (cond) goto MIDDLE_BB; else goto JOIN_BB (edge E1)
1633 MIDDLE_BB:
1634 something
1635 fallthrough (edge E0)
1636 JOIN_BB:
1637 some more
1639 We check that MIDDLE_BB contains only one store, that that store
1640 doesn't trap (not via NOTRAP, but via checking if an access to the same
1641 memory location dominates us) and that the store has a "simple" RHS. */
1643 static bool
1644 cond_store_replacement (basic_block middle_bb, basic_block join_bb,
1645 edge e0, edge e1, hash_set<tree> *nontrap)
1647 gimple *assign = last_and_only_stmt (middle_bb);
1648 tree lhs, rhs, name, name2;
1649 gphi *newphi;
1650 gassign *new_stmt;
1651 gimple_stmt_iterator gsi;
1652 source_location locus;
1654 /* Check if middle_bb contains of only one store. */
1655 if (!assign
1656 || !gimple_assign_single_p (assign)
1657 || gimple_has_volatile_ops (assign))
1658 return false;
1660 locus = gimple_location (assign);
1661 lhs = gimple_assign_lhs (assign);
1662 rhs = gimple_assign_rhs1 (assign);
1663 if (TREE_CODE (lhs) != MEM_REF
1664 || TREE_CODE (TREE_OPERAND (lhs, 0)) != SSA_NAME
1665 || !is_gimple_reg_type (TREE_TYPE (lhs)))
1666 return false;
1668 /* Prove that we can move the store down. We could also check
1669 TREE_THIS_NOTRAP here, but in that case we also could move stores,
1670 whose value is not available readily, which we want to avoid. */
1671 if (!nontrap->contains (lhs))
1672 return false;
1674 /* Now we've checked the constraints, so do the transformation:
1675 1) Remove the single store. */
1676 gsi = gsi_for_stmt (assign);
1677 unlink_stmt_vdef (assign);
1678 gsi_remove (&gsi, true);
1679 release_defs (assign);
1681 /* 2) Insert a load from the memory of the store to the temporary
1682 on the edge which did not contain the store. */
1683 lhs = unshare_expr (lhs);
1684 name = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
1685 new_stmt = gimple_build_assign (name, lhs);
1686 gimple_set_location (new_stmt, locus);
1687 gsi_insert_on_edge (e1, new_stmt);
1689 /* 3) Create a PHI node at the join block, with one argument
1690 holding the old RHS, and the other holding the temporary
1691 where we stored the old memory contents. */
1692 name2 = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
1693 newphi = create_phi_node (name2, join_bb);
1694 add_phi_arg (newphi, rhs, e0, locus);
1695 add_phi_arg (newphi, name, e1, locus);
1697 lhs = unshare_expr (lhs);
1698 new_stmt = gimple_build_assign (lhs, PHI_RESULT (newphi));
1700 /* 4) Insert that PHI node. */
1701 gsi = gsi_after_labels (join_bb);
1702 if (gsi_end_p (gsi))
1704 gsi = gsi_last_bb (join_bb);
1705 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
1707 else
1708 gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
1710 return true;
1713 /* Do the main work of conditional store replacement. */
1715 static bool
1716 cond_if_else_store_replacement_1 (basic_block then_bb, basic_block else_bb,
1717 basic_block join_bb, gimple *then_assign,
1718 gimple *else_assign)
1720 tree lhs_base, lhs, then_rhs, else_rhs, name;
1721 source_location then_locus, else_locus;
1722 gimple_stmt_iterator gsi;
1723 gphi *newphi;
1724 gassign *new_stmt;
1726 if (then_assign == NULL
1727 || !gimple_assign_single_p (then_assign)
1728 || gimple_clobber_p (then_assign)
1729 || gimple_has_volatile_ops (then_assign)
1730 || else_assign == NULL
1731 || !gimple_assign_single_p (else_assign)
1732 || gimple_clobber_p (else_assign)
1733 || gimple_has_volatile_ops (else_assign))
1734 return false;
1736 lhs = gimple_assign_lhs (then_assign);
1737 if (!is_gimple_reg_type (TREE_TYPE (lhs))
1738 || !operand_equal_p (lhs, gimple_assign_lhs (else_assign), 0))
1739 return false;
1741 lhs_base = get_base_address (lhs);
1742 if (lhs_base == NULL_TREE
1743 || (!DECL_P (lhs_base) && TREE_CODE (lhs_base) != MEM_REF))
1744 return false;
1746 then_rhs = gimple_assign_rhs1 (then_assign);
1747 else_rhs = gimple_assign_rhs1 (else_assign);
1748 then_locus = gimple_location (then_assign);
1749 else_locus = gimple_location (else_assign);
1751 /* Now we've checked the constraints, so do the transformation:
1752 1) Remove the stores. */
1753 gsi = gsi_for_stmt (then_assign);
1754 unlink_stmt_vdef (then_assign);
1755 gsi_remove (&gsi, true);
1756 release_defs (then_assign);
1758 gsi = gsi_for_stmt (else_assign);
1759 unlink_stmt_vdef (else_assign);
1760 gsi_remove (&gsi, true);
1761 release_defs (else_assign);
1763 /* 2) Create a PHI node at the join block, with one argument
1764 holding the old RHS, and the other holding the temporary
1765 where we stored the old memory contents. */
1766 name = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
1767 newphi = create_phi_node (name, join_bb);
1768 add_phi_arg (newphi, then_rhs, EDGE_SUCC (then_bb, 0), then_locus);
1769 add_phi_arg (newphi, else_rhs, EDGE_SUCC (else_bb, 0), else_locus);
1771 new_stmt = gimple_build_assign (lhs, PHI_RESULT (newphi));
1773 /* 3) Insert that PHI node. */
1774 gsi = gsi_after_labels (join_bb);
1775 if (gsi_end_p (gsi))
1777 gsi = gsi_last_bb (join_bb);
1778 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
1780 else
1781 gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
1783 return true;
1786 /* Conditional store replacement. We already know
1787 that the recognized pattern looks like so:
1789 split:
1790 if (cond) goto THEN_BB; else goto ELSE_BB (edge E1)
1791 THEN_BB:
1793 X = Y;
1795 goto JOIN_BB;
1796 ELSE_BB:
1798 X = Z;
1800 fallthrough (edge E0)
1801 JOIN_BB:
1802 some more
1804 We check that it is safe to sink the store to JOIN_BB by verifying that
1805 there are no read-after-write or write-after-write dependencies in
1806 THEN_BB and ELSE_BB. */
1808 static bool
1809 cond_if_else_store_replacement (basic_block then_bb, basic_block else_bb,
1810 basic_block join_bb)
1812 gimple *then_assign = last_and_only_stmt (then_bb);
1813 gimple *else_assign = last_and_only_stmt (else_bb);
1814 vec<data_reference_p> then_datarefs, else_datarefs;
1815 vec<ddr_p> then_ddrs, else_ddrs;
1816 gimple *then_store, *else_store;
1817 bool found, ok = false, res;
1818 struct data_dependence_relation *ddr;
1819 data_reference_p then_dr, else_dr;
1820 int i, j;
1821 tree then_lhs, else_lhs;
1822 basic_block blocks[3];
1824 if (MAX_STORES_TO_SINK == 0)
1825 return false;
1827 /* Handle the case with single statement in THEN_BB and ELSE_BB. */
1828 if (then_assign && else_assign)
1829 return cond_if_else_store_replacement_1 (then_bb, else_bb, join_bb,
1830 then_assign, else_assign);
1832 /* Find data references. */
1833 then_datarefs.create (1);
1834 else_datarefs.create (1);
1835 if ((find_data_references_in_bb (NULL, then_bb, &then_datarefs)
1836 == chrec_dont_know)
1837 || !then_datarefs.length ()
1838 || (find_data_references_in_bb (NULL, else_bb, &else_datarefs)
1839 == chrec_dont_know)
1840 || !else_datarefs.length ())
1842 free_data_refs (then_datarefs);
1843 free_data_refs (else_datarefs);
1844 return false;
1847 /* Find pairs of stores with equal LHS. */
1848 auto_vec<gimple *, 1> then_stores, else_stores;
1849 FOR_EACH_VEC_ELT (then_datarefs, i, then_dr)
1851 if (DR_IS_READ (then_dr))
1852 continue;
1854 then_store = DR_STMT (then_dr);
1855 then_lhs = gimple_get_lhs (then_store);
1856 if (then_lhs == NULL_TREE)
1857 continue;
1858 found = false;
1860 FOR_EACH_VEC_ELT (else_datarefs, j, else_dr)
1862 if (DR_IS_READ (else_dr))
1863 continue;
1865 else_store = DR_STMT (else_dr);
1866 else_lhs = gimple_get_lhs (else_store);
1867 if (else_lhs == NULL_TREE)
1868 continue;
1870 if (operand_equal_p (then_lhs, else_lhs, 0))
1872 found = true;
1873 break;
1877 if (!found)
1878 continue;
1880 then_stores.safe_push (then_store);
1881 else_stores.safe_push (else_store);
1884 /* No pairs of stores found. */
1885 if (!then_stores.length ()
1886 || then_stores.length () > (unsigned) MAX_STORES_TO_SINK)
1888 free_data_refs (then_datarefs);
1889 free_data_refs (else_datarefs);
1890 return false;
1893 /* Compute and check data dependencies in both basic blocks. */
1894 then_ddrs.create (1);
1895 else_ddrs.create (1);
1896 if (!compute_all_dependences (then_datarefs, &then_ddrs,
1897 vNULL, false)
1898 || !compute_all_dependences (else_datarefs, &else_ddrs,
1899 vNULL, false))
1901 free_dependence_relations (then_ddrs);
1902 free_dependence_relations (else_ddrs);
1903 free_data_refs (then_datarefs);
1904 free_data_refs (else_datarefs);
1905 return false;
1907 blocks[0] = then_bb;
1908 blocks[1] = else_bb;
1909 blocks[2] = join_bb;
1910 renumber_gimple_stmt_uids_in_blocks (blocks, 3);
1912 /* Check that there are no read-after-write or write-after-write dependencies
1913 in THEN_BB. */
1914 FOR_EACH_VEC_ELT (then_ddrs, i, ddr)
1916 struct data_reference *dra = DDR_A (ddr);
1917 struct data_reference *drb = DDR_B (ddr);
1919 if (DDR_ARE_DEPENDENT (ddr) != chrec_known
1920 && ((DR_IS_READ (dra) && DR_IS_WRITE (drb)
1921 && gimple_uid (DR_STMT (dra)) > gimple_uid (DR_STMT (drb)))
1922 || (DR_IS_READ (drb) && DR_IS_WRITE (dra)
1923 && gimple_uid (DR_STMT (drb)) > gimple_uid (DR_STMT (dra)))
1924 || (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))))
1926 free_dependence_relations (then_ddrs);
1927 free_dependence_relations (else_ddrs);
1928 free_data_refs (then_datarefs);
1929 free_data_refs (else_datarefs);
1930 return false;
1934 /* Check that there are no read-after-write or write-after-write dependencies
1935 in ELSE_BB. */
1936 FOR_EACH_VEC_ELT (else_ddrs, i, ddr)
1938 struct data_reference *dra = DDR_A (ddr);
1939 struct data_reference *drb = DDR_B (ddr);
1941 if (DDR_ARE_DEPENDENT (ddr) != chrec_known
1942 && ((DR_IS_READ (dra) && DR_IS_WRITE (drb)
1943 && gimple_uid (DR_STMT (dra)) > gimple_uid (DR_STMT (drb)))
1944 || (DR_IS_READ (drb) && DR_IS_WRITE (dra)
1945 && gimple_uid (DR_STMT (drb)) > gimple_uid (DR_STMT (dra)))
1946 || (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))))
1948 free_dependence_relations (then_ddrs);
1949 free_dependence_relations (else_ddrs);
1950 free_data_refs (then_datarefs);
1951 free_data_refs (else_datarefs);
1952 return false;
1956 /* Sink stores with same LHS. */
1957 FOR_EACH_VEC_ELT (then_stores, i, then_store)
1959 else_store = else_stores[i];
1960 res = cond_if_else_store_replacement_1 (then_bb, else_bb, join_bb,
1961 then_store, else_store);
1962 ok = ok || res;
1965 free_dependence_relations (then_ddrs);
1966 free_dependence_relations (else_ddrs);
1967 free_data_refs (then_datarefs);
1968 free_data_refs (else_datarefs);
1970 return ok;
1973 /* Return TRUE if STMT has a VUSE whose corresponding VDEF is in BB. */
1975 static bool
1976 local_mem_dependence (gimple *stmt, basic_block bb)
1978 tree vuse = gimple_vuse (stmt);
1979 gimple *def;
1981 if (!vuse)
1982 return false;
1984 def = SSA_NAME_DEF_STMT (vuse);
1985 return (def && gimple_bb (def) == bb);
1988 /* Given a "diamond" control-flow pattern where BB0 tests a condition,
1989 BB1 and BB2 are "then" and "else" blocks dependent on this test,
1990 and BB3 rejoins control flow following BB1 and BB2, look for
1991 opportunities to hoist loads as follows. If BB3 contains a PHI of
1992 two loads, one each occurring in BB1 and BB2, and the loads are
1993 provably of adjacent fields in the same structure, then move both
1994 loads into BB0. Of course this can only be done if there are no
1995 dependencies preventing such motion.
1997 One of the hoisted loads will always be speculative, so the
1998 transformation is currently conservative:
2000 - The fields must be strictly adjacent.
2001 - The two fields must occupy a single memory block that is
2002 guaranteed to not cross a page boundary.
2004 The last is difficult to prove, as such memory blocks should be
2005 aligned on the minimum of the stack alignment boundary and the
2006 alignment guaranteed by heap allocation interfaces. Thus we rely
2007 on a parameter for the alignment value.
2009 Provided a good value is used for the last case, the first
2010 restriction could possibly be relaxed. */
2012 static void
2013 hoist_adjacent_loads (basic_block bb0, basic_block bb1,
2014 basic_block bb2, basic_block bb3)
2016 int param_align = PARAM_VALUE (PARAM_L1_CACHE_LINE_SIZE);
2017 unsigned param_align_bits = (unsigned) (param_align * BITS_PER_UNIT);
2018 gphi_iterator gsi;
2020 /* Walk the phis in bb3 looking for an opportunity. We are looking
2021 for phis of two SSA names, one each of which is defined in bb1 and
2022 bb2. */
2023 for (gsi = gsi_start_phis (bb3); !gsi_end_p (gsi); gsi_next (&gsi))
2025 gphi *phi_stmt = gsi.phi ();
2026 gimple *def1, *def2;
2027 tree arg1, arg2, ref1, ref2, field1, field2;
2028 tree tree_offset1, tree_offset2, tree_size2, next;
2029 int offset1, offset2, size2;
2030 unsigned align1;
2031 gimple_stmt_iterator gsi2;
2032 basic_block bb_for_def1, bb_for_def2;
2034 if (gimple_phi_num_args (phi_stmt) != 2
2035 || virtual_operand_p (gimple_phi_result (phi_stmt)))
2036 continue;
2038 arg1 = gimple_phi_arg_def (phi_stmt, 0);
2039 arg2 = gimple_phi_arg_def (phi_stmt, 1);
2041 if (TREE_CODE (arg1) != SSA_NAME
2042 || TREE_CODE (arg2) != SSA_NAME
2043 || SSA_NAME_IS_DEFAULT_DEF (arg1)
2044 || SSA_NAME_IS_DEFAULT_DEF (arg2))
2045 continue;
2047 def1 = SSA_NAME_DEF_STMT (arg1);
2048 def2 = SSA_NAME_DEF_STMT (arg2);
2050 if ((gimple_bb (def1) != bb1 || gimple_bb (def2) != bb2)
2051 && (gimple_bb (def2) != bb1 || gimple_bb (def1) != bb2))
2052 continue;
2054 /* Check the mode of the arguments to be sure a conditional move
2055 can be generated for it. */
2056 if (optab_handler (movcc_optab, TYPE_MODE (TREE_TYPE (arg1)))
2057 == CODE_FOR_nothing)
2058 continue;
2060 /* Both statements must be assignments whose RHS is a COMPONENT_REF. */
2061 if (!gimple_assign_single_p (def1)
2062 || !gimple_assign_single_p (def2)
2063 || gimple_has_volatile_ops (def1)
2064 || gimple_has_volatile_ops (def2))
2065 continue;
2067 ref1 = gimple_assign_rhs1 (def1);
2068 ref2 = gimple_assign_rhs1 (def2);
2070 if (TREE_CODE (ref1) != COMPONENT_REF
2071 || TREE_CODE (ref2) != COMPONENT_REF)
2072 continue;
2074 /* The zeroth operand of the two component references must be
2075 identical. It is not sufficient to compare get_base_address of
2076 the two references, because this could allow for different
2077 elements of the same array in the two trees. It is not safe to
2078 assume that the existence of one array element implies the
2079 existence of a different one. */
2080 if (!operand_equal_p (TREE_OPERAND (ref1, 0), TREE_OPERAND (ref2, 0), 0))
2081 continue;
2083 field1 = TREE_OPERAND (ref1, 1);
2084 field2 = TREE_OPERAND (ref2, 1);
2086 /* Check for field adjacency, and ensure field1 comes first. */
2087 for (next = DECL_CHAIN (field1);
2088 next && TREE_CODE (next) != FIELD_DECL;
2089 next = DECL_CHAIN (next))
2092 if (next != field2)
2094 for (next = DECL_CHAIN (field2);
2095 next && TREE_CODE (next) != FIELD_DECL;
2096 next = DECL_CHAIN (next))
2099 if (next != field1)
2100 continue;
2102 std::swap (field1, field2);
2103 std::swap (def1, def2);
2106 bb_for_def1 = gimple_bb (def1);
2107 bb_for_def2 = gimple_bb (def2);
2109 /* Check for proper alignment of the first field. */
2110 tree_offset1 = bit_position (field1);
2111 tree_offset2 = bit_position (field2);
2112 tree_size2 = DECL_SIZE (field2);
2114 if (!tree_fits_uhwi_p (tree_offset1)
2115 || !tree_fits_uhwi_p (tree_offset2)
2116 || !tree_fits_uhwi_p (tree_size2))
2117 continue;
2119 offset1 = tree_to_uhwi (tree_offset1);
2120 offset2 = tree_to_uhwi (tree_offset2);
2121 size2 = tree_to_uhwi (tree_size2);
2122 align1 = DECL_ALIGN (field1) % param_align_bits;
2124 if (offset1 % BITS_PER_UNIT != 0)
2125 continue;
2127 /* For profitability, the two field references should fit within
2128 a single cache line. */
2129 if (align1 + offset2 - offset1 + size2 > param_align_bits)
2130 continue;
2132 /* The two expressions cannot be dependent upon vdefs defined
2133 in bb1/bb2. */
2134 if (local_mem_dependence (def1, bb_for_def1)
2135 || local_mem_dependence (def2, bb_for_def2))
2136 continue;
2138 /* The conditions are satisfied; hoist the loads from bb1 and bb2 into
2139 bb0. We hoist the first one first so that a cache miss is handled
2140 efficiently regardless of hardware cache-fill policy. */
2141 gsi2 = gsi_for_stmt (def1);
2142 gsi_move_to_bb_end (&gsi2, bb0);
2143 gsi2 = gsi_for_stmt (def2);
2144 gsi_move_to_bb_end (&gsi2, bb0);
2146 if (dump_file && (dump_flags & TDF_DETAILS))
2148 fprintf (dump_file,
2149 "\nHoisting adjacent loads from %d and %d into %d: \n",
2150 bb_for_def1->index, bb_for_def2->index, bb0->index);
2151 print_gimple_stmt (dump_file, def1, 0, TDF_VOPS|TDF_MEMSYMS);
2152 print_gimple_stmt (dump_file, def2, 0, TDF_VOPS|TDF_MEMSYMS);
2157 /* Determine whether we should attempt to hoist adjacent loads out of
2158 diamond patterns in pass_phiopt. Always hoist loads if
2159 -fhoist-adjacent-loads is specified and the target machine has
2160 both a conditional move instruction and a defined cache line size. */
2162 static bool
2163 gate_hoist_loads (void)
2165 return (flag_hoist_adjacent_loads == 1
2166 && PARAM_VALUE (PARAM_L1_CACHE_LINE_SIZE)
2167 && HAVE_conditional_move);
2170 /* This pass tries to replaces an if-then-else block with an
2171 assignment. We have four kinds of transformations. Some of these
2172 transformations are also performed by the ifcvt RTL optimizer.
2174 Conditional Replacement
2175 -----------------------
2177 This transformation, implemented in conditional_replacement,
2178 replaces
2180 bb0:
2181 if (cond) goto bb2; else goto bb1;
2182 bb1:
2183 bb2:
2184 x = PHI <0 (bb1), 1 (bb0), ...>;
2186 with
2188 bb0:
2189 x' = cond;
2190 goto bb2;
2191 bb2:
2192 x = PHI <x' (bb0), ...>;
2194 We remove bb1 as it becomes unreachable. This occurs often due to
2195 gimplification of conditionals.
2197 Value Replacement
2198 -----------------
2200 This transformation, implemented in value_replacement, replaces
2202 bb0:
2203 if (a != b) goto bb2; else goto bb1;
2204 bb1:
2205 bb2:
2206 x = PHI <a (bb1), b (bb0), ...>;
2208 with
2210 bb0:
2211 bb2:
2212 x = PHI <b (bb0), ...>;
2214 This opportunity can sometimes occur as a result of other
2215 optimizations.
2218 Another case caught by value replacement looks like this:
2220 bb0:
2221 t1 = a == CONST;
2222 t2 = b > c;
2223 t3 = t1 & t2;
2224 if (t3 != 0) goto bb1; else goto bb2;
2225 bb1:
2226 bb2:
2227 x = PHI (CONST, a)
2229 Gets replaced with:
2230 bb0:
2231 bb2:
2232 t1 = a == CONST;
2233 t2 = b > c;
2234 t3 = t1 & t2;
2235 x = a;
2237 ABS Replacement
2238 ---------------
2240 This transformation, implemented in abs_replacement, replaces
2242 bb0:
2243 if (a >= 0) goto bb2; else goto bb1;
2244 bb1:
2245 x = -a;
2246 bb2:
2247 x = PHI <x (bb1), a (bb0), ...>;
2249 with
2251 bb0:
2252 x' = ABS_EXPR< a >;
2253 bb2:
2254 x = PHI <x' (bb0), ...>;
2256 MIN/MAX Replacement
2257 -------------------
2259 This transformation, minmax_replacement replaces
2261 bb0:
2262 if (a <= b) goto bb2; else goto bb1;
2263 bb1:
2264 bb2:
2265 x = PHI <b (bb1), a (bb0), ...>;
2267 with
2269 bb0:
2270 x' = MIN_EXPR (a, b)
2271 bb2:
2272 x = PHI <x' (bb0), ...>;
2274 A similar transformation is done for MAX_EXPR.
2277 This pass also performs a fifth transformation of a slightly different
2278 flavor.
2280 Factor conversion in COND_EXPR
2281 ------------------------------
2283 This transformation factors the conversion out of COND_EXPR with
2284 factor_out_conditional_conversion.
2286 For example:
2287 if (a <= CST) goto <bb 3>; else goto <bb 4>;
2288 <bb 3>:
2289 tmp = (int) a;
2290 <bb 4>:
2291 tmp = PHI <tmp, CST>
2293 Into:
2294 if (a <= CST) goto <bb 3>; else goto <bb 4>;
2295 <bb 3>:
2296 <bb 4>:
2297 a = PHI <a, CST>
2298 tmp = (int) a;
2300 Adjacent Load Hoisting
2301 ----------------------
2303 This transformation replaces
2305 bb0:
2306 if (...) goto bb2; else goto bb1;
2307 bb1:
2308 x1 = (<expr>).field1;
2309 goto bb3;
2310 bb2:
2311 x2 = (<expr>).field2;
2312 bb3:
2313 # x = PHI <x1, x2>;
2315 with
2317 bb0:
2318 x1 = (<expr>).field1;
2319 x2 = (<expr>).field2;
2320 if (...) goto bb2; else goto bb1;
2321 bb1:
2322 goto bb3;
2323 bb2:
2324 bb3:
2325 # x = PHI <x1, x2>;
2327 The purpose of this transformation is to enable generation of conditional
2328 move instructions such as Intel CMOVE or PowerPC ISEL. Because one of
2329 the loads is speculative, the transformation is restricted to very
2330 specific cases to avoid introducing a page fault. We are looking for
2331 the common idiom:
2333 if (...)
2334 x = y->left;
2335 else
2336 x = y->right;
2338 where left and right are typically adjacent pointers in a tree structure. */
2340 namespace {
2342 const pass_data pass_data_phiopt =
2344 GIMPLE_PASS, /* type */
2345 "phiopt", /* name */
2346 OPTGROUP_NONE, /* optinfo_flags */
2347 TV_TREE_PHIOPT, /* tv_id */
2348 ( PROP_cfg | PROP_ssa ), /* properties_required */
2349 0, /* properties_provided */
2350 0, /* properties_destroyed */
2351 0, /* todo_flags_start */
2352 0, /* todo_flags_finish */
2355 class pass_phiopt : public gimple_opt_pass
2357 public:
2358 pass_phiopt (gcc::context *ctxt)
2359 : gimple_opt_pass (pass_data_phiopt, ctxt)
2362 /* opt_pass methods: */
2363 opt_pass * clone () { return new pass_phiopt (m_ctxt); }
2364 virtual bool gate (function *) { return flag_ssa_phiopt; }
2365 virtual unsigned int execute (function *)
2367 return tree_ssa_phiopt_worker (false, gate_hoist_loads ());
2370 }; // class pass_phiopt
2372 } // anon namespace
2374 gimple_opt_pass *
2375 make_pass_phiopt (gcc::context *ctxt)
2377 return new pass_phiopt (ctxt);
2380 namespace {
2382 const pass_data pass_data_cselim =
2384 GIMPLE_PASS, /* type */
2385 "cselim", /* name */
2386 OPTGROUP_NONE, /* optinfo_flags */
2387 TV_TREE_PHIOPT, /* tv_id */
2388 ( PROP_cfg | PROP_ssa ), /* properties_required */
2389 0, /* properties_provided */
2390 0, /* properties_destroyed */
2391 0, /* todo_flags_start */
2392 0, /* todo_flags_finish */
2395 class pass_cselim : public gimple_opt_pass
2397 public:
2398 pass_cselim (gcc::context *ctxt)
2399 : gimple_opt_pass (pass_data_cselim, ctxt)
2402 /* opt_pass methods: */
2403 virtual bool gate (function *) { return flag_tree_cselim; }
2404 virtual unsigned int execute (function *) { return tree_ssa_cs_elim (); }
2406 }; // class pass_cselim
2408 } // anon namespace
2410 gimple_opt_pass *
2411 make_pass_cselim (gcc::context *ctxt)
2413 return new pass_cselim (ctxt);