/* Optimization of PHI nodes by converting them into straightline code.
   Copyright (C) 2004-2015 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "input.h"
#include "alias.h"
#include "symtab.h"
#include "tree.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "flags.h"
#include "tm_p.h"
#include "predict.h"
#include "hard-reg-set.h"
#include "function.h"
#include "dominance.h"
#include "cfg.h"
#include "cfganal.h"
#include "basic-block.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "gimple-ssa.h"
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "rtl.h"
#include "insn-config.h"
#include "expmed.h"
#include "dojump.h"
#include "explow.h"
#include "calls.h"
#include "emit-rtl.h"
#include "varasm.h"
#include "stmt.h"
#include "expr.h"
#include "tree-dfa.h"
#include "tree-pass.h"
#include "langhooks.h"
#include "domwalk.h"
#include "cfgloop.h"
#include "tree-data-ref.h"
#include "gimple-pretty-print.h"
#include "insn-codes.h"
#include "optabs.h"
#include "tree-scalar-evolution.h"
#include "tree-inline.h"

static unsigned int tree_ssa_phiopt_worker (bool, bool);
static bool conditional_replacement (basic_block, basic_block,
				     edge, edge, gphi *, tree, tree);
static int value_replacement (basic_block, basic_block,
			      edge, edge, gimple, tree, tree);
static bool minmax_replacement (basic_block, basic_block,
				edge, edge, gimple, tree, tree);
static bool abs_replacement (basic_block, basic_block,
			     edge, edge, gimple, tree, tree);
static bool cond_store_replacement (basic_block, basic_block, edge, edge,
				    hash_set<tree> *);
static bool cond_if_else_store_replacement (basic_block, basic_block, basic_block);
static hash_set<tree> * get_non_trapping ();
static void replace_phi_edge_with_variable (basic_block, edge, gimple, tree);
static void hoist_adjacent_loads (basic_block, basic_block,
				  basic_block, basic_block);
static bool gate_hoist_loads (void);

/* This pass tries to transform conditional stores into unconditional
   ones, enabling further simplifications with the simpler then and else
   blocks.  In particular it replaces this:

     bb0:
       if (cond) goto bb2; else goto bb1;
     bb1:
       *p = RHS;
     bb2:

   with

     bb0:
       if (cond) goto bb1; else goto bb2;
     bb1:
       condtmp' = *p;
     bb2:
       condtmp = PHI <RHS, condtmp'>
       *p = condtmp;

   This transformation can only be done under several constraints,
   documented below.  It also replaces:

     bb0:
       if (cond) goto bb2; else goto bb1;
     bb1:
       *p = RHS1;
       goto bb3;
     bb2:
       *p = RHS2;
     bb3:

   with

     bb0:
       if (cond) goto bb3; else goto bb1;
     bb1:
     bb3:
       condtmp = PHI <RHS1, RHS2>
       *p = condtmp;  */

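/* Illustration (hypothetical source, not part of the pass itself): at
   the C level the first pattern corresponds roughly to rewriting

     if (cond)
       *p = v;

   as

     tmp = *p;
     *p = cond ? v : tmp;

   i.e. the store becomes unconditional and a PHI selects the stored
   value.  This is only valid when *p is known not to trap, which is
   what get_non_trapping below establishes.  */
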
static unsigned int
tree_ssa_cs_elim (void)
{
  unsigned todo;
  /* ???  We are not interested in loop related info, but the following
     will create it, ICEing as we didn't init loops with pre-headers.
     An interfacing issue of find_data_references_in_bb.  */
  loop_optimizer_init (LOOPS_NORMAL);
  scev_initialize ();
  todo = tree_ssa_phiopt_worker (true, false);
  scev_finalize ();
  loop_optimizer_finalize ();
  return todo;
}

/* Return the singleton PHI in the SEQ of PHIs for edges E0 and E1.  */

static gphi *
single_non_singleton_phi_for_edges (gimple_seq seq, edge e0, edge e1)
{
  gimple_stmt_iterator i;
  gphi *phi = NULL;
  if (gimple_seq_singleton_p (seq))
    return as_a <gphi *> (gsi_stmt (gsi_start (seq)));
  for (i = gsi_start (seq); !gsi_end_p (i); gsi_next (&i))
    {
      gphi *p = as_a <gphi *> (gsi_stmt (i));
      /* If the PHI arguments are equal then we can skip this PHI.  */
      if (operand_equal_for_phi_arg_p (gimple_phi_arg_def (p, e0->dest_idx),
				       gimple_phi_arg_def (p, e1->dest_idx)))
	continue;

      /* If we already found a PHI whose two edge arguments differ,
	 then there is no singleton for these PHIs; return NULL.  */
      if (phi)
	return NULL;

      phi = p;
    }
  return phi;
}

/* The core routine of conditional store replacement and normal
   phi optimizations.  Both share much of the infrastructure in how
   to match applicable basic block patterns.  DO_STORE_ELIM is true
   when we want to do conditional store replacement, false otherwise.
   DO_HOIST_LOADS is true when we want to hoist adjacent loads out
   of diamond control flow patterns, false otherwise.  */
static unsigned int
tree_ssa_phiopt_worker (bool do_store_elim, bool do_hoist_loads)
{
  basic_block bb;
  basic_block *bb_order;
  unsigned n, i;
  bool cfgchanged = false;
  hash_set<tree> *nontrap = 0;

  if (do_store_elim)
    /* Calculate the set of non-trapping memory accesses.  */
    nontrap = get_non_trapping ();

  /* Search every basic block for COND_EXPRs we may be able to optimize.

     We walk the blocks in an order that guarantees that a block with
     a single predecessor is processed before the predecessor.
     This ensures that we collapse inner ifs before visiting the
     outer ones, and also that we do not try to visit a removed
     block.  */
  bb_order = single_pred_before_succ_order ();
  n = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS;

  for (i = 0; i < n; i++)
    {
      gimple cond_stmt;
      gphi *phi;
      basic_block bb1, bb2;
      edge e1, e2;
      tree arg0, arg1;

      bb = bb_order[i];

      cond_stmt = last_stmt (bb);
      /* Check to see if the last statement is a GIMPLE_COND.  */
      if (!cond_stmt
	  || gimple_code (cond_stmt) != GIMPLE_COND)
	continue;

      e1 = EDGE_SUCC (bb, 0);
      bb1 = e1->dest;
      e2 = EDGE_SUCC (bb, 1);
      bb2 = e2->dest;

      /* We cannot do the optimization on abnormal edges.  */
      if ((e1->flags & EDGE_ABNORMAL) != 0
	  || (e2->flags & EDGE_ABNORMAL) != 0)
	continue;

      /* Punt if bb2 is missing or if bb1 or bb2 has no successors.  */
      if (EDGE_COUNT (bb1->succs) == 0
	  || bb2 == NULL
	  || EDGE_COUNT (bb2->succs) == 0)
	continue;

      /* Find the bb which is the fall through to the other.  */
      if (EDGE_SUCC (bb1, 0)->dest == bb2)
	;
      else if (EDGE_SUCC (bb2, 0)->dest == bb1)
	{
	  basic_block bb_tmp = bb1;
	  edge e_tmp = e1;
	  bb1 = bb2;
	  bb2 = bb_tmp;
	  e1 = e2;
	  e2 = e_tmp;
	}
      else if (do_store_elim
	       && EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest)
	{
	  basic_block bb3 = EDGE_SUCC (bb1, 0)->dest;

	  if (!single_succ_p (bb1)
	      || (EDGE_SUCC (bb1, 0)->flags & EDGE_FALLTHRU) == 0
	      || !single_succ_p (bb2)
	      || (EDGE_SUCC (bb2, 0)->flags & EDGE_FALLTHRU) == 0
	      || EDGE_COUNT (bb3->preds) != 2)
	    continue;
	  if (cond_if_else_store_replacement (bb1, bb2, bb3))
	    cfgchanged = true;
	  continue;
	}
      else if (do_hoist_loads
	       && EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest)
	{
	  basic_block bb3 = EDGE_SUCC (bb1, 0)->dest;

	  if (!FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (cond_stmt)))
	      && single_succ_p (bb1)
	      && single_succ_p (bb2)
	      && single_pred_p (bb1)
	      && single_pred_p (bb2)
	      && EDGE_COUNT (bb->succs) == 2
	      && EDGE_COUNT (bb3->preds) == 2
	      /* If one edge or the other is dominant, a conditional move
		 is likely to perform worse than the well-predicted branch.  */
	      && !predictable_edge_p (EDGE_SUCC (bb, 0))
	      && !predictable_edge_p (EDGE_SUCC (bb, 1)))
	    hoist_adjacent_loads (bb, bb1, bb2, bb3);
	  continue;
	}
      else
	continue;

      e1 = EDGE_SUCC (bb1, 0);

      /* Make sure that bb1 is just a fall through.  */
      if (!single_succ_p (bb1)
	  || (e1->flags & EDGE_FALLTHRU) == 0)
	continue;

      /* Also make sure that bb1 only has one predecessor and that it
	 is bb.  */
      if (!single_pred_p (bb1)
	  || single_pred (bb1) != bb)
	continue;

      if (do_store_elim)
	{
	  /* bb1 is the middle block, bb2 the join block, bb the split block,
	     e1 the fallthrough edge from bb1 to bb2.  We can't do the
	     optimization if the join block has more than two predecessors.  */
	  if (EDGE_COUNT (bb2->preds) > 2)
	    continue;
	  if (cond_store_replacement (bb1, bb2, e1, e2, nontrap))
	    cfgchanged = true;
	}
      else
	{
	  gimple_seq phis = phi_nodes (bb2);
	  gimple_stmt_iterator gsi;
	  bool candorest = true;

	  /* Value replacement can work with more than one PHI
	     so try that first.  */
	  for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
	    {
	      phi = as_a <gphi *> (gsi_stmt (gsi));
	      arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
	      arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
	      if (value_replacement (bb, bb1, e1, e2, phi, arg0, arg1) == 2)
		{
		  candorest = false;
		  cfgchanged = true;
		  break;
		}
	    }

	  if (!candorest)
	    continue;

	  phi = single_non_singleton_phi_for_edges (phis, e1, e2);
	  if (!phi)
	    continue;

	  arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
	  arg1 = gimple_phi_arg_def (phi, e2->dest_idx);

	  /* Something is wrong if we cannot find the arguments in the PHI
	     node.  */
	  gcc_assert (arg0 != NULL && arg1 != NULL);

	  /* Do the replacement of conditional if it can be done.  */
	  if (conditional_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
	    cfgchanged = true;
	  else if (abs_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
	    cfgchanged = true;
	  else if (minmax_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
	    cfgchanged = true;
	}
    }

  free (bb_order);

  if (do_store_elim)
    delete nontrap;
  /* If the CFG has changed, we should cleanup the CFG.  */
  if (cfgchanged && do_store_elim)
    {
      /* In cond-store replacement we have added some loads on edges
	 and new VOPS (as we moved the store, and created a load).  */
      gsi_commit_edge_inserts ();
      return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
    }
  else if (cfgchanged)
    return TODO_cleanup_cfg;
  return 0;
}

/* Replace PHI node element whose edge is E in block BB with variable NEW.
   Remove the edge from COND_BLOCK which does not lead to BB (COND_BLOCK
   is known to have two edges, one of which must reach BB).  */

static void
replace_phi_edge_with_variable (basic_block cond_block,
				edge e, gimple phi, tree new_tree)
{
  basic_block bb = gimple_bb (phi);
  basic_block block_to_remove;
  gimple_stmt_iterator gsi;

  /* Change the PHI argument to new.  */
  SET_USE (PHI_ARG_DEF_PTR (phi, e->dest_idx), new_tree);

  /* Remove the empty basic block.  */
  if (EDGE_SUCC (cond_block, 0)->dest == bb)
    {
      EDGE_SUCC (cond_block, 0)->flags |= EDGE_FALLTHRU;
      EDGE_SUCC (cond_block, 0)->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
      EDGE_SUCC (cond_block, 0)->probability = REG_BR_PROB_BASE;
      EDGE_SUCC (cond_block, 0)->count += EDGE_SUCC (cond_block, 1)->count;

      block_to_remove = EDGE_SUCC (cond_block, 1)->dest;
    }
  else
    {
      EDGE_SUCC (cond_block, 1)->flags |= EDGE_FALLTHRU;
      EDGE_SUCC (cond_block, 1)->flags
	&= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
      EDGE_SUCC (cond_block, 1)->probability = REG_BR_PROB_BASE;
      EDGE_SUCC (cond_block, 1)->count += EDGE_SUCC (cond_block, 0)->count;

      block_to_remove = EDGE_SUCC (cond_block, 0)->dest;
    }
  delete_basic_block (block_to_remove);

  /* Eliminate the COND_EXPR at the end of COND_BLOCK.  */
  gsi = gsi_last_bb (cond_block);
  gsi_remove (&gsi, true);

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file,
	     "COND_EXPR in block %d and PHI in block %d converted to straightline code.\n",
	     cond_block->index,
	     bb->index);
}

/* The function conditional_replacement does the main work of doing the
   conditional replacement.  Return true if the replacement is done.
   Otherwise return false.
   BB is the basic block where the replacement is going to be done.  ARG0
   is argument 0 from PHI.  Likewise for ARG1.  */

static bool
conditional_replacement (basic_block cond_bb, basic_block middle_bb,
			 edge e0, edge e1, gphi *phi,
			 tree arg0, tree arg1)
{
  tree result;
  gimple stmt;
  gassign *new_stmt;
  tree cond;
  gimple_stmt_iterator gsi;
  edge true_edge, false_edge;
  tree new_var, new_var2;
  bool neg;

  /* FIXME: Gimplification of complex type is too hard for now.  */
  /* We aren't prepared to handle vectors either (and it is a question
     if it would be worthwhile anyway).  */
  if (!(INTEGRAL_TYPE_P (TREE_TYPE (arg0))
	|| POINTER_TYPE_P (TREE_TYPE (arg0)))
      || !(INTEGRAL_TYPE_P (TREE_TYPE (arg1))
	   || POINTER_TYPE_P (TREE_TYPE (arg1))))
    return false;

  /* If the PHI arguments are the constants 0 and 1, or 0 and -1, then
     convert the PHI to the condition.  */
  if ((integer_zerop (arg0) && integer_onep (arg1))
      || (integer_zerop (arg1) && integer_onep (arg0)))
    neg = false;
  else if ((integer_zerop (arg0) && integer_all_onesp (arg1))
	   || (integer_zerop (arg1) && integer_all_onesp (arg0)))
    neg = true;
  else
    return false;

  if (!empty_block_p (middle_bb))
    return false;

  /* At this point we know we have a GIMPLE_COND with two successors.
     One successor is BB, the other successor is an empty block which
     falls through into BB.

     There is a single PHI node at the join point (BB) and its arguments
     are constants (0, 1) or (0, -1).

     So, given the condition COND, and the two PHI arguments, we can
     rewrite this PHI into non-branching code:

       dest = (COND) or dest = COND'

     We use the condition as-is if the argument associated with the
     true edge has the value one or the argument associated with the
     false edge has the value zero.  Note that those conditions are not
     the same since only one of the outgoing edges from the GIMPLE_COND
     will directly reach BB and thus be associated with an argument.  */

  stmt = last_stmt (cond_bb);
  result = PHI_RESULT (phi);

  /* To handle special cases like floating point comparison, it is easier and
     less error-prone to build a tree and gimplify it on the fly though it is
     less efficient.  */
  cond = fold_build2_loc (gimple_location (stmt),
			  gimple_cond_code (stmt), boolean_type_node,
			  gimple_cond_lhs (stmt), gimple_cond_rhs (stmt));

  /* We need to know which is the true edge and which is the false
     edge so that we know when to invert the condition below.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
  if ((e0 == true_edge && integer_zerop (arg0))
      || (e0 == false_edge && !integer_zerop (arg0))
      || (e1 == true_edge && integer_zerop (arg1))
      || (e1 == false_edge && !integer_zerop (arg1)))
    cond = fold_build1_loc (gimple_location (stmt),
			    TRUTH_NOT_EXPR, TREE_TYPE (cond), cond);

  if (neg)
    {
      cond = fold_convert_loc (gimple_location (stmt),
			       TREE_TYPE (result), cond);
      cond = fold_build1_loc (gimple_location (stmt),
			      NEGATE_EXPR, TREE_TYPE (cond), cond);
    }

  /* Insert our new statements at the end of conditional block before the
     COND_STMT.  */
  gsi = gsi_for_stmt (stmt);
  new_var = force_gimple_operand_gsi (&gsi, cond, true, NULL, true,
				      GSI_SAME_STMT);

  if (!useless_type_conversion_p (TREE_TYPE (result), TREE_TYPE (new_var)))
    {
      source_location locus_0, locus_1;

      new_var2 = make_ssa_name (TREE_TYPE (result));
      new_stmt = gimple_build_assign (new_var2, CONVERT_EXPR, new_var);
      gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
      new_var = new_var2;

      /* Set the locus to the first argument, unless it doesn't have one.  */
      locus_0 = gimple_phi_arg_location (phi, 0);
      locus_1 = gimple_phi_arg_location (phi, 1);
      if (locus_0 == UNKNOWN_LOCATION)
	locus_0 = locus_1;
      gimple_set_location (new_stmt, locus_0);
    }

  replace_phi_edge_with_variable (cond_bb, e1, phi, new_var);

  /* Note that we optimized this PHI.  */
  return true;
}

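/* Illustration (hypothetical source): for an integral or pointer-typed
   result, conditional_replacement rewrites

     x = cond ? 1 : 0;    as   x = cond;
     x = cond ? 0 : 1;    as   x = !cond;
     x = cond ? -1 : 0;   as   x = -(T) cond;   (T being x's type)

   modulo the SSA temporaries and conversions built above.  */
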
/* Update *ARG which is defined in STMT so that it contains the
   computed value if that seems profitable.  Return true if the
   statement is made dead by that rewriting.  */

static bool
jump_function_from_stmt (tree *arg, gimple stmt)
{
  enum tree_code code = gimple_assign_rhs_code (stmt);
  if (code == ADDR_EXPR)
    {
      /* For arg = &p->i transform it to p, if possible.  */
      tree rhs1 = gimple_assign_rhs1 (stmt);
      HOST_WIDE_INT offset;
      tree tem = get_addr_base_and_unit_offset (TREE_OPERAND (rhs1, 0),
						&offset);
      if (tem
	  && TREE_CODE (tem) == MEM_REF
	  && (mem_ref_offset (tem) + offset) == 0)
	{
	  *arg = TREE_OPERAND (tem, 0);
	  return true;
	}
    }
  /* TODO: Much like IPA-CP jump-functions we want to handle constant
     additions symbolically here, and we'd need to update the comparison
     code that compares the arg + cst tuples in our caller.  For now the
     code above exactly handles the VEC_BASE pattern from vec.h.  */
  return false;
}

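/* Illustration (hypothetical source): for a pointer p to a struct whose
   first field is f (offset 0), the ADDR_EXPR case above rewrites

     arg = &p->f;

   to plain  arg = p,  after which the address computation is dead.  */
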
/* RHS is a source argument in a BIT_AND_EXPR which feeds a conditional
   of the form SSA_NAME NE 0.

   If RHS is fed by a simple EQ_EXPR comparison of two values, see if
   the two input values of the EQ_EXPR match arg0 and arg1.

   If so update *code and return TRUE.  Otherwise return FALSE.  */

static bool
rhs_is_fed_for_value_replacement (const_tree arg0, const_tree arg1,
				  enum tree_code *code, const_tree rhs)
{
  /* Obviously if RHS is not an SSA_NAME, we can't look at the defining
     statement.  */
  if (TREE_CODE (rhs) == SSA_NAME)
    {
      gimple def1 = SSA_NAME_DEF_STMT (rhs);

      /* Verify the defining statement has an EQ_EXPR on the RHS.  */
      if (is_gimple_assign (def1) && gimple_assign_rhs_code (def1) == EQ_EXPR)
	{
	  /* Finally verify the source operands of the EQ_EXPR are equal
	     to arg0 and arg1.  */
	  tree op0 = gimple_assign_rhs1 (def1);
	  tree op1 = gimple_assign_rhs2 (def1);
	  if ((operand_equal_for_phi_arg_p (arg0, op0)
	       && operand_equal_for_phi_arg_p (arg1, op1))
	      || (operand_equal_for_phi_arg_p (arg0, op1)
		  && operand_equal_for_phi_arg_p (arg1, op0)))
	    {
	      /* We will perform the optimization.  */
	      *code = gimple_assign_rhs_code (def1);
	      return true;
	    }
	}
    }
  return false;
}

/* Return TRUE if arg0/arg1 are equal to the rhs/lhs or lhs/rhs of COND.

   Also return TRUE if arg0/arg1 are equal to the source arguments of
   an EQ comparison feeding a BIT_AND_EXPR which feeds COND.

   Return FALSE otherwise.  */

static bool
operand_equal_for_value_replacement (const_tree arg0, const_tree arg1,
				     enum tree_code *code, gimple cond)
{
  gimple def;
  tree lhs = gimple_cond_lhs (cond);
  tree rhs = gimple_cond_rhs (cond);

  if ((operand_equal_for_phi_arg_p (arg0, lhs)
       && operand_equal_for_phi_arg_p (arg1, rhs))
      || (operand_equal_for_phi_arg_p (arg1, lhs)
	  && operand_equal_for_phi_arg_p (arg0, rhs)))
    return true;

  /* Now handle more complex case where we have an EQ comparison
     which feeds a BIT_AND_EXPR which feeds COND.

     First verify that COND is of the form SSA_NAME NE 0.  */
  if (*code != NE_EXPR || !integer_zerop (rhs)
      || TREE_CODE (lhs) != SSA_NAME)
    return false;

  /* Now ensure that SSA_NAME is set by a BIT_AND_EXPR.  */
  def = SSA_NAME_DEF_STMT (lhs);
  if (!is_gimple_assign (def) || gimple_assign_rhs_code (def) != BIT_AND_EXPR)
    return false;

  /* Now verify arg0/arg1 correspond to the source arguments of an
     EQ comparison feeding the BIT_AND_EXPR.  */

  tree tmp = gimple_assign_rhs1 (def);
  if (rhs_is_fed_for_value_replacement (arg0, arg1, code, tmp))
    return true;

  tmp = gimple_assign_rhs2 (def);
  if (rhs_is_fed_for_value_replacement (arg0, arg1, code, tmp))
    return true;

  return false;
}

/* Returns true if ARG is a neutral element for operation CODE
   on the RIGHT side.  */

static bool
neutral_element_p (tree_code code, tree arg, bool right)
{
  switch (code)
    {
    case PLUS_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
      return integer_zerop (arg);

    case LROTATE_EXPR:
    case RROTATE_EXPR:
    case LSHIFT_EXPR:
    case RSHIFT_EXPR:
    case MINUS_EXPR:
    case POINTER_PLUS_EXPR:
      return right && integer_zerop (arg);

    case MULT_EXPR:
      return integer_onep (arg);

    case TRUNC_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case EXACT_DIV_EXPR:
      return right && integer_onep (arg);

    case BIT_AND_EXPR:
      return integer_all_onesp (arg);

    default:
      return false;
    }
}

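/* For example, x + 0, x | 0, x ^ 0, x * 1 and x & -1 all equal x with
   the constant on either side, whereas x - 0, x << 0 and x / 1 equal x
   only with the constant on the right: 0 - x is not x.  That asymmetry
   is what the RIGHT parameter expresses.  */
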
/* Returns true if ARG is an absorbing element for operation CODE.  */

static bool
absorbing_element_p (tree_code code, tree arg)
{
  switch (code)
    {
    case BIT_IOR_EXPR:
      return integer_all_onesp (arg);

    case MULT_EXPR:
    case BIT_AND_EXPR:
      return integer_zerop (arg);

    default:
      return false;
    }
}

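/* For example, x * 0 == 0, x & 0 == 0 and x | -1 == -1: the result no
   longer depends on x at all.  */
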
/* The function value_replacement does the main work of doing the value
   replacement.  Return non-zero if the replacement is done.  Otherwise return
   0.  If we remove the middle basic block, return 2.
   BB is the basic block where the replacement is going to be done.  ARG0
   is argument 0 from the PHI.  Likewise for ARG1.  */

static int
value_replacement (basic_block cond_bb, basic_block middle_bb,
		   edge e0, edge e1, gimple phi,
		   tree arg0, tree arg1)
{
  gimple_stmt_iterator gsi;
  gimple cond;
  edge true_edge, false_edge;
  enum tree_code code;
  bool empty_or_with_defined_p = true;

  /* If the type says honor signed zeros we cannot do this
     optimization.  */
  if (HONOR_SIGNED_ZEROS (arg1))
    return 0;

  /* If there is a statement in MIDDLE_BB that defines one of the PHI
     arguments, then adjust arg0 or arg1.  */
  gsi = gsi_start_nondebug_after_labels_bb (middle_bb);
  while (!gsi_end_p (gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      tree lhs;
      gsi_next_nondebug (&gsi);
      if (!is_gimple_assign (stmt))
	{
	  empty_or_with_defined_p = false;
	  continue;
	}
      /* Now try to adjust arg0 or arg1 according to the computation
	 in the statement.  */
      lhs = gimple_assign_lhs (stmt);
      if (!(lhs == arg0
	    && jump_function_from_stmt (&arg0, stmt))
	  || (lhs == arg1
	      && jump_function_from_stmt (&arg1, stmt)))
	empty_or_with_defined_p = false;
    }

  cond = last_stmt (cond_bb);
  code = gimple_cond_code (cond);

  /* This transformation is only valid for equality comparisons.  */
  if (code != NE_EXPR && code != EQ_EXPR)
    return 0;

  /* We need to know which is the true edge and which is the false
     edge so that we know which PHI argument corresponds to which edge.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);

  /* At this point we know we have a COND_EXPR with two successors.
     One successor is BB, the other successor is an empty block which
     falls through into BB.

     The condition for the COND_EXPR is known to be NE_EXPR or EQ_EXPR.

     There is a single PHI node at the join point (BB) with two arguments.

     We now need to verify that the two arguments in the PHI node match
     the two arguments to the equality comparison.  */

  if (operand_equal_for_value_replacement (arg0, arg1, &code, cond))
    {
      edge e;
      tree arg;

      /* For NE_EXPR, we want to build an assignment result = arg where
	 arg is the PHI argument associated with the true edge.  For
	 EQ_EXPR we want the PHI argument associated with the false edge.  */
      e = (code == NE_EXPR ? true_edge : false_edge);

      /* Unfortunately, E may not reach BB (it may instead have gone to
	 OTHER_BLOCK).  If that is the case, then we want the single outgoing
	 edge from OTHER_BLOCK which reaches BB and represents the desired
	 path from COND_BLOCK.  */
      if (e->dest == middle_bb)
	e = single_succ_edge (e->dest);

      /* Now we know the incoming edge to BB that has the argument for the
	 RHS of our new assignment statement.  */
      if (e0 == e)
	arg = arg0;
      else
	arg = arg1;

      /* If the middle basic block was empty or is defining the
	 PHI arguments and this is a single phi where the args are different
	 for the edges e0 and e1 then we can remove the middle basic block.  */
      if (empty_or_with_defined_p
	  && single_non_singleton_phi_for_edges (phi_nodes (gimple_bb (phi)),
						 e0, e1) == phi)
	{
	  replace_phi_edge_with_variable (cond_bb, e1, phi, arg);
	  /* Note that we optimized this PHI.  */
	  return 2;
	}
      else
	{
	  /* Replace the PHI arguments with arg.  */
	  SET_PHI_ARG_DEF (phi, e0->dest_idx, arg);
	  SET_PHI_ARG_DEF (phi, e1->dest_idx, arg);
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "PHI ");
	      print_generic_expr (dump_file, gimple_phi_result (phi), 0);
	      fprintf (dump_file, " reduced for COND_EXPR in block %d to ",
		       cond_bb->index);
	      print_generic_expr (dump_file, arg, 0);
	      fprintf (dump_file, ".\n");
	    }
	  return 1;
	}
    }

  /* Now optimize (x != 0) ? x + y : y to just x + y.
     The following condition is too restrictive, there can easily be another
     stmt in middle_bb, for instance a CONVERT_EXPR for the second argument.  */
  gimple assign = last_and_only_stmt (middle_bb);
  if (!assign || gimple_code (assign) != GIMPLE_ASSIGN
      || gimple_assign_rhs_class (assign) != GIMPLE_BINARY_RHS
      || (!INTEGRAL_TYPE_P (TREE_TYPE (arg0))
	  && !POINTER_TYPE_P (TREE_TYPE (arg0))))
    return 0;

  /* Punt if there are (degenerate) PHIs in middle_bb, there should not be.  */
  if (!gimple_seq_empty_p (phi_nodes (middle_bb)))
    return 0;

  /* Only transform if it removes the condition.  */
  if (!single_non_singleton_phi_for_edges (phi_nodes (gimple_bb (phi)), e0, e1))
    return 0;

  /* Size-wise, this is always profitable.  */
  if (optimize_bb_for_speed_p (cond_bb)
      /* The special case is useless if it has a low probability.  */
      && profile_status_for_fn (cfun) != PROFILE_ABSENT
      && EDGE_PRED (middle_bb, 0)->probability < PROB_EVEN
      /* If assign is cheap, there is no point avoiding it.  */
      && estimate_num_insns (assign, &eni_time_weights)
	 >= 3 * estimate_num_insns (cond, &eni_time_weights))
    return 0;

  tree lhs = gimple_assign_lhs (assign);
  tree rhs1 = gimple_assign_rhs1 (assign);
  tree rhs2 = gimple_assign_rhs2 (assign);
  enum tree_code code_def = gimple_assign_rhs_code (assign);
  tree cond_lhs = gimple_cond_lhs (cond);
  tree cond_rhs = gimple_cond_rhs (cond);

  if (((code == NE_EXPR && e1 == false_edge)
       || (code == EQ_EXPR && e1 == true_edge))
      && arg0 == lhs
      && ((arg1 == rhs1
	   && operand_equal_for_phi_arg_p (rhs2, cond_lhs)
	   && neutral_element_p (code_def, cond_rhs, true))
	  || (arg1 == rhs2
	      && operand_equal_for_phi_arg_p (rhs1, cond_lhs)
	      && neutral_element_p (code_def, cond_rhs, false))
	  || (operand_equal_for_phi_arg_p (arg1, cond_rhs)
	      && (operand_equal_for_phi_arg_p (rhs2, cond_lhs)
		  || operand_equal_for_phi_arg_p (rhs1, cond_lhs))
	      && absorbing_element_p (code_def, cond_rhs))))
    {
      gsi = gsi_for_stmt (cond);
      if (INTEGRAL_TYPE_P (TREE_TYPE (lhs)))
	{
	  /* Moving ASSIGN might change VR of lhs, e.g. when moving u_6
	     def-stmt in:
	     if (n_5 != 0)
	       goto <bb 3>;
	     else
	       goto <bb 4>;

	     <bb 3>:
	     # RANGE [0, 4294967294]
	     u_6 = n_5 + 4294967295;

	     <bb 4>:
	     # u_3 = PHI <u_6(3), 4294967295(2)>  */
	  SSA_NAME_RANGE_INFO (lhs) = NULL;
	  SSA_NAME_ANTI_RANGE_P (lhs) = 0;
	  /* If available, we can use VR of phi result at least.  */
	  tree phires = gimple_phi_result (phi);
	  struct range_info_def *phires_range_info
	    = SSA_NAME_RANGE_INFO (phires);
	  if (phires_range_info)
	    duplicate_ssa_name_range_info (lhs, SSA_NAME_RANGE_TYPE (phires),
					   phires_range_info);
	}
      gimple_stmt_iterator gsi_from = gsi_for_stmt (assign);
      gsi_move_before (&gsi_from, &gsi);
      replace_phi_edge_with_variable (cond_bb, e1, phi, lhs);
      return 2;
    }

  return 0;
}

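/* Illustration (hypothetical sources) of the cases value_replacement
   handles:

     r = (x != 0) ? x : 0;        becomes  r = x;
     r = (x != 0) ? x + y : y;    becomes  r = x + y;  (0 neutral for +)
     r = (x != 0) ? x * y : 0;    becomes  r = x * y;  (0 absorbing for *)

   In the last two cases the middle-block statement is moved before the
   condition rather than deleted, which is why its range info must be
   dropped above.  */
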
/* The function minmax_replacement does the main work of doing the minmax
   replacement.  Return true if the replacement is done.  Otherwise return
   false.
   BB is the basic block where the replacement is going to be done.  ARG0
   is argument 0 from the PHI.  Likewise for ARG1.  */

static bool
minmax_replacement (basic_block cond_bb, basic_block middle_bb,
		    edge e0, edge e1, gimple phi,
		    tree arg0, tree arg1)
{
  tree result, type;
  gcond *cond;
  gassign *new_stmt;
  edge true_edge, false_edge;
  enum tree_code cmp, minmax, ass_code;
  tree smaller, larger, arg_true, arg_false;
  gimple_stmt_iterator gsi, gsi_from;

  type = TREE_TYPE (PHI_RESULT (phi));

  /* The optimization may be unsafe due to NaNs.  */
  if (HONOR_NANS (type))
    return false;

  cond = as_a <gcond *> (last_stmt (cond_bb));
  cmp = gimple_cond_code (cond);

  /* This transformation is only valid for order comparisons.  Record which
     operand is smaller/larger if the result of the comparison is true.  */
  if (cmp == LT_EXPR || cmp == LE_EXPR)
    {
      smaller = gimple_cond_lhs (cond);
      larger = gimple_cond_rhs (cond);
    }
  else if (cmp == GT_EXPR || cmp == GE_EXPR)
    {
      smaller = gimple_cond_rhs (cond);
      larger = gimple_cond_lhs (cond);
    }
  else
    return false;

  /* We need to know which is the true edge and which is the false
     edge so that we know which PHI argument is taken on which edge.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);

  /* Forward the edges over the middle basic block.  */
  if (true_edge->dest == middle_bb)
    true_edge = EDGE_SUCC (true_edge->dest, 0);
  if (false_edge->dest == middle_bb)
    false_edge = EDGE_SUCC (false_edge->dest, 0);

  if (true_edge == e0)
    {
      gcc_assert (false_edge == e1);
      arg_true = arg0;
      arg_false = arg1;
    }
  else
    {
      gcc_assert (false_edge == e0);
      gcc_assert (true_edge == e1);
      arg_true = arg1;
      arg_false = arg0;
    }

  if (empty_block_p (middle_bb))
    {
      if (operand_equal_for_phi_arg_p (arg_true, smaller)
	  && operand_equal_for_phi_arg_p (arg_false, larger))
	{
	  /* Case

	     if (smaller < larger)
	       rslt = smaller;
	     else
	       rslt = larger;  */
	  minmax = MIN_EXPR;
	}
      else if (operand_equal_for_phi_arg_p (arg_false, smaller)
	       && operand_equal_for_phi_arg_p (arg_true, larger))
	minmax = MAX_EXPR;
      else
	return false;
    }
  else
    {
      /* Recognize the following case, assuming d <= u:

	 if (a <= u)
	   b = MAX (a, d);
	 x = PHI <b, u>

	 This is equivalent to

	 b = MAX (a, d);
	 x = MIN (b, u);  */

      gimple assign = last_and_only_stmt (middle_bb);
      tree lhs, op0, op1, bound;

      if (!assign
	  || gimple_code (assign) != GIMPLE_ASSIGN)
	return false;

      lhs = gimple_assign_lhs (assign);
      ass_code = gimple_assign_rhs_code (assign);
      if (ass_code != MAX_EXPR && ass_code != MIN_EXPR)
	return false;
      op0 = gimple_assign_rhs1 (assign);
      op1 = gimple_assign_rhs2 (assign);

      if (true_edge->src == middle_bb)
	{
	  /* We got here if the condition is true, i.e., SMALLER < LARGER.  */
	  if (!operand_equal_for_phi_arg_p (lhs, arg_true))
	    return false;

	  if (operand_equal_for_phi_arg_p (arg_false, larger))
	    {
	      /* Case

		 if (smaller < larger)
		   {
		     r' = MAX_EXPR (smaller, bound)
		   }
		 r = PHI <r', larger>  --> to be turned to MIN_EXPR.  */
	      if (ass_code != MAX_EXPR)
		return false;

	      minmax = MIN_EXPR;
	      if (operand_equal_for_phi_arg_p (op0, smaller))
		bound = op1;
	      else if (operand_equal_for_phi_arg_p (op1, smaller))
		bound = op0;
	      else
		return false;

	      /* We need BOUND <= LARGER.  */
	      if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
						  bound, larger)))
		return false;
	    }
	  else if (operand_equal_for_phi_arg_p (arg_false, smaller))
	    {
	      /* Case

		 if (smaller < larger)
		   {
		     r' = MIN_EXPR (larger, bound)
		   }
		 r = PHI <r', smaller>  --> to be turned to MAX_EXPR.  */
	      if (ass_code != MIN_EXPR)
		return false;

	      minmax = MAX_EXPR;
	      if (operand_equal_for_phi_arg_p (op0, larger))
		bound = op1;
	      else if (operand_equal_for_phi_arg_p (op1, larger))
		bound = op0;
	      else
		return false;

	      /* We need BOUND >= SMALLER.  */
	      if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
						  bound, smaller)))
		return false;
	    }
	  else
	    return false;
	}
      else
	{
	  /* We got here if the condition is false, i.e., SMALLER > LARGER.  */
	  if (!operand_equal_for_phi_arg_p (lhs, arg_false))
	    return false;

	  if (operand_equal_for_phi_arg_p (arg_true, larger))
	    {
	      /* Case

		 if (smaller > larger)
		   {
		     r' = MIN_EXPR (smaller, bound)
		   }
		 r = PHI <r', larger>  --> to be turned to MAX_EXPR.  */
	      if (ass_code != MIN_EXPR)
		return false;

	      minmax = MAX_EXPR;
	      if (operand_equal_for_phi_arg_p (op0, smaller))
		bound = op1;
	      else if (operand_equal_for_phi_arg_p (op1, smaller))
		bound = op0;
	      else
		return false;

	      /* We need BOUND >= LARGER.  */
	      if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
						  bound, larger)))
		return false;
	    }
	  else if (operand_equal_for_phi_arg_p (arg_true, smaller))
	    {
	      /* Case

		 if (smaller > larger)
		   {
		     r' = MAX_EXPR (larger, bound)
		   }
		 r = PHI <r', smaller>  --> to be turned to MIN_EXPR.  */
	      if (ass_code != MAX_EXPR)
		return false;

	      minmax = MIN_EXPR;
	      if (operand_equal_for_phi_arg_p (op0, larger))
		bound = op1;
	      else if (operand_equal_for_phi_arg_p (op1, larger))
		bound = op0;
	      else
		return false;

	      /* We need BOUND <= SMALLER.  */
	      if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
						  bound, smaller)))
		return false;
	    }
	  else
	    return false;
	}

      /* Move the statement from the middle block.  */
      gsi = gsi_last_bb (cond_bb);
      gsi_from = gsi_last_nondebug_bb (middle_bb);
      gsi_move_before (&gsi_from, &gsi);
    }

  /* Emit the statement to compute min/max.  */
  result = duplicate_ssa_name (PHI_RESULT (phi), NULL);
  new_stmt = gimple_build_assign (result, minmax, arg0, arg1);
  gsi = gsi_last_bb (cond_bb);
  gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);

  replace_phi_edge_with_variable (cond_bb, e1, phi, result);
  return true;
}

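/* Illustration (hypothetical source): with an empty middle block,
   minmax_replacement turns

     if (a < b) r = a; else r = b;    into   r = MIN_EXPR <a, b>;

   and the non-empty case handles the bounded variant documented above,
   yielding a MIN of a MAX (or vice versa).  */
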
/* The function abs_replacement does the main work of doing the absolute
   replacement.  Return true if the replacement is done.  Otherwise return
   false.
   bb is the basic block where the replacement is going to be done.  arg0
   is argument 0 from the phi.  Likewise for arg1.  */

static bool
abs_replacement (basic_block cond_bb, basic_block middle_bb,
		 edge e0 ATTRIBUTE_UNUSED, edge e1,
		 gimple phi, tree arg0, tree arg1)
{
  tree result;
  gassign *new_stmt;
  gimple cond;
  gimple_stmt_iterator gsi;
  edge true_edge, false_edge;
  gimple assign;
  edge e;
  tree rhs, lhs;
  bool negate;
  enum tree_code cond_code;

  /* If the type says honor signed zeros we cannot do this
     optimization.  */
  if (HONOR_SIGNED_ZEROS (arg1))
    return false;

  /* OTHER_BLOCK must have only one executable statement which must have the
     form arg0 = -arg1 or arg1 = -arg0.  */

  assign = last_and_only_stmt (middle_bb);
  /* If we did not find the proper negation assignment, then we can not
     optimize.  */
  if (assign == NULL)
    return false;

  /* If we got here, then we have found the only executable statement
     in OTHER_BLOCK.  If it is anything other than arg0 = -arg1 or
     arg1 = -arg0, then we can not optimize.  */
  if (gimple_code (assign) != GIMPLE_ASSIGN)
    return false;

  lhs = gimple_assign_lhs (assign);

  if (gimple_assign_rhs_code (assign) != NEGATE_EXPR)
    return false;

  rhs = gimple_assign_rhs1 (assign);

  /* The assignment has to be arg0 = -arg1 or arg1 = -arg0.  */
  if (!(lhs == arg0 && rhs == arg1)
      && !(lhs == arg1 && rhs == arg0))
    return false;

  cond = last_stmt (cond_bb);
  result = PHI_RESULT (phi);

  /* Only relationals comparing arg[01] against zero are interesting.  */
  cond_code = gimple_cond_code (cond);
  if (cond_code != GT_EXPR && cond_code != GE_EXPR
      && cond_code != LT_EXPR && cond_code != LE_EXPR)
    return false;

  /* Make sure the conditional is arg[01] OP y.  */
  if (gimple_cond_lhs (cond) != rhs)
    return false;

  if (FLOAT_TYPE_P (TREE_TYPE (gimple_cond_rhs (cond)))
	       ? real_zerop (gimple_cond_rhs (cond))
	       : integer_zerop (gimple_cond_rhs (cond)))
    ;
  else
    return false;

  /* We need to know which is the true edge and which is the false
     edge so that we know if we have abs or negative abs.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);

  /* For GT_EXPR/GE_EXPR, if the true edge goes to OTHER_BLOCK, then we
     will need to negate the result.  Similarly for LT_EXPR/LE_EXPR if
     the false edge goes to OTHER_BLOCK.  */
  if (cond_code == GT_EXPR || cond_code == GE_EXPR)
    e = true_edge;
  else
    e = false_edge;

  if (e->dest == middle_bb)
    negate = true;
  else
    negate = false;

  result = duplicate_ssa_name (result, NULL);

  if (negate)
    lhs = make_ssa_name (TREE_TYPE (result));
  else
    lhs = result;

  /* Build the modify expression with abs expression.  */
  new_stmt = gimple_build_assign (lhs, ABS_EXPR, rhs);

  gsi = gsi_last_bb (cond_bb);
  gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);

  if (negate)
    {
      /* Get the right GSI.  We want to insert after the recently
	 added ABS_EXPR statement (which we know is the first statement
	 in the block).  */
      new_stmt = gimple_build_assign (result, NEGATE_EXPR, lhs);

      gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
    }

  replace_phi_edge_with_variable (cond_bb, e1, phi, result);

  /* Note that we optimized this PHI.  */
  return true;
}

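/* Illustration (hypothetical source): abs_replacement turns

     if (x < 0) y = -x; else y = x;   into   y = ABS_EXPR <x>;

   and appends a NEGATE_EXPR for the negative-abs shape, e.g.

     if (x > 0) y = -x; else y = x;   into   y = -ABS_EXPR <x>;  */
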
/* Auxiliary functions to determine the set of memory accesses which
   can't trap because they are preceded by accesses to the same memory
   portion.  We do that for MEM_REFs, so we only need to track
   the SSA_NAME of the pointer indirectly referenced.  The algorithm
   simply is a walk over all instructions in dominator order.  When
   we see an MEM_REF we determine if we've already seen a same
   ref anywhere up to the root of the dominator tree.  If we do the
   current access can't trap.  If we don't see any dominating access
   the current access might trap, but might also make later accesses
   non-trapping, so we remember it.  We need to be careful with loads
   or stores, for instance a load might not trap, while a store would,
   so if we see a dominating read access this doesn't mean that a later
   write access would not trap.  Hence we also need to differentiate the
   type of access(es) seen.

   ??? We currently are very conservative and assume that a load might
   trap even if a store doesn't (write-only memory).  This probably is
   overly conservative.  */

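/* For example, given

     *p_1 = x;        <- may trap; remembered as a seen store
     if (cond)
       *p_1 = y;      <- same store seen on the dominator path: can't trap

   the second store is entered into the non-trapping set.  Loads and
   stores are tracked separately, so a dominating load would not have
   made the second store non-trapping.  */
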
/* A hash-table of SSA_NAMEs, and in which basic block an MEM_REF
   through it was seen, which would constitute a no-trap region for
   same accesses.  */
struct name_to_bb
{
  unsigned int ssa_name_ver;
  unsigned int phase;
  bool store;
  HOST_WIDE_INT offset, size;
  basic_block bb;
};

/* Hashtable helpers.  */

struct ssa_names_hasher : typed_free_remove <name_to_bb>
{
  typedef name_to_bb *value_type;
  typedef name_to_bb *compare_type;
  static inline hashval_t hash (const name_to_bb *);
  static inline bool equal (const name_to_bb *, const name_to_bb *);
};

/* Used for quick clearing of the hash-table when we see calls.
   Hash entries with phase < nt_call_phase are invalid.  */
static unsigned int nt_call_phase;

/* The hash function.  */

inline hashval_t
ssa_names_hasher::hash (const name_to_bb *n)
{
  return n->ssa_name_ver ^ (((hashval_t) n->store) << 31)
	 ^ (n->offset << 6) ^ (n->size << 3);
}

/* The equality function of *P1 and *P2.  */

inline bool
ssa_names_hasher::equal (const name_to_bb *n1, const name_to_bb *n2)
{
  return n1->ssa_name_ver == n2->ssa_name_ver
	 && n1->store == n2->store
	 && n1->offset == n2->offset
	 && n1->size == n2->size;
}

class nontrapping_dom_walker : public dom_walker
{
public:
  nontrapping_dom_walker (cdi_direction direction, hash_set<tree> *ps)
    : dom_walker (direction), m_nontrapping (ps), m_seen_ssa_names (128) {}

  virtual void before_dom_children (basic_block);
  virtual void after_dom_children (basic_block);

private:

  /* We see the expression EXP in basic block BB.  If it's an interesting
     expression (an MEM_REF through an SSA_NAME) possibly insert the
     expression into the set NONTRAP or the hash table of seen expressions.
     STORE is true if this expression is on the LHS, otherwise it's on
     the RHS.  */
  void add_or_mark_expr (basic_block, tree, bool);

  hash_set<tree> *m_nontrapping;

  /* The hash table for remembering what we've seen.  */
  hash_table<ssa_names_hasher> m_seen_ssa_names;
};

/* Called by walk_dominator_tree, when entering the block BB.  */
void
nontrapping_dom_walker::before_dom_children (basic_block bb)
{
  edge e;
  edge_iterator ei;
  gimple_stmt_iterator gsi;

  /* If we haven't seen all our predecessors, clear the hash-table.  */
  FOR_EACH_EDGE (e, ei, bb->preds)
    if ((((size_t)e->src->aux) & 2) == 0)
      {
	nt_call_phase++;
	break;
      }

  /* Mark this BB as being on the path to dominator root and as visited.  */
  bb->aux = (void*)(1 | 2);

  /* And walk the statements in order.  */
  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);

      if (is_gimple_call (stmt) && !nonfreeing_call_p (stmt))
	nt_call_phase++;
      else if (gimple_assign_single_p (stmt) && !gimple_has_volatile_ops (stmt))
	{
	  add_or_mark_expr (bb, gimple_assign_lhs (stmt), true);
	  add_or_mark_expr (bb, gimple_assign_rhs1 (stmt), false);
	}
    }
}

/* Called by walk_dominator_tree, when basic block BB is exited.  */
void
nontrapping_dom_walker::after_dom_children (basic_block bb)
{
  /* This BB isn't on the path to dominator root anymore.  */
  bb->aux = (void*)2;
}

/* We see the expression EXP in basic block BB.  If it's an interesting
   expression (an MEM_REF through an SSA_NAME) possibly insert the
   expression into the set NONTRAP or the hash table of seen expressions.
   STORE is true if this expression is on the LHS, otherwise it's on
   the RHS.  */
void
nontrapping_dom_walker::add_or_mark_expr (basic_block bb, tree exp, bool store)
{
  HOST_WIDE_INT size;

  if (TREE_CODE (exp) == MEM_REF
      && TREE_CODE (TREE_OPERAND (exp, 0)) == SSA_NAME
      && tree_fits_shwi_p (TREE_OPERAND (exp, 1))
      && (size = int_size_in_bytes (TREE_TYPE (exp))) > 0)
    {
      tree name = TREE_OPERAND (exp, 0);
      struct name_to_bb map;
      name_to_bb **slot;
      struct name_to_bb *n2bb;
      basic_block found_bb = 0;

      /* Try to find the last seen MEM_REF through the same
	 SSA_NAME, which can trap.  */
      map.ssa_name_ver = SSA_NAME_VERSION (name);
      map.phase = 0;
      map.bb = 0;
      map.store = store;
      map.offset = tree_to_shwi (TREE_OPERAND (exp, 1));
      map.size = size;

      slot = m_seen_ssa_names.find_slot (&map, INSERT);
      n2bb = *slot;
      if (n2bb && n2bb->phase >= nt_call_phase)
	found_bb = n2bb->bb;

      /* If we've found a trapping MEM_REF, _and_ it dominates EXP
	 (it's in a basic block on the path from us to the dominator root)
	 then we can't trap.  */
      if (found_bb && (((size_t)found_bb->aux) & 1) == 1)
	{
	  m_nontrapping->add (exp);
	}
      else
	{
	  /* EXP might trap, so insert it into the hash table.  */
	  if (n2bb)
	    {
	      n2bb->phase = nt_call_phase;
	      n2bb->bb = bb;
	    }
	  else
	    {
	      n2bb = XNEW (struct name_to_bb);
	      n2bb->ssa_name_ver = SSA_NAME_VERSION (name);
	      n2bb->phase = nt_call_phase;
	      n2bb->bb = bb;
	      n2bb->store = store;
	      n2bb->offset = map.offset;
	      n2bb->size = size;
	      *slot = n2bb;
	    }
	}
    }
}

/* This is the entry point of gathering non-trapping memory accesses.
   It will do a dominator walk over the whole function, and it will
   make use of the bb->aux pointers.  It returns a set of trees
   (the MEM_REFs themselves) which can't trap.  */
static hash_set<tree> *
get_non_trapping (void)
{
  nt_call_phase = 0;
  hash_set<tree> *nontrap = new hash_set<tree>;
  /* We're going to do a dominator walk, so ensure that we have
     dominance information.  */
  calculate_dominance_info (CDI_DOMINATORS);

  nontrapping_dom_walker (CDI_DOMINATORS, nontrap)
    .walk (cfun->cfg->x_entry_block_ptr);

  clear_aux_for_blocks ();
  return nontrap;
}

/* Do the main work of conditional store replacement.  We already know
   that the recognized pattern looks like so:

   split:
     if (cond) goto MIDDLE_BB; else goto JOIN_BB (edge E1)
   MIDDLE_BB:
     something
     fallthrough (edge E0)
   JOIN_BB:
     some more

   We check that MIDDLE_BB contains only one store, that that store
   doesn't trap (not via NOTRAP, but via checking if an access to the same
   memory location dominates us) and that the store has a "simple" RHS.  */

static bool
cond_store_replacement (basic_block middle_bb, basic_block join_bb,
			edge e0, edge e1, hash_set<tree> *nontrap)
{
  gimple assign = last_and_only_stmt (middle_bb);
  tree lhs, rhs, name, name2;
  gphi *newphi;
  gassign *new_stmt;
  gimple_stmt_iterator gsi;
  source_location locus;

  /* Check if middle_bb contains only one store.  */
  if (!assign
      || !gimple_assign_single_p (assign)
      || gimple_has_volatile_ops (assign))
    return false;

  locus = gimple_location (assign);
  lhs = gimple_assign_lhs (assign);
  rhs = gimple_assign_rhs1 (assign);
  if (TREE_CODE (lhs) != MEM_REF
      || TREE_CODE (TREE_OPERAND (lhs, 0)) != SSA_NAME
      || !is_gimple_reg_type (TREE_TYPE (lhs)))
    return false;

  /* Prove that we can move the store down.  We could also check
     TREE_THIS_NOTRAP here, but in that case we also could move stores,
     whose value is not available readily, which we want to avoid.  */
  if (!nontrap->contains (lhs))
    return false;

  /* Now we've checked the constraints, so do the transformation:
     1) Remove the single store.  */
  gsi = gsi_for_stmt (assign);
  unlink_stmt_vdef (assign);
  gsi_remove (&gsi, true);
  release_defs (assign);

  /* 2) Insert a load from the memory of the store to the temporary
        on the edge which did not contain the store.  */
  lhs = unshare_expr (lhs);
  name = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
  new_stmt = gimple_build_assign (name, lhs);
  gimple_set_location (new_stmt, locus);
  gsi_insert_on_edge (e1, new_stmt);

  /* 3) Create a PHI node at the join block, with one argument
        holding the old RHS, and the other holding the temporary
        where we stored the old memory contents.  */
  name2 = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
  newphi = create_phi_node (name2, join_bb);
  add_phi_arg (newphi, rhs, e0, locus);
  add_phi_arg (newphi, name, e1, locus);

  lhs = unshare_expr (lhs);
  new_stmt = gimple_build_assign (lhs, PHI_RESULT (newphi));

  /* 4) Insert that PHI node.  */
  gsi = gsi_after_labels (join_bb);
  if (gsi_end_p (gsi))
    {
      gsi = gsi_last_bb (join_bb);
      gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
    }
  else
    gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);

  return true;
}

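/* Illustration (hypothetical GIMPLE): for  if (cond) *p_1 = b_2;  the
   four steps above leave

     cstore_3 = *p_1;                  <- load inserted on edge E1
     # cstore_4 = PHI <b_2, cstore_3>
     *p_1 = cstore_4;                  <- unconditional store in JOIN_BB  */
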
/* Do the main work of conditional store replacement.  */

static bool
cond_if_else_store_replacement_1 (basic_block then_bb, basic_block else_bb,
				  basic_block join_bb, gimple then_assign,
				  gimple else_assign)
{
  tree lhs_base, lhs, then_rhs, else_rhs, name;
  source_location then_locus, else_locus;
  gimple_stmt_iterator gsi;
  gphi *newphi;
  gassign *new_stmt;

  if (then_assign == NULL
      || !gimple_assign_single_p (then_assign)
      || gimple_clobber_p (then_assign)
      || gimple_has_volatile_ops (then_assign)
      || else_assign == NULL
      || !gimple_assign_single_p (else_assign)
      || gimple_clobber_p (else_assign)
      || gimple_has_volatile_ops (else_assign))
    return false;

  lhs = gimple_assign_lhs (then_assign);
  if (!is_gimple_reg_type (TREE_TYPE (lhs))
      || !operand_equal_p (lhs, gimple_assign_lhs (else_assign), 0))
    return false;

  lhs_base = get_base_address (lhs);
  if (lhs_base == NULL_TREE
      || (!DECL_P (lhs_base) && TREE_CODE (lhs_base) != MEM_REF))
    return false;

  then_rhs = gimple_assign_rhs1 (then_assign);
  else_rhs = gimple_assign_rhs1 (else_assign);
  then_locus = gimple_location (then_assign);
  else_locus = gimple_location (else_assign);

  /* Now we've checked the constraints, so do the transformation:
     1) Remove the stores.  */
  gsi = gsi_for_stmt (then_assign);
  unlink_stmt_vdef (then_assign);
  gsi_remove (&gsi, true);
  release_defs (then_assign);

  gsi = gsi_for_stmt (else_assign);
  unlink_stmt_vdef (else_assign);
  gsi_remove (&gsi, true);
  release_defs (else_assign);

  /* 2) Create a PHI node at the join block, with one argument
	holding the THEN RHS, and the other holding the ELSE RHS.  */
  name = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
  newphi = create_phi_node (name, join_bb);
  add_phi_arg (newphi, then_rhs, EDGE_SUCC (then_bb, 0), then_locus);
  add_phi_arg (newphi, else_rhs, EDGE_SUCC (else_bb, 0), else_locus);

  new_stmt = gimple_build_assign (lhs, PHI_RESULT (newphi));

  /* 3) Insert that PHI node.  */
  gsi = gsi_after_labels (join_bb);
  if (gsi_end_p (gsi))
    {
      gsi = gsi_last_bb (join_bb);
      gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
    }
  else
    gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);

  return true;
}

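/* Illustration (hypothetical GIMPLE): for

     if (cond) *p_1 = y_2; else *p_1 = z_3;

   the two stores are removed and JOIN_BB receives

     # cstore_4 = PHI <y_2, z_3>
     *p_1 = cstore_4;  */
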
/* Conditional store replacement.  We already know
   that the recognized pattern looks like so:

   split:
     if (cond) goto THEN_BB; else goto ELSE_BB (edge E1)
   THEN_BB:
     ...
     X = Y;
     ...
     goto JOIN_BB;
   ELSE_BB:
     ...
     X = Z;
     ...
     fallthrough (edge E0)
   JOIN_BB:
     some more

   We check that it is safe to sink the store to JOIN_BB by verifying that
   there are no read-after-write or write-after-write dependencies in
   THEN_BB and ELSE_BB.  */

static bool
cond_if_else_store_replacement (basic_block then_bb, basic_block else_bb,
				basic_block join_bb)
{
  gimple then_assign = last_and_only_stmt (then_bb);
  gimple else_assign = last_and_only_stmt (else_bb);
  vec<data_reference_p> then_datarefs, else_datarefs;
  vec<ddr_p> then_ddrs, else_ddrs;
  gimple then_store, else_store;
  bool found, ok = false, res;
  struct data_dependence_relation *ddr;
  data_reference_p then_dr, else_dr;
  int i, j;
  tree then_lhs, else_lhs;
  basic_block blocks[3];

  if (MAX_STORES_TO_SINK == 0)
    return false;

  /* Handle the case with single statement in THEN_BB and ELSE_BB.  */
  if (then_assign && else_assign)
    return cond_if_else_store_replacement_1 (then_bb, else_bb, join_bb,
					     then_assign, else_assign);

  /* Find data references.  */
  then_datarefs.create (1);
  else_datarefs.create (1);
  if ((find_data_references_in_bb (NULL, then_bb, &then_datarefs)
       == chrec_dont_know)
      || !then_datarefs.length ()
      || (find_data_references_in_bb (NULL, else_bb, &else_datarefs)
	  == chrec_dont_know)
      || !else_datarefs.length ())
    {
      free_data_refs (then_datarefs);
      free_data_refs (else_datarefs);
      return false;
    }

  /* Find pairs of stores with equal LHS.  */
  auto_vec<gimple, 1> then_stores, else_stores;
  FOR_EACH_VEC_ELT (then_datarefs, i, then_dr)
    {
      if (DR_IS_READ (then_dr))
	continue;

      then_store = DR_STMT (then_dr);
      then_lhs = gimple_get_lhs (then_store);
      if (then_lhs == NULL_TREE)
	continue;
      found = false;

      FOR_EACH_VEC_ELT (else_datarefs, j, else_dr)
	{
	  if (DR_IS_READ (else_dr))
	    continue;

	  else_store = DR_STMT (else_dr);
	  else_lhs = gimple_get_lhs (else_store);
	  if (else_lhs == NULL_TREE)
	    continue;

	  if (operand_equal_p (then_lhs, else_lhs, 0))
	    {
	      found = true;
	      break;
	    }
	}

      if (!found)
	continue;

      then_stores.safe_push (then_store);
      else_stores.safe_push (else_store);
    }

  /* No pairs of stores found.  */
  if (!then_stores.length ()
      || then_stores.length () > (unsigned) MAX_STORES_TO_SINK)
    {
      free_data_refs (then_datarefs);
      free_data_refs (else_datarefs);
      return false;
    }

  /* Compute and check data dependencies in both basic blocks.  */
  then_ddrs.create (1);
  else_ddrs.create (1);
  if (!compute_all_dependences (then_datarefs, &then_ddrs,
				vNULL, false)
      || !compute_all_dependences (else_datarefs, &else_ddrs,
				   vNULL, false))
    {
      free_dependence_relations (then_ddrs);
      free_dependence_relations (else_ddrs);
      free_data_refs (then_datarefs);
      free_data_refs (else_datarefs);
      return false;
    }
  blocks[0] = then_bb;
  blocks[1] = else_bb;
  blocks[2] = join_bb;
  renumber_gimple_stmt_uids_in_blocks (blocks, 3);

  /* Check that there are no read-after-write or write-after-write dependencies
     in THEN_BB.  */
  FOR_EACH_VEC_ELT (then_ddrs, i, ddr)
    {
      struct data_reference *dra = DDR_A (ddr);
      struct data_reference *drb = DDR_B (ddr);

      if (DDR_ARE_DEPENDENT (ddr) != chrec_known
	  && ((DR_IS_READ (dra) && DR_IS_WRITE (drb)
	       && gimple_uid (DR_STMT (dra)) > gimple_uid (DR_STMT (drb)))
	      || (DR_IS_READ (drb) && DR_IS_WRITE (dra)
		  && gimple_uid (DR_STMT (drb)) > gimple_uid (DR_STMT (dra)))
	      || (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))))
	{
	  free_dependence_relations (then_ddrs);
	  free_dependence_relations (else_ddrs);
	  free_data_refs (then_datarefs);
	  free_data_refs (else_datarefs);
	  return false;
	}
    }

  /* Check that there are no read-after-write or write-after-write dependencies
     in ELSE_BB.  */
  FOR_EACH_VEC_ELT (else_ddrs, i, ddr)
    {
      struct data_reference *dra = DDR_A (ddr);
      struct data_reference *drb = DDR_B (ddr);

      if (DDR_ARE_DEPENDENT (ddr) != chrec_known
	  && ((DR_IS_READ (dra) && DR_IS_WRITE (drb)
	       && gimple_uid (DR_STMT (dra)) > gimple_uid (DR_STMT (drb)))
	      || (DR_IS_READ (drb) && DR_IS_WRITE (dra)
		  && gimple_uid (DR_STMT (drb)) > gimple_uid (DR_STMT (dra)))
	      || (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))))
	{
	  free_dependence_relations (then_ddrs);
	  free_dependence_relations (else_ddrs);
	  free_data_refs (then_datarefs);
	  free_data_refs (else_datarefs);
	  return false;
	}
    }

  /* Sink stores with same LHS.  */
  FOR_EACH_VEC_ELT (then_stores, i, then_store)
    {
      else_store = else_stores[i];
      res = cond_if_else_store_replacement_1 (then_bb, else_bb, join_bb,
					      then_store, else_store);
      ok = ok || res;
    }

  free_dependence_relations (then_ddrs);
  free_dependence_relations (else_ddrs);
  free_data_refs (then_datarefs);
  free_data_refs (else_datarefs);

  return ok;
}

/* Return TRUE if STMT has a VUSE whose corresponding VDEF is in BB.  */

static bool
local_mem_dependence (gimple stmt, basic_block bb)
{
  tree vuse = gimple_vuse (stmt);
  gimple def;

  if (!vuse)
    return false;

  def = SSA_NAME_DEF_STMT (vuse);
  return (def && gimple_bb (def) == bb);
}


/* Given a "diamond" control-flow pattern where BB0 tests a condition,
   BB1 and BB2 are "then" and "else" blocks dependent on this test,
   and BB3 rejoins control flow following BB1 and BB2, look for
   opportunities to hoist loads as follows.  If BB3 contains a PHI of
   two loads, one each occurring in BB1 and BB2, and the loads are
   provably of adjacent fields in the same structure, then move both
   loads into BB0.  Of course this can only be done if there are no
   dependencies preventing such motion.

   One of the hoisted loads will always be speculative, so the
   transformation is currently conservative:

    - The fields must be strictly adjacent.
    - The two fields must occupy a single memory block that is
      guaranteed to not cross a page boundary.

   The last is difficult to prove, as such memory blocks should be
   aligned on the minimum of the stack alignment boundary and the
   alignment guaranteed by heap allocation interfaces.  Thus we rely
   on a parameter for the alignment value.

   Provided a good value is used for the last case, the first
   restriction could possibly be relaxed.  */
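
/* For illustration only (a hypothetical example, not taken from the
   surrounding sources): a pair of adjacent pointer fields is the
   typical candidate, e.g.

     struct node { struct node *left; struct node *right; };

     struct node *
     pick_child (struct node *n, int go_left)
     {
       return go_left ? n->left : n->right;
     }

   After gimplification, the two field loads sit in the two arms of a
   diamond and feed a PHI in the join block, which is the shape the
   function below looks for.  */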

static void
hoist_adjacent_loads (basic_block bb0, basic_block bb1,
                      basic_block bb2, basic_block bb3)
{
  int param_align = PARAM_VALUE (PARAM_L1_CACHE_LINE_SIZE);
  unsigned param_align_bits = (unsigned) (param_align * BITS_PER_UNIT);
  gphi_iterator gsi;

  /* Walk the phis in bb3 looking for an opportunity.  We are looking
     for phis of two SSA names, one each of which is defined in bb1 and
     bb2.  */
  for (gsi = gsi_start_phis (bb3); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gphi *phi_stmt = gsi.phi ();
      gimple def1, def2;
      tree arg1, arg2, ref1, ref2, field1, field2;
      tree tree_offset1, tree_offset2, tree_size2, next;
      int offset1, offset2, size2;
      unsigned align1;
      gimple_stmt_iterator gsi2;
      basic_block bb_for_def1, bb_for_def2;

      if (gimple_phi_num_args (phi_stmt) != 2
          || virtual_operand_p (gimple_phi_result (phi_stmt)))
        continue;

      arg1 = gimple_phi_arg_def (phi_stmt, 0);
      arg2 = gimple_phi_arg_def (phi_stmt, 1);

      if (TREE_CODE (arg1) != SSA_NAME
          || TREE_CODE (arg2) != SSA_NAME
          || SSA_NAME_IS_DEFAULT_DEF (arg1)
          || SSA_NAME_IS_DEFAULT_DEF (arg2))
        continue;

      def1 = SSA_NAME_DEF_STMT (arg1);
      def2 = SSA_NAME_DEF_STMT (arg2);

      if ((gimple_bb (def1) != bb1 || gimple_bb (def2) != bb2)
          && (gimple_bb (def2) != bb1 || gimple_bb (def1) != bb2))
        continue;

      /* Check the mode of the arguments to be sure a conditional move
         can be generated for it.  */
      if (optab_handler (movcc_optab, TYPE_MODE (TREE_TYPE (arg1)))
          == CODE_FOR_nothing)
        continue;

      /* Both statements must be assignments whose RHS is a COMPONENT_REF.  */
      if (!gimple_assign_single_p (def1)
          || !gimple_assign_single_p (def2)
          || gimple_has_volatile_ops (def1)
          || gimple_has_volatile_ops (def2))
        continue;

      ref1 = gimple_assign_rhs1 (def1);
      ref2 = gimple_assign_rhs1 (def2);

      if (TREE_CODE (ref1) != COMPONENT_REF
          || TREE_CODE (ref2) != COMPONENT_REF)
        continue;

      /* The zeroth operand of the two component references must be
         identical.  It is not sufficient to compare get_base_address of
         the two references, because this could allow for different
         elements of the same array in the two trees.  It is not safe to
         assume that the existence of one array element implies the
         existence of a different one.  */
      if (!operand_equal_p (TREE_OPERAND (ref1, 0), TREE_OPERAND (ref2, 0), 0))
        continue;

      field1 = TREE_OPERAND (ref1, 1);
      field2 = TREE_OPERAND (ref2, 1);

      /* Check for field adjacency, and ensure field1 comes first.  */
      for (next = DECL_CHAIN (field1);
           next && TREE_CODE (next) != FIELD_DECL;
           next = DECL_CHAIN (next))
        ;

      if (next != field2)
        {
          for (next = DECL_CHAIN (field2);
               next && TREE_CODE (next) != FIELD_DECL;
               next = DECL_CHAIN (next))
            ;

          if (next != field1)
            continue;

          std::swap (field1, field2);
          std::swap (def1, def2);
        }

      bb_for_def1 = gimple_bb (def1);
      bb_for_def2 = gimple_bb (def2);

      /* Check for proper alignment of the first field.  */
      tree_offset1 = bit_position (field1);
      tree_offset2 = bit_position (field2);
      tree_size2 = DECL_SIZE (field2);

      if (!tree_fits_uhwi_p (tree_offset1)
          || !tree_fits_uhwi_p (tree_offset2)
          || !tree_fits_uhwi_p (tree_size2))
        continue;

      offset1 = tree_to_uhwi (tree_offset1);
      offset2 = tree_to_uhwi (tree_offset2);
      size2 = tree_to_uhwi (tree_size2);
      align1 = DECL_ALIGN (field1) % param_align_bits;

      if (offset1 % BITS_PER_UNIT != 0)
        continue;

      /* For profitability, the two field references should fit within
         a single cache line.  */
      if (align1 + offset2 - offset1 + size2 > param_align_bits)
        continue;

      /* The two expressions cannot be dependent upon vdefs defined
         in bb1/bb2.  */
      if (local_mem_dependence (def1, bb_for_def1)
          || local_mem_dependence (def2, bb_for_def2))
        continue;

      /* The conditions are satisfied; hoist the loads from bb1 and bb2 into
         bb0.  We hoist the first one first so that a cache miss is handled
         efficiently regardless of hardware cache-fill policy.  */
      gsi2 = gsi_for_stmt (def1);
      gsi_move_to_bb_end (&gsi2, bb0);
      gsi2 = gsi_for_stmt (def2);
      gsi_move_to_bb_end (&gsi2, bb0);

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file,
                   "\nHoisting adjacent loads from %d and %d into %d: \n",
                   bb_for_def1->index, bb_for_def2->index, bb0->index);
          print_gimple_stmt (dump_file, def1, 0, TDF_VOPS|TDF_MEMSYMS);
          print_gimple_stmt (dump_file, def2, 0, TDF_VOPS|TDF_MEMSYMS);
        }
    }
}

/* Determine whether we should attempt to hoist adjacent loads out of
   diamond patterns in pass_phiopt.  Always hoist loads if
   -fhoist-adjacent-loads is specified and the target machine has
   both a conditional move instruction and a defined cache line size.  */

static bool
gate_hoist_loads (void)
{
  return (flag_hoist_adjacent_loads == 1
          && PARAM_VALUE (PARAM_L1_CACHE_LINE_SIZE)
          && HAVE_conditional_move);
}

/* This pass tries to replace an if-then-else block with an
   assignment.  We have four kinds of transformations.  Some of these
   transformations are also performed by the ifcvt RTL optimizer.

   Conditional Replacement
   -----------------------

   This transformation, implemented in conditional_replacement,
   replaces

     bb0:
      if (cond) goto bb2; else goto bb1;
     bb1:
     bb2:
      x = PHI <0 (bb1), 1 (bb0), ...>;

   with

     bb0:
      x' = cond;
      goto bb2;
     bb2:
      x = PHI <x' (bb0), ...>;

   We remove bb1 as it becomes unreachable.  This occurs often due to
   gimplification of conditionals.
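
   For illustration (a hypothetical example, not from the original
   comment), C source such as

     int f (int a, int b)
     {
       int x;
       if (a == b)
         x = 1;
       else
         x = 0;
       return x;
     }

   gimplifies to exactly this shape, and the whole diamond collapses
   to the equivalent of "return a == b;".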

   Value Replacement
   -----------------

   This transformation, implemented in value_replacement, replaces

     bb0:
      if (a != b) goto bb2; else goto bb1;
     bb1:
     bb2:
      x = PHI <a (bb1), b (bb0), ...>;

   with

     bb0:
     bb2:
      x = PHI <b (bb0), ...>;

   This opportunity can sometimes occur as a result of other
   optimizations.
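
   For example (illustrative only):

     int f (int a, int b)
     {
       return a != b ? b : a;
     }

   On the edge where the condition is false we know a == b, so both
   PHI arguments equal b and the function reduces to "return b;".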

   Another case caught by value replacement looks like this:

     bb0:
       t1 = a == CONST;
       t2 = b > c;
       t3 = t1 & t2;
       if (t3 != 0) goto bb1; else goto bb2;
     bb1:
     bb2:
       x = PHI (CONST, a)

   Gets replaced with:
     bb0:
     bb2:
       t1 = a == CONST;
       t2 = b > c;
       t3 = t1 & t2;
       x = a;
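
   For example (illustrative only), source like

     int f (int a, int b, int c)
     {
       int x;
       if (a == 5 && b > c)
         x = 5;
       else
         x = a;
       return x;
     }

   may gimplify to the form above; since a == 5 whenever the condition
   holds, x equals a on both edges and the PHI collapses to "x = a;".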

   ABS Replacement
   ---------------

   This transformation, implemented in abs_replacement, replaces

     bb0:
      if (a >= 0) goto bb2; else goto bb1;
     bb1:
      x = -a;
     bb2:
      x = PHI <x (bb1), a (bb0), ...>;

   with

     bb0:
      x' = ABS_EXPR< a >;
     bb2:
      x = PHI <x' (bb0), ...>;
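
   For example (illustrative only), the canonical absolute-value idiom

     int iabs (int a)
     {
       return a >= 0 ? a : -a;
     }

   matches this pattern and is rewritten to a single ABS_EXPR.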

   MIN/MAX Replacement
   -------------------

   This transformation, implemented in minmax_replacement, replaces

     bb0:
      if (a <= b) goto bb2; else goto bb1;
     bb1:
     bb2:
      x = PHI <b (bb1), a (bb0), ...>;

   with

     bb0:
      x' = MIN_EXPR (a, b)
     bb2:
      x = PHI <x' (bb0), ...>;

   A similar transformation is done for MAX_EXPR.
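
   For example (illustrative only):

     int imin (int a, int b)
     {
       return a <= b ? a : b;
     }

   is rewritten to a single MIN_EXPR, which typically expands to
   branchless code.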

   This pass also performs a fifth transformation of a slightly different
   flavor.

   Adjacent Load Hoisting
   ----------------------

   This transformation replaces

     bb0:
      if (...) goto bb2; else goto bb1;
     bb1:
      x1 = (<expr>).field1;
      goto bb3;
     bb2:
      x2 = (<expr>).field2;
     bb3:
      # x = PHI <x1, x2>;

   with

     bb0:
      x1 = (<expr>).field1;
      x2 = (<expr>).field2;
      if (...) goto bb2; else goto bb1;
     bb1:
      goto bb3;
     bb2:
     bb3:
      # x = PHI <x1, x2>;

   The purpose of this transformation is to enable generation of conditional
   move instructions such as Intel CMOVE or PowerPC ISEL.  Because one of
   the loads is speculative, the transformation is restricted to very
   specific cases to avoid introducing a page fault.  We are looking for
   the common idiom:

     if (...)
       x = y->left;
     else
       x = y->right;

   where left and right are typically adjacent pointers in a tree structure.  */

namespace {

const pass_data pass_data_phiopt =
{
  GIMPLE_PASS, /* type */
  "phiopt", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_PHIOPT, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_phiopt : public gimple_opt_pass
{
public:
  pass_phiopt (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_phiopt, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_phiopt (m_ctxt); }
  virtual bool gate (function *) { return flag_ssa_phiopt; }
  virtual unsigned int execute (function *)
    {
      return tree_ssa_phiopt_worker (false, gate_hoist_loads ());
    }

}; // class pass_phiopt

} // anon namespace

gimple_opt_pass *
make_pass_phiopt (gcc::context *ctxt)
{
  return new pass_phiopt (ctxt);
}

namespace {

const pass_data pass_data_cselim =
{
  GIMPLE_PASS, /* type */
  "cselim", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_PHIOPT, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_cselim : public gimple_opt_pass
{
public:
  pass_cselim (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_cselim, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return flag_tree_cselim; }
  virtual unsigned int execute (function *) { return tree_ssa_cs_elim (); }

}; // class pass_cselim

} // anon namespace

gimple_opt_pass *
make_pass_cselim (gcc::context *ctxt)
{
  return new pass_cselim (ctxt);
}