/* Reassociation for trees.
   Copyright (C) 2005-2017 Free Software Foundation, Inc.
   Contributed by Daniel Berlin <dan@dberlin.org>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "alloc-pool.h"
#include "tree-pass.h"
#include "memmodel.h"
#include "tm_p.h"
#include "ssa.h"
#include "optabs-tree.h"
#include "gimple-pretty-print.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "cfganal.h"
#include "gimple-fold.h"
#include "tree-eh.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "tree-cfg.h"
#include "tree-ssa-loop.h"
#include "flags.h"
#include "tree-ssa.h"
#include "langhooks.h"
#include "cfgloop.h"
#include "params.h"
#include "builtins.h"
#include "gimplify.h"
#include "case-cfn-macros.h"
/* This is a simple global reassociation pass.  It is, in part, based
   on the LLVM pass of the same name (they do some things more/less
   than we do, in different orders, etc).

   It consists of five steps:

   1. Breaking up subtract operations into addition + negate, where
   it would promote the reassociation of adds.

   2. Left linearization of the expression trees, so that (A+B)+(C+D)
   becomes (((A+B)+C)+D), which is easier for us to rewrite later.
   During linearization, we place the operands of the binary
   expressions into a vector of operand_entry_*

   3. Optimization of the operand lists, eliminating things like a +
   -a, a & a, etc.

   3a. Combine repeated factors with the same occurrence counts
   into a __builtin_powi call that will later be optimized into
   an optimal number of multiplies.

   4. Rewrite the expression trees we linearized and optimized so
   they are in proper rank order.

   5. Repropagate negates, as nothing else will clean it up ATM.

   A bit of theory on #4, since nobody seems to write anything down
   about why it makes sense to do it the way they do it:

   We could do this much nicer theoretically, but don't (for reasons
   explained after how to do it theoretically nice :P).

   In order to promote the most redundancy elimination, you want
   binary expressions whose operands are the same rank (or
   preferably, the same value) exposed to the redundancy eliminator,
   for possible elimination.

   So the way to do this if we really cared is to build the new op
   tree from the leaves to the roots, merging as you go, and putting the
   new op on the end of the worklist, until you are left with one
   thing on the worklist.

   I.e., if you have to rewrite the following set of operands (listed with
   rank in parentheses), with opcode PLUS_EXPR:

   a (1), b (1), c (1), d (2), e (2)

   We start with our merge worklist empty, and the ops list with all of
   those on it.

   You want to first merge all leaves of the same rank, as much as
   possible.

   So first build a binary op of

   mergetmp = a + b, and put "mergetmp" on the merge worklist.

   Because there is no three operand form of PLUS_EXPR, c is not going to
   be exposed to redundancy elimination as a rank 1 operand.

   So you might as well throw it on the merge worklist (you could also
   consider it to now be a rank two operand, and merge it with d and e,
   but in this case, you then have evicted e from a binary op.  So at
   least in this situation, you can't win.)

   Then build a binary op of d + e
   mergetmp2 = d + e

   and put mergetmp2 on the merge worklist.

   So merge worklist = {mergetmp, c, mergetmp2}

   Continue building binary ops of these operations until you have only
   one operation left on the worklist.

   So we have

   build binary op
   mergetmp3 = mergetmp + c

   worklist = {mergetmp2, mergetmp3}

   mergetmp4 = mergetmp2 + mergetmp3

   worklist = {mergetmp4}

   Because we have one operation left, we can now just set the original
   statement equal to the result of that operation.

   This will at least expose a + b and d + e to redundancy elimination
   as binary operations.

   For extra points, you can reuse the old statements to build the
   mergetmps, since you shouldn't run out.

   So why don't we do this?

   Because it's expensive, and rarely will help.  Most trees we are
   reassociating have 3 or fewer ops.  If they have 2 ops, they already
   will be written into a nice single binary op.  If you have 3 ops, a
   single simple check suffices to tell you whether the first two are of the
   same rank.  If so, you know to order it

   mergetmp = op1 + op2
   newstmt = mergetmp + op3

   instead of
   mergetmp = op2 + op3
   newstmt = mergetmp + op1

   If all three are of the same rank, you can't expose them all in a
   single binary operator anyway, so the above is *still* the best you
   can do.

   Thus, this is what we do.  When we have three ops left, we check to see
   what order to put them in, and call it a day.  As a nod to vector sum
   reduction, we check if any of the ops are really a phi node that is a
   destructive update for the associating op, and keep the destructive
   update together for vector sum reduction recognition.  */
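/* A short source-level sketch of what the pass buys us (an
   illustration only, with made-up variable names; not part of the
   upstream sources): given

     x = a + b;
     y = (a + c) + b;

   linearizing y's tree yields the operand list {a, c, b}, and the
   rank-ordered rewrite may emit it as ((a + b) + c), in which case
   the subexpression a + b appears in both statements and can be
   removed as fully redundant by a later value-numbering pass.  */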
/* Enable insertion of __builtin_powi calls during execute_reassoc.  See
   point 3a in the pass header comment.  */
static bool reassoc_insert_powi_p;

/* Statistics */
static struct
{
  int linearized;
  int constants_eliminated;
  int ops_eliminated;
  int rewritten;
  int pows_encountered;
  int pows_created;
} reassociate_stats;
/* Operator, rank pair.  */
struct operand_entry
{
  unsigned int rank;
  unsigned int id;
  tree op;
  unsigned int count;
  gimple *stmt_to_insert;
};

static object_allocator<operand_entry> operand_entry_pool
  ("operand entry pool");

/* This is used to assign a unique ID to each struct operand_entry
   so that qsort results are identical on different hosts.  */
static unsigned int next_operand_entry_id;
/* Starting rank number for a given basic block, so that we can rank
   operations using unmovable instructions in that BB based on the bb
   depth.  */
static long *bb_rank;

/* Operand->rank hashtable.  */
static hash_map<tree, long> *operand_rank;

/* Vector of SSA_NAMEs on which after reassociate_bb is done with
   all basic blocks the CFG should be adjusted - basic blocks
   split right after that SSA_NAME's definition statement and before
   the only use, which must be a bit ior.  */
static vec<tree> reassoc_branch_fixups;

/* Forward decls.  */
static long get_rank (tree);
static bool reassoc_stmt_dominates_stmt_p (gimple *, gimple *);

/* Wrapper around gsi_remove, which adjusts gimple_uid of debug stmts
   possibly added by gsi_remove.  */
bool
reassoc_remove_stmt (gimple_stmt_iterator *gsi)
{
  gimple *stmt = gsi_stmt (*gsi);

  if (!MAY_HAVE_DEBUG_STMTS || gimple_code (stmt) == GIMPLE_PHI)
    return gsi_remove (gsi, true);

  gimple_stmt_iterator prev = *gsi;
  gsi_prev (&prev);
  unsigned uid = gimple_uid (stmt);
  basic_block bb = gimple_bb (stmt);
  bool ret = gsi_remove (gsi, true);
  if (!gsi_end_p (prev))
    gsi_next (&prev);
  else
    prev = gsi_start_bb (bb);
  gimple *end_stmt = gsi_stmt (*gsi);
  while ((stmt = gsi_stmt (prev)) != end_stmt)
    {
      gcc_assert (stmt && is_gimple_debug (stmt) && gimple_uid (stmt) == 0);
      gimple_set_uid (stmt, uid);
      gsi_next (&prev);
    }
  return ret;
}
/* Bias amount for loop-carried phis.  We want this to be larger than
   the depth of any reassociation tree we can see, but not larger than
   the rank difference between two blocks.  */
#define PHI_LOOP_BIAS (1 << 15)

/* Rank assigned to a phi statement.  If STMT is a loop-carried phi of
   an innermost loop, and the phi has only a single use which is inside
   the loop, then the rank is the block rank of the loop latch plus an
   extra bias for the loop-carried dependence.  This causes expressions
   calculated into an accumulator variable to be independent for each
   iteration of the loop.  If STMT is some other phi, the rank is the
   block rank of its containing block.  */
static long
phi_rank (gimple *stmt)
{
  basic_block bb = gimple_bb (stmt);
  struct loop *father = bb->loop_father;
  tree res;
  unsigned i;
  use_operand_p use;
  gimple *use_stmt;

  /* We only care about real loops (those with a latch).  */
  if (!father->latch)
    return bb_rank[bb->index];

  /* Interesting phis must be in headers of innermost loops.  */
  if (bb != father->header
      || father->inner)
    return bb_rank[bb->index];

  /* Ignore virtual SSA_NAMEs.  */
  res = gimple_phi_result (stmt);
  if (virtual_operand_p (res))
    return bb_rank[bb->index];

  /* The phi definition must have a single use, and that use must be
     within the loop.  Otherwise this isn't an accumulator pattern.  */
  if (!single_imm_use (res, &use, &use_stmt)
      || gimple_bb (use_stmt)->loop_father != father)
    return bb_rank[bb->index];

  /* Look for phi arguments from within the loop.  If found, bias this phi.  */
  for (i = 0; i < gimple_phi_num_args (stmt); i++)
    {
      tree arg = gimple_phi_arg_def (stmt, i);
      if (TREE_CODE (arg) == SSA_NAME
	  && !SSA_NAME_IS_DEFAULT_DEF (arg))
	{
	  gimple *def_stmt = SSA_NAME_DEF_STMT (arg);
	  if (gimple_bb (def_stmt)->loop_father == father)
	    return bb_rank[father->latch->index] + PHI_LOOP_BIAS;
	}
    }

  /* Must be an uninteresting phi.  */
  return bb_rank[bb->index];
}
/* If EXP is an SSA_NAME defined by a PHI statement that represents a
   loop-carried dependence of an innermost loop, return TRUE; else
   return FALSE.  */
static bool
loop_carried_phi (tree exp)
{
  gimple *phi_stmt;
  long block_rank;

  if (TREE_CODE (exp) != SSA_NAME
      || SSA_NAME_IS_DEFAULT_DEF (exp))
    return false;

  phi_stmt = SSA_NAME_DEF_STMT (exp);

  if (gimple_code (SSA_NAME_DEF_STMT (exp)) != GIMPLE_PHI)
    return false;

  /* Non-loop-carried phis have block rank.  Loop-carried phis have
     an additional bias added in.  If this phi doesn't have block rank,
     it's biased and should not be propagated.  */
  block_rank = bb_rank[gimple_bb (phi_stmt)->index];

  if (phi_rank (phi_stmt) != block_rank)
    return true;

  return false;
}
/* Return the maximum of RANK and the rank that should be propagated
   from expression OP.  For most operands, this is just the rank of OP.
   For loop-carried phis, the value is zero to avoid undoing the bias
   in favor of the phi.  */
static long
propagate_rank (long rank, tree op)
{
  long op_rank;

  if (loop_carried_phi (op))
    return rank;

  op_rank = get_rank (op);

  return MAX (rank, op_rank);
}

/* Look up the operand rank structure for expression E.  */

static inline long
find_operand_rank (tree e)
{
  long *slot = operand_rank->get (e);
  return slot ? *slot : -1;
}

/* Insert {E,RANK} into the operand rank hashtable.  */

static inline void
insert_operand_rank (tree e, long rank)
{
  gcc_assert (rank > 0);
  gcc_assert (!operand_rank->put (e, rank));
}
/* Given an expression E, return the rank of the expression.  */

static long
get_rank (tree e)
{
  /* SSA_NAME's have the rank of the expression they are the result
     of.
     For globals and uninitialized values, the rank is 0.
     For function arguments, use the pre-setup rank.
     For PHI nodes, stores, asm statements, etc, we use the rank of
     the BB.
     For simple operations, the rank is the maximum rank of any of
     its operands, or the bb_rank, whichever is less.
     I make no claims that this is optimal, however, it gives good
     results.  */

  /* We make an exception to the normal ranking system to break
     dependences of accumulator variables in loops.  Suppose we
     have a simple one-block loop containing:

       x_1 = phi(x_0, x_2)
       b = a + x_1
       c = b + d
       x_2 = c + e

     As shown, each iteration of the calculation into x is fully
     dependent upon the iteration before it.  We would prefer to
     see this in the form:

       x_1 = phi(x_0, x_2)
       b = a + d
       c = b + e
       x_2 = c + x_1

     If the loop is unrolled, the calculations of b and c from
     different iterations can be interleaved.

     To obtain this result during reassociation, we bias the rank
     of the phi definition x_1 upward, when it is recognized as an
     accumulator pattern.  The artificial rank causes it to be
     added last, providing the desired independence.  */

  if (TREE_CODE (e) == SSA_NAME)
    {
      ssa_op_iter iter;
      gimple *stmt;
      long rank;
      tree op;

      if (SSA_NAME_IS_DEFAULT_DEF (e))
	return find_operand_rank (e);

      stmt = SSA_NAME_DEF_STMT (e);
      if (gimple_code (stmt) == GIMPLE_PHI)
	return phi_rank (stmt);

      if (!is_gimple_assign (stmt))
	return bb_rank[gimple_bb (stmt)->index];

      /* If we already have a rank for this expression, use that.  */
      rank = find_operand_rank (e);
      if (rank != -1)
	return rank;

      /* Otherwise, find the maximum rank for the operands.  As an
	 exception, remove the bias from loop-carried phis when propagating
	 the rank so that dependent operations are not also biased.  */
      /* Simply walk over all SSA uses - this takes advantage of the
	 fact that non-SSA operands are is_gimple_min_invariant and
	 thus have rank 0.  */
      rank = 0;
      FOR_EACH_SSA_TREE_OPERAND (op, stmt, iter, SSA_OP_USE)
	rank = propagate_rank (rank, op);

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Rank for ");
	  print_generic_expr (dump_file, e);
	  fprintf (dump_file, " is %ld\n", (rank + 1));
	}

      /* Note the rank in the hashtable so we don't recompute it.  */
      insert_operand_rank (e, (rank + 1));
      return (rank + 1);
    }

  /* Constants, globals, etc., are rank 0 */
  return 0;
}
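/* A worked example of the ranking (an illustration only; the names
   are made up): given

     a_2 = x_1 + y_1;
     b_3 = a_2 + z_1;

   the parameters x_1, y_1 and z_1 get their pre-setup ranks,
   constants get rank 0, a_2 gets the maximum operand rank plus one,
   and b_3 gets one more than that.  Deeper results thus receive
   strictly larger ranks, which is the ordering the comparator in
   sort_by_operand_rank relies on.  */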
/* We want integer ones to end up last no matter what, since they are
   the ones we can do the most with.  */
#define INTEGER_CONST_TYPE 1 << 3
#define FLOAT_CONST_TYPE 1 << 2
#define OTHER_CONST_TYPE 1 << 1

/* Classify an invariant tree into integer, float, or other, so that
   we can sort them to be near other constants of the same type.  */
static inline int
constant_type (tree t)
{
  if (INTEGRAL_TYPE_P (TREE_TYPE (t)))
    return INTEGER_CONST_TYPE;
  else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (t)))
    return FLOAT_CONST_TYPE;
  else
    return OTHER_CONST_TYPE;
}
/* qsort comparison function to sort operand entries PA and PB by rank
   so that the sorted array is ordered by rank in decreasing order.  */
static int
sort_by_operand_rank (const void *pa, const void *pb)
{
  const operand_entry *oea = *(const operand_entry *const *)pa;
  const operand_entry *oeb = *(const operand_entry *const *)pb;

  /* It's nicer for optimize_expression if constants that are likely
     to fold when added/multiplied/whatever are put next to each
     other.  Since all constants have rank 0, order them by type.  */
  if (oeb->rank == 0 && oea->rank == 0)
    {
      if (constant_type (oeb->op) != constant_type (oea->op))
	return constant_type (oeb->op) - constant_type (oea->op);
      else
	/* To make sorting result stable, we use unique IDs to determine
	   order.  */
	return oeb->id > oea->id ? 1 : -1;
    }

  /* Lastly, make sure the versions that are the same go next to each
     other.  */
  if (oeb->rank == oea->rank
      && TREE_CODE (oea->op) == SSA_NAME
      && TREE_CODE (oeb->op) == SSA_NAME)
    {
      /* SSA_NAME_VERSION is assigned pretty randomly, because we reuse
	 versions of removed SSA_NAMEs, so if possible prefer to sort
	 based on basic block and gimple_uid of the SSA_NAME_DEF_STMT.
	 See PR60418.  */
      if (!SSA_NAME_IS_DEFAULT_DEF (oea->op)
	  && !SSA_NAME_IS_DEFAULT_DEF (oeb->op)
	  && !oea->stmt_to_insert
	  && !oeb->stmt_to_insert
	  && SSA_NAME_VERSION (oeb->op) != SSA_NAME_VERSION (oea->op))
	{
	  gimple *stmta = SSA_NAME_DEF_STMT (oea->op);
	  gimple *stmtb = SSA_NAME_DEF_STMT (oeb->op);
	  basic_block bba = gimple_bb (stmta);
	  basic_block bbb = gimple_bb (stmtb);
	  if (bbb != bba)
	    {
	      if (bb_rank[bbb->index] != bb_rank[bba->index])
		return bb_rank[bbb->index] - bb_rank[bba->index];
	    }
	  else
	    {
	      bool da = reassoc_stmt_dominates_stmt_p (stmta, stmtb);
	      bool db = reassoc_stmt_dominates_stmt_p (stmtb, stmta);
	      if (da != db)
		return da ? 1 : -1;
	    }
	}

      if (SSA_NAME_VERSION (oeb->op) != SSA_NAME_VERSION (oea->op))
	return SSA_NAME_VERSION (oeb->op) > SSA_NAME_VERSION (oea->op) ? 1 : -1;
      else
	return oeb->id > oea->id ? 1 : -1;
    }

  if (oeb->rank != oea->rank)
    return oeb->rank > oea->rank ? 1 : -1;
  else
    return oeb->id > oea->id ? 1 : -1;
}
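/* For instance (a sketch, not from the upstream sources): given the
   operand entries {c [rank 2], 7 [rank 0], a [rank 1], b [rank 1]},
   the comparator above sorts the vector into {c, a, b, 7} - rank
   decreasing, constants last - so that equal-rank SSA names end up
   adjacent and the trailing constant is in reach of the
   constant-merging code in optimize_ops_list.  */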
/* Add an operand entry to *OPS for the tree operand OP.  */

static void
add_to_ops_vec (vec<operand_entry *> *ops, tree op, gimple *stmt_to_insert = NULL)
{
  operand_entry *oe = operand_entry_pool.allocate ();

  oe->op = op;
  oe->rank = get_rank (op);
  oe->id = next_operand_entry_id++;
  oe->count = 1;
  oe->stmt_to_insert = stmt_to_insert;
  ops->safe_push (oe);
}

/* Add an operand entry to *OPS for the tree operand OP with repeat
   count REPEAT.  */

static void
add_repeat_to_ops_vec (vec<operand_entry *> *ops, tree op,
		       HOST_WIDE_INT repeat)
{
  operand_entry *oe = operand_entry_pool.allocate ();

  oe->op = op;
  oe->rank = get_rank (op);
  oe->id = next_operand_entry_id++;
  oe->count = repeat;
  oe->stmt_to_insert = NULL;
  ops->safe_push (oe);

  reassociate_stats.pows_encountered++;
}
/* Return true if STMT is a reassociable operation containing a binary
   operation with tree code CODE, and is inside LOOP.  */

static bool
is_reassociable_op (gimple *stmt, enum tree_code code, struct loop *loop)
{
  basic_block bb = gimple_bb (stmt);

  if (gimple_bb (stmt) == NULL)
    return false;

  if (!flow_bb_inside_loop_p (loop, bb))
    return false;

  if (is_gimple_assign (stmt)
      && gimple_assign_rhs_code (stmt) == code
      && has_single_use (gimple_assign_lhs (stmt)))
    {
      tree rhs1 = gimple_assign_rhs1 (stmt);
      tree rhs2 = gimple_assign_rhs2 (stmt);
      if (TREE_CODE (rhs1) == SSA_NAME
	  && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs1))
	return false;
      if (rhs2
	  && TREE_CODE (rhs2) == SSA_NAME
	  && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs2))
	return false;
      return true;
    }

  return false;
}
/* Return true if STMT is a nop-conversion.  */

static bool
gimple_nop_conversion_p (gimple *stmt)
{
  if (gassign *ass = dyn_cast <gassign *> (stmt))
    {
      if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (ass))
	  && tree_nop_conversion_p (TREE_TYPE (gimple_assign_lhs (ass)),
				    TREE_TYPE (gimple_assign_rhs1 (ass))))
	return true;
    }
  return false;
}

/* Given NAME, if NAME is defined by a unary operation OPCODE, return the
   operand of the negate operation.  Otherwise, return NULL.  */

static tree
get_unary_op (tree name, enum tree_code opcode)
{
  gimple *stmt = SSA_NAME_DEF_STMT (name);

  /* Look through nop conversions (sign changes).  */
  if (gimple_nop_conversion_p (stmt)
      && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME)
    stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));

  if (!is_gimple_assign (stmt))
    return NULL_TREE;

  if (gimple_assign_rhs_code (stmt) == opcode)
    return gimple_assign_rhs1 (stmt);
  return NULL_TREE;
}
/* Return true if OP1 and OP2 have the same value if casted to either type.  */

static bool
ops_equal_values_p (tree op1, tree op2)
{
  if (op1 == op2)
    return true;

  tree orig_op1 = op1;
  if (TREE_CODE (op1) == SSA_NAME)
    {
      gimple *stmt = SSA_NAME_DEF_STMT (op1);
      if (gimple_nop_conversion_p (stmt))
	{
	  op1 = gimple_assign_rhs1 (stmt);
	  if (op1 == op2)
	    return true;
	}
    }

  if (TREE_CODE (op2) == SSA_NAME)
    {
      gimple *stmt = SSA_NAME_DEF_STMT (op2);
      if (gimple_nop_conversion_p (stmt))
	{
	  op2 = gimple_assign_rhs1 (stmt);
	  if (op1 == op2
	      || orig_op1 == op2)
	    return true;
	}
    }

  return false;
}
/* If CURR and LAST are a pair of ops that OPCODE allows us to
   eliminate through equivalences, do so, remove them from OPS, and
   return true.  Otherwise, return false.  */
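/* For instance (a sketch of the intent, not from the upstream
   sources): with OPCODE BIT_AND_EXPR and OPS = {a, a, b}, one copy
   of `a' is removed, leaving {a, b}; with BIT_XOR_EXPR and
   OPS = {a, a, b}, both copies cancel, leaving {b}; and with
   BIT_XOR_EXPR and OPS = {a, a}, the whole list collapses to the
   zero constant of the operand type.  */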
static bool
eliminate_duplicate_pair (enum tree_code opcode,
			  vec<operand_entry *> *ops,
			  bool *all_done,
			  unsigned int i,
			  operand_entry *curr,
			  operand_entry *last)
{

  /* If we have two of the same op, and the opcode is & |, min, or max,
     we can eliminate one of them.
     If we have two of the same op, and the opcode is ^, we can
     eliminate both of them.  */

  if (last && last->op == curr->op)
    {
      switch (opcode)
	{
	case MAX_EXPR:
	case MIN_EXPR:
	case BIT_IOR_EXPR:
	case BIT_AND_EXPR:
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Equivalence: ");
	      print_generic_expr (dump_file, curr->op);
	      fprintf (dump_file, " [&|minmax] ");
	      print_generic_expr (dump_file, last->op);
	      fprintf (dump_file, " -> ");
	      print_generic_stmt (dump_file, last->op);
	    }

	  ops->ordered_remove (i);
	  reassociate_stats.ops_eliminated ++;

	  return true;

	case BIT_XOR_EXPR:
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Equivalence: ");
	      print_generic_expr (dump_file, curr->op);
	      fprintf (dump_file, " ^ ");
	      print_generic_expr (dump_file, last->op);
	      fprintf (dump_file, " -> nothing\n");
	    }

	  reassociate_stats.ops_eliminated += 2;

	  if (ops->length () == 2)
	    {
	      ops->truncate (0);
	      add_to_ops_vec (ops, build_zero_cst (TREE_TYPE (last->op)));
	      *all_done = true;
	    }
	  else
	    {
	      ops->ordered_remove (i-1);
	      ops->ordered_remove (i-1);
	    }

	  return true;

	default:
	  break;
	}
    }
  return false;
}
static vec<tree> plus_negates;

/* If OPCODE is PLUS_EXPR, CURR->OP is a negate expression or a bitwise not
   expression, look in OPS for a corresponding positive operation to cancel
   it out.  If we find one, remove the other from OPS, replace
   OPS[CURRINDEX] with 0 or -1, respectively, and return true.  Otherwise,
   return false.  */
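/* Concretely (an illustration only): for a PLUS_EXPR chain containing
   both t = -x and x, the pair sums to 0, so both entries are removed
   and a zero constant is added in their place; for a chain containing
   both t = ~x and x, the pair sums to -1 (all ones), which is the
   constant added instead.  */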
static bool
eliminate_plus_minus_pair (enum tree_code opcode,
			   vec<operand_entry *> *ops,
			   unsigned int currindex,
			   operand_entry *curr)
{
  tree negateop;
  tree notop;
  unsigned int i;
  operand_entry *oe;

  if (opcode != PLUS_EXPR || TREE_CODE (curr->op) != SSA_NAME)
    return false;

  negateop = get_unary_op (curr->op, NEGATE_EXPR);
  notop = get_unary_op (curr->op, BIT_NOT_EXPR);
  if (negateop == NULL_TREE && notop == NULL_TREE)
    return false;

  /* Any non-negated version will have a rank that is one less than
     the current rank.  So once we hit those ranks, if we don't find
     one, we can stop.  */

  for (i = currindex + 1;
       ops->iterate (i, &oe)
       && oe->rank >= curr->rank - 1 ;
       i++)
    {
      if (negateop
	  && ops_equal_values_p (oe->op, negateop))
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Equivalence: ");
	      print_generic_expr (dump_file, negateop);
	      fprintf (dump_file, " + -");
	      print_generic_expr (dump_file, oe->op);
	      fprintf (dump_file, " -> 0\n");
	    }

	  ops->ordered_remove (i);
	  add_to_ops_vec (ops, build_zero_cst (TREE_TYPE (oe->op)));
	  ops->ordered_remove (currindex);
	  reassociate_stats.ops_eliminated ++;

	  return true;
	}
      else if (notop
	       && ops_equal_values_p (oe->op, notop))
	{
	  tree op_type = TREE_TYPE (oe->op);

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Equivalence: ");
	      print_generic_expr (dump_file, notop);
	      fprintf (dump_file, " + ~");
	      print_generic_expr (dump_file, oe->op);
	      fprintf (dump_file, " -> -1\n");
	    }

	  ops->ordered_remove (i);
	  add_to_ops_vec (ops, build_all_ones_cst (op_type));
	  ops->ordered_remove (currindex);
	  reassociate_stats.ops_eliminated ++;

	  return true;
	}
    }

  /* If CURR->OP is a negate expr without nop conversion in a plus expr:
     save it for later inspection in repropagate_negates().  */
  if (negateop != NULL_TREE
      && gimple_assign_rhs_code (SSA_NAME_DEF_STMT (curr->op)) == NEGATE_EXPR)
    plus_negates.safe_push (curr->op);

  return false;
}
/* If OPCODE is BIT_IOR_EXPR or BIT_AND_EXPR, and CURR->OP is really a
   bitwise not expression, look in OPS for a corresponding operand to
   cancel it out.  If we find one, remove the other from OPS, replace
   OPS[CURRINDEX] with 0 or -1, respectively, and return true.
   Otherwise, return false.  */
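/* Concretely (an illustration only): x & ~x is identically 0, so for
   BIT_AND_EXPR the whole operand list collapses to 0; x | ~x is
   identically -1 (all ones), so for BIT_IOR_EXPR it collapses to -1.
   Either way, every other operand in the list becomes dead.  */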
static bool
eliminate_not_pairs (enum tree_code opcode,
		     vec<operand_entry *> *ops,
		     unsigned int currindex,
		     operand_entry *curr)
{
  tree notop;
  unsigned int i;
  operand_entry *oe;

  if ((opcode != BIT_IOR_EXPR && opcode != BIT_AND_EXPR)
      || TREE_CODE (curr->op) != SSA_NAME)
    return false;

  notop = get_unary_op (curr->op, BIT_NOT_EXPR);
  if (notop == NULL_TREE)
    return false;

  /* Any non-not version will have a rank that is one less than
     the current rank.  So once we hit those ranks, if we don't find
     one, we can stop.  */

  for (i = currindex + 1;
       ops->iterate (i, &oe)
       && oe->rank >= curr->rank - 1;
       i++)
    {
      if (oe->op == notop)
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Equivalence: ");
	      print_generic_expr (dump_file, notop);
	      if (opcode == BIT_AND_EXPR)
		fprintf (dump_file, " & ~");
	      else if (opcode == BIT_IOR_EXPR)
		fprintf (dump_file, " | ~");
	      print_generic_expr (dump_file, oe->op);
	      if (opcode == BIT_AND_EXPR)
		fprintf (dump_file, " -> 0\n");
	      else if (opcode == BIT_IOR_EXPR)
		fprintf (dump_file, " -> -1\n");
	    }

	  if (opcode == BIT_AND_EXPR)
	    oe->op = build_zero_cst (TREE_TYPE (oe->op));
	  else if (opcode == BIT_IOR_EXPR)
	    oe->op = build_all_ones_cst (TREE_TYPE (oe->op));

	  reassociate_stats.ops_eliminated += ops->length () - 1;
	  ops->truncate (0);
	  ops->quick_push (oe);
	  return true;
	}
    }

  return false;
}
/* Use constant value that may be present in OPS to try to eliminate
   operands.  Note that this function is only really used when we've
   eliminated ops for other reasons, or merged constants.  Across
   single statements, fold already does all of this, plus more.  There
   is little point in duplicating logic, so I've only included the
   identities that I could ever construct testcases to trigger.  */
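/* The identities handled below, in sketch form (an illustration
   only):

     a & 0  -> 0	a & -1 -> a
     a | -1 -> -1	a | 0  -> a
     a * 0  -> 0	a * 1  -> a
     a + 0  -> a	a ^ 0  -> a

   where the floating-point variants are guarded by the HONOR_NANS /
   HONOR_SIGNED_ZEROS / HONOR_SNANS checks visible in the code.  */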
static void
eliminate_using_constants (enum tree_code opcode,
			   vec<operand_entry *> *ops)
{
  operand_entry *oelast = ops->last ();
  tree type = TREE_TYPE (oelast->op);

  if (oelast->rank == 0
      && (ANY_INTEGRAL_TYPE_P (type) || FLOAT_TYPE_P (type)))
    {
      switch (opcode)
	{
	case BIT_AND_EXPR:
	  if (integer_zerop (oelast->op))
	    {
	      if (ops->length () != 1)
		{
		  if (dump_file && (dump_flags & TDF_DETAILS))
		    fprintf (dump_file, "Found & 0, removing all other ops\n");

		  reassociate_stats.ops_eliminated += ops->length () - 1;

		  ops->truncate (0);
		  ops->quick_push (oelast);
		  return;
		}
	    }
	  else if (integer_all_onesp (oelast->op))
	    {
	      if (ops->length () != 1)
		{
		  if (dump_file && (dump_flags & TDF_DETAILS))
		    fprintf (dump_file, "Found & -1, removing\n");
		  ops->pop ();
		  reassociate_stats.ops_eliminated++;
		}
	    }
	  break;
	case BIT_IOR_EXPR:
	  if (integer_all_onesp (oelast->op))
	    {
	      if (ops->length () != 1)
		{
		  if (dump_file && (dump_flags & TDF_DETAILS))
		    fprintf (dump_file, "Found | -1, removing all other ops\n");

		  reassociate_stats.ops_eliminated += ops->length () - 1;

		  ops->truncate (0);
		  ops->quick_push (oelast);
		  return;
		}
	    }
	  else if (integer_zerop (oelast->op))
	    {
	      if (ops->length () != 1)
		{
		  if (dump_file && (dump_flags & TDF_DETAILS))
		    fprintf (dump_file, "Found | 0, removing\n");
		  ops->pop ();
		  reassociate_stats.ops_eliminated++;
		}
	    }
	  break;
	case MULT_EXPR:
	  if (integer_zerop (oelast->op)
	      || (FLOAT_TYPE_P (type)
		  && !HONOR_NANS (type)
		  && !HONOR_SIGNED_ZEROS (type)
		  && real_zerop (oelast->op)))
	    {
	      if (ops->length () != 1)
		{
		  if (dump_file && (dump_flags & TDF_DETAILS))
		    fprintf (dump_file, "Found * 0, removing all other ops\n");

		  reassociate_stats.ops_eliminated += ops->length () - 1;
		  ops->truncate (1);
		  ops->quick_push (oelast);
		  return;
		}
	    }
	  else if (integer_onep (oelast->op)
		   || (FLOAT_TYPE_P (type)
		       && !HONOR_SNANS (type)
		       && real_onep (oelast->op)))
	    {
	      if (ops->length () != 1)
		{
		  if (dump_file && (dump_flags & TDF_DETAILS))
		    fprintf (dump_file, "Found * 1, removing\n");
		  ops->pop ();
		  reassociate_stats.ops_eliminated++;
		  return;
		}
	    }
	  break;
	case BIT_XOR_EXPR:
	case PLUS_EXPR:
	case MINUS_EXPR:
	  if (integer_zerop (oelast->op)
	      || (FLOAT_TYPE_P (type)
		  && (opcode == PLUS_EXPR || opcode == MINUS_EXPR)
		  && fold_real_zero_addition_p (type, oelast->op,
						opcode == MINUS_EXPR)))
	    {
	      if (ops->length () != 1)
		{
		  if (dump_file && (dump_flags & TDF_DETAILS))
		    fprintf (dump_file, "Found [|^+] 0, removing\n");
		  ops->pop ();
		  reassociate_stats.ops_eliminated++;
		  return;
		}
	    }
	  break;
	default:
	  break;
	}
    }
}
static void linearize_expr_tree (vec<operand_entry *> *, gimple *,
				 bool, bool);

/* Structure for tracking and counting operands.  */
struct oecount {
  unsigned int cnt;
  unsigned int id;
  enum tree_code oecode;
  tree op;
};


/* The heap for the oecount hashtable and the sorted list of operands.  */
static vec<oecount> cvec;


/* Oecount hashtable helpers.  */

struct oecount_hasher : int_hash <int, 0, 1>
{
  static inline hashval_t hash (int);
  static inline bool equal (int, int);
};

/* Hash function for oecount.  */

inline hashval_t
oecount_hasher::hash (int p)
{
  const oecount *c = &cvec[p - 42];
  return htab_hash_pointer (c->op) ^ (hashval_t)c->oecode;
}

/* Comparison function for oecount.  */

inline bool
oecount_hasher::equal (int p1, int p2)
{
  const oecount *c1 = &cvec[p1 - 42];
  const oecount *c2 = &cvec[p2 - 42];
  return c1->oecode == c2->oecode && c1->op == c2->op;
}

/* Comparison function for qsort sorting oecount elements by count.  */

static int
oecount_cmp (const void *p1, const void *p2)
{
  const oecount *c1 = (const oecount *)p1;
  const oecount *c2 = (const oecount *)p2;
  if (c1->cnt != c2->cnt)
    return c1->cnt > c2->cnt ? 1 : -1;
  else
    /* If counts are identical, use unique IDs to stabilize qsort.  */
    return c1->id > c2->id ? 1 : -1;
}
/* Return TRUE iff STMT represents a builtin call that raises OP
   to some exponent.  */

static bool
stmt_is_power_of_op (gimple *stmt, tree op)
{
  if (!is_gimple_call (stmt))
    return false;

  switch (gimple_call_combined_fn (stmt))
    {
    CASE_CFN_POW:
    CASE_CFN_POWI:
      return (operand_equal_p (gimple_call_arg (stmt, 0), op, 0));

    default:
      return false;
    }
}

/* Given STMT which is a __builtin_pow* call, decrement its exponent
   in place and return the result.  Assumes that stmt_is_power_of_op
   was previously called for STMT and returned TRUE.  */

static HOST_WIDE_INT
decrement_power (gimple *stmt)
{
  REAL_VALUE_TYPE c, cint;
  HOST_WIDE_INT power;
  tree arg1;

  switch (gimple_call_combined_fn (stmt))
    {
    CASE_CFN_POW:
      arg1 = gimple_call_arg (stmt, 1);
      c = TREE_REAL_CST (arg1);
      power = real_to_integer (&c) - 1;
      real_from_integer (&cint, VOIDmode, power, SIGNED);
      gimple_call_set_arg (stmt, 1, build_real (TREE_TYPE (arg1), cint));
      return power;

    CASE_CFN_POWI:
      arg1 = gimple_call_arg (stmt, 1);
      power = TREE_INT_CST_LOW (arg1) - 1;
      gimple_call_set_arg (stmt, 1, build_int_cst (TREE_TYPE (arg1), power));
      return power;

    default:
      gcc_unreachable ();
    }
}
/* Replace the SSA name defined by STMT with a new SSA name, replace
   all uses of the old one, and return the new SSA name.  */

static tree
make_new_ssa_for_def (gimple *stmt, enum tree_code opcode, tree op)
{
  gimple *use_stmt;
  use_operand_p use;
  imm_use_iterator iter;
  tree new_lhs, new_debug_lhs = NULL_TREE;
  tree lhs = gimple_get_lhs (stmt);

  new_lhs = make_ssa_name (TREE_TYPE (lhs));
  gimple_set_lhs (stmt, new_lhs);

  /* Also need to update GIMPLE_DEBUGs.  */
  FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
    {
      tree repl = new_lhs;
      if (is_gimple_debug (use_stmt))
	{
	  if (new_debug_lhs == NULL_TREE)
	    {
	      new_debug_lhs = make_node (DEBUG_EXPR_DECL);
	      gdebug *def_temp
		= gimple_build_debug_bind (new_debug_lhs,
					   build2 (opcode, TREE_TYPE (lhs),
						   new_lhs, op),
					   stmt);
	      DECL_ARTIFICIAL (new_debug_lhs) = 1;
	      TREE_TYPE (new_debug_lhs) = TREE_TYPE (lhs);
	      SET_DECL_MODE (new_debug_lhs, TYPE_MODE (TREE_TYPE (lhs)));
	      gimple_set_uid (def_temp, gimple_uid (stmt));
	      gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
	      gsi_insert_after (&gsi, def_temp, GSI_SAME_STMT);
	    }
	  repl = new_debug_lhs;
	}
      FOR_EACH_IMM_USE_ON_STMT (use, iter)
	SET_USE (use, repl);
      update_stmt (use_stmt);
    }
  return new_lhs;
}

/* Replace all SSA names defined in STMTS_TO_FIX and replace their
   uses with new SSA names.  Also do this for the stmt that defines DEF
   if *DEF is not OP.  */

static void
make_new_ssa_for_all_defs (tree *def, enum tree_code opcode, tree op,
			   vec<gimple *> &stmts_to_fix)
{
  unsigned i;
  gimple *stmt;

  if (*def != op
      && TREE_CODE (*def) == SSA_NAME
      && (stmt = SSA_NAME_DEF_STMT (*def))
      && gimple_code (stmt) != GIMPLE_NOP)
    *def = make_new_ssa_for_def (stmt, opcode, op);

  FOR_EACH_VEC_ELT (stmts_to_fix, i, stmt)
    make_new_ssa_for_def (stmt, opcode, op);
}
/* Find the single immediate use of STMT's LHS, and replace it
   with OP.  Remove STMT.  If STMT's LHS is the same as *DEF,
   replace *DEF with OP as well.  */

static void
propagate_op_to_single_use (tree op, gimple *stmt, tree *def)
{
  tree lhs;
  gimple *use_stmt;
  use_operand_p use;
  gimple_stmt_iterator gsi;

  if (is_gimple_call (stmt))
    lhs = gimple_call_lhs (stmt);
  else
    lhs = gimple_assign_lhs (stmt);

  gcc_assert (has_single_use (lhs));
  single_imm_use (lhs, &use, &use_stmt);
  if (lhs == *def)
    *def = op;
  SET_USE (use, op);
  if (TREE_CODE (op) != SSA_NAME)
    update_stmt (use_stmt);
  gsi = gsi_for_stmt (stmt);
  unlink_stmt_vdef (stmt);
  reassoc_remove_stmt (&gsi);
  release_defs (stmt);
}
/* Walks the linear chain with result *DEF searching for an operation
   with operand OP and code OPCODE removing that from the chain.  *DEF
   is updated if there is only one operand but no operation left.  */
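/* A sketch of the walk (an illustration only, with made-up names):
   for the chain

     t1 = a * b;
     t2 = t1 * op;
     *def = t2 * c;

   removing OP with OPCODE MULT_EXPR rewires the single use of t2 to
   use t1 instead and deletes t2's definition, leaving the chain
   computing a * b * c.  */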
static void
zero_one_operation (tree *def, enum tree_code opcode, tree op)
{
  tree orig_def = *def;
  gimple *stmt = SSA_NAME_DEF_STMT (*def);
  /* PR72835 - Record the stmt chain that has to be updated such that
     we don't use the same LHS when the values computed are different.  */
  auto_vec<gimple *, 64> stmts_to_fix;

  do
    {
      tree name;

      if (opcode == MULT_EXPR)
	{
	  if (stmt_is_power_of_op (stmt, op))
	    {
	      if (decrement_power (stmt) == 1)
		{
		  if (stmts_to_fix.length () > 0)
		    stmts_to_fix.pop ();
		  propagate_op_to_single_use (op, stmt, def);
		}
	      break;
	    }
	  else if (gimple_assign_rhs_code (stmt) == NEGATE_EXPR)
	    {
	      if (gimple_assign_rhs1 (stmt) == op)
		{
		  tree cst = build_minus_one_cst (TREE_TYPE (op));
		  if (stmts_to_fix.length () > 0)
		    stmts_to_fix.pop ();
		  propagate_op_to_single_use (cst, stmt, def);
		  break;
		}
	      else if (integer_minus_onep (op)
		       || real_minus_onep (op))
		{
		  gimple_assign_set_rhs_code
		    (stmt, TREE_CODE (gimple_assign_rhs1 (stmt)));
		  break;
		}
	    }
	}

      name = gimple_assign_rhs1 (stmt);

      /* If this is the operation we look for and one of the operands
	 is ours simply propagate the other operand into the stmts
	 single use.  */
      if (gimple_assign_rhs_code (stmt) == opcode
	  && (name == op
	      || gimple_assign_rhs2 (stmt) == op))
	{
	  if (name == op)
	    name = gimple_assign_rhs2 (stmt);
	  if (stmts_to_fix.length () > 0)
	    stmts_to_fix.pop ();
	  propagate_op_to_single_use (name, stmt, def);
	  break;
	}

      /* We might have a multiply of two __builtin_pow* calls, and
	 the operand might be hiding in the rightmost one.  Likewise
	 this can happen for a negate.  */
      if (opcode == MULT_EXPR
	  && gimple_assign_rhs_code (stmt) == opcode
	  && TREE_CODE (gimple_assign_rhs2 (stmt)) == SSA_NAME
	  && has_single_use (gimple_assign_rhs2 (stmt)))
	{
	  gimple *stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs2 (stmt));
	  if (stmt_is_power_of_op (stmt2, op))
	    {
	      if (decrement_power (stmt2) == 1)
		propagate_op_to_single_use (op, stmt2, def);
	      else
		stmts_to_fix.safe_push (stmt2);
	      break;
	    }
	  else if (is_gimple_assign (stmt2)
		   && gimple_assign_rhs_code (stmt2) == NEGATE_EXPR)
	    {
	      if (gimple_assign_rhs1 (stmt2) == op)
		{
		  tree cst = build_minus_one_cst (TREE_TYPE (op));
		  propagate_op_to_single_use (cst, stmt2, def);
		  break;
		}
	      else if (integer_minus_onep (op)
		       || real_minus_onep (op))
		{
		  stmts_to_fix.safe_push (stmt2);
		  gimple_assign_set_rhs_code
		    (stmt2, TREE_CODE (gimple_assign_rhs1 (stmt2)));
		  break;
		}
	    }
	}

      /* Continue walking the chain.  */
      gcc_assert (name != op
		  && TREE_CODE (name) == SSA_NAME);
      stmt = SSA_NAME_DEF_STMT (name);
      stmts_to_fix.safe_push (stmt);
    }
  while (1);

  if (stmts_to_fix.length () > 0 || *def == orig_def)
    make_new_ssa_for_all_defs (def, opcode, op, stmts_to_fix);
}
/* Returns true if statement S1 dominates statement S2.  Like
   stmt_dominates_stmt_p, but uses stmt UIDs to optimize.  */

static bool
reassoc_stmt_dominates_stmt_p (gimple *s1, gimple *s2)
{
  basic_block bb1 = gimple_bb (s1), bb2 = gimple_bb (s2);

  /* If bb1 is NULL, it should be a GIMPLE_NOP def stmt of an (D)
     SSA_NAME.  Assume it lives at the beginning of function and
     thus dominates everything.  */
  if (!bb1 || s1 == s2)
    return true;

  /* If bb2 is NULL, it doesn't dominate any stmt with a bb.  */
  if (!bb2)
    return false;

  if (bb1 == bb2)
    {
      /* PHIs in the same basic block are assumed to be
	 executed all in parallel, if only one stmt is a PHI,
	 it dominates the other stmt in the same basic block.  */
      if (gimple_code (s1) == GIMPLE_PHI)
	return true;

      if (gimple_code (s2) == GIMPLE_PHI)
	return false;

      gcc_assert (gimple_uid (s1) && gimple_uid (s2));

      if (gimple_uid (s1) < gimple_uid (s2))
	return true;

      if (gimple_uid (s1) > gimple_uid (s2))
	return false;

      gimple_stmt_iterator gsi = gsi_for_stmt (s1);
      unsigned int uid = gimple_uid (s1);
      for (gsi_next (&gsi); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple *s = gsi_stmt (gsi);
	  if (gimple_uid (s) != uid)
	    break;
	  if (s == s2)
	    return true;
	}

      return false;
    }

  return dominated_by_p (CDI_DOMINATORS, bb2, bb1);
}
/* Insert STMT after INSERT_POINT.  */

static void
insert_stmt_after (gimple *stmt, gimple *insert_point)
{
  gimple_stmt_iterator gsi;
  basic_block bb;

  if (gimple_code (insert_point) == GIMPLE_PHI)
    bb = gimple_bb (insert_point);
  else if (!stmt_ends_bb_p (insert_point))
    {
      gsi = gsi_for_stmt (insert_point);
      gimple_set_uid (stmt, gimple_uid (insert_point));
      gsi_insert_after (&gsi, stmt, GSI_NEW_STMT);
      return;
    }
  else
    /* We assume INSERT_POINT is a SSA_NAME_DEF_STMT of some SSA_NAME,
       thus if it must end a basic block, it should be a call that can
       throw, or some assignment that can throw.  If it throws, the LHS
       of it will not be initialized though, so only valid places using
       the SSA_NAME should be dominated by the fallthru edge.  */
    bb = find_fallthru_edge (gimple_bb (insert_point)->succs)->dest;
  gsi = gsi_after_labels (bb);
  if (gsi_end_p (gsi))
    {
      gimple_stmt_iterator gsi2 = gsi_last_bb (bb);
      gimple_set_uid (stmt,
		      gsi_end_p (gsi2) ? 1 : gimple_uid (gsi_stmt (gsi2)));
    }
  else
    gimple_set_uid (stmt, gimple_uid (gsi_stmt (gsi)));
  gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
}
/* Builds one statement performing OP1 OPCODE OP2, creating a new SSA
   name for the result.  Places the statement after the definition of
   either OP1 or OP2.  Returns the new statement.  */

static gimple *
build_and_add_sum (tree type, tree op1, tree op2, enum tree_code opcode)
{
  gimple *op1def = NULL, *op2def = NULL;
  gimple_stmt_iterator gsi;
  tree op;
  gassign *sum;

  /* Create the addition statement.  */
  op = make_ssa_name (type);
  sum = gimple_build_assign (op, opcode, op1, op2);

  /* Find an insertion place and insert.  */
  if (TREE_CODE (op1) == SSA_NAME)
    op1def = SSA_NAME_DEF_STMT (op1);
  if (TREE_CODE (op2) == SSA_NAME)
    op2def = SSA_NAME_DEF_STMT (op2);
  if ((!op1def || gimple_nop_p (op1def))
      && (!op2def || gimple_nop_p (op2def)))
    {
      gsi = gsi_after_labels (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
      if (gsi_end_p (gsi))
	{
	  gimple_stmt_iterator gsi2
	    = gsi_last_bb (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
	  gimple_set_uid (sum,
			  gsi_end_p (gsi2) ? 1 : gimple_uid (gsi_stmt (gsi2)));
	}
      else
	gimple_set_uid (sum, gimple_uid (gsi_stmt (gsi)));
      gsi_insert_before (&gsi, sum, GSI_NEW_STMT);
    }
  else
    {
      gimple *insert_point;
      if ((!op1def || gimple_nop_p (op1def))
	  || (op2def && !gimple_nop_p (op2def)
	      && reassoc_stmt_dominates_stmt_p (op1def, op2def)))
	insert_point = op2def;
      else
	insert_point = op1def;
      insert_stmt_after (sum, insert_point);
    }
  update_stmt (sum);

  return sum;
}
/* Perform un-distribution of divisions and multiplications.
   A * X + B * X is transformed into (A + B) * X and A / X + B / X
   to (A + B) / X for real X.

   The algorithm is organized as follows.

   - First we walk the addition chain *OPS looking for summands that
   are defined by a multiplication or a real division.  This results
   in the candidates bitmap with relevant indices into *OPS.

   - Second we build the chains of multiplications or divisions for
   these candidates, counting the number of occurrences of (operand, code)
   pairs in all of the candidates chains.

   - Third we sort the (operand, code) pairs by number of occurrence and
   process them starting with the pair with the most uses.

   * For each such pair we walk the candidates again to build a
   second candidate bitmap noting all multiplication/division chains
   that have at least one occurrence of (operand, code).

   * We build an alternate addition chain only covering these
   candidates with one (operand, code) operation removed from their
   multiplication/division chain.

   * The first candidate gets replaced by the alternate addition chain
   multiplied/divided by the operand.

   * All candidate chains get disabled for further processing and
   processing of (operand, code) pairs continues.

   The alternate addition chains built are re-processed by the main
   reassociation algorithm which allows optimizing a * x * y + b * y * x
   to (a + b) * x * y in one invocation of the reassociation pass.  */
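/* Walking the steps above on a tiny case (an illustration only, with
   made-up names): for r = a * x + b * x + c, the two products are the
   candidates, and the counting table records (x, MULT_EXPR) with
   count 2 and (a, *), (b, *) with count 1 each.  The pair for x is
   processed first: both chains have x stripped via
   zero_one_operation, the stripped chains are summed as t = a + b,
   the first candidate's slot becomes t * x and the second's becomes
   the constant 0, so the addition chain is now t * x + 0 + c.  */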
static bool
undistribute_ops_list (enum tree_code opcode,
		       vec<operand_entry *> *ops, struct loop *loop)
{
  unsigned int length = ops->length ();
  operand_entry *oe1;
  unsigned i, j;
  unsigned nr_candidates, nr_candidates2;
  sbitmap_iterator sbi0;
  vec<operand_entry *> *subops;
  bool changed = false;
  unsigned int next_oecount_id = 0;

  if (length <= 1
      || opcode != PLUS_EXPR)
    return false;

  /* Build a list of candidates to process.  */
  auto_sbitmap candidates (length);
  bitmap_clear (candidates);
  nr_candidates = 0;
  FOR_EACH_VEC_ELT (*ops, i, oe1)
    {
      enum tree_code dcode;
      gimple *oe1def;

      if (TREE_CODE (oe1->op) != SSA_NAME)
	continue;
      oe1def = SSA_NAME_DEF_STMT (oe1->op);
      if (!is_gimple_assign (oe1def))
	continue;
      dcode = gimple_assign_rhs_code (oe1def);
      if ((dcode != MULT_EXPR
	   && dcode != RDIV_EXPR)
	  || !is_reassociable_op (oe1def, dcode, loop))
	continue;

      bitmap_set_bit (candidates, i);
      nr_candidates++;
    }

  if (nr_candidates < 2)
    return false;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "searching for un-distribute opportunities ");
      print_generic_expr (dump_file,
			  (*ops)[bitmap_first_set_bit (candidates)]->op, 0);
      fprintf (dump_file, " %d\n", nr_candidates);
    }

  /* Build linearized sub-operand lists and the counting table.  */
  cvec.create (0);

  hash_table<oecount_hasher> ctable (15);

  /* ??? Macro arguments cannot have multi-argument template types in
     them.  This typedef is needed to workaround that limitation.  */
  typedef vec<operand_entry *> vec_operand_entry_t_heap;
  subops = XCNEWVEC (vec_operand_entry_t_heap, ops->length ());
  EXECUTE_IF_SET_IN_BITMAP (candidates, 0, i, sbi0)
    {
      gimple *oedef;
      enum tree_code oecode;
      unsigned j;

      oedef = SSA_NAME_DEF_STMT ((*ops)[i]->op);
      oecode = gimple_assign_rhs_code (oedef);
      linearize_expr_tree (&subops[i], oedef,
			   associative_tree_code (oecode), false);

      FOR_EACH_VEC_ELT (subops[i], j, oe1)
	{
	  oecount c;
	  int *slot;
	  int idx;
	  c.oecode = oecode;
	  c.cnt = 1;
	  c.id = next_oecount_id++;
	  c.op = oe1->op;
	  cvec.safe_push (c);
	  idx = cvec.length () + 41;
	  slot = ctable.find_slot (idx, INSERT);
	  if (!*slot)
	    {
	      *slot = idx;
	    }
	  else
	    {
	      cvec.pop ();
	      cvec[*slot - 42].cnt++;
	    }
	}
    }

  /* Sort the counting table.  */
  cvec.qsort (oecount_cmp);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      oecount *c;
      fprintf (dump_file, "Candidates:\n");
      FOR_EACH_VEC_ELT (cvec, j, c)
	{
	  fprintf (dump_file, "  %u %s: ", c->cnt,
		   c->oecode == MULT_EXPR
		   ? "*" : c->oecode == RDIV_EXPR ? "/" : "?");
	  print_generic_expr (dump_file, c->op);
	  fprintf (dump_file, "\n");
	}
    }

  /* Process the (operand, code) pairs in order of most occurrence.  */
  auto_sbitmap candidates2 (length);
  while (!cvec.is_empty ())
    {
      oecount *c = &cvec.last ();
      if (c->cnt < 2)
	break;

      /* Now collect the operands in the outer chain that contain
	 the common operand in their inner chain.  */
      bitmap_clear (candidates2);
      nr_candidates2 = 0;
      EXECUTE_IF_SET_IN_BITMAP (candidates, 0, i, sbi0)
	{
	  gimple *oedef;
	  enum tree_code oecode;
	  unsigned j;
	  tree op = (*ops)[i]->op;

	  /* If we undistributed in this chain already this may be
	     a constant.  */
	  if (TREE_CODE (op) != SSA_NAME)
	    continue;

	  oedef = SSA_NAME_DEF_STMT (op);
	  oecode = gimple_assign_rhs_code (oedef);
	  if (oecode != c->oecode)
	    continue;

	  FOR_EACH_VEC_ELT (subops[i], j, oe1)
	    {
	      if (oe1->op == c->op)
		{
		  bitmap_set_bit (candidates2, i);
		  ++nr_candidates2;
		  break;
		}
	    }
	}

      if (nr_candidates2 >= 2)
	{
	  operand_entry *oe1, *oe2;
	  gimple *prod;
	  int first = bitmap_first_set_bit (candidates2);

	  /* Build the new addition chain.  */
	  oe1 = (*ops)[first];
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Building (");
	      print_generic_expr (dump_file, oe1->op);
	    }
	  zero_one_operation (&oe1->op, c->oecode, c->op);
	  EXECUTE_IF_SET_IN_BITMAP (candidates2, first+1, i, sbi0)
	    {
	      gimple *sum;
	      oe2 = (*ops)[i];
	      if (dump_file && (dump_flags & TDF_DETAILS))
		{
		  fprintf (dump_file, " + ");
		  print_generic_expr (dump_file, oe2->op);
		}
	      zero_one_operation (&oe2->op, c->oecode, c->op);
	      sum = build_and_add_sum (TREE_TYPE (oe1->op),
				       oe1->op, oe2->op, opcode);
	      oe2->op = build_zero_cst (TREE_TYPE (oe2->op));
	      oe2->rank = 0;
	      oe1->op = gimple_get_lhs (sum);
	    }

	  /* Apply the multiplication/division.  */
	  prod = build_and_add_sum (TREE_TYPE (oe1->op),
				    oe1->op, c->op, c->oecode);
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, ") %s ", c->oecode == MULT_EXPR ? "*" : "/");
	      print_generic_expr (dump_file, c->op);
	      fprintf (dump_file, "\n");
	    }

	  /* Record it in the addition chain and disable further
	     undistribution with this op.  */
	  oe1->op = gimple_assign_lhs (prod);
	  oe1->rank = get_rank (oe1->op);
	  subops[first].release ();

	  changed = true;
	}

      cvec.pop ();
    }

  for (i = 0; i < ops->length (); ++i)
    subops[i].release ();
  free (subops);
  cvec.release ();

  return changed;
}
/* If OPCODE is BIT_IOR_EXPR or BIT_AND_EXPR and CURR is a comparison
   expression, examine the other OPS to see if any of them are comparisons
   of the same values, which we may be able to combine or eliminate.
   For example, we can rewrite (a < b) | (a == b) as (a <= b).  */
static bool
eliminate_redundant_comparison (enum tree_code opcode,
				vec<operand_entry *> *ops,
				unsigned int currindex,
				operand_entry *curr)
{
  tree op1, op2;
  enum tree_code lcode, rcode;
  gimple *def1, *def2;
  int i;
  operand_entry *oe;

  if (opcode != BIT_IOR_EXPR && opcode != BIT_AND_EXPR)
    return false;

  /* Check that CURR is a comparison.  */
  if (TREE_CODE (curr->op) != SSA_NAME)
    return false;
  def1 = SSA_NAME_DEF_STMT (curr->op);
  if (!is_gimple_assign (def1))
    return false;
  lcode = gimple_assign_rhs_code (def1);
  if (TREE_CODE_CLASS (lcode) != tcc_comparison)
    return false;
  op1 = gimple_assign_rhs1 (def1);
  op2 = gimple_assign_rhs2 (def1);

  /* Now look for a similar comparison in the remaining OPS.  */
  for (i = currindex + 1; ops->iterate (i, &oe); i++)
    {
      tree t;

      if (TREE_CODE (oe->op) != SSA_NAME)
	continue;
      def2 = SSA_NAME_DEF_STMT (oe->op);
      if (!is_gimple_assign (def2))
	continue;
      rcode = gimple_assign_rhs_code (def2);
      if (TREE_CODE_CLASS (rcode) != tcc_comparison)
	continue;

      /* If we got here, we have a match.  See if we can combine the
	 two comparisons.  */
      if (opcode == BIT_IOR_EXPR)
	t = maybe_fold_or_comparisons (lcode, op1, op2,
				       rcode, gimple_assign_rhs1 (def2),
				       gimple_assign_rhs2 (def2));
      else
	t = maybe_fold_and_comparisons (lcode, op1, op2,
					rcode, gimple_assign_rhs1 (def2),
					gimple_assign_rhs2 (def2));
      if (!t)
	continue;

      /* maybe_fold_and_comparisons and maybe_fold_or_comparisons
	 always give us a boolean_type_node value back.  If the original
	 BIT_AND_EXPR or BIT_IOR_EXPR was of a wider integer type,
	 we need to convert.  */
      if (!useless_type_conversion_p (TREE_TYPE (curr->op), TREE_TYPE (t)))
	t = fold_convert (TREE_TYPE (curr->op), t);

      if (TREE_CODE (t) != INTEGER_CST
	  && !operand_equal_p (t, curr->op, 0))
	{
	  enum tree_code subcode;
	  tree newop1, newop2;
	  if (!COMPARISON_CLASS_P (t))
	    continue;
	  extract_ops_from_tree (t, &subcode, &newop1, &newop2);
	  STRIP_USELESS_TYPE_CONVERSION (newop1);
	  STRIP_USELESS_TYPE_CONVERSION (newop2);
	  if (!is_gimple_val (newop1) || !is_gimple_val (newop2))
	    continue;
	}

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Equivalence: ");
	  print_generic_expr (dump_file, curr->op);
	  fprintf (dump_file, " %s ", op_symbol_code (opcode));
	  print_generic_expr (dump_file, oe->op);
	  fprintf (dump_file, " -> ");
	  print_generic_expr (dump_file, t);
	  fprintf (dump_file, "\n");
	}

      /* Now we can delete oe, as it has been subsumed by the new combined
	 expression t.  */
      ops->ordered_remove (i);
      reassociate_stats.ops_eliminated ++;

      /* If t is the same as curr->op, we're done.  Otherwise we must
	 replace curr->op with t.  Special case is if we got a constant
	 back, in which case we add it to the end instead of in place of
	 the current entry.  */
      if (TREE_CODE (t) == INTEGER_CST)
	{
	  ops->ordered_remove (currindex);
	  add_to_ops_vec (ops, t);
	}
      else if (!operand_equal_p (t, curr->op, 0))
	{
	  gimple *sum;
	  enum tree_code subcode;
	  tree newop1;
	  tree newop2;
	  gcc_assert (COMPARISON_CLASS_P (t));
	  extract_ops_from_tree (t, &subcode, &newop1, &newop2);
	  STRIP_USELESS_TYPE_CONVERSION (newop1);
	  STRIP_USELESS_TYPE_CONVERSION (newop2);
	  gcc_checking_assert (is_gimple_val (newop1)
			       && is_gimple_val (newop2));
	  sum = build_and_add_sum (TREE_TYPE (t), newop1, newop2, subcode);
	  curr->op = gimple_get_lhs (sum);
	}
      return true;
    }

  return false;
}
/* Transform repeated addition of same values into multiply with
   constant.  */
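/* Concretely (an illustration only): an addition chain such as
   x + x + x + y is rewritten as t = x * 3; t + y.  For
   floating-point types this is only done under
   -funsafe-math-optimizations, as checked below; it also relies on
   the operand vector being sorted, so equal operands are adjacent.  */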
static bool
transform_add_to_multiply (vec<operand_entry *> *ops)
{
  operand_entry *oe;
  tree op = NULL_TREE;
  int j;
  int i, start = -1, end = 0, count = 0;
  auto_vec<std::pair <int, int> > indxs;
  bool changed = false;

  if (!INTEGRAL_TYPE_P (TREE_TYPE ((*ops)[0]->op))
      && (!SCALAR_FLOAT_TYPE_P (TREE_TYPE ((*ops)[0]->op))
	  || !flag_unsafe_math_optimizations))
    return false;

  /* Look for repeated operands.  */
  FOR_EACH_VEC_ELT (*ops, i, oe)
    {
      if (start == -1)
	{
	  count = 1;
	  op = oe->op;
	  start = i;
	}
      else if (operand_equal_p (oe->op, op, 0))
	{
	  count++;
	  end = i;
	}
      else
	{
	  if (count > 1)
	    indxs.safe_push (std::make_pair (start, end));
	  count = 1;
	  op = oe->op;
	  start = i;
	}
    }

  if (count > 1)
    indxs.safe_push (std::make_pair (start, end));

  for (j = indxs.length () - 1; j >= 0; --j)
    {
      /* Convert repeated operand addition to multiplication.  */
      start = indxs[j].first;
      end = indxs[j].second;
      op = (*ops)[start]->op;
      count = end - start + 1;
      for (i = end; i >= start; --i)
	ops->unordered_remove (i);
      tree tmp = make_ssa_name (TREE_TYPE (op));
      tree cst = build_int_cst (integer_type_node, count);
      gassign *mul_stmt
	= gimple_build_assign (tmp, MULT_EXPR,
			       op, fold_convert (TREE_TYPE (op), cst));
      gimple_set_visited (mul_stmt, true);
      add_to_ops_vec (ops, tmp, mul_stmt);
      changed = true;
    }

  return changed;
}
/* Perform various identities and other optimizations on the list of
   operand entries, stored in OPS.  The tree code for the binary
   operation between all the operands is OPCODE.  */

static void
optimize_ops_list (enum tree_code opcode,
		   vec<operand_entry *> *ops)
{
  unsigned int length = ops->length ();
  unsigned int i;
  operand_entry *oe;
  operand_entry *oelast = NULL;
  bool iterate = false;

  if (length == 1)
    return;

  oelast = ops->last ();

  /* If the last two are constants, pop the constants off, merge them
     and try the next two.  */
  if (oelast->rank == 0 && is_gimple_min_invariant (oelast->op))
    {
      operand_entry *oelm1 = (*ops)[length - 2];

      if (oelm1->rank == 0
	  && is_gimple_min_invariant (oelm1->op)
	  && useless_type_conversion_p (TREE_TYPE (oelm1->op),
					TREE_TYPE (oelast->op)))
	{
	  tree folded = fold_binary (opcode, TREE_TYPE (oelm1->op),
				     oelm1->op, oelast->op);

	  if (folded && is_gimple_min_invariant (folded))
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, "Merging constants\n");

	      ops->pop ();
	      ops->pop ();

	      add_to_ops_vec (ops, folded);
	      reassociate_stats.constants_eliminated++;

	      optimize_ops_list (opcode, ops);
	      return;
	    }
	}
    }

  eliminate_using_constants (opcode, ops);
  oelast = NULL;

  for (i = 0; ops->iterate (i, &oe);)
    {
      bool done = false;

      if (eliminate_not_pairs (opcode, ops, i, oe))
	return;
      if (eliminate_duplicate_pair (opcode, ops, &done, i, oe, oelast)
	  || (!done && eliminate_plus_minus_pair (opcode, ops, i, oe))
	  || (!done && eliminate_redundant_comparison (opcode, ops, i, oe)))
	{
	  if (done)
	    return;
	  iterate = true;
	  oelast = NULL;
	  continue;
	}
      oelast = oe;
      i++;
    }

  length = ops->length ();
  oelast = ops->last ();

  if (iterate)
    optimize_ops_list (opcode, ops);
}
2038 /* The following functions are subroutines to optimize_range_tests and allow
2039 it to try to change a logical combination of comparisons into a range
2040 test.
2042 For example, both
2043 X == 2 || X == 5 || X == 3 || X == 4
2044 and
2045 X >= 2 && X <= 5
2046 are converted to
2047 (unsigned) (X - 2) <= 3
2049 For more information see the comments above fold_test_range in
2050 fold-const.c; this implementation is for GIMPLE. */
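/* Why the unsigned trick works (reasoning only, not code from this
   file): for the range [2, 5], X - 2 maps 2..5 onto 0..3, while any
   X < 2 wraps around to a huge unsigned value, so the single compare
     (unsigned) (X - 2) <= 3
   is nonzero exactly for X in { 2, 3, 4, 5 }.  */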
2052 struct range_entry
2054 tree exp;
2055 tree low;
2056 tree high;
2057 bool in_p;
2058 bool strict_overflow_p;
2059 unsigned int idx, next;
2062 /* This is similar to make_range in fold-const.c, but on top of
2063 GIMPLE instead of trees. If EXP is non-NULL, it should be
2064 an SSA_NAME and the STMT argument is ignored; otherwise the STMT
2065 argument should be a GIMPLE_COND. */
2067 static void
2068 init_range_entry (struct range_entry *r, tree exp, gimple *stmt)
2070 int in_p;
2071 tree low, high;
2072 bool is_bool, strict_overflow_p;
2074 r->exp = NULL_TREE;
2075 r->in_p = false;
2076 r->strict_overflow_p = false;
2077 r->low = NULL_TREE;
2078 r->high = NULL_TREE;
2079 if (exp != NULL_TREE
2080 && (TREE_CODE (exp) != SSA_NAME || !INTEGRAL_TYPE_P (TREE_TYPE (exp))))
2081 return;
2083 /* Start with simply saying "EXP != 0" and then look at the code of EXP
2084 and see if we can refine the range. Some of the cases below may not
2085 happen, but it doesn't seem worth worrying about this. We "continue"
2086 the outer loop when we've changed something; otherwise we "break"
2087 the switch, which will "break" the while. */
2088 low = exp ? build_int_cst (TREE_TYPE (exp), 0) : boolean_false_node;
2089 high = low;
2090 in_p = 0;
2091 strict_overflow_p = false;
2092 is_bool = false;
2093 if (exp == NULL_TREE)
2094 is_bool = true;
2095 else if (TYPE_PRECISION (TREE_TYPE (exp)) == 1)
2097 if (TYPE_UNSIGNED (TREE_TYPE (exp)))
2098 is_bool = true;
2099 else
2100 return;
2102 else if (TREE_CODE (TREE_TYPE (exp)) == BOOLEAN_TYPE)
2103 is_bool = true;
2105 while (1)
2107 enum tree_code code;
2108 tree arg0, arg1, exp_type;
2109 tree nexp;
2110 location_t loc;
2112 if (exp != NULL_TREE)
2114 if (TREE_CODE (exp) != SSA_NAME
2115 || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (exp))
2116 break;
2118 stmt = SSA_NAME_DEF_STMT (exp);
2119 if (!is_gimple_assign (stmt))
2120 break;
2122 code = gimple_assign_rhs_code (stmt);
2123 arg0 = gimple_assign_rhs1 (stmt);
2124 arg1 = gimple_assign_rhs2 (stmt);
2125 exp_type = TREE_TYPE (exp);
2127 else
2129 code = gimple_cond_code (stmt);
2130 arg0 = gimple_cond_lhs (stmt);
2131 arg1 = gimple_cond_rhs (stmt);
2132 exp_type = boolean_type_node;
2135 if (TREE_CODE (arg0) != SSA_NAME)
2136 break;
2137 loc = gimple_location (stmt);
2138 switch (code)
2140 case BIT_NOT_EXPR:
2141 if (TREE_CODE (TREE_TYPE (exp)) == BOOLEAN_TYPE
2142 /* Ensure the range is either +[-,0], +[0,0],
2143 -[-,0], -[0,0] or +[1,-], +[1,1], -[1,-] or
2144 -[1,1]. If it is e.g. +[-,-] or -[-,-]
2145 or a similar expression of unconditional true or
2146 false, it should not be negated. */
2147 && ((high && integer_zerop (high))
2148 || (low && integer_onep (low))))
2150 in_p = !in_p;
2151 exp = arg0;
2152 continue;
2154 break;
2155 case SSA_NAME:
2156 exp = arg0;
2157 continue;
2158 CASE_CONVERT:
2159 if (is_bool)
2160 goto do_default;
2161 if (TYPE_PRECISION (TREE_TYPE (arg0)) == 1)
2163 if (TYPE_UNSIGNED (TREE_TYPE (arg0)))
2164 is_bool = true;
2165 else
2166 return;
2168 else if (TREE_CODE (TREE_TYPE (arg0)) == BOOLEAN_TYPE)
2169 is_bool = true;
2170 goto do_default;
2171 case EQ_EXPR:
2172 case NE_EXPR:
2173 case LT_EXPR:
2174 case LE_EXPR:
2175 case GE_EXPR:
2176 case GT_EXPR:
2177 is_bool = true;
2178 /* FALLTHRU */
2179 default:
2180 if (!is_bool)
2181 return;
2182 do_default:
2183 nexp = make_range_step (loc, code, arg0, arg1, exp_type,
2184 &low, &high, &in_p,
2185 &strict_overflow_p);
2186 if (nexp != NULL_TREE)
2188 exp = nexp;
2189 gcc_assert (TREE_CODE (exp) == SSA_NAME);
2190 continue;
2192 break;
2194 break;
2196 if (is_bool)
2198 r->exp = exp;
2199 r->in_p = in_p;
2200 r->low = low;
2201 r->high = high;
2202 r->strict_overflow_p = strict_overflow_p;
2206 /* Comparison function for qsort. Sort entries
2207 without SSA_NAME exp first, then with SSA_NAMEs sorted
2208 by increasing SSA_NAME_VERSION, and for the same SSA_NAMEs
2209 by increasing ->low and if ->low is the same, by increasing
2210 ->high. ->low == NULL_TREE means minimum, ->high == NULL_TREE
2211 maximum. */
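/* E.g. (illustrative): entries x_1 +[5,5], y_2 +[0,9], x_1 +[2,3]
   sort as x_1 +[2,3], x_1 +[5,5], y_2 +[0,9], so that ranges of the
   same SSA_NAME become adjacent for the merging loop in
   optimize_range_tests.  */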
2213 static int
2214 range_entry_cmp (const void *a, const void *b)
2216 const struct range_entry *p = (const struct range_entry *) a;
2217 const struct range_entry *q = (const struct range_entry *) b;
2219 if (p->exp != NULL_TREE && TREE_CODE (p->exp) == SSA_NAME)
2221 if (q->exp != NULL_TREE && TREE_CODE (q->exp) == SSA_NAME)
2223 /* Group range_entries for the same SSA_NAME together. */
2224 if (SSA_NAME_VERSION (p->exp) < SSA_NAME_VERSION (q->exp))
2225 return -1;
2226 else if (SSA_NAME_VERSION (p->exp) > SSA_NAME_VERSION (q->exp))
2227 return 1;
2228 /* If ->low is different, NULL low goes first, then by
2229 ascending low. */
2230 if (p->low != NULL_TREE)
2232 if (q->low != NULL_TREE)
2234 tree tem = fold_binary (LT_EXPR, boolean_type_node,
2235 p->low, q->low);
2236 if (tem && integer_onep (tem))
2237 return -1;
2238 tem = fold_binary (GT_EXPR, boolean_type_node,
2239 p->low, q->low);
2240 if (tem && integer_onep (tem))
2241 return 1;
2243 else
2244 return 1;
2246 else if (q->low != NULL_TREE)
2247 return -1;
2248 /* If ->high is different, NULL high goes last, before that by
2249 ascending high. */
2250 if (p->high != NULL_TREE)
2252 if (q->high != NULL_TREE)
2254 tree tem = fold_binary (LT_EXPR, boolean_type_node,
2255 p->high, q->high);
2256 if (tem && integer_onep (tem))
2257 return -1;
2258 tem = fold_binary (GT_EXPR, boolean_type_node,
2259 p->high, q->high);
2260 if (tem && integer_onep (tem))
2261 return 1;
2263 else
2264 return -1;
2266 else if (q->high != NULL_TREE)
2267 return 1;
2268 /* If both ranges are the same, sort below by ascending idx. */
2270 else
2271 return 1;
2273 else if (q->exp != NULL_TREE && TREE_CODE (q->exp) == SSA_NAME)
2274 return -1;
2276 if (p->idx < q->idx)
2277 return -1;
2278 else
2280 gcc_checking_assert (p->idx > q->idx);
2281 return 1;
2285 /* Helper function for update_range_test. Force EXPR into an SSA_NAME,
2286 insert needed statements before or after GSI depending on BEFORE. */
2288 static tree
2289 force_into_ssa_name (gimple_stmt_iterator *gsi, tree expr, bool before)
2291 enum gsi_iterator_update m = before ? GSI_SAME_STMT : GSI_CONTINUE_LINKING;
2292 tree ret = force_gimple_operand_gsi (gsi, expr, true, NULL_TREE, before, m);
2293 if (TREE_CODE (ret) != SSA_NAME)
2295 gimple *g = gimple_build_assign (make_ssa_name (TREE_TYPE (ret)), ret);
2296 if (before)
2297 gsi_insert_before (gsi, g, GSI_SAME_STMT);
2298 else
2299 gsi_insert_after (gsi, g, GSI_CONTINUE_LINKING);
2300 ret = gimple_assign_lhs (g);
2302 return ret;
2305 /* Helper routine of optimize_range_tests.
2306 [EXP, IN_P, LOW, HIGH, STRICT_OVERFLOW_P] is a merged range for
2307 RANGE and OTHERRANGE through OTHERRANGE + COUNT - 1 ranges,
2308 OPCODE and OPS are arguments of optimize_range_tests. If OTHERRANGE
2309 is NULL, OTHERRANGEP should not be and then OTHERRANGEP points to
2310 an array of COUNT pointers to other ranges. Return
2311 true if the range merge has been successful.
2312 If OPCODE is ERROR_MARK, this is called from within
2313 maybe_optimize_range_tests and is performing inter-bb range optimization.
2314 In that case, whether an op is BIT_AND_EXPR or BIT_IOR_EXPR is found in
2315 oe->rank. */
2317 static bool
2318 update_range_test (struct range_entry *range, struct range_entry *otherrange,
2319 struct range_entry **otherrangep,
2320 unsigned int count, enum tree_code opcode,
2321 vec<operand_entry *> *ops, tree exp, gimple_seq seq,
2322 bool in_p, tree low, tree high, bool strict_overflow_p)
2324 operand_entry *oe = (*ops)[range->idx];
2325 tree op = oe->op;
2326 gimple *stmt = op ? SSA_NAME_DEF_STMT (op)
2327 : last_stmt (BASIC_BLOCK_FOR_FN (cfun, oe->id));
2328 location_t loc = gimple_location (stmt);
2329 tree optype = op ? TREE_TYPE (op) : boolean_type_node;
2330 tree tem = build_range_check (loc, optype, unshare_expr (exp),
2331 in_p, low, high);
2332 enum warn_strict_overflow_code wc = WARN_STRICT_OVERFLOW_COMPARISON;
2333 gimple_stmt_iterator gsi;
2334 unsigned int i, uid;
2336 if (tem == NULL_TREE)
2337 return false;
2339 /* If op is default def SSA_NAME, there is no place to insert the
2340 new comparison. Give up, unless we can use OP itself as the
2341 range test. */
2342 if (op && SSA_NAME_IS_DEFAULT_DEF (op))
2344 if (op == range->exp
2345 && ((TYPE_PRECISION (optype) == 1 && TYPE_UNSIGNED (optype))
2346 || TREE_CODE (optype) == BOOLEAN_TYPE)
2347 && (op == tem
2348 || (TREE_CODE (tem) == EQ_EXPR
2349 && TREE_OPERAND (tem, 0) == op
2350 && integer_onep (TREE_OPERAND (tem, 1))))
2351 && opcode != BIT_IOR_EXPR
2352 && (opcode != ERROR_MARK || oe->rank != BIT_IOR_EXPR))
2354 stmt = NULL;
2355 tem = op;
2357 else
2358 return false;
2361 if (strict_overflow_p && issue_strict_overflow_warning (wc))
2362 warning_at (loc, OPT_Wstrict_overflow,
2363 "assuming signed overflow does not occur "
2364 "when simplifying range test");
2366 if (dump_file && (dump_flags & TDF_DETAILS))
2368 struct range_entry *r;
2369 fprintf (dump_file, "Optimizing range tests ");
2370 print_generic_expr (dump_file, range->exp);
2371 fprintf (dump_file, " %c[", range->in_p ? '+' : '-');
2372 print_generic_expr (dump_file, range->low);
2373 fprintf (dump_file, ", ");
2374 print_generic_expr (dump_file, range->high);
2375 fprintf (dump_file, "]");
2376 for (i = 0; i < count; i++)
2378 if (otherrange)
2379 r = otherrange + i;
2380 else
2381 r = otherrangep[i];
2382 fprintf (dump_file, " and %c[", r->in_p ? '+' : '-');
2383 print_generic_expr (dump_file, r->low);
2384 fprintf (dump_file, ", ");
2385 print_generic_expr (dump_file, r->high);
2386 fprintf (dump_file, "]");
2388 fprintf (dump_file, "\n into ");
2389 print_generic_expr (dump_file, tem);
2390 fprintf (dump_file, "\n");
2393 if (opcode == BIT_IOR_EXPR
2394 || (opcode == ERROR_MARK && oe->rank == BIT_IOR_EXPR))
2395 tem = invert_truthvalue_loc (loc, tem);
2397 tem = fold_convert_loc (loc, optype, tem);
2398 if (stmt)
2400 gsi = gsi_for_stmt (stmt);
2401 uid = gimple_uid (stmt);
2403 else
2405 gsi = gsi_none ();
2406 uid = 0;
2408 if (stmt == NULL)
2409 gcc_checking_assert (tem == op);
2410 /* In rare cases range->exp can be equal to lhs of stmt.
2411 In that case we have to insert after the stmt rather than before
2412 it. If stmt is a PHI, insert it at the start of the basic block. */
2413 else if (op != range->exp)
2415 gsi_insert_seq_before (&gsi, seq, GSI_SAME_STMT);
2416 tem = force_into_ssa_name (&gsi, tem, true);
2417 gsi_prev (&gsi);
2419 else if (gimple_code (stmt) != GIMPLE_PHI)
2421 gsi_insert_seq_after (&gsi, seq, GSI_CONTINUE_LINKING);
2422 tem = force_into_ssa_name (&gsi, tem, false);
2424 else
2426 gsi = gsi_after_labels (gimple_bb (stmt));
2427 if (!gsi_end_p (gsi))
2428 uid = gimple_uid (gsi_stmt (gsi));
2429 else
2431 gsi = gsi_start_bb (gimple_bb (stmt));
2432 uid = 1;
2433 while (!gsi_end_p (gsi))
2435 uid = gimple_uid (gsi_stmt (gsi));
2436 gsi_next (&gsi);
2439 gsi_insert_seq_before (&gsi, seq, GSI_SAME_STMT);
2440 tem = force_into_ssa_name (&gsi, tem, true);
2441 if (gsi_end_p (gsi))
2442 gsi = gsi_last_bb (gimple_bb (stmt));
2443 else
2444 gsi_prev (&gsi);
2446 for (; !gsi_end_p (gsi); gsi_prev (&gsi))
2447 if (gimple_uid (gsi_stmt (gsi)))
2448 break;
2449 else
2450 gimple_set_uid (gsi_stmt (gsi), uid);
2452 oe->op = tem;
2453 range->exp = exp;
2454 range->low = low;
2455 range->high = high;
2456 range->in_p = in_p;
2457 range->strict_overflow_p = false;
2459 for (i = 0; i < count; i++)
2461 if (otherrange)
2462 range = otherrange + i;
2463 else
2464 range = otherrangep[i];
2465 oe = (*ops)[range->idx];
2466 /* Now change all the other range test immediate uses, so that
2467 those tests will be optimized away. */
2468 if (opcode == ERROR_MARK)
2470 if (oe->op)
2471 oe->op = build_int_cst (TREE_TYPE (oe->op),
2472 oe->rank == BIT_IOR_EXPR ? 0 : 1);
2473 else
2474 oe->op = (oe->rank == BIT_IOR_EXPR
2475 ? boolean_false_node : boolean_true_node);
2477 else
2478 oe->op = error_mark_node;
2479 range->exp = NULL_TREE;
2480 range->low = NULL_TREE;
2481 range->high = NULL_TREE;
2483 return true;
2486 /* Optimize X == CST1 || X == CST2
2487 if popcount (CST1 ^ CST2) == 1 into
2488 (X & ~(CST1 ^ CST2)) == (CST1 & ~(CST1 ^ CST2)).
2489 Similarly for ranges. E.g.
2490 X != 2 && X != 3 && X != 10 && X != 11
2491 will be transformed by the previous optimization into
2492 !((X - 2U) <= 1U || (X - 10U) <= 1U)
2493 and this loop can transform that into
2494 !(((X & ~8) - 2U) <= 1U). */
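/* A concrete instance (illustrative): for X == 2 || X == 5 the
   optimization does not apply, since popcount (2 ^ 5) == 3, but for
   X == 2 || X == 3 we have CST1 ^ CST2 == 1, popcount (1) == 1, and
   the pair becomes (X & ~1) == 2, a single masked comparison.  */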
2496 static bool
2497 optimize_range_tests_xor (enum tree_code opcode, tree type,
2498 tree lowi, tree lowj, tree highi, tree highj,
2499 vec<operand_entry *> *ops,
2500 struct range_entry *rangei,
2501 struct range_entry *rangej)
2503 tree lowxor, highxor, tem, exp;
2504 /* Check lowi ^ lowj == highi ^ highj and
2505 popcount (lowi ^ lowj) == 1. */
2506 lowxor = fold_binary (BIT_XOR_EXPR, type, lowi, lowj);
2507 if (lowxor == NULL_TREE || TREE_CODE (lowxor) != INTEGER_CST)
2508 return false;
2509 if (!integer_pow2p (lowxor))
2510 return false;
2511 highxor = fold_binary (BIT_XOR_EXPR, type, highi, highj);
2512 if (!tree_int_cst_equal (lowxor, highxor))
2513 return false;
2515 tem = fold_build1 (BIT_NOT_EXPR, type, lowxor);
2516 exp = fold_build2 (BIT_AND_EXPR, type, rangei->exp, tem);
2517 lowj = fold_build2 (BIT_AND_EXPR, type, lowi, tem);
2518 highj = fold_build2 (BIT_AND_EXPR, type, highi, tem);
2519 if (update_range_test (rangei, rangej, NULL, 1, opcode, ops, exp,
2520 NULL, rangei->in_p, lowj, highj,
2521 rangei->strict_overflow_p
2522 || rangej->strict_overflow_p))
2523 return true;
2524 return false;
2527 /* Optimize X == CST1 || X == CST2
2528 if popcount (CST2 - CST1) == 1 into
2529 ((X - CST1) & ~(CST2 - CST1)) == 0.
2530 Similarly for ranges. E.g.
2531 X == 43 || X == 76 || X == 44 || X == 78 || X == 77 || X == 46
2532 || X == 75 || X == 45
2533 will be transformed by the previous optimization into
2534 (X - 43U) <= 3U || (X - 75U) <= 3U
2535 and this loop can transform that into
2536 ((X - 43U) & ~(75U - 43U)) <= 3U. */
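/* Checking the example above (illustrative arithmetic): 75U - 43U == 32
   has popcount 1 and both subranges span 3, so with the mask ~32U
   e.g. X == 77 gives (77 - 43U) & ~32U == 2 <= 3U, while X == 50
   gives (50 - 43U) & ~32U == 7 > 3U.  */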
2537 static bool
2538 optimize_range_tests_diff (enum tree_code opcode, tree type,
2539 tree lowi, tree lowj, tree highi, tree highj,
2540 vec<operand_entry *> *ops,
2541 struct range_entry *rangei,
2542 struct range_entry *rangej)
2544 tree tem1, tem2, mask;
2545 /* Check highi - lowi == highj - lowj. */
2546 tem1 = fold_binary (MINUS_EXPR, type, highi, lowi);
2547 if (tem1 == NULL_TREE || TREE_CODE (tem1) != INTEGER_CST)
2548 return false;
2549 tem2 = fold_binary (MINUS_EXPR, type, highj, lowj);
2550 if (!tree_int_cst_equal (tem1, tem2))
2551 return false;
2552 /* Check popcount (lowj - lowi) == 1. */
2553 tem1 = fold_binary (MINUS_EXPR, type, lowj, lowi);
2554 if (tem1 == NULL_TREE || TREE_CODE (tem1) != INTEGER_CST)
2555 return false;
2556 if (!integer_pow2p (tem1))
2557 return false;
2559 type = unsigned_type_for (type);
2560 tem1 = fold_convert (type, tem1);
2561 tem2 = fold_convert (type, tem2);
2562 lowi = fold_convert (type, lowi);
2563 mask = fold_build1 (BIT_NOT_EXPR, type, tem1);
2564 tem1 = fold_binary (MINUS_EXPR, type,
2565 fold_convert (type, rangei->exp), lowi);
2566 tem1 = fold_build2 (BIT_AND_EXPR, type, tem1, mask);
2567 lowj = build_int_cst (type, 0);
2568 if (update_range_test (rangei, rangej, NULL, 1, opcode, ops, tem1,
2569 NULL, rangei->in_p, lowj, tem2,
2570 rangei->strict_overflow_p
2571 || rangej->strict_overflow_p))
2572 return true;
2573 return false;
2576 /* Perform the checks common to optimize_range_tests_xor and
2577 optimize_range_tests_diff.
2578 If OPTIMIZE_XOR is true, call optimize_range_tests_xor;
2579 otherwise call optimize_range_tests_diff. */
2581 static bool
2582 optimize_range_tests_1 (enum tree_code opcode, int first, int length,
2583 bool optimize_xor, vec<operand_entry *> *ops,
2584 struct range_entry *ranges)
2586 int i, j;
2587 bool any_changes = false;
2588 for (i = first; i < length; i++)
2590 tree lowi, highi, lowj, highj, type, tem;
2592 if (ranges[i].exp == NULL_TREE || ranges[i].in_p)
2593 continue;
2594 type = TREE_TYPE (ranges[i].exp);
2595 if (!INTEGRAL_TYPE_P (type))
2596 continue;
2597 lowi = ranges[i].low;
2598 if (lowi == NULL_TREE)
2599 lowi = TYPE_MIN_VALUE (type);
2600 highi = ranges[i].high;
2601 if (highi == NULL_TREE)
2602 continue;
2603 for (j = i + 1; j < length && j < i + 64; j++)
2605 bool changes;
2606 if (ranges[i].exp != ranges[j].exp || ranges[j].in_p)
2607 continue;
2608 lowj = ranges[j].low;
2609 if (lowj == NULL_TREE)
2610 continue;
2611 highj = ranges[j].high;
2612 if (highj == NULL_TREE)
2613 highj = TYPE_MAX_VALUE (type);
2614 /* Check lowj > highi. */
2615 tem = fold_binary (GT_EXPR, boolean_type_node,
2616 lowj, highi);
2617 if (tem == NULL_TREE || !integer_onep (tem))
2618 continue;
2619 if (optimize_xor)
2620 changes = optimize_range_tests_xor (opcode, type, lowi, lowj,
2621 highi, highj, ops,
2622 ranges + i, ranges + j);
2623 else
2624 changes = optimize_range_tests_diff (opcode, type, lowi, lowj,
2625 highi, highj, ops,
2626 ranges + i, ranges + j);
2627 if (changes)
2629 any_changes = true;
2630 break;
2634 return any_changes;
2637 /* Helper function of optimize_range_tests_to_bit_test. Handle a single
2638 range, EXP, LOW, HIGH, compute the bit mask of bits to test and return
2639 EXP on success, NULL otherwise. */
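/* For instance (illustrative): with PREC == 64, TOTALLOW == LOW == 43
   and HIGH == 46, the span is 46 - 43 + 1 == 4 bits, so *MASK becomes
   0xf; a later range [75, 78] measured against the same TOTALLOW
   yields 0xf << (75 - 43), i.e. bits 32..35.  */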
2641 static tree
2642 extract_bit_test_mask (tree exp, int prec, tree totallow, tree low, tree high,
2643 wide_int *mask, tree *totallowp)
2645 tree tem = int_const_binop (MINUS_EXPR, high, low);
2646 if (tem == NULL_TREE
2647 || TREE_CODE (tem) != INTEGER_CST
2648 || TREE_OVERFLOW (tem)
2649 || tree_int_cst_sgn (tem) == -1
2650 || compare_tree_int (tem, prec) != -1)
2651 return NULL_TREE;
2653 unsigned HOST_WIDE_INT max = tree_to_uhwi (tem) + 1;
2654 *mask = wi::shifted_mask (0, max, false, prec);
2655 if (TREE_CODE (exp) == BIT_AND_EXPR
2656 && TREE_CODE (TREE_OPERAND (exp, 1)) == INTEGER_CST)
2658 widest_int msk = wi::to_widest (TREE_OPERAND (exp, 1));
2659 msk = wi::zext (~msk, TYPE_PRECISION (TREE_TYPE (exp)));
2660 if (wi::popcount (msk) == 1
2661 && wi::ltu_p (msk, prec - max))
2663 *mask |= wi::shifted_mask (msk.to_uhwi (), max, false, prec);
2664 max += msk.to_uhwi ();
2665 exp = TREE_OPERAND (exp, 0);
2666 if (integer_zerop (low)
2667 && TREE_CODE (exp) == PLUS_EXPR
2668 && TREE_CODE (TREE_OPERAND (exp, 1)) == INTEGER_CST)
2670 tree ret = TREE_OPERAND (exp, 0);
2671 STRIP_NOPS (ret);
2672 widest_int bias
2673 = wi::neg (wi::sext (wi::to_widest (TREE_OPERAND (exp, 1)),
2674 TYPE_PRECISION (TREE_TYPE (low))));
2675 tree tbias = wide_int_to_tree (TREE_TYPE (ret), bias);
2676 if (totallowp)
2678 *totallowp = tbias;
2679 return ret;
2681 else if (!tree_int_cst_lt (totallow, tbias))
2682 return NULL_TREE;
2683 bias = wi::to_widest (tbias);
2684 bias -= wi::to_widest (totallow);
2685 if (bias >= 0 && bias < prec - max)
2687 *mask = wi::lshift (*mask, bias);
2688 return ret;
2693 if (totallowp)
2694 return exp;
2695 if (!tree_int_cst_lt (totallow, low))
2696 return exp;
2697 tem = int_const_binop (MINUS_EXPR, low, totallow);
2698 if (tem == NULL_TREE
2699 || TREE_CODE (tem) != INTEGER_CST
2700 || TREE_OVERFLOW (tem)
2701 || compare_tree_int (tem, prec - max) == 1)
2702 return NULL_TREE;
2704 *mask = wi::lshift (*mask, wi::to_widest (tem));
2705 return exp;
2708 /* Attempt to optimize small range tests using bit test.
2709 E.g.
2710 X != 43 && X != 76 && X != 44 && X != 78 && X != 49
2711 && X != 77 && X != 46 && X != 75 && X != 45 && X != 82
2712 has been optimized by the earlier transformations into:
2713 ((X - 43U) & ~32U) > 3U && X != 49 && X != 82
2714 As the whole range 43 through 82 spans fewer than 64 values,
2715 for 64-bit word targets optimize that into:
2716 (X - 43U) > 40U && ((1 << (X - 43U)) & 0x8F0000004FULL) == 0 */
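/* Where that constant comes from (illustrative breakdown): each
   remaining value X sets bit X - 43 of the mask, so 43..46 give bits
   0..3 (0xf), 49 gives bit 6 (0x40), 75..78 give bits 32..35 and 82
   gives bit 39, which together form 0x8F0000004FULL.  */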
2718 static bool
2719 optimize_range_tests_to_bit_test (enum tree_code opcode, int first, int length,
2720 vec<operand_entry *> *ops,
2721 struct range_entry *ranges)
2723 int i, j;
2724 bool any_changes = false;
2725 int prec = GET_MODE_BITSIZE (word_mode);
2726 auto_vec<struct range_entry *, 64> candidates;
2728 for (i = first; i < length - 2; i++)
2730 tree lowi, highi, lowj, highj, type;
2732 if (ranges[i].exp == NULL_TREE || ranges[i].in_p)
2733 continue;
2734 type = TREE_TYPE (ranges[i].exp);
2735 if (!INTEGRAL_TYPE_P (type))
2736 continue;
2737 lowi = ranges[i].low;
2738 if (lowi == NULL_TREE)
2739 lowi = TYPE_MIN_VALUE (type);
2740 highi = ranges[i].high;
2741 if (highi == NULL_TREE)
2742 continue;
2743 wide_int mask;
2744 tree exp = extract_bit_test_mask (ranges[i].exp, prec, lowi, lowi,
2745 highi, &mask, &lowi);
2746 if (exp == NULL_TREE)
2747 continue;
2748 bool strict_overflow_p = ranges[i].strict_overflow_p;
2749 candidates.truncate (0);
2750 int end = MIN (i + 64, length);
2751 for (j = i + 1; j < end; j++)
2753 tree exp2;
2754 if (ranges[j].exp == NULL_TREE || ranges[j].in_p)
2755 continue;
2756 if (ranges[j].exp == exp)
2757 ;
2758 else if (TREE_CODE (ranges[j].exp) == BIT_AND_EXPR)
2760 exp2 = TREE_OPERAND (ranges[j].exp, 0);
2761 if (exp2 == exp)
2762 ;
2763 else if (TREE_CODE (exp2) == PLUS_EXPR)
2765 exp2 = TREE_OPERAND (exp2, 0);
2766 STRIP_NOPS (exp2);
2767 if (exp2 != exp)
2768 continue;
2770 else
2771 continue;
2773 else
2774 continue;
2775 lowj = ranges[j].low;
2776 if (lowj == NULL_TREE)
2777 continue;
2778 highj = ranges[j].high;
2779 if (highj == NULL_TREE)
2780 highj = TYPE_MAX_VALUE (type);
2781 wide_int mask2;
2782 exp2 = extract_bit_test_mask (ranges[j].exp, prec, lowi, lowj,
2783 highj, &mask2, NULL);
2784 if (exp2 != exp)
2785 continue;
2786 mask |= mask2;
2787 strict_overflow_p |= ranges[j].strict_overflow_p;
2788 candidates.safe_push (&ranges[j]);
2791 /* If we would otherwise need 3 or more comparisons, use a bit test. */
2792 if (candidates.length () >= 2)
2794 tree high = wide_int_to_tree (TREE_TYPE (lowi),
2795 wi::to_widest (lowi)
2796 + prec - 1 - wi::clz (mask));
2797 operand_entry *oe = (*ops)[ranges[i].idx];
2798 tree op = oe->op;
2799 gimple *stmt = op ? SSA_NAME_DEF_STMT (op)
2800 : last_stmt (BASIC_BLOCK_FOR_FN (cfun, oe->id));
2801 location_t loc = gimple_location (stmt);
2802 tree optype = op ? TREE_TYPE (op) : boolean_type_node;
2804 /* See whether it is cheaper to pretend the minimum value of the
2805 range is 0, if the maximum value is small enough.
2806 That way we avoid the subtraction of the minimum value, but the
2807 mask constant could perhaps be more expensive. */
2808 if (compare_tree_int (lowi, 0) > 0
2809 && compare_tree_int (high, prec) < 0)
2811 int cost_diff;
2812 HOST_WIDE_INT m = tree_to_uhwi (lowi);
2813 rtx reg = gen_raw_REG (word_mode, 10000);
2814 bool speed_p = optimize_bb_for_speed_p (gimple_bb (stmt));
2815 cost_diff = set_rtx_cost (gen_rtx_PLUS (word_mode, reg,
2816 GEN_INT (-m)), speed_p);
2817 rtx r = immed_wide_int_const (mask, word_mode);
2818 cost_diff += set_src_cost (gen_rtx_AND (word_mode, reg, r),
2819 word_mode, speed_p);
2820 r = immed_wide_int_const (wi::lshift (mask, m), word_mode);
2821 cost_diff -= set_src_cost (gen_rtx_AND (word_mode, reg, r),
2822 word_mode, speed_p);
2823 if (cost_diff > 0)
2825 mask = wi::lshift (mask, m);
2826 lowi = build_zero_cst (TREE_TYPE (lowi));
2830 tree tem = build_range_check (loc, optype, unshare_expr (exp),
2831 false, lowi, high);
2832 if (tem == NULL_TREE || is_gimple_val (tem))
2833 continue;
2834 tree etype = unsigned_type_for (TREE_TYPE (exp));
2835 exp = fold_build2_loc (loc, MINUS_EXPR, etype,
2836 fold_convert_loc (loc, etype, exp),
2837 fold_convert_loc (loc, etype, lowi));
2838 exp = fold_convert_loc (loc, integer_type_node, exp);
2839 tree word_type = lang_hooks.types.type_for_mode (word_mode, 1);
2840 exp = fold_build2_loc (loc, LSHIFT_EXPR, word_type,
2841 build_int_cst (word_type, 1), exp);
2842 exp = fold_build2_loc (loc, BIT_AND_EXPR, word_type, exp,
2843 wide_int_to_tree (word_type, mask));
2844 exp = fold_build2_loc (loc, EQ_EXPR, optype, exp,
2845 build_zero_cst (word_type));
2846 if (is_gimple_val (exp))
2847 continue;
2849 /* The shift might have undefined behavior if TEM is true,
2850 but reassociate_bb isn't prepared to have basic blocks
2851 split when it is running. So, temporarily emit code
2852 with BIT_IOR_EXPR instead of &&, and fix it up in
2853 branch_fixup. */
2854 gimple_seq seq;
2855 tem = force_gimple_operand (tem, &seq, true, NULL_TREE);
2856 gcc_assert (TREE_CODE (tem) == SSA_NAME);
2857 gimple_set_visited (SSA_NAME_DEF_STMT (tem), true);
2858 gimple_seq seq2;
2859 exp = force_gimple_operand (exp, &seq2, true, NULL_TREE);
2860 gimple_seq_add_seq_without_update (&seq, seq2);
2861 gcc_assert (TREE_CODE (exp) == SSA_NAME);
2862 gimple_set_visited (SSA_NAME_DEF_STMT (exp), true);
2863 gimple *g = gimple_build_assign (make_ssa_name (optype),
2864 BIT_IOR_EXPR, tem, exp);
2865 gimple_set_location (g, loc);
2866 gimple_seq_add_stmt_without_update (&seq, g);
2867 exp = gimple_assign_lhs (g);
2868 tree val = build_zero_cst (optype);
2869 if (update_range_test (&ranges[i], NULL, candidates.address (),
2870 candidates.length (), opcode, ops, exp,
2871 seq, false, val, val, strict_overflow_p))
2873 any_changes = true;
2874 reassoc_branch_fixups.safe_push (tem);
2876 else
2877 gimple_seq_discard (seq);
2880 return any_changes;
2883 /* Attempt to optimize for signed a and b where b is known to be >= 0:
2884 a >= 0 && a < b into (unsigned) a < (unsigned) b
2885 a >= 0 && a <= b into (unsigned) a <= (unsigned) b */
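/* The reasoning (sketch): when a >= 0 is known, signed and unsigned
   orderings of a agree, and since b >= 0 too, a < b holds in signed
   arithmetic iff (unsigned) a < (unsigned) b; the a >= 0 test itself
   becomes redundant, as a negative a would cast to a value larger
   than any b with b >= 0.  */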
2887 static bool
2888 optimize_range_tests_var_bound (enum tree_code opcode, int first, int length,
2889 vec<operand_entry *> *ops,
2890 struct range_entry *ranges)
2892 int i;
2893 bool any_changes = false;
2894 hash_map<tree, int> *map = NULL;
2896 for (i = first; i < length; i++)
2898 if (ranges[i].exp == NULL_TREE
2899 || TREE_CODE (ranges[i].exp) != SSA_NAME
2900 || !ranges[i].in_p)
2901 continue;
2903 tree type = TREE_TYPE (ranges[i].exp);
2904 if (!INTEGRAL_TYPE_P (type)
2905 || TYPE_UNSIGNED (type)
2906 || ranges[i].low == NULL_TREE
2907 || !integer_zerop (ranges[i].low)
2908 || ranges[i].high != NULL_TREE)
2909 continue;
2910 /* EXP >= 0 here. */
2911 if (map == NULL)
2912 map = new hash_map <tree, int>;
2913 map->put (ranges[i].exp, i);
2916 if (map == NULL)
2917 return false;
2919 for (i = 0; i < length; i++)
2921 if (ranges[i].low == NULL_TREE
2922 || ranges[i].high == NULL_TREE
2923 || !integer_zerop (ranges[i].low)
2924 || !integer_zerop (ranges[i].high))
2925 continue;
2927 gimple *stmt;
2928 tree_code ccode;
2929 tree rhs1, rhs2;
2930 if (ranges[i].exp)
2932 if (TREE_CODE (ranges[i].exp) != SSA_NAME)
2933 continue;
2934 stmt = SSA_NAME_DEF_STMT (ranges[i].exp);
2935 if (!is_gimple_assign (stmt))
2936 continue;
2937 ccode = gimple_assign_rhs_code (stmt);
2938 rhs1 = gimple_assign_rhs1 (stmt);
2939 rhs2 = gimple_assign_rhs2 (stmt);
2941 else
2943 operand_entry *oe = (*ops)[ranges[i].idx];
2944 stmt = last_stmt (BASIC_BLOCK_FOR_FN (cfun, oe->id));
2945 if (gimple_code (stmt) != GIMPLE_COND)
2946 continue;
2947 ccode = gimple_cond_code (stmt);
2948 rhs1 = gimple_cond_lhs (stmt);
2949 rhs2 = gimple_cond_rhs (stmt);
2952 if (TREE_CODE (rhs1) != SSA_NAME
2953 || rhs2 == NULL_TREE
2954 || TREE_CODE (rhs2) != SSA_NAME)
2955 continue;
2957 switch (ccode)
2959 case GT_EXPR:
2960 case GE_EXPR:
2961 if (!ranges[i].in_p)
2962 std::swap (rhs1, rhs2);
2963 ccode = swap_tree_comparison (ccode);
2964 break;
2965 case LT_EXPR:
2966 case LE_EXPR:
2967 if (ranges[i].in_p)
2968 std::swap (rhs1, rhs2);
2969 break;
2970 default:
2971 continue;
2974 int *idx = map->get (rhs1);
2975 if (idx == NULL)
2976 continue;
2978 wide_int nz = get_nonzero_bits (rhs2);
2979 if (wi::neg_p (nz))
2980 continue;
2982 /* We have EXP < RHS2 or EXP <= RHS2 where EXP >= 0
2983 and RHS2 is known to be >= 0. */
2984 tree utype = unsigned_type_for (TREE_TYPE (rhs1));
2986 enum warn_strict_overflow_code wc = WARN_STRICT_OVERFLOW_COMPARISON;
2987 if ((ranges[*idx].strict_overflow_p
2988 || ranges[i].strict_overflow_p)
2989 && issue_strict_overflow_warning (wc))
2990 warning_at (gimple_location (stmt), OPT_Wstrict_overflow,
2991 "assuming signed overflow does not occur "
2992 "when simplifying range test");
2994 if (dump_file && (dump_flags & TDF_DETAILS))
2996 struct range_entry *r = &ranges[*idx];
2997 fprintf (dump_file, "Optimizing range test ");
2998 print_generic_expr (dump_file, r->exp);
2999 fprintf (dump_file, " +[");
3000 print_generic_expr (dump_file, r->low);
3001 fprintf (dump_file, ", ");
3002 print_generic_expr (dump_file, r->high);
3003 fprintf (dump_file, "] and comparison ");
3004 print_generic_expr (dump_file, rhs1);
3005 fprintf (dump_file, " %s ", op_symbol_code (ccode));
3006 print_generic_expr (dump_file, rhs2);
3007 fprintf (dump_file, "\n into (");
3008 print_generic_expr (dump_file, utype);
3009 fprintf (dump_file, ") ");
3010 print_generic_expr (dump_file, rhs1);
3011 fprintf (dump_file, " %s (", op_symbol_code (ccode));
3012 print_generic_expr (dump_file, utype);
3013 fprintf (dump_file, ") ");
3014 print_generic_expr (dump_file, rhs2);
3015 fprintf (dump_file, "\n");
3018 if (ranges[i].in_p)
3019 std::swap (rhs1, rhs2);
3021 unsigned int uid = gimple_uid (stmt);
3022 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
3023 gimple *g = gimple_build_assign (make_ssa_name (utype), NOP_EXPR, rhs1);
3024 gimple_set_uid (g, uid);
3025 rhs1 = gimple_assign_lhs (g);
3026 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
3027 g = gimple_build_assign (make_ssa_name (utype), NOP_EXPR, rhs2);
3028 gimple_set_uid (g, uid);
3029 rhs2 = gimple_assign_lhs (g);
3030 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
3031 if (tree_swap_operands_p (rhs1, rhs2))
3033 std::swap (rhs1, rhs2);
3034 ccode = swap_tree_comparison (ccode);
3036 if (gimple_code (stmt) == GIMPLE_COND)
3038 gcond *c = as_a <gcond *> (stmt);
3039 gimple_cond_set_code (c, ccode);
3040 gimple_cond_set_lhs (c, rhs1);
3041 gimple_cond_set_rhs (c, rhs2);
3042 update_stmt (stmt);
3044 else
3046 operand_entry *oe = (*ops)[ranges[i].idx];
3047 tree ctype = oe->op ? TREE_TYPE (oe->op) : boolean_type_node;
3048 if (!INTEGRAL_TYPE_P (ctype)
3049 || (TREE_CODE (ctype) != BOOLEAN_TYPE
3050 && TYPE_PRECISION (ctype) != 1))
3051 ctype = boolean_type_node;
3052 g = gimple_build_assign (make_ssa_name (ctype), ccode, rhs1, rhs2);
3053 gimple_set_uid (g, uid);
3054 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
3055 if (oe->op && ctype != TREE_TYPE (oe->op))
3057 g = gimple_build_assign (make_ssa_name (TREE_TYPE (oe->op)),
3058 NOP_EXPR, gimple_assign_lhs (g));
3059 gimple_set_uid (g, uid);
3060 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
3062 ranges[i].exp = gimple_assign_lhs (g);
3063 oe->op = ranges[i].exp;
3064 ranges[i].low = build_zero_cst (TREE_TYPE (ranges[i].exp));
3065 ranges[i].high = ranges[i].low;
3067 ranges[i].strict_overflow_p = false;
3068 operand_entry *oe = (*ops)[ranges[*idx].idx];
3069 /* Now change all the other range test immediate uses, so that
3070 those tests will be optimized away. */
3071 if (opcode == ERROR_MARK)
3073 if (oe->op)
3074 oe->op = build_int_cst (TREE_TYPE (oe->op),
3075 oe->rank == BIT_IOR_EXPR ? 0 : 1);
3076 else
3077 oe->op = (oe->rank == BIT_IOR_EXPR
3078 ? boolean_false_node : boolean_true_node);
3080 else
3081 oe->op = error_mark_node;
3082 ranges[*idx].exp = NULL_TREE;
3083 ranges[*idx].low = NULL_TREE;
3084 ranges[*idx].high = NULL_TREE;
3085 any_changes = true;
3088 delete map;
3089 return any_changes;
3092 /* Optimize range tests, similarly to how fold_range_test optimizes
3093 them on trees. The tree code for the binary
3094 operation between all the operands is OPCODE.
3095 If OPCODE is ERROR_MARK, optimize_range_tests is called from within
3096 maybe_optimize_range_tests for inter-bb range optimization.
3097 In that case if oe->op is NULL, oe->id is the index of the bb
3098 whose GIMPLE_COND is && or ||ed into the test, and oe->rank says
3099 the actual opcode. */
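/* Sketch of the | handling (reasoning only): by De Morgan,
   x in A || x in B is !(x not-in A && x not-in B), so for BIT_IOR_EXPR
   each range is inverted below before merging, and update_range_test
   inverts the merged test again when emitting it.  */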
3101 static bool
3102 optimize_range_tests (enum tree_code opcode,
3103 vec<operand_entry *> *ops)
3105 unsigned int length = ops->length (), i, j, first;
3106 operand_entry *oe;
3107 struct range_entry *ranges;
3108 bool any_changes = false;
3110 if (length == 1)
3111 return false;
3113 ranges = XNEWVEC (struct range_entry, length);
3114 for (i = 0; i < length; i++)
3116 oe = (*ops)[i];
3117 ranges[i].idx = i;
3118 init_range_entry (ranges + i, oe->op,
3119 oe->op
3120 ? NULL
3121 : last_stmt (BASIC_BLOCK_FOR_FN (cfun, oe->id)));
3122 /* For |, invert it now; we will invert it again before emitting
3123 the optimized expression. */
3124 if (opcode == BIT_IOR_EXPR
3125 || (opcode == ERROR_MARK && oe->rank == BIT_IOR_EXPR))
3126 ranges[i].in_p = !ranges[i].in_p;
3129 qsort (ranges, length, sizeof (*ranges), range_entry_cmp);
3130 for (i = 0; i < length; i++)
3131 if (ranges[i].exp != NULL_TREE && TREE_CODE (ranges[i].exp) == SSA_NAME)
3132 break;
3134 /* Try to merge ranges. */
3135 for (first = i; i < length; i++)
3137 tree low = ranges[i].low;
3138 tree high = ranges[i].high;
3139 int in_p = ranges[i].in_p;
3140 bool strict_overflow_p = ranges[i].strict_overflow_p;
3141 int update_fail_count = 0;
3143 for (j = i + 1; j < length; j++)
3145 if (ranges[i].exp != ranges[j].exp)
3146 break;
3147 if (!merge_ranges (&in_p, &low, &high, in_p, low, high,
3148 ranges[j].in_p, ranges[j].low, ranges[j].high))
3149 break;
3150 strict_overflow_p |= ranges[j].strict_overflow_p;
3153 if (j == i + 1)
3154 continue;
3156 if (update_range_test (ranges + i, ranges + i + 1, NULL, j - i - 1,
3157 opcode, ops, ranges[i].exp, NULL, in_p,
3158 low, high, strict_overflow_p))
3160 i = j - 1;
3161 any_changes = true;
3163 /* Avoid quadratic complexity if all merge_ranges calls would succeed,
3164 while update_range_test would fail. */
3165 else if (update_fail_count == 64)
3166 i = j - 1;
3167 else
3168 ++update_fail_count;
3171 any_changes |= optimize_range_tests_1 (opcode, first, length, true,
3172 ops, ranges);
3174 if (BRANCH_COST (optimize_function_for_speed_p (cfun), false) >= 2)
3175 any_changes |= optimize_range_tests_1 (opcode, first, length, false,
3176 ops, ranges);
3177 if (lshift_cheap_p (optimize_function_for_speed_p (cfun)))
3178 any_changes |= optimize_range_tests_to_bit_test (opcode, first, length,
3179 ops, ranges);
3180 any_changes |= optimize_range_tests_var_bound (opcode, first, length, ops,
3181 ranges);
3183 if (any_changes && opcode != ERROR_MARK)
3185 j = 0;
3186 FOR_EACH_VEC_ELT (*ops, i, oe)
3188 if (oe->op == error_mark_node)
3189 continue;
3190 else if (i != j)
3191 (*ops)[j] = oe;
3192 j++;
3194 ops->truncate (j);
3197 XDELETEVEC (ranges);
3198 return any_changes;
3201 /* A subroutine of optimize_vec_cond_expr to extract and canonicalize
3202 the operands of the VEC_COND_EXPR. Returns ERROR_MARK on failure,
3203 otherwise the comparison code. */
3205 static tree_code
3206 ovce_extract_ops (tree var, gassign **rets, bool *reti)
3208 if (TREE_CODE (var) != SSA_NAME)
3209 return ERROR_MARK;
3211 gassign *stmt = dyn_cast <gassign *> (SSA_NAME_DEF_STMT (var));
3212 if (stmt == NULL)
3213 return ERROR_MARK;
3215 /* ??? If we start creating more COND_EXPR, we could perform
3216 this same optimization with them. For now, simplify. */
3217 if (gimple_assign_rhs_code (stmt) != VEC_COND_EXPR)
3218 return ERROR_MARK;
3220 tree cond = gimple_assign_rhs1 (stmt);
3221 tree_code cmp = TREE_CODE (cond);
3222 if (TREE_CODE_CLASS (cmp) != tcc_comparison)
3223 return ERROR_MARK;
3225 /* ??? For now, allow only canonical true and false result vectors.
3226 We could expand this to other constants should the need arise,
3227 but at the moment we don't create them. */
3228 tree t = gimple_assign_rhs2 (stmt);
3229 tree f = gimple_assign_rhs3 (stmt);
3230 bool inv;
3231 if (integer_all_onesp (t))
3232 inv = false;
3233 else if (integer_all_onesp (f))
3235 cmp = invert_tree_comparison (cmp, false);
3236 inv = true;
3238 else
3239 return ERROR_MARK;
3240 if (!integer_zerop (f))
3241 return ERROR_MARK;
3243 /* Success! */
3244 if (rets)
3245 *rets = stmt;
3246 if (reti)
3247 *reti = inv;
3248 return cmp;
3251 /* Optimize the condition of VEC_COND_EXPRs which have been combined
3252 with OPCODE (either BIT_AND_EXPR or BIT_IOR_EXPR). */
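/* E.g. (illustrative): with m_1 = VEC_COND_EXPR <x < y, { -1,... }, { 0,... }>
   and m_2 = VEC_COND_EXPR <x <= y, { -1,... }, { 0,... }> combined by
   BIT_AND_EXPR, maybe_fold_and_comparisons can merge the conditions,
   since (x < y) && (x <= y) is just x < y, leaving a single
   VEC_COND_EXPR.  */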
3254 static bool
3255 optimize_vec_cond_expr (tree_code opcode, vec<operand_entry *> *ops)
3257 unsigned int length = ops->length (), i, j;
3258 bool any_changes = false;
3260 if (length == 1)
3261 return false;
3263 for (i = 0; i < length; ++i)
3265 tree elt0 = (*ops)[i]->op;
3267 gassign *stmt0;
3268 bool invert;
3269 tree_code cmp0 = ovce_extract_ops (elt0, &stmt0, &invert);
3270 if (cmp0 == ERROR_MARK)
3271 continue;
3273 for (j = i + 1; j < length; ++j)
3275 tree &elt1 = (*ops)[j]->op;
3277 gassign *stmt1;
3278 tree_code cmp1 = ovce_extract_ops (elt1, &stmt1, NULL);
3279 if (cmp1 == ERROR_MARK)
3280 continue;
3282 tree cond0 = gimple_assign_rhs1 (stmt0);
3283 tree x0 = TREE_OPERAND (cond0, 0);
3284 tree y0 = TREE_OPERAND (cond0, 1);
3286 tree cond1 = gimple_assign_rhs1 (stmt1);
3287 tree x1 = TREE_OPERAND (cond1, 0);
3288 tree y1 = TREE_OPERAND (cond1, 1);
3290 tree comb;
3291 if (opcode == BIT_AND_EXPR)
3292 comb = maybe_fold_and_comparisons (cmp0, x0, y0, cmp1, x1, y1);
3293 else if (opcode == BIT_IOR_EXPR)
3294 comb = maybe_fold_or_comparisons (cmp0, x0, y0, cmp1, x1, y1);
3295 else
3296 gcc_unreachable ();
3297 if (comb == NULL)
3298 continue;
3300 /* Success! */
3301 if (dump_file && (dump_flags & TDF_DETAILS))
3303 fprintf (dump_file, "Transforming ");
3304 print_generic_expr (dump_file, cond0);
3305 fprintf (dump_file, " %c ", opcode == BIT_AND_EXPR ? '&' : '|');
3306 print_generic_expr (dump_file, cond1);
3307 fprintf (dump_file, " into ");
3308 print_generic_expr (dump_file, comb);
3309 fputc ('\n', dump_file);
3312 gimple_assign_set_rhs1 (stmt0, comb);
3313 if (invert)
3314 std::swap (*gimple_assign_rhs2_ptr (stmt0),
3315 *gimple_assign_rhs3_ptr (stmt0));
3316 update_stmt (stmt0);
3318 elt1 = error_mark_node;
3319 any_changes = true;
3323 if (any_changes)
3325 operand_entry *oe;
3326 j = 0;
3327 FOR_EACH_VEC_ELT (*ops, i, oe)
3329 if (oe->op == error_mark_node)
3330 continue;
3331 else if (i != j)
3332 (*ops)[j] = oe;
3333 j++;
3335 ops->truncate (j);
3338 return any_changes;
3341 /* Return true if STMT is a cast like:
3342 <bb N>:
3344 _123 = (int) _234;
3346 <bb M>:
3347 # _345 = PHI <_123(N), 1(...), 1(...)>
3348 where _234 has bool type, _123 has a single use and
3349 bb N has a single successor M. This is commonly used in
3350 the last block of a range test.
3352 Also return true if STMT is a tcc_comparison like:
3353 <bb N>:
3355 _234 = a_2(D) == 2;
3357 <bb M>:
3358 # _345 = PHI <_234(N), 1(...), 1(...)>
3359 _346 = (int) _345;
3360 where _234 has bool type, a single use and
3361 bb N has a single successor M. This is commonly used in
3362 the last block of a range test. */
3364 static bool
3365 final_range_test_p (gimple *stmt)
3367 basic_block bb, rhs_bb, lhs_bb;
3368 edge e;
3369 tree lhs, rhs;
3370 use_operand_p use_p;
3371 gimple *use_stmt;
3373 if (!gimple_assign_cast_p (stmt)
3374 && (!is_gimple_assign (stmt)
3375 || (TREE_CODE_CLASS (gimple_assign_rhs_code (stmt))
3376 != tcc_comparison)))
3377 return false;
3378 bb = gimple_bb (stmt);
3379 if (!single_succ_p (bb))
3380 return false;
3381 e = single_succ_edge (bb);
3382 if (e->flags & EDGE_COMPLEX)
3383 return false;
3385 lhs = gimple_assign_lhs (stmt);
3386 rhs = gimple_assign_rhs1 (stmt);
3387 if (gimple_assign_cast_p (stmt)
3388 && (!INTEGRAL_TYPE_P (TREE_TYPE (lhs))
3389 || TREE_CODE (rhs) != SSA_NAME
3390 || TREE_CODE (TREE_TYPE (rhs)) != BOOLEAN_TYPE))
3391 return false;
3393 if (!gimple_assign_cast_p (stmt)
3394 && (TREE_CODE (TREE_TYPE (lhs)) != BOOLEAN_TYPE))
3395 return false;
3397 /* Test whether lhs is consumed only by a PHI in the only successor bb. */
3398 if (!single_imm_use (lhs, &use_p, &use_stmt))
3399 return false;
3401 if (gimple_code (use_stmt) != GIMPLE_PHI
3402 || gimple_bb (use_stmt) != e->dest)
3403 return false;
3405 /* And that the rhs is defined in the same loop. */
3406 if (gimple_assign_cast_p (stmt))
3408 if (TREE_CODE (rhs) != SSA_NAME
3409 || !(rhs_bb = gimple_bb (SSA_NAME_DEF_STMT (rhs)))
3410 || !flow_bb_inside_loop_p (loop_containing_stmt (stmt), rhs_bb))
3411 return false;
3413 else
3415 if (TREE_CODE (lhs) != SSA_NAME
3416 || !(lhs_bb = gimple_bb (SSA_NAME_DEF_STMT (lhs)))
3417 || !flow_bb_inside_loop_p (loop_containing_stmt (stmt), lhs_bb))
3418 return false;
3421 return true;
3424 /* Return true if BB is suitable basic block for inter-bb range test
3425 optimization. If BACKWARD is true, BB should be the only predecessor
3426 of TEST_BB, and *OTHER_BB is either NULL and filled by the routine,
3427 or compared against to find a common basic block to which all conditions
3428 branch if true resp. false. If BACKWARD is false, TEST_BB should
3429 be the only predecessor of BB. */
3431 static bool
3432 suitable_cond_bb (basic_block bb, basic_block test_bb, basic_block *other_bb,
3433 bool backward)
3435 edge_iterator ei, ei2;
3436 edge e, e2;
3437 gimple *stmt;
3438 gphi_iterator gsi;
3439 bool other_edge_seen = false;
3440 bool is_cond;
3442 if (test_bb == bb)
3443 return false;
3444 /* Check last stmt first. */
3445 stmt = last_stmt (bb);
3446 if (stmt == NULL
3447 || (gimple_code (stmt) != GIMPLE_COND
3448 && (backward || !final_range_test_p (stmt)))
3449 || gimple_visited_p (stmt)
3450 || stmt_could_throw_p (stmt)
3451 || *other_bb == bb)
3452 return false;
3453 is_cond = gimple_code (stmt) == GIMPLE_COND;
3454 if (is_cond)
3456 /* If last stmt is GIMPLE_COND, verify that one of the succ edges
3457 goes to the next bb (if BACKWARD, it is TEST_BB), and the other
3458 to *OTHER_BB (if not set yet, try to find it out). */
3459 if (EDGE_COUNT (bb->succs) != 2)
3460 return false;
3461 FOR_EACH_EDGE (e, ei, bb->succs)
3463 if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
3464 return false;
3465 if (e->dest == test_bb)
3467 if (backward)
3468 continue;
3469 else
3470 return false;
3472 if (e->dest == bb)
3473 return false;
3474 if (*other_bb == NULL)
3476 FOR_EACH_EDGE (e2, ei2, test_bb->succs)
3477 if (!(e2->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
3478 return false;
3479 else if (e->dest == e2->dest)
3480 *other_bb = e->dest;
3481 if (*other_bb == NULL)
3482 return false;
3484 if (e->dest == *other_bb)
3485 other_edge_seen = true;
3486 else if (backward)
3487 return false;
3489 if (*other_bb == NULL || !other_edge_seen)
3490 return false;
3492 else if (single_succ (bb) != *other_bb)
3493 return false;
3495 /* Now check all PHIs of *OTHER_BB. */
3496 e = find_edge (bb, *other_bb);
3497 e2 = find_edge (test_bb, *other_bb);
3498 for (gsi = gsi_start_phis (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
3500 gphi *phi = gsi.phi ();
3501 /* If both BB and TEST_BB end with GIMPLE_COND, all PHI arguments
3502 corresponding to BB and TEST_BB predecessor must be the same. */
3503 if (!operand_equal_p (gimple_phi_arg_def (phi, e->dest_idx),
3504 gimple_phi_arg_def (phi, e2->dest_idx), 0))
3506 /* Otherwise, if one of the blocks doesn't end with GIMPLE_COND,
3507 one of the PHIs should have the lhs of the last stmt in
3508 that block as PHI arg and that PHI should have 0 or 1
3509 corresponding to it in all other range test basic blocks
3510 considered. */
3511 if (!is_cond)
3513 if (gimple_phi_arg_def (phi, e->dest_idx)
3514 == gimple_assign_lhs (stmt)
3515 && (integer_zerop (gimple_phi_arg_def (phi, e2->dest_idx))
3516 || integer_onep (gimple_phi_arg_def (phi,
3517 e2->dest_idx))))
3518 continue;
3520 else
3522 gimple *test_last = last_stmt (test_bb);
3523 if (gimple_code (test_last) != GIMPLE_COND
3524 && gimple_phi_arg_def (phi, e2->dest_idx)
3525 == gimple_assign_lhs (test_last)
3526 && (integer_zerop (gimple_phi_arg_def (phi, e->dest_idx))
3527 || integer_onep (gimple_phi_arg_def (phi, e->dest_idx))))
3528 continue;
3531 return false;
3534 return true;
3537 /* Return true if BB doesn't have side-effects that would disallow
3538 range test optimization, all SSA_NAMEs set in the bb are consumed
3539 in the bb and there are no PHIs. */
3541 static bool
3542 no_side_effect_bb (basic_block bb)
3544 gimple_stmt_iterator gsi;
3545 gimple *last;
3547 if (!gimple_seq_empty_p (phi_nodes (bb)))
3548 return false;
3549 last = last_stmt (bb);
3550 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3552 gimple *stmt = gsi_stmt (gsi);
3553 tree lhs;
3554 imm_use_iterator imm_iter;
3555 use_operand_p use_p;
3557 if (is_gimple_debug (stmt))
3558 continue;
3559 if (gimple_has_side_effects (stmt))
3560 return false;
3561 if (stmt == last)
3562 return true;
3563 if (!is_gimple_assign (stmt))
3564 return false;
3565 lhs = gimple_assign_lhs (stmt);
3566 if (TREE_CODE (lhs) != SSA_NAME)
3567 return false;
3568 if (gimple_assign_rhs_could_trap_p (stmt))
3569 return false;
3570 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
3572 gimple *use_stmt = USE_STMT (use_p);
3573 if (is_gimple_debug (use_stmt))
3574 continue;
3575 if (gimple_bb (use_stmt) != bb)
3576 return false;
3579 return false;
3582 /* If VAR is set by CODE (BIT_{AND,IOR}_EXPR) which is reassociable,
3583 return true and fill in *OPS recursively. */
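/* E.g. (illustrative, assuming single uses throughout): for
     _3 = a_1 | b_2;  _5 = _3 | _4;
   with CODE == BIT_IOR_EXPR, the recursion flattens the tree and
   pushes { a_1, b_2, _4 } into *OPS, marking both defining stmts
   visited.  */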
3585 static bool
3586 get_ops (tree var, enum tree_code code, vec<operand_entry *> *ops,
3587 struct loop *loop)
3589 gimple *stmt = SSA_NAME_DEF_STMT (var);
3590 tree rhs[2];
3591 int i;
3593 if (!is_reassociable_op (stmt, code, loop))
3594 return false;
3596 rhs[0] = gimple_assign_rhs1 (stmt);
3597 rhs[1] = gimple_assign_rhs2 (stmt);
3598 gimple_set_visited (stmt, true);
3599 for (i = 0; i < 2; i++)
3600 if (TREE_CODE (rhs[i]) == SSA_NAME
3601 && !get_ops (rhs[i], code, ops, loop)
3602 && has_single_use (rhs[i]))
3604 operand_entry *oe = operand_entry_pool.allocate ();
3606 oe->op = rhs[i];
3607 oe->rank = code;
3608 oe->id = 0;
3609 oe->count = 1;
3610 oe->stmt_to_insert = NULL;
3611 ops->safe_push (oe);
3613 return true;
3616 /* Find the ops that were added by get_ops starting from VAR; see if
3617 they were changed during update_range_test and if yes, create new
3618 stmts. */
3620 static tree
3621 update_ops (tree var, enum tree_code code, vec<operand_entry *> ops,
3622 unsigned int *pidx, struct loop *loop)
3624 gimple *stmt = SSA_NAME_DEF_STMT (var);
3625 tree rhs[4];
3626 int i;
3628 if (!is_reassociable_op (stmt, code, loop))
3629 return NULL;
3631 rhs[0] = gimple_assign_rhs1 (stmt);
3632 rhs[1] = gimple_assign_rhs2 (stmt);
3633 rhs[2] = rhs[0];
3634 rhs[3] = rhs[1];
3635 for (i = 0; i < 2; i++)
3636 if (TREE_CODE (rhs[i]) == SSA_NAME)
3638 rhs[2 + i] = update_ops (rhs[i], code, ops, pidx, loop);
3639 if (rhs[2 + i] == NULL_TREE)
3641 if (has_single_use (rhs[i]))
3642 rhs[2 + i] = ops[(*pidx)++]->op;
3643 else
3644 rhs[2 + i] = rhs[i];
3647 if ((rhs[2] != rhs[0] || rhs[3] != rhs[1])
3648 && (rhs[2] != rhs[1] || rhs[3] != rhs[0]))
3650 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
3651 var = make_ssa_name (TREE_TYPE (var));
3652 gassign *g = gimple_build_assign (var, gimple_assign_rhs_code (stmt),
3653 rhs[2], rhs[3]);
3654 gimple_set_uid (g, gimple_uid (stmt));
3655 gimple_set_visited (g, true);
3656 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
3658 return var;
3661 /* Structure to track the initial value passed to get_ops and
3662 the range in the ops vector for each basic block. */
3664 struct inter_bb_range_test_entry
3666 tree op;
3667 unsigned int first_idx, last_idx;
3670 /* Inter-bb range test optimization.
3672 Return TRUE if a gimple conditional is optimized to a constant
3673 true/false, otherwise return FALSE.
3675 This indicates to the caller that it should run a CFG cleanup pass
3676 once reassociation is completed. */
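/* An illustrative source-level shape of what this routine matches
   (assuming the usual GIMPLE lowering of short circuits): for
     if (a == 3 || a == 5 || a == 7) foo ();
   each comparison ends its own bb with a GIMPLE_COND, the true edges
   meet in one other_bb, and the conditions of first_bb .. last_bb are
   gathered into a single ops vector for optimize_range_tests.  */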
3678 static bool
3679 maybe_optimize_range_tests (gimple *stmt)
3681 basic_block first_bb = gimple_bb (stmt);
3682 basic_block last_bb = first_bb;
3683 basic_block other_bb = NULL;
3684 basic_block bb;
3685 edge_iterator ei;
3686 edge e;
3687 auto_vec<operand_entry *> ops;
3688 auto_vec<inter_bb_range_test_entry> bbinfo;
3689 bool any_changes = false;
3690 bool cfg_cleanup_needed = false;
3692 /* Consider only basic blocks that end with GIMPLE_COND or
3693 a cast statement satisfying final_range_test_p. All
3694 but the last bb in the first_bb .. last_bb range
3695 should end with GIMPLE_COND. */
3696 if (gimple_code (stmt) == GIMPLE_COND)
3698 if (EDGE_COUNT (first_bb->succs) != 2)
3699 return cfg_cleanup_needed;
3701 else if (final_range_test_p (stmt))
3702 other_bb = single_succ (first_bb);
3703 else
3704 return cfg_cleanup_needed;
3706 if (stmt_could_throw_p (stmt))
3707 return cfg_cleanup_needed;
3709 /* As relative ordering of post-dominator sons isn't fixed,
3710 maybe_optimize_range_tests can be called first on any
3711 bb in the range we want to optimize. So, start searching
3712 backwards, to see if first_bb can be set to a predecessor.
3713 while (single_pred_p (first_bb))
3715 basic_block pred_bb = single_pred (first_bb);
3716 if (!suitable_cond_bb (pred_bb, first_bb, &other_bb, true))
3717 break;
3718 if (!no_side_effect_bb (first_bb))
3719 break;
3720 first_bb = pred_bb;
3722 /* If first_bb is last_bb, other_bb hasn't been computed yet.
3723 Before starting forward search in last_bb successors, find
3724 out the other_bb. */
3725 if (first_bb == last_bb)
3727 other_bb = NULL;
3728 /* As non-GIMPLE_COND last stmt always terminates the range,
3729 if forward search didn't discover anything, just give up. */
3730 if (gimple_code (stmt) != GIMPLE_COND)
3731 return cfg_cleanup_needed;
3732 /* Look at both successors. Either the successor ends with a
3733 GIMPLE_COND and satisfies suitable_cond_bb, or it ends with
3734 a cast and other_bb is that cast's successor. */
3735 FOR_EACH_EDGE (e, ei, first_bb->succs)
3736 if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE))
3737 || e->dest == first_bb)
3738 return cfg_cleanup_needed;
3739 else if (single_pred_p (e->dest))
3741 stmt = last_stmt (e->dest);
3742 if (stmt
3743 && gimple_code (stmt) == GIMPLE_COND
3744 && EDGE_COUNT (e->dest->succs) == 2)
3746 if (suitable_cond_bb (first_bb, e->dest, &other_bb, true))
3747 break;
3748 else
3749 other_bb = NULL;
3751 else if (stmt
3752 && final_range_test_p (stmt)
3753 && find_edge (first_bb, single_succ (e->dest)))
3755 other_bb = single_succ (e->dest);
3756 if (other_bb == first_bb)
3757 other_bb = NULL;
3760 if (other_bb == NULL)
3761 return cfg_cleanup_needed;
3763 /* Now do the forward search, moving last_bb to successor bbs
3764 that aren't other_bb. */
3765 while (EDGE_COUNT (last_bb->succs) == 2)
3767 FOR_EACH_EDGE (e, ei, last_bb->succs)
3768 if (e->dest != other_bb)
3769 break;
3770 if (e == NULL)
3771 break;
3772 if (!single_pred_p (e->dest))
3773 break;
3774 if (!suitable_cond_bb (e->dest, last_bb, &other_bb, false))
3775 break;
3776 if (!no_side_effect_bb (e->dest))
3777 break;
3778 last_bb = e->dest;
3780 if (first_bb == last_bb)
3781 return cfg_cleanup_needed;
3782 /* Here basic blocks first_bb through last_bb's predecessor all
3783 end with GIMPLE_COND; each of them has one edge to other_bb
3784 and another to the next block in the range; all blocks except
3785 first_bb have no side-effects; and last_bb ends with either a
3786 GIMPLE_COND or a cast satisfying
3787 final_range_test_p. */
3788 for (bb = last_bb; ; bb = single_pred (bb))
3790 enum tree_code code;
3791 tree lhs, rhs;
3792 inter_bb_range_test_entry bb_ent;
3794 bb_ent.op = NULL_TREE;
3795 bb_ent.first_idx = ops.length ();
3796 bb_ent.last_idx = bb_ent.first_idx;
3797 e = find_edge (bb, other_bb);
3798 stmt = last_stmt (bb);
3799 gimple_set_visited (stmt, true);
3800 if (gimple_code (stmt) != GIMPLE_COND)
3802 use_operand_p use_p;
3803 gimple *phi;
3804 edge e2;
3805 unsigned int d;
3807 lhs = gimple_assign_lhs (stmt);
3808 rhs = gimple_assign_rhs1 (stmt);
3809 gcc_assert (bb == last_bb);
3811 /* stmt is
3812 _123 = (int) _234;
3814 _234 = a_2(D) == 2;
3816 followed by:
3817 <bb M>:
3818 # _345 = PHI <_123(N), 1(...), 1(...)>
3820 or 0 instead of 1. If it is 0, the _234
3821 range test is anded together with all the
3822 other range tests; if it is 1, it is ored with
3823 them. */
3824 single_imm_use (lhs, &use_p, &phi);
3825 gcc_assert (gimple_code (phi) == GIMPLE_PHI);
3826 e2 = find_edge (first_bb, other_bb);
3827 d = e2->dest_idx;
3828 gcc_assert (gimple_phi_arg_def (phi, e->dest_idx) == lhs);
3829 if (integer_zerop (gimple_phi_arg_def (phi, d)))
3830 code = BIT_AND_EXPR;
3831 else
3833 gcc_checking_assert (integer_onep (gimple_phi_arg_def (phi, d)));
3834 code = BIT_IOR_EXPR;
3837 /* If _234 SSA_NAME_DEF_STMT is
3838 _234 = _567 | _789;
3839 (or &, corresponding to 1/0 in the phi arguments),
3840 push into ops the individual range test arguments
3841 of the bitwise or resp. and, recursively. */
3842 if (TREE_CODE (rhs) == SSA_NAME
3843 && (TREE_CODE_CLASS (gimple_assign_rhs_code (stmt))
3844 != tcc_comparison)
3845 && !get_ops (rhs, code, &ops,
3846 loop_containing_stmt (stmt))
3847 && has_single_use (rhs))
3849 /* Otherwise, push the _234 range test itself. */
3850 operand_entry *oe = operand_entry_pool.allocate ();
3852 oe->op = rhs;
3853 oe->rank = code;
3854 oe->id = 0;
3855 oe->count = 1;
3856 oe->stmt_to_insert = NULL;
3857 ops.safe_push (oe);
3858 bb_ent.last_idx++;
3859 bb_ent.op = rhs;
3861 else if (is_gimple_assign (stmt)
3862 && (TREE_CODE_CLASS (gimple_assign_rhs_code (stmt))
3863 == tcc_comparison)
3864 && !get_ops (lhs, code, &ops,
3865 loop_containing_stmt (stmt))
3866 && has_single_use (lhs))
3868 operand_entry *oe = operand_entry_pool.allocate ();
3869 oe->op = lhs;
3870 oe->rank = code;
3871 oe->id = 0;
3872 oe->count = 1;
3873 ops.safe_push (oe);
3874 bb_ent.last_idx++;
3875 bb_ent.op = lhs;
3877 else
3879 bb_ent.last_idx = ops.length ();
3880 bb_ent.op = rhs;
3882 bbinfo.safe_push (bb_ent);
3883 continue;
3885 /* Otherwise stmt is GIMPLE_COND. */
3886 code = gimple_cond_code (stmt);
3887 lhs = gimple_cond_lhs (stmt);
3888 rhs = gimple_cond_rhs (stmt);
3889 if (TREE_CODE (lhs) == SSA_NAME
3890 && INTEGRAL_TYPE_P (TREE_TYPE (lhs))
3891 && ((code != EQ_EXPR && code != NE_EXPR)
3892 || rhs != boolean_false_node
3893 /* Either push into ops the individual bitwise
3894 or resp. and operands, depending on which
3895 edge is other_bb. */
3896 || !get_ops (lhs, (((e->flags & EDGE_TRUE_VALUE) == 0)
3897 ^ (code == EQ_EXPR))
3898 ? BIT_AND_EXPR : BIT_IOR_EXPR, &ops,
3899 loop_containing_stmt (stmt))))
3901 /* Or push the GIMPLE_COND stmt itself. */
3902 operand_entry *oe = operand_entry_pool.allocate ();
3904 oe->op = NULL;
3905 oe->rank = (e->flags & EDGE_TRUE_VALUE)
3906 ? BIT_IOR_EXPR : BIT_AND_EXPR;
3907 /* oe->op = NULL signals that there is no SSA_NAME
3908 for the range test, and oe->id instead is the
3909 number of the basic block at whose end the
3910 GIMPLE_COND is. */
3911 oe->id = bb->index;
3912 oe->count = 1;
3913 oe->stmt_to_insert = NULL;
3914 ops.safe_push (oe);
3915 bb_ent.op = NULL;
3916 bb_ent.last_idx++;
3918 else if (ops.length () > bb_ent.first_idx)
3920 bb_ent.op = lhs;
3921 bb_ent.last_idx = ops.length ();
3923 bbinfo.safe_push (bb_ent);
3924 if (bb == first_bb)
3925 break;
3927 if (ops.length () > 1)
3928 any_changes = optimize_range_tests (ERROR_MARK, &ops);
3929 if (any_changes)
3931 unsigned int idx, max_idx = 0;
3932 /* update_ops relies on has_single_use predicates returning the
3933 same values as they did during get_ops earlier. Additionally it
3934 never removes statements, only adds new ones and it should walk
3935 from the single imm use and check the predicate already before
3936 making those changes.
3937 On the other side, the handling of GIMPLE_COND directly can turn
3938 previously multiply used SSA_NAMEs into single use SSA_NAMEs, so
3939 it needs to be done in a separate loop afterwards. */
3940 for (bb = last_bb, idx = 0; ; bb = single_pred (bb), idx++)
3942 if (bbinfo[idx].first_idx < bbinfo[idx].last_idx
3943 && bbinfo[idx].op != NULL_TREE)
3945 tree new_op;
3947 max_idx = idx;
3948 stmt = last_stmt (bb);
3949 new_op = update_ops (bbinfo[idx].op,
3950 (enum tree_code)
3951 ops[bbinfo[idx].first_idx]->rank,
3952 ops, &bbinfo[idx].first_idx,
3953 loop_containing_stmt (stmt));
3954 if (new_op == NULL_TREE)
3956 gcc_assert (bb == last_bb);
3957 new_op = ops[bbinfo[idx].first_idx++]->op;
3959 if (bbinfo[idx].op != new_op)
3961 imm_use_iterator iter;
3962 use_operand_p use_p;
3963 gimple *use_stmt, *cast_or_tcc_cmp_stmt = NULL;
3965 FOR_EACH_IMM_USE_STMT (use_stmt, iter, bbinfo[idx].op)
3966 if (is_gimple_debug (use_stmt))
3967 continue;
3968 else if (gimple_code (use_stmt) == GIMPLE_COND
3969 || gimple_code (use_stmt) == GIMPLE_PHI)
3970 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
3971 SET_USE (use_p, new_op);
3972 else if ((is_gimple_assign (use_stmt)
3973 && (TREE_CODE_CLASS
3974 (gimple_assign_rhs_code (use_stmt))
3975 == tcc_comparison)))
3976 cast_or_tcc_cmp_stmt = use_stmt;
3977 else if (gimple_assign_cast_p (use_stmt))
3978 cast_or_tcc_cmp_stmt = use_stmt;
3979 else
3980 gcc_unreachable ();
3982 if (cast_or_tcc_cmp_stmt)
3984 gcc_assert (bb == last_bb);
3985 tree lhs = gimple_assign_lhs (cast_or_tcc_cmp_stmt);
3986 tree new_lhs = make_ssa_name (TREE_TYPE (lhs));
3987 enum tree_code rhs_code
3988 = gimple_assign_cast_p (cast_or_tcc_cmp_stmt)
3989 ? gimple_assign_rhs_code (cast_or_tcc_cmp_stmt)
3990 : CONVERT_EXPR;
3991 gassign *g;
3992 if (is_gimple_min_invariant (new_op))
3994 new_op = fold_convert (TREE_TYPE (lhs), new_op);
3995 g = gimple_build_assign (new_lhs, new_op);
3997 else
3998 g = gimple_build_assign (new_lhs, rhs_code, new_op);
3999 gimple_stmt_iterator gsi
4000 = gsi_for_stmt (cast_or_tcc_cmp_stmt);
4001 gimple_set_uid (g, gimple_uid (cast_or_tcc_cmp_stmt));
4002 gimple_set_visited (g, true);
4003 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
4004 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
4005 if (is_gimple_debug (use_stmt))
4006 continue;
4007 else if (gimple_code (use_stmt) == GIMPLE_COND
4008 || gimple_code (use_stmt) == GIMPLE_PHI)
4009 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
4010 SET_USE (use_p, new_lhs);
4011 else
4012 gcc_unreachable ();
4016 if (bb == first_bb)
4017 break;
4019 for (bb = last_bb, idx = 0; ; bb = single_pred (bb), idx++)
4021 if (bbinfo[idx].first_idx < bbinfo[idx].last_idx
4022 && bbinfo[idx].op == NULL_TREE
4023 && ops[bbinfo[idx].first_idx]->op != NULL_TREE)
4025 gcond *cond_stmt = as_a <gcond *> (last_stmt (bb));
4027 if (idx > max_idx)
4028 max_idx = idx;
4030 /* If we collapse the conditional to a true/false
4031 condition, then bubble that knowledge up to our caller. */
4032 if (integer_zerop (ops[bbinfo[idx].first_idx]->op))
4034 gimple_cond_make_false (cond_stmt);
4035 cfg_cleanup_needed = true;
4037 else if (integer_onep (ops[bbinfo[idx].first_idx]->op))
4039 gimple_cond_make_true (cond_stmt);
4040 cfg_cleanup_needed = true;
4042 else
4044 gimple_cond_set_code (cond_stmt, NE_EXPR);
4045 gimple_cond_set_lhs (cond_stmt,
4046 ops[bbinfo[idx].first_idx]->op);
4047 gimple_cond_set_rhs (cond_stmt, boolean_false_node);
4049 update_stmt (cond_stmt);
4051 if (bb == first_bb)
4052 break;
4055 /* The above changes could result in basic blocks after the first
4056 modified one, up to and including last_bb, being executed even when
4057 they would not have been in the original program. If the value ranges
4058 of assignment lhs' in those bbs were dependent on the conditions
4059 guarding those basic blocks, which now can change, the VRs might
4060 be incorrect. As no_side_effect_bb should ensure those SSA_NAMEs
4061 are only used within the same bb, it should not be a big deal if
4062 we just reset all the VRs in those bbs. See PR68671. */
4063 for (bb = last_bb, idx = 0; idx < max_idx; bb = single_pred (bb), idx++)
4064 reset_flow_sensitive_info_in_bb (bb);
4066 return cfg_cleanup_needed;
4069 /* Return true if OPERAND is defined by a PHI node which uses the LHS
4070 of STMT in its operands. This is also known as a "destructive
4071 update" operation. */
4073 static bool
4074 is_phi_for_stmt (gimple *stmt, tree operand)
4076 gimple *def_stmt;
4077 gphi *def_phi;
4078 tree lhs;
4079 use_operand_p arg_p;
4080 ssa_op_iter i;
4082 if (TREE_CODE (operand) != SSA_NAME)
4083 return false;
4085 lhs = gimple_assign_lhs (stmt);
4087 def_stmt = SSA_NAME_DEF_STMT (operand);
4088 def_phi = dyn_cast <gphi *> (def_stmt);
4089 if (!def_phi)
4090 return false;
4092 FOR_EACH_PHI_ARG (arg_p, def_phi, i, SSA_OP_USE)
4093 if (lhs == USE_FROM_PTR (arg_p))
4094 return true;
4095 return false;
4098 /* Remove def stmt of VAR if VAR has zero uses and recurse
4099 on rhs1 operand if so. */
4101 static void
4102 remove_visited_stmt_chain (tree var)
4104 gimple *stmt;
4105 gimple_stmt_iterator gsi;
4107 while (1)
4109 if (TREE_CODE (var) != SSA_NAME || !has_zero_uses (var))
4110 return;
4111 stmt = SSA_NAME_DEF_STMT (var);
4112 if (is_gimple_assign (stmt) && gimple_visited_p (stmt))
4114 var = gimple_assign_rhs1 (stmt);
4115 gsi = gsi_for_stmt (stmt);
4116 reassoc_remove_stmt (&gsi);
4117 release_defs (stmt);
4119 else
4120 return;
4124 /* This function checks three consecutive operands in the
4125 passed operands vector OPS starting from OPINDEX and
4126 swaps two operands if it is profitable for the binary operation
4127 consuming the OPINDEX + 1 and OPINDEX + 2 operands.
4129 We pair ops with the same rank if possible.
4131 The alternative we try is to see if STMT is a destructive
4132 update style statement, which is like:
4133 b = phi (a, ...)
4134 a = c + b;
4135 In that case, we want to use the destructive update form to
4136 expose the possible vectorizer sum reduction opportunity.
4137 In that case, the third operand will be the phi node. This
4138 check is not performed if STMT is null.
4140 We could, of course, try to be better as noted above, and do a
4141 lot of work to try to find these opportunities in >3 operand
4142 cases, but it is unlikely to be worth it. */
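/* For example, given OPS = { a (rank 3), b (rank 3), c (rank 2) }
   and OPINDEX = 0, oe1/a and oe2/b share a rank that oe3/c does not,
   so a and c are swapped, leaving { c, b, a }: the equal-ranked pair
   b and a is then consumed together by the innermost statement, which
   is what exposes it to the redundancy eliminator. */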
4144 static void
4145 swap_ops_for_binary_stmt (vec<operand_entry *> ops,
4146 unsigned int opindex, gimple *stmt)
4148 operand_entry *oe1, *oe2, *oe3;
4150 oe1 = ops[opindex];
4151 oe2 = ops[opindex + 1];
4152 oe3 = ops[opindex + 2];
4154 if ((oe1->rank == oe2->rank
4155 && oe2->rank != oe3->rank)
4156 || (stmt && is_phi_for_stmt (stmt, oe3->op)
4157 && !is_phi_for_stmt (stmt, oe1->op)
4158 && !is_phi_for_stmt (stmt, oe2->op)))
4159 std::swap (*oe1, *oe3);
4160 else if ((oe1->rank == oe3->rank
4161 && oe2->rank != oe3->rank)
4162 || (stmt && is_phi_for_stmt (stmt, oe2->op)
4163 && !is_phi_for_stmt (stmt, oe1->op)
4164 && !is_phi_for_stmt (stmt, oe3->op)))
4165 std::swap (*oe1, *oe2);
4168 /* If the definition of RHS1 or RHS2 dominates STMT, return the later of
4169 those two definitions; otherwise return STMT. */
4171 static inline gimple *
4172 find_insert_point (gimple *stmt, tree rhs1, tree rhs2)
4174 if (TREE_CODE (rhs1) == SSA_NAME
4175 && reassoc_stmt_dominates_stmt_p (stmt, SSA_NAME_DEF_STMT (rhs1)))
4176 stmt = SSA_NAME_DEF_STMT (rhs1);
4177 if (TREE_CODE (rhs2) == SSA_NAME
4178 && reassoc_stmt_dominates_stmt_p (stmt, SSA_NAME_DEF_STMT (rhs2)))
4179 stmt = SSA_NAME_DEF_STMT (rhs2);
4180 return stmt;
4183 /* If the stmt that defines operand has to be inserted, insert it
4184 before the use. */
4185 static void
4186 insert_stmt_before_use (gimple *stmt, gimple *stmt_to_insert)
4188 gcc_assert (is_gimple_assign (stmt_to_insert));
4189 tree rhs1 = gimple_assign_rhs1 (stmt_to_insert);
4190 tree rhs2 = gimple_assign_rhs2 (stmt_to_insert);
4191 gimple *insert_point = find_insert_point (stmt, rhs1, rhs2);
4192 gimple_stmt_iterator gsi = gsi_for_stmt (insert_point);
4193 gimple_set_uid (stmt_to_insert, gimple_uid (insert_point));
4195 /* If the insert point is not stmt, then insert_point would be
4196 the point where operand rhs1 or rhs2 is defined. In this case,
4197 stmt_to_insert has to be inserted afterwards. This would
4198 only happen when the stmt insertion point is flexible. */
4199 if (stmt == insert_point)
4200 gsi_insert_before (&gsi, stmt_to_insert, GSI_NEW_STMT);
4201 else
4202 insert_stmt_after (stmt_to_insert, insert_point);
4206 /* Recursively rewrite our linearized statements so that the operators
4207 match those in OPS[OPINDEX], putting the computation in rank
4208 order. Return new lhs. */
4210 static tree
4211 rewrite_expr_tree (gimple *stmt, unsigned int opindex,
4212 vec<operand_entry *> ops, bool changed)
4214 tree rhs1 = gimple_assign_rhs1 (stmt);
4215 tree rhs2 = gimple_assign_rhs2 (stmt);
4216 tree lhs = gimple_assign_lhs (stmt);
4217 operand_entry *oe;
4219 /* The final recursion case for this function is that you have
4220 exactly two operations left.
4221 If we had exactly one op in the entire list to start with, we
4222 would have never called this function, and the tail recursion
4223 rewrites them one at a time. */
4224 if (opindex + 2 == ops.length ())
4226 operand_entry *oe1, *oe2;
4228 oe1 = ops[opindex];
4229 oe2 = ops[opindex + 1];
4231 if (rhs1 != oe1->op || rhs2 != oe2->op)
4233 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
4234 unsigned int uid = gimple_uid (stmt);
4236 if (dump_file && (dump_flags & TDF_DETAILS))
4238 fprintf (dump_file, "Transforming ");
4239 print_gimple_stmt (dump_file, stmt, 0);
4242 /* If the stmt that defines operand has to be inserted, insert it
4243 before the use. */
4244 if (oe1->stmt_to_insert)
4245 insert_stmt_before_use (stmt, oe1->stmt_to_insert);
4246 if (oe2->stmt_to_insert)
4247 insert_stmt_before_use (stmt, oe2->stmt_to_insert);
4248 /* Even when changed is false, reassociation could have e.g. removed
4249 some redundant operations, so unless we are just swapping the
4250 arguments or unless there is no change at all (then we just
4251 return lhs), force creation of a new SSA_NAME. */
4252 if (changed || ((rhs1 != oe2->op || rhs2 != oe1->op) && opindex))
4254 gimple *insert_point
4255 = find_insert_point (stmt, oe1->op, oe2->op);
4256 lhs = make_ssa_name (TREE_TYPE (lhs));
4257 stmt
4258 = gimple_build_assign (lhs, gimple_assign_rhs_code (stmt),
4259 oe1->op, oe2->op);
4260 gimple_set_uid (stmt, uid);
4261 gimple_set_visited (stmt, true);
4262 if (insert_point == gsi_stmt (gsi))
4263 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4264 else
4265 insert_stmt_after (stmt, insert_point);
4267 else
4269 gcc_checking_assert (find_insert_point (stmt, oe1->op, oe2->op)
4270 == stmt);
4271 gimple_assign_set_rhs1 (stmt, oe1->op);
4272 gimple_assign_set_rhs2 (stmt, oe2->op);
4273 update_stmt (stmt);
4276 if (rhs1 != oe1->op && rhs1 != oe2->op)
4277 remove_visited_stmt_chain (rhs1);
4279 if (dump_file && (dump_flags & TDF_DETAILS))
4281 fprintf (dump_file, " into ");
4282 print_gimple_stmt (dump_file, stmt, 0);
4285 return lhs;
4288 /* If we hit here, we should have 3 or more ops left. */
4289 gcc_assert (opindex + 2 < ops.length ());
4291 /* Rewrite the next operator. */
4292 oe = ops[opindex];
4294 /* If the stmt that defines operand has to be inserted, insert it
4295 before the use. */
4296 if (oe->stmt_to_insert)
4297 insert_stmt_before_use (stmt, oe->stmt_to_insert);
4299 /* Recurse on the LHS of the binary operator, which is guaranteed to
4300 be the non-leaf side. */
4301 tree new_rhs1
4302 = rewrite_expr_tree (SSA_NAME_DEF_STMT (rhs1), opindex + 1, ops,
4303 changed || oe->op != rhs2);
4305 if (oe->op != rhs2 || new_rhs1 != rhs1)
4307 if (dump_file && (dump_flags & TDF_DETAILS))
4309 fprintf (dump_file, "Transforming ");
4310 print_gimple_stmt (dump_file, stmt, 0);
4313 /* If changed is false, this is either opindex == 0
4314 or all outer rhs2's were equal to corresponding oe->op,
4315 and powi_result is NULL.
4316 That means lhs is equivalent before and after reassociation.
4317 Otherwise ensure the old lhs SSA_NAME is not reused and
4318 create a new stmt as well, so that any debug stmts will be
4319 properly adjusted. */
4320 if (changed)
4322 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
4323 unsigned int uid = gimple_uid (stmt);
4324 gimple *insert_point = find_insert_point (stmt, new_rhs1, oe->op);
4326 lhs = make_ssa_name (TREE_TYPE (lhs));
4327 stmt = gimple_build_assign (lhs, gimple_assign_rhs_code (stmt),
4328 new_rhs1, oe->op);
4329 gimple_set_uid (stmt, uid);
4330 gimple_set_visited (stmt, true);
4331 if (insert_point == gsi_stmt (gsi))
4332 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4333 else
4334 insert_stmt_after (stmt, insert_point);
4336 else
4338 gcc_checking_assert (find_insert_point (stmt, new_rhs1, oe->op)
4339 == stmt);
4340 gimple_assign_set_rhs1 (stmt, new_rhs1);
4341 gimple_assign_set_rhs2 (stmt, oe->op);
4342 update_stmt (stmt);
4345 if (dump_file && (dump_flags & TDF_DETAILS))
4347 fprintf (dump_file, " into ");
4348 print_gimple_stmt (dump_file, stmt, 0);
4351 return lhs;
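/* As a small worked example of the recursion above (names are
   illustrative only): with OPS = { a, b, c } and the linearized chain
     t1 = x + y;
     t2 = t1 + z;
   the root t2 receives ops[0] as its rhs2 and we recurse into t1,
   which receives the final two operands, so the rewritten chain is
     t1 = b + c;
     t2 = t1 + a;
   modulo the fresh SSA names created when CHANGED forces them. */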
4354 /* Find out how many cycles we need to compute the statement chain.
4355 OPS_NUM holds the number of statements in the chain. CPU_WIDTH is
4356 the maximum number of independent statements we may execute per cycle. */
4358 static int
4359 get_required_cycles (int ops_num, int cpu_width)
4361 int res;
4362 int elog;
4363 unsigned int rest;
4365 /* While we have more than 2 * cpu_width operands
4366 we may reduce the number of operands by cpu_width
4367 per cycle. */
4368 res = ops_num / (2 * cpu_width);
4370 /* The remaining operand count can be halved each cycle
4371 until we have only one operand left. */
4372 rest = (unsigned)(ops_num - res * cpu_width);
4373 elog = exact_log2 (rest);
4374 if (elog >= 0)
4375 res += elog;
4376 else
4377 res += floor_log2 (rest) + 1;
4379 return res;
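/* A worked example: for ops_num = 10 and cpu_width = 2, the first
   10 / (2 * 2) = 2 cycles each retire cpu_width operands, leaving
   rest = 10 - 2 * 2 = 6 values; halving those takes
   floor_log2 (6) + 1 = 3 further cycles, so 5 cycles in total. */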
4382 /* Returns an optimal number of registers to use for computation of
4383 given statements. */
4385 static int
4386 get_reassociation_width (int ops_num, enum tree_code opc,
4387 machine_mode mode)
4389 int param_width = PARAM_VALUE (PARAM_TREE_REASSOC_WIDTH);
4390 int width;
4391 int width_min;
4392 int cycles_best;
4394 if (param_width > 0)
4395 width = param_width;
4396 else
4397 width = targetm.sched.reassociation_width (opc, mode);
4399 if (width == 1)
4400 return width;
4402 /* Get the minimal time required for sequence computation. */
4403 cycles_best = get_required_cycles (ops_num, width);
4405 /* Check if we may use a smaller width and still compute the
4406 sequence in the same time. That allows us to reduce register
4407 usage. get_required_cycles is monotonically increasing with
4408 lower width so we can perform a binary search for the minimal
4409 width that still results in the optimal cycle count. */
4410 width_min = 1;
4411 while (width > width_min)
4413 int width_mid = (width + width_min) / 2;
4415 if (get_required_cycles (ops_num, width_mid) == cycles_best)
4416 width = width_mid;
4417 else if (width_min < width_mid)
4418 width_min = width_mid;
4419 else
4420 break;
4423 return width;
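/* For instance, with 6 operands and a target-reported width of 4,
   get_required_cycles yields 3 cycles at width 4 and still 3 at
   width 2 (but 5 at width 1), so the search settles on width 2,
   which keeps fewer values live for the same latency. */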
4426 /* Recursively rewrite our linearized statements so that the operators
4427 match those in OPS[OPINDEX], putting the computation in rank
4428 order and trying to allow operations to be executed in
4429 parallel. */
4431 static void
4432 rewrite_expr_tree_parallel (gassign *stmt, int width,
4433 vec<operand_entry *> ops)
4435 enum tree_code opcode = gimple_assign_rhs_code (stmt);
4436 int op_num = ops.length ();
4437 gcc_assert (op_num > 0);
4438 int stmt_num = op_num - 1;
4439 gimple **stmts = XALLOCAVEC (gimple *, stmt_num);
4440 int op_index = op_num - 1;
4441 int stmt_index = 0;
4442 int ready_stmts_end = 0;
4443 int i = 0;
4444 gimple *stmt1 = NULL, *stmt2 = NULL;
4445 tree last_rhs1 = gimple_assign_rhs1 (stmt);
4447 /* We start expression rewriting from the top statements.
4448 So, in this loop we create a full list of statements
4449 we will work with. */
4450 stmts[stmt_num - 1] = stmt;
4451 for (i = stmt_num - 2; i >= 0; i--)
4452 stmts[i] = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmts[i+1]));
4454 for (i = 0; i < stmt_num; i++)
4456 tree op1, op2;
4458 /* Determine whether we should use results of
4459 already handled statements or not. */
4460 if (ready_stmts_end == 0
4461 && (i - stmt_index >= width || op_index < 1))
4462 ready_stmts_end = i;
4464 /* Now we choose operands for the next statement. A nonzero
4465 value in ready_stmts_end means that we should use the results
4466 of already generated statements as new operands. */
4467 if (ready_stmts_end > 0)
4469 op1 = gimple_assign_lhs (stmts[stmt_index++]);
4470 if (ready_stmts_end > stmt_index)
4471 op2 = gimple_assign_lhs (stmts[stmt_index++]);
4472 else if (op_index >= 0)
4474 operand_entry *oe = ops[op_index--];
4475 stmt2 = oe->stmt_to_insert;
4476 op2 = oe->op;
4478 else
4480 gcc_assert (stmt_index < i);
4481 op2 = gimple_assign_lhs (stmts[stmt_index++]);
4484 if (stmt_index >= ready_stmts_end)
4485 ready_stmts_end = 0;
4487 else
4489 if (op_index > 1)
4490 swap_ops_for_binary_stmt (ops, op_index - 2, NULL);
4491 operand_entry *oe2 = ops[op_index--];
4492 operand_entry *oe1 = ops[op_index--];
4493 op2 = oe2->op;
4494 stmt2 = oe2->stmt_to_insert;
4495 op1 = oe1->op;
4496 stmt1 = oe1->stmt_to_insert;
4499 /* If we are about to emit the last statement, put the
4500 remaining operands into it. This will also
4501 break the loop. */
4502 if (op_index < 0 && stmt_index == i)
4503 i = stmt_num - 1;
4505 if (dump_file && (dump_flags & TDF_DETAILS))
4507 fprintf (dump_file, "Transforming ");
4508 print_gimple_stmt (dump_file, stmts[i], 0);
4511 /* If the stmt that defines operand has to be inserted, insert it
4512 before the use. */
4513 if (stmt1)
4514 insert_stmt_before_use (stmts[i], stmt1);
4515 if (stmt2)
4516 insert_stmt_before_use (stmts[i], stmt2);
4517 stmt1 = stmt2 = NULL;
4519 /* We keep original statement only for the last one. All
4520 others are recreated. */
4521 if (i == stmt_num - 1)
4523 gimple_assign_set_rhs1 (stmts[i], op1);
4524 gimple_assign_set_rhs2 (stmts[i], op2);
4525 update_stmt (stmts[i]);
4527 else
4529 stmts[i] = build_and_add_sum (TREE_TYPE (last_rhs1), op1, op2, opcode);
4531 if (dump_file && (dump_flags & TDF_DETAILS))
4533 fprintf (dump_file, " into ");
4534 print_gimple_stmt (dump_file, stmts[i], 0);
4538 remove_visited_stmt_chain (last_rhs1);
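/* A sketch of the shape this produces (illustrative names): for
   a + b + c + d + e + f with width 2, instead of one serial chain
   of five additions we emit roughly
     t1 = e + f;   t2 = c + d;
     t3 = t1 + t2; t4 = a + b;
     t5 = t3 + t4;
   so that two independent additions can issue each cycle and the
   critical path shrinks from 5 dependent statements to 3. */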
4541 /* Transform STMT, which is really (A + B) + (C + D), into the left
4542 linear form, ((A + B) + C) + D.
4543 Recurse on D if necessary. */
4545 static void
4546 linearize_expr (gimple *stmt)
4548 gimple_stmt_iterator gsi;
4549 gimple *binlhs = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
4550 gimple *binrhs = SSA_NAME_DEF_STMT (gimple_assign_rhs2 (stmt));
4551 gimple *oldbinrhs = binrhs;
4552 enum tree_code rhscode = gimple_assign_rhs_code (stmt);
4553 gimple *newbinrhs = NULL;
4554 struct loop *loop = loop_containing_stmt (stmt);
4555 tree lhs = gimple_assign_lhs (stmt);
4557 gcc_assert (is_reassociable_op (binlhs, rhscode, loop)
4558 && is_reassociable_op (binrhs, rhscode, loop));
4560 gsi = gsi_for_stmt (stmt);
4562 gimple_assign_set_rhs2 (stmt, gimple_assign_rhs1 (binrhs));
4563 binrhs = gimple_build_assign (make_ssa_name (TREE_TYPE (lhs)),
4564 gimple_assign_rhs_code (binrhs),
4565 gimple_assign_lhs (binlhs),
4566 gimple_assign_rhs2 (binrhs));
4567 gimple_assign_set_rhs1 (stmt, gimple_assign_lhs (binrhs));
4568 gsi_insert_before (&gsi, binrhs, GSI_SAME_STMT);
4569 gimple_set_uid (binrhs, gimple_uid (stmt));
4571 if (TREE_CODE (gimple_assign_rhs2 (stmt)) == SSA_NAME)
4572 newbinrhs = SSA_NAME_DEF_STMT (gimple_assign_rhs2 (stmt));
4574 if (dump_file && (dump_flags & TDF_DETAILS))
4576 fprintf (dump_file, "Linearized: ");
4577 print_gimple_stmt (dump_file, stmt, 0);
4580 reassociate_stats.linearized++;
4581 update_stmt (stmt);
4583 gsi = gsi_for_stmt (oldbinrhs);
4584 reassoc_remove_stmt (&gsi);
4585 release_defs (oldbinrhs);
4587 gimple_set_visited (stmt, true);
4588 gimple_set_visited (binlhs, true);
4589 gimple_set_visited (binrhs, true);
4591 /* Tail recurse on the new rhs if it still needs reassociation. */
4592 if (newbinrhs && is_reassociable_op (newbinrhs, rhscode, loop))
4593 /* ??? This should probably be linearize_expr (newbinrhs) but I don't
4594 want to change the algorithm while converting to tuples. */
4595 linearize_expr (stmt);
4598 /* If LHS has a single immediate use that is a GIMPLE_ASSIGN statement, return
4599 it. Otherwise, return NULL. */
4601 static gimple *
4602 get_single_immediate_use (tree lhs)
4604 use_operand_p immuse;
4605 gimple *immusestmt;
4607 if (TREE_CODE (lhs) == SSA_NAME
4608 && single_imm_use (lhs, &immuse, &immusestmt)
4609 && is_gimple_assign (immusestmt))
4610 return immusestmt;
4612 return NULL;
4615 /* Recursively negate the value of TONEGATE, and return the SSA_NAME
4616 representing the negated value. Insertions of any necessary
4617 instructions go before GSI.
4618 This function is recursive in that, if you hand it "a_5" as the
4619 value to negate, and a_5 is defined by "a_5 = b_3 + b_4", it will
4620 transform that add into "a_5 = -b_3 + -b_4". */
4622 static tree
4623 negate_value (tree tonegate, gimple_stmt_iterator *gsip)
4625 gimple *negatedefstmt = NULL;
4626 tree resultofnegate;
4627 gimple_stmt_iterator gsi;
4628 unsigned int uid;
4630 /* If we are trying to negate a name, defined by an add, negate the
4631 add operands instead. */
4632 if (TREE_CODE (tonegate) == SSA_NAME)
4633 negatedefstmt = SSA_NAME_DEF_STMT (tonegate);
4634 if (TREE_CODE (tonegate) == SSA_NAME
4635 && is_gimple_assign (negatedefstmt)
4636 && TREE_CODE (gimple_assign_lhs (negatedefstmt)) == SSA_NAME
4637 && has_single_use (gimple_assign_lhs (negatedefstmt))
4638 && gimple_assign_rhs_code (negatedefstmt) == PLUS_EXPR)
4640 tree rhs1 = gimple_assign_rhs1 (negatedefstmt);
4641 tree rhs2 = gimple_assign_rhs2 (negatedefstmt);
4642 tree lhs = gimple_assign_lhs (negatedefstmt);
4643 gimple *g;
4645 gsi = gsi_for_stmt (negatedefstmt);
4646 rhs1 = negate_value (rhs1, &gsi);
4648 gsi = gsi_for_stmt (negatedefstmt);
4649 rhs2 = negate_value (rhs2, &gsi);
4651 gsi = gsi_for_stmt (negatedefstmt);
4652 lhs = make_ssa_name (TREE_TYPE (lhs));
4653 gimple_set_visited (negatedefstmt, true);
4654 g = gimple_build_assign (lhs, PLUS_EXPR, rhs1, rhs2);
4655 gimple_set_uid (g, gimple_uid (negatedefstmt));
4656 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
4657 return lhs;
4660 tonegate = fold_build1 (NEGATE_EXPR, TREE_TYPE (tonegate), tonegate);
4661 resultofnegate = force_gimple_operand_gsi (gsip, tonegate, true,
4662 NULL_TREE, true, GSI_SAME_STMT);
4663 gsi = *gsip;
4664 uid = gimple_uid (gsi_stmt (gsi));
4665 for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
4667 gimple *stmt = gsi_stmt (gsi);
4668 if (gimple_uid (stmt) != 0)
4669 break;
4670 gimple_set_uid (stmt, uid);
4672 return resultofnegate;
4675 /* Return true if we should break up the subtract in STMT into an add
4676 with negate. This is true when the subtract's operands are really
4677 adds, or when the subtract itself is used in an add expression. In
4678 either case, breaking up the subtract into an add with negate
4679 exposes the adds to reassociation. */
4681 static bool
4682 should_break_up_subtract (gimple *stmt)
4684 tree lhs = gimple_assign_lhs (stmt);
4685 tree binlhs = gimple_assign_rhs1 (stmt);
4686 tree binrhs = gimple_assign_rhs2 (stmt);
4687 gimple *immusestmt;
4688 struct loop *loop = loop_containing_stmt (stmt);
4690 if (TREE_CODE (binlhs) == SSA_NAME
4691 && is_reassociable_op (SSA_NAME_DEF_STMT (binlhs), PLUS_EXPR, loop))
4692 return true;
4694 if (TREE_CODE (binrhs) == SSA_NAME
4695 && is_reassociable_op (SSA_NAME_DEF_STMT (binrhs), PLUS_EXPR, loop))
4696 return true;
4698 if (TREE_CODE (lhs) == SSA_NAME
4699 && (immusestmt = get_single_immediate_use (lhs))
4700 && is_gimple_assign (immusestmt)
4701 && (gimple_assign_rhs_code (immusestmt) == PLUS_EXPR
4702 || gimple_assign_rhs_code (immusestmt) == MULT_EXPR))
4703 return true;
4704 return false;
4707 /* Transform STMT from A - B into A + -B. */
4709 static void
4710 break_up_subtract (gimple *stmt, gimple_stmt_iterator *gsip)
4712 tree rhs1 = gimple_assign_rhs1 (stmt);
4713 tree rhs2 = gimple_assign_rhs2 (stmt);
4715 if (dump_file && (dump_flags & TDF_DETAILS))
4717 fprintf (dump_file, "Breaking up subtract ");
4718 print_gimple_stmt (dump_file, stmt, 0);
4721 rhs2 = negate_value (rhs2, gsip);
4722 gimple_assign_set_rhs_with_ops (gsip, PLUS_EXPR, rhs1, rhs2);
4723 update_stmt (stmt);
4726 /* Determine whether STMT is a builtin call that raises an SSA name
4727 to an integer power and has only one use. If so, and this is early
4728 reassociation and unsafe math optimizations are permitted, place
4729 the SSA name in *BASE and the exponent in *EXPONENT, and return TRUE.
4730 If any of these conditions does not hold, return FALSE. */
4732 static bool
4733 acceptable_pow_call (gcall *stmt, tree *base, HOST_WIDE_INT *exponent)
4735 tree arg1;
4736 REAL_VALUE_TYPE c, cint;
4738 switch (gimple_call_combined_fn (stmt))
4740 CASE_CFN_POW:
4741 if (flag_errno_math)
4742 return false;
4744 *base = gimple_call_arg (stmt, 0);
4745 arg1 = gimple_call_arg (stmt, 1);
4747 if (TREE_CODE (arg1) != REAL_CST)
4748 return false;
4750 c = TREE_REAL_CST (arg1);
4752 if (REAL_EXP (&c) > HOST_BITS_PER_WIDE_INT)
4753 return false;
4755 *exponent = real_to_integer (&c);
4756 real_from_integer (&cint, VOIDmode, *exponent, SIGNED);
4757 if (!real_identical (&c, &cint))
4758 return false;
4760 break;
4762 CASE_CFN_POWI:
4763 *base = gimple_call_arg (stmt, 0);
4764 arg1 = gimple_call_arg (stmt, 1);
4766 if (!tree_fits_shwi_p (arg1))
4767 return false;
4769 *exponent = tree_to_shwi (arg1);
4770 break;
4772 default:
4773 return false;
4776 /* Expanding negative exponents is generally unproductive, so we don't
4777 complicate matters with those. Exponents of zero and one should
4778 have been handled by expression folding. */
4779 if (*exponent < 2 || TREE_CODE (*base) != SSA_NAME)
4780 return false;
4782 return true;
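/* For example, pow (x, 4.0) and powi (x, 4) both yield *BASE = x and
   *EXPONENT = 4, whereas pow (x, 3.5) is rejected because 3.5 is not
   exactly an integer, and exponents 0 and 1 as well as all negative
   exponents fail the final check. */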
4785 /* Try to derive and add an operand entry for OP to *OPS. Return
4786 false if unsuccessful. */
4788 static bool
4789 try_special_add_to_ops (vec<operand_entry *> *ops,
4790 enum tree_code code,
4791 tree op, gimple* def_stmt)
4793 tree base = NULL_TREE;
4794 HOST_WIDE_INT exponent = 0;
4796 if (TREE_CODE (op) != SSA_NAME
4797 || ! has_single_use (op))
4798 return false;
4800 if (code == MULT_EXPR
4801 && reassoc_insert_powi_p
4802 && flag_unsafe_math_optimizations
4803 && is_gimple_call (def_stmt)
4804 && acceptable_pow_call (as_a <gcall *> (def_stmt), &base, &exponent))
4806 add_repeat_to_ops_vec (ops, base, exponent);
4807 gimple_set_visited (def_stmt, true);
4808 return true;
4810 else if (code == MULT_EXPR
4811 && is_gimple_assign (def_stmt)
4812 && gimple_assign_rhs_code (def_stmt) == NEGATE_EXPR
4813 && !HONOR_SNANS (TREE_TYPE (op))
4814 && (!HONOR_SIGNED_ZEROS (TREE_TYPE (op))
4815 || !COMPLEX_FLOAT_TYPE_P (TREE_TYPE (op))))
4817 tree rhs1 = gimple_assign_rhs1 (def_stmt);
4818 tree cst = build_minus_one_cst (TREE_TYPE (op));
4819 add_to_ops_vec (ops, rhs1);
4820 add_to_ops_vec (ops, cst);
4821 gimple_set_visited (def_stmt, true);
4822 return true;
4825 return false;
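/* For instance, within a multiply chain, t * __builtin_powi (x, 3)
   contributes a repeat-factor entry (x, count 3) rather than a plain
   operand (when powi insertion and unsafe math permit it), and
   t * -v is decomposed into the two operands v and -1, so the -1 can
   be folded with constants or other negates in the chain. */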
4828 /* Recursively linearize a binary expression that is the RHS of STMT.
4829 Place the operands of the expression tree in the vector named OPS. */
4831 static void
4832 linearize_expr_tree (vec<operand_entry *> *ops, gimple *stmt,
4833 bool is_associative, bool set_visited)
4835 tree binlhs = gimple_assign_rhs1 (stmt);
4836 tree binrhs = gimple_assign_rhs2 (stmt);
4837 gimple *binlhsdef = NULL, *binrhsdef = NULL;
4838 bool binlhsisreassoc = false;
4839 bool binrhsisreassoc = false;
4840 enum tree_code rhscode = gimple_assign_rhs_code (stmt);
4841 struct loop *loop = loop_containing_stmt (stmt);
4843 if (set_visited)
4844 gimple_set_visited (stmt, true);
4846 if (TREE_CODE (binlhs) == SSA_NAME)
4848 binlhsdef = SSA_NAME_DEF_STMT (binlhs);
4849 binlhsisreassoc = (is_reassociable_op (binlhsdef, rhscode, loop)
4850 && !stmt_could_throw_p (binlhsdef));
4853 if (TREE_CODE (binrhs) == SSA_NAME)
4855 binrhsdef = SSA_NAME_DEF_STMT (binrhs);
4856 binrhsisreassoc = (is_reassociable_op (binrhsdef, rhscode, loop)
4857 && !stmt_could_throw_p (binrhsdef));
4860 /* If the LHS is not reassociable, but the RHS is, we need to swap
4861 them. If neither is reassociable, there is nothing we can do, so
4862 just put them in the ops vector. If the LHS is reassociable,
4863 linearize it. If both are reassociable, then linearize the RHS
4864 and the LHS. */
4866 if (!binlhsisreassoc)
4868 /* If this is not an associative operation, like division, give up. */
4869 if (!is_associative)
4871 add_to_ops_vec (ops, binrhs);
4872 return;
4875 if (!binrhsisreassoc)
4877 if (!try_special_add_to_ops (ops, rhscode, binrhs, binrhsdef))
4878 add_to_ops_vec (ops, binrhs);
4880 if (!try_special_add_to_ops (ops, rhscode, binlhs, binlhsdef))
4881 add_to_ops_vec (ops, binlhs);
4883 return;
4886 if (dump_file && (dump_flags & TDF_DETAILS))
4888 fprintf (dump_file, "swapping operands of ");
4889 print_gimple_stmt (dump_file, stmt, 0);
4892 swap_ssa_operands (stmt,
4893 gimple_assign_rhs1_ptr (stmt),
4894 gimple_assign_rhs2_ptr (stmt));
4895 update_stmt (stmt);
4897 if (dump_file && (dump_flags & TDF_DETAILS))
4899 fprintf (dump_file, " is now ");
4900 print_gimple_stmt (dump_file, stmt, 0);
4903 /* We want to make it so the lhs is always the reassociative op,
4904 so swap. */
4905 std::swap (binlhs, binrhs);
4907 else if (binrhsisreassoc)
4909 linearize_expr (stmt);
4910 binlhs = gimple_assign_rhs1 (stmt);
4911 binrhs = gimple_assign_rhs2 (stmt);
4914 gcc_assert (TREE_CODE (binrhs) != SSA_NAME
4915 || !is_reassociable_op (SSA_NAME_DEF_STMT (binrhs),
4916 rhscode, loop));
4917 linearize_expr_tree (ops, SSA_NAME_DEF_STMT (binlhs),
4918 is_associative, set_visited);
4920 if (!try_special_add_to_ops (ops, rhscode, binrhs, binrhsdef))
4921 add_to_ops_vec (ops, binrhs);
4924 /* Repropagate the negates back into subtracts, since no other pass
4925 currently does it. */
4927 static void
4928 repropagate_negates (void)
4930 unsigned int i = 0;
4931 tree negate;
4933 FOR_EACH_VEC_ELT (plus_negates, i, negate)
4935 gimple *user = get_single_immediate_use (negate);
4937 if (!user || !is_gimple_assign (user))
4938 continue;
4940 /* The negate operand can be either operand of a PLUS_EXPR
4941 (it can be the LHS if the RHS is a constant for example).
4943 Force the negate operand to the RHS of the PLUS_EXPR, then
4944 transform the PLUS_EXPR into a MINUS_EXPR. */
4945 if (gimple_assign_rhs_code (user) == PLUS_EXPR)
4947 /* If the negated operand appears on the LHS of the
4948 PLUS_EXPR, exchange the operands of the PLUS_EXPR
4949 to force the negated operand to the RHS of the PLUS_EXPR. */
4950 if (gimple_assign_rhs1 (user) == negate)
4952 swap_ssa_operands (user,
4953 gimple_assign_rhs1_ptr (user),
4954 gimple_assign_rhs2_ptr (user));
4957 /* Now transform the PLUS_EXPR into a MINUS_EXPR and replace
4958 the RHS of the PLUS_EXPR with the operand of the NEGATE_EXPR. */
4959 if (gimple_assign_rhs2 (user) == negate)
4961 tree rhs1 = gimple_assign_rhs1 (user);
4962 tree rhs2 = gimple_assign_rhs1 (SSA_NAME_DEF_STMT (negate));
4963 gimple_stmt_iterator gsi = gsi_for_stmt (user);
4964 gimple_assign_set_rhs_with_ops (&gsi, MINUS_EXPR, rhs1, rhs2);
4965 update_stmt (user);
4968 else if (gimple_assign_rhs_code (user) == MINUS_EXPR)
4970 if (gimple_assign_rhs1 (user) == negate)
4972 /* We have
4973 x = -a
4974 y = x - b
4975 which we transform into
4976 x = a + b
4977 y = -x .
4978 This pushes down the negate which we possibly can merge
4979 into some other operation, hence insert it into the
4980 plus_negates vector. */
4981 gimple *feed = SSA_NAME_DEF_STMT (negate);
4982 tree a = gimple_assign_rhs1 (feed);
4983 tree b = gimple_assign_rhs2 (user);
4984 gimple_stmt_iterator gsi = gsi_for_stmt (feed);
4985 gimple_stmt_iterator gsi2 = gsi_for_stmt (user);
4986 tree x = make_ssa_name (TREE_TYPE (gimple_assign_lhs (feed)));
4987 gimple *g = gimple_build_assign (x, PLUS_EXPR, a, b);
4988 gsi_insert_before (&gsi2, g, GSI_SAME_STMT);
4989 gimple_assign_set_rhs_with_ops (&gsi2, NEGATE_EXPR, x);
4990 user = gsi_stmt (gsi2);
4991 update_stmt (user);
4992 reassoc_remove_stmt (&gsi);
4993 release_defs (feed);
4994 plus_negates.safe_push (gimple_assign_lhs (user));
4996 else
4998 /* Transform "x = -a; y = b - x" into "y = b + a", getting
4999 rid of one operation. */
5000 gimple *feed = SSA_NAME_DEF_STMT (negate);
5001 tree a = gimple_assign_rhs1 (feed);
5002 tree rhs1 = gimple_assign_rhs1 (user);
5003 gimple_stmt_iterator gsi = gsi_for_stmt (user);
5004 gimple_assign_set_rhs_with_ops (&gsi, PLUS_EXPR, rhs1, a);
5005 update_stmt (gsi_stmt (gsi));
5011 /* Returns true if OP is of a type for which we can do reassociation.
5012 That is, for integral types with wrapping overflow, non-saturating
5013 fixed-point types, and floating-point types when -fassociative-math is enabled. */
5015 static bool
5016 can_reassociate_p (tree op)
5018 tree type = TREE_TYPE (op);
5019 if (TREE_CODE (op) == SSA_NAME && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op))
5020 return false;
5021 if ((ANY_INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_WRAPS (type))
5022 || NON_SAT_FIXED_POINT_TYPE_P (type)
5023 || (flag_associative_math && FLOAT_TYPE_P (type)))
5024 return true;
5025 return false;
5028 /* Break up subtract operations in block BB.
5030 We do this top down because we don't know whether the subtract is
5031 part of a possible chain of reassociation except at the top.
5033 IE given
5034 d = f + g
5035 c = a + e
5036 b = c - d
5037 q = b - r
5038 k = t - q
5040 we want to break up k = t - q, but we won't until we've transformed q
5041 = b - r, which won't be broken up until we transform b = c - d.
5043 En passant, clear the GIMPLE visited flag on every statement
5044 and set UIDs within each basic block. */
5046 static void
5047 break_up_subtract_bb (basic_block bb)
5049 gimple_stmt_iterator gsi;
5050 basic_block son;
5051 unsigned int uid = 1;
5053 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
5055 gimple *stmt = gsi_stmt (gsi);
5056 gimple_set_visited (stmt, false);
5057 gimple_set_uid (stmt, uid++);
5059 if (!is_gimple_assign (stmt)
5060 || !can_reassociate_p (gimple_assign_lhs (stmt)))
5061 continue;
5063 /* Look for simple gimple subtract operations. */
5064 if (gimple_assign_rhs_code (stmt) == MINUS_EXPR)
5066 if (!can_reassociate_p (gimple_assign_rhs1 (stmt))
5067 || !can_reassociate_p (gimple_assign_rhs2 (stmt)))
5068 continue;
5070 /* Check for a subtract used only in an addition. If this
5071 is the case, transform it into add of a negate for better
5072 reassociation. IE transform C = A-B into C = A + -B if C
5073 is only used in an addition. */
5074 if (should_break_up_subtract (stmt))
5075 break_up_subtract (stmt, &gsi);
5077 else if (gimple_assign_rhs_code (stmt) == NEGATE_EXPR
5078 && can_reassociate_p (gimple_assign_rhs1 (stmt)))
5079 plus_negates.safe_push (gimple_assign_lhs (stmt));
5081 for (son = first_dom_son (CDI_DOMINATORS, bb);
5082 son;
5083 son = next_dom_son (CDI_DOMINATORS, son))
5084 break_up_subtract_bb (son);
5087 /* Used for repeated factor analysis. */
5088 struct repeat_factor
5090 /* An SSA name that occurs in a multiply chain. */
5091 tree factor;
5093 /* Cached rank of the factor. */
5094 unsigned rank;
5096 /* Number of occurrences of the factor in the chain. */
5097 HOST_WIDE_INT count;
5099 /* An SSA name representing the product of this factor and
5100 all factors appearing later in the repeated factor vector. */
5101 tree repr;
5105 static vec<repeat_factor> repeat_factor_vec;
5107 /* Used for sorting the repeat factor vector. Sort primarily by
5108 ascending occurrence count, secondarily by descending rank. */
5110 static int
5111 compare_repeat_factors (const void *x1, const void *x2)
5113 const repeat_factor *rf1 = (const repeat_factor *) x1;
5114 const repeat_factor *rf2 = (const repeat_factor *) x2;
5116 if (rf1->count != rf2->count)
5117 return rf1->count < rf2->count ? -1 : 1; /* Avoid truncating the HOST_WIDE_INT difference to int. */
5119 return rf2->rank - rf1->rank;
5122 /* Look for repeated operands in OPS in the multiply tree rooted at
5123 STMT. Replace them with an optimal sequence of multiplies and powi
5124 builtin calls, and remove the used operands from OPS. Return an
5125 SSA name representing the value of the replacement sequence. */
5127 static tree
5128 attempt_builtin_powi (gimple *stmt, vec<operand_entry *> *ops)
5130 unsigned i, j, vec_len;
5131 int ii;
5132 operand_entry *oe;
5133 repeat_factor *rf1, *rf2;
5134 repeat_factor rfnew;
5135 tree result = NULL_TREE;
5136 tree target_ssa, iter_result;
5137 tree type = TREE_TYPE (gimple_get_lhs (stmt));
5138 tree powi_fndecl = mathfn_built_in (type, BUILT_IN_POWI);
5139 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
5140 gimple *mul_stmt, *pow_stmt;
5142 /* Nothing to do if BUILT_IN_POWI doesn't exist for this type and
5143 target. */
5144 if (!powi_fndecl)
5145 return NULL_TREE;
5147 /* Allocate the repeated factor vector. */
5148 repeat_factor_vec.create (10);
5150 /* Scan the OPS vector for all SSA names in the product and build
5151 up a vector of occurrence counts for each factor. */
5152 FOR_EACH_VEC_ELT (*ops, i, oe)
5154 if (TREE_CODE (oe->op) == SSA_NAME)
5156 FOR_EACH_VEC_ELT (repeat_factor_vec, j, rf1)
5158 if (rf1->factor == oe->op)
5160 rf1->count += oe->count;
5161 break;
5165 if (j >= repeat_factor_vec.length ())
5167 rfnew.factor = oe->op;
5168 rfnew.rank = oe->rank;
5169 rfnew.count = oe->count;
5170 rfnew.repr = NULL_TREE;
5171 repeat_factor_vec.safe_push (rfnew);
5176 /* Sort the repeated factor vector by (a) increasing occurrence count,
5177 and (b) decreasing rank. */
5178 repeat_factor_vec.qsort (compare_repeat_factors);
5180 /* It is generally best to combine as many base factors as possible
5181 into a product before applying __builtin_powi to the result.
5182 However, the sort order chosen for the repeated factor vector
5183 allows us to cache partial results for the product of the base
5184 factors for subsequent use. When we already have a cached partial
5185 result from a previous iteration, it is best to make use of it
5186 before looking for another __builtin_pow opportunity.
5188 As an example, consider x * x * y * y * y * z * z * z * z.
5189 We want to first compose the product x * y * z, raise it to the
5190 second power, then multiply this by y * z, and finally multiply
5191 by z. This can be done in 5 multiplies provided we cache y * z
5192 for use in both expressions:
5194 t1 = y * z
5195 t2 = t1 * x
5196 t3 = t2 * t2
5197 t4 = t1 * t3
5198 result = t4 * z
5200 If we instead ignored the cached y * z and first multiplied by
5201 the __builtin_pow opportunity z * z, we would get the inferior:
5203 t1 = y * z
5204 t2 = t1 * x
5205 t3 = t2 * t2
5206 t4 = z * z
5207 t5 = t3 * t4
5208 result = t5 * y */
5210 vec_len = repeat_factor_vec.length ();
5212 /* Repeatedly look for opportunities to create a builtin_powi call. */
5213 while (true)
5215 HOST_WIDE_INT power;
5217 /* First look for the largest cached product of factors from
5218 preceding iterations. If found, create a builtin_powi for
5219 it if the minimum occurrence count for its factors is at
5220 least 2, or just use this cached product as our next
5221 multiplicand if the minimum occurrence count is 1. */
5222 FOR_EACH_VEC_ELT (repeat_factor_vec, j, rf1)
5224 if (rf1->repr && rf1->count > 0)
5225 break;
5228 if (j < vec_len)
5230 power = rf1->count;
5232 if (power == 1)
5234 iter_result = rf1->repr;
5236 if (dump_file && (dump_flags & TDF_DETAILS))
5238 unsigned elt;
5239 repeat_factor *rf;
5240 fputs ("Multiplying by cached product ", dump_file);
5241 for (elt = j; elt < vec_len; elt++)
5243 rf = &repeat_factor_vec[elt];
5244 print_generic_expr (dump_file, rf->factor);
5245 if (elt < vec_len - 1)
5246 fputs (" * ", dump_file);
5248 fputs ("\n", dump_file);
5251 else
5253 iter_result = make_temp_ssa_name (type, NULL, "reassocpow");
5254 pow_stmt = gimple_build_call (powi_fndecl, 2, rf1->repr,
5255 build_int_cst (integer_type_node,
5256 power));
5257 gimple_call_set_lhs (pow_stmt, iter_result);
5258 gimple_set_location (pow_stmt, gimple_location (stmt));
5259 gimple_set_uid (pow_stmt, gimple_uid (stmt));
5260 gsi_insert_before (&gsi, pow_stmt, GSI_SAME_STMT);
5262 if (dump_file && (dump_flags & TDF_DETAILS))
5264 unsigned elt;
5265 repeat_factor *rf;
5266 fputs ("Building __builtin_pow call for cached product (",
5267 dump_file);
5268 for (elt = j; elt < vec_len; elt++)
5270 rf = &repeat_factor_vec[elt];
5271 print_generic_expr (dump_file, rf->factor);
5272 if (elt < vec_len - 1)
5273 fputs (" * ", dump_file);
5275 fprintf (dump_file, ")^" HOST_WIDE_INT_PRINT_DEC"\n",
5276 power);
5280 else
5282 /* Otherwise, find the first factor in the repeated factor
5283 vector whose occurrence count is at least 2. If no such
5284 factor exists, there are no builtin_powi opportunities
5285 remaining. */
5286 FOR_EACH_VEC_ELT (repeat_factor_vec, j, rf1)
5288 if (rf1->count >= 2)
5289 break;
5292 if (j >= vec_len)
5293 break;
5295 power = rf1->count;
5297 if (dump_file && (dump_flags & TDF_DETAILS))
5299 unsigned elt;
5300 repeat_factor *rf;
5301 fputs ("Building __builtin_pow call for (", dump_file);
5302 for (elt = j; elt < vec_len; elt++)
5304 rf = &repeat_factor_vec[elt];
5305 print_generic_expr (dump_file, rf->factor);
5306 if (elt < vec_len - 1)
5307 fputs (" * ", dump_file);
5309 fprintf (dump_file, ")^" HOST_WIDE_INT_PRINT_DEC"\n", power);
5312 reassociate_stats.pows_created++;
5314 /* Visit each element of the vector in reverse order (so that
5315 high-occurrence elements are visited first, and within the
5316 same occurrence count, lower-ranked elements are visited
5317 first). Form a linear product of all elements in this order
5318 whose occurrence count is at least that of element J.
5319 Record the SSA name representing the product of each element
5320 with all subsequent elements in the vector. */
5321 if (j == vec_len - 1)
5322 rf1->repr = rf1->factor;
5323 else
5325 for (ii = vec_len - 2; ii >= (int)j; ii--)
5327 tree op1, op2;
5329 rf1 = &repeat_factor_vec[ii];
5330 rf2 = &repeat_factor_vec[ii + 1];
5332 /* Init the last factor's representative to be itself. */
5333 if (!rf2->repr)
5334 rf2->repr = rf2->factor;
5336 op1 = rf1->factor;
5337 op2 = rf2->repr;
5339 target_ssa = make_temp_ssa_name (type, NULL, "reassocpow");
5340 mul_stmt = gimple_build_assign (target_ssa, MULT_EXPR,
5341 op1, op2);
5342 gimple_set_location (mul_stmt, gimple_location (stmt));
5343 gimple_set_uid (mul_stmt, gimple_uid (stmt));
5344 gsi_insert_before (&gsi, mul_stmt, GSI_SAME_STMT);
5345 rf1->repr = target_ssa;
5347 /* Don't reprocess the multiply we just introduced. */
5348 gimple_set_visited (mul_stmt, true);
5352 /* Form a call to __builtin_powi for the maximum product
5353 just formed, raised to the power obtained earlier. */
5354 rf1 = &repeat_factor_vec[j];
5355 iter_result = make_temp_ssa_name (type, NULL, "reassocpow");
5356 pow_stmt = gimple_build_call (powi_fndecl, 2, rf1->repr,
5357 build_int_cst (integer_type_node,
5358 power));
5359 gimple_call_set_lhs (pow_stmt, iter_result);
5360 gimple_set_location (pow_stmt, gimple_location (stmt));
5361 gimple_set_uid (pow_stmt, gimple_uid (stmt));
5362 gsi_insert_before (&gsi, pow_stmt, GSI_SAME_STMT);
5365 /* If we previously formed at least one other builtin_powi call,
5366 form the product of this one and those others. */
5367 if (result)
5369 tree new_result = make_temp_ssa_name (type, NULL, "reassocpow");
5370 mul_stmt = gimple_build_assign (new_result, MULT_EXPR,
5371 result, iter_result);
5372 gimple_set_location (mul_stmt, gimple_location (stmt));
5373 gimple_set_uid (mul_stmt, gimple_uid (stmt));
5374 gsi_insert_before (&gsi, mul_stmt, GSI_SAME_STMT);
5375 gimple_set_visited (mul_stmt, true);
5376 result = new_result;
5378 else
5379 result = iter_result;
5381 /* Decrement the occurrence count of each element in the product
5382 by the count found above, and remove this many copies of each
5383 factor from OPS. */
5384 for (i = j; i < vec_len; i++)
5386 unsigned k = power;
5387 unsigned n;
5389 rf1 = &repeat_factor_vec[i];
5390 rf1->count -= power;
5392 FOR_EACH_VEC_ELT_REVERSE (*ops, n, oe)
5394 if (oe->op == rf1->factor)
5396 if (oe->count <= k)
5398 ops->ordered_remove (n);
5399 k -= oe->count;
5401 if (k == 0)
5402 break;
5404 else
5406 oe->count -= k;
5407 break;
5414 /* At this point all elements in the repeated factor vector have a
5415 remaining occurrence count of 0 or 1, and those with a count of 1
5416 don't have cached representatives. Re-sort the ops vector and
5417 clean up. */
5418 ops->qsort (sort_by_operand_rank);
5419 repeat_factor_vec.release ();
5421 /* Return the final product computed herein. Note that there may
5422 still be some elements with single occurrence count left in OPS;
5423 those will be handled by the normal reassociation logic. */
5424 return result;
5427 /* Attempt to optimize
5428 CST1 * copysign (CST2, y) -> copysign (CST1 * CST2, y) if CST1 > 0, or
5429 CST1 * copysign (CST2, y) -> -copysign (CST1 * CST2, y) if CST1 < 0. */
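/* For example, -2.5 * copysign (3.0, y) folds the two constants to
   -7.5 and becomes -COPYSIGN (-7.5, y); since copysign only takes
   the magnitude of its first argument, that is -copysign (7.5, y). */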
5431 static void
5432 attempt_builtin_copysign (vec<operand_entry *> *ops)
5434 operand_entry *oe;
5435 unsigned int i;
5436 unsigned int length = ops->length ();
5437 tree cst = ops->last ()->op;
5439 if (length == 1 || TREE_CODE (cst) != REAL_CST)
5440 return;
5442 FOR_EACH_VEC_ELT (*ops, i, oe)
5444 if (TREE_CODE (oe->op) == SSA_NAME
5445 && has_single_use (oe->op))
5447 gimple *def_stmt = SSA_NAME_DEF_STMT (oe->op);
5448 if (gcall *old_call = dyn_cast <gcall *> (def_stmt))
5450 tree arg0, arg1;
5451 switch (gimple_call_combined_fn (old_call))
5453 CASE_CFN_COPYSIGN:
5454 arg0 = gimple_call_arg (old_call, 0);
5455 arg1 = gimple_call_arg (old_call, 1);
5456 /* The first argument of copysign must be a constant,
5457 otherwise there's nothing to do. */
5458 if (TREE_CODE (arg0) == REAL_CST)
5460 tree type = TREE_TYPE (arg0);
5461 tree mul = const_binop (MULT_EXPR, type, cst, arg0);
5462 /* If we couldn't fold to a single constant, skip it.
5463 That happens e.g. for inexact multiplication when
5464 -frounding-math. */
5465 if (mul == NULL_TREE)
5466 break;
5467 /* Instead of adjusting OLD_CALL, let's build a new
5468 call to not leak the LHS and prevent keeping bogus
5469 debug statements. DCE will clean up the old call. */
5470 gcall *new_call;
5471 if (gimple_call_internal_p (old_call))
5472 new_call = gimple_build_call_internal
5473 (IFN_COPYSIGN, 2, mul, arg1);
5474 else
5475 new_call = gimple_build_call
5476 (gimple_call_fndecl (old_call), 2, mul, arg1);
5477 tree lhs = make_ssa_name (type);
5478 gimple_call_set_lhs (new_call, lhs);
5479 gimple_set_location (new_call,
5480 gimple_location (old_call));
5481 insert_stmt_after (new_call, old_call);
5482 /* We've used the constant, get rid of it. */
5483 ops->pop ();
5484 bool cst1_neg = real_isneg (TREE_REAL_CST_PTR (cst));
5485 /* Handle the CST1 < 0 case by negating the result. */
5486 if (cst1_neg)
5488 tree negrhs = make_ssa_name (TREE_TYPE (lhs));
5489 gimple *negate_stmt
5490 = gimple_build_assign (negrhs, NEGATE_EXPR, lhs);
5491 insert_stmt_after (negate_stmt, new_call);
5492 oe->op = negrhs;
5494 else
5495 oe->op = lhs;
5496 if (dump_file && (dump_flags & TDF_DETAILS))
5498 fprintf (dump_file, "Optimizing copysign: ");
5499 print_generic_expr (dump_file, cst);
5500 fprintf (dump_file, " * COPYSIGN (");
5501 print_generic_expr (dump_file, arg0);
5502 fprintf (dump_file, ", ");
5503 print_generic_expr (dump_file, arg1);
5504 fprintf (dump_file, ") into %sCOPYSIGN (",
5505 cst1_neg ? "-" : "");
5506 print_generic_expr (dump_file, mul);
5507 fprintf (dump_file, ", ");
5508 print_generic_expr (dump_file, arg1);
5509 fprintf (dump_file, ")\n");
5511 return;
5513 break;
5514 default:
5515 break;
5522 /* Transform STMT at *GSI into a copy by replacing its rhs with NEW_RHS. */
5524 static void
5525 transform_stmt_to_copy (gimple_stmt_iterator *gsi, gimple *stmt, tree new_rhs)
5527 tree rhs1;
5529 if (dump_file && (dump_flags & TDF_DETAILS))
5531 fprintf (dump_file, "Transforming ");
5532 print_gimple_stmt (dump_file, stmt, 0);
5535 rhs1 = gimple_assign_rhs1 (stmt);
5536 gimple_assign_set_rhs_from_tree (gsi, new_rhs);
5537 update_stmt (stmt);
5538 remove_visited_stmt_chain (rhs1);
5540 if (dump_file && (dump_flags & TDF_DETAILS))
5542 fprintf (dump_file, " into ");
5543 print_gimple_stmt (dump_file, stmt, 0);
5547 /* Transform STMT at *GSI into a multiply of RHS1 and RHS2. */
5549 static void
5550 transform_stmt_to_multiply (gimple_stmt_iterator *gsi, gimple *stmt,
5551 tree rhs1, tree rhs2)
5553 if (dump_file && (dump_flags & TDF_DETAILS))
5555 fprintf (dump_file, "Transforming ");
5556 print_gimple_stmt (dump_file, stmt, 0);
5559 gimple_assign_set_rhs_with_ops (gsi, MULT_EXPR, rhs1, rhs2);
5560 update_stmt (gsi_stmt (*gsi));
5561 remove_visited_stmt_chain (rhs1);
5563 if (dump_file && (dump_flags & TDF_DETAILS))
5565 fprintf (dump_file, " into ");
5566 print_gimple_stmt (dump_file, stmt, 0);
5570 /* Reassociate expressions in basic block BB and its children in the
5571 post-dominator tree.
5573 Bubble up the return status from maybe_optimize_range_tests. */
5575 static bool
5576 reassociate_bb (basic_block bb)
5578 gimple_stmt_iterator gsi;
5579 basic_block son;
5580 gimple *stmt = last_stmt (bb);
5581 bool cfg_cleanup_needed = false;
5583 if (stmt && !gimple_visited_p (stmt))
5584 cfg_cleanup_needed |= maybe_optimize_range_tests (stmt);
5586 for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
5588 stmt = gsi_stmt (gsi);
5590 if (is_gimple_assign (stmt)
5591 && !stmt_could_throw_p (stmt))
5593 tree lhs, rhs1, rhs2;
5594 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
5596 /* If this is not a gimple binary expression, there is
5597 nothing for us to do with it. */
5598 if (get_gimple_rhs_class (rhs_code) != GIMPLE_BINARY_RHS)
5599 continue;
5601 /* If this was part of an already processed statement,
5602 we don't need to touch it again. */
5603 if (gimple_visited_p (stmt))
5605 /* This statement might have become dead because of previous
5606 reassociations. */
5607 if (has_zero_uses (gimple_get_lhs (stmt)))
5609 reassoc_remove_stmt (&gsi);
5610 release_defs (stmt);
5611 /* We might have removed the last stmt above, which
5612 leaves the iterator at the end of the sequence.
5613 Reset it to the last stmt in this case, which might
5614 still be the end of the sequence if we removed
5615 the last statement of the sequence, in which case
5616 we need to bail out. */
5617 if (gsi_end_p (gsi))
5619 gsi = gsi_last_bb (bb);
5620 if (gsi_end_p (gsi))
5621 break;
5624 continue;
5627 lhs = gimple_assign_lhs (stmt);
5628 rhs1 = gimple_assign_rhs1 (stmt);
5629 rhs2 = gimple_assign_rhs2 (stmt);
5631 /* For non-bit or min/max operations we can't associate
5632 all types. Verify that here. */
5633 if (rhs_code != BIT_IOR_EXPR
5634 && rhs_code != BIT_AND_EXPR
5635 && rhs_code != BIT_XOR_EXPR
5636 && rhs_code != MIN_EXPR
5637 && rhs_code != MAX_EXPR
5638 && (!can_reassociate_p (lhs)
5639 || !can_reassociate_p (rhs1)
5640 || !can_reassociate_p (rhs2)))
5641 continue;
5643 if (associative_tree_code (rhs_code))
5645 auto_vec<operand_entry *> ops;
5646 tree powi_result = NULL_TREE;
5647 bool is_vector = VECTOR_TYPE_P (TREE_TYPE (lhs));
5649 /* There may be no immediate uses left by the time we
5650 get here because we may have eliminated them all. */
5651 if (TREE_CODE (lhs) == SSA_NAME && has_zero_uses (lhs))
5652 continue;
5654 gimple_set_visited (stmt, true);
5655 linearize_expr_tree (&ops, stmt, true, true);
5656 ops.qsort (sort_by_operand_rank);
5657 optimize_ops_list (rhs_code, &ops);
5658 if (undistribute_ops_list (rhs_code, &ops,
5659 loop_containing_stmt (stmt)))
5661 ops.qsort (sort_by_operand_rank);
5662 optimize_ops_list (rhs_code, &ops);
5665 if (rhs_code == PLUS_EXPR
5666 && transform_add_to_multiply (&ops))
5667 ops.qsort (sort_by_operand_rank);
5669 if (rhs_code == BIT_IOR_EXPR || rhs_code == BIT_AND_EXPR)
5671 if (is_vector)
5672 optimize_vec_cond_expr (rhs_code, &ops);
5673 else
5674 optimize_range_tests (rhs_code, &ops);
5677 if (rhs_code == MULT_EXPR && !is_vector)
5679 attempt_builtin_copysign (&ops);
5681 if (reassoc_insert_powi_p
5682 && flag_unsafe_math_optimizations)
5683 powi_result = attempt_builtin_powi (stmt, &ops);
5686 operand_entry *last;
5687 bool negate_result = false;
5688 if (ops.length () > 1
5689 && rhs_code == MULT_EXPR)
5691 last = ops.last ();
5692 if ((integer_minus_onep (last->op)
5693 || real_minus_onep (last->op))
5694 && !HONOR_SNANS (TREE_TYPE (lhs))
5695 && (!HONOR_SIGNED_ZEROS (TREE_TYPE (lhs))
5696 || !COMPLEX_FLOAT_TYPE_P (TREE_TYPE (lhs))))
5698 ops.pop ();
5699 negate_result = true;
5703 tree new_lhs = lhs;
5704 /* If the operand vector is now empty, all operands were
5705 consumed by the __builtin_powi optimization. */
5706 if (ops.length () == 0)
5707 transform_stmt_to_copy (&gsi, stmt, powi_result);
5708 else if (ops.length () == 1)
5710 tree last_op = ops.last ()->op;
5712 /* If the stmt that defines operand has to be inserted, insert it
5713 before the use. */
5714 if (ops.last ()->stmt_to_insert)
5715 insert_stmt_before_use (stmt, ops.last ()->stmt_to_insert);
5716 if (powi_result)
5717 transform_stmt_to_multiply (&gsi, stmt, last_op,
5718 powi_result);
5719 else
5720 transform_stmt_to_copy (&gsi, stmt, last_op);
5722 else
5724 machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
5725 int ops_num = ops.length ();
5726 int width = get_reassociation_width (ops_num, rhs_code, mode);
5728 if (dump_file && (dump_flags & TDF_DETAILS))
5729 fprintf (dump_file,
5730 "Width = %d was chosen for reassociation\n", width);
5732 if (width > 1
5733 && ops.length () > 3)
5734 rewrite_expr_tree_parallel (as_a <gassign *> (stmt),
5735 width, ops);
5736 else
5738 /* When there are three operands left, we want
5739 to make sure the ones that get the double
5740 binary op are chosen wisely. */
5741 int len = ops.length ();
5742 if (len >= 3)
5743 swap_ops_for_binary_stmt (ops, len - 3, stmt);
5745 new_lhs = rewrite_expr_tree (stmt, 0, ops,
5746 powi_result != NULL
5747 || negate_result);
5750 /* If we combined some repeated factors into a
5751 __builtin_powi call, multiply that result by the
5752 reassociated operands. */
5753 if (powi_result)
5755 gimple *mul_stmt, *lhs_stmt = SSA_NAME_DEF_STMT (lhs);
5756 tree type = TREE_TYPE (lhs);
5757 tree target_ssa = make_temp_ssa_name (type, NULL,
5758 "reassocpow");
5759 gimple_set_lhs (lhs_stmt, target_ssa);
5760 update_stmt (lhs_stmt);
5761 if (lhs != new_lhs)
5763 target_ssa = new_lhs;
5764 new_lhs = lhs;
5766 mul_stmt = gimple_build_assign (lhs, MULT_EXPR,
5767 powi_result, target_ssa);
5768 gimple_set_location (mul_stmt, gimple_location (stmt));
5769 gimple_set_uid (mul_stmt, gimple_uid (stmt));
5770 gsi_insert_after (&gsi, mul_stmt, GSI_NEW_STMT);
5774 if (negate_result)
5776 stmt = SSA_NAME_DEF_STMT (lhs);
5777 tree tmp = make_ssa_name (TREE_TYPE (lhs));
5778 gimple_set_lhs (stmt, tmp);
5779 if (lhs != new_lhs)
5780 tmp = new_lhs;
5781 gassign *neg_stmt = gimple_build_assign (lhs, NEGATE_EXPR,
5782 tmp);
5783 gimple_set_uid (neg_stmt, gimple_uid (stmt));
5784 gsi_insert_after (&gsi, neg_stmt, GSI_NEW_STMT);
5785 update_stmt (stmt);
5790 for (son = first_dom_son (CDI_POST_DOMINATORS, bb);
5791 son;
5792 son = next_dom_son (CDI_POST_DOMINATORS, son))
5793 cfg_cleanup_needed |= reassociate_bb (son);
5795 return cfg_cleanup_needed;
5798 /* Add jumps around shifts for range tests turned into bit tests.
5799 For each SSA_NAME VAR we have code like:
5800 VAR = ...; // final stmt of range comparison
5801 // bit test here...;
5802 OTHERVAR = ...; // final stmt of the bit test sequence
5803 RES = VAR | OTHERVAR;
5804 Turn the above into:
5805 VAR = ...;
5806 if (VAR != 0)
5807 goto <l3>;
5808 else
5809 goto <l2>;
5810 <l2>:
5811 // bit test here...;
5812 OTHERVAR = ...;
5813 <l3>:
5814 # RES = PHI<1(l1), OTHERVAR(l2)>; */
5816 static void
5817 branch_fixup (void)
5819 tree var;
5820 unsigned int i;
5822 FOR_EACH_VEC_ELT (reassoc_branch_fixups, i, var)
5824 gimple *def_stmt = SSA_NAME_DEF_STMT (var);
5825 gimple *use_stmt;
5826 use_operand_p use;
5827 bool ok = single_imm_use (var, &use, &use_stmt);
5828 gcc_assert (ok
5829 && is_gimple_assign (use_stmt)
5830 && gimple_assign_rhs_code (use_stmt) == BIT_IOR_EXPR
5831 && gimple_bb (def_stmt) == gimple_bb (use_stmt));
5833 basic_block cond_bb = gimple_bb (def_stmt);
5834 basic_block then_bb = split_block (cond_bb, def_stmt)->dest;
5835 basic_block merge_bb = split_block (then_bb, use_stmt)->dest;
5837 gimple_stmt_iterator gsi = gsi_for_stmt (def_stmt);
5838 gimple *g = gimple_build_cond (NE_EXPR, var,
5839 build_zero_cst (TREE_TYPE (var)),
5840 NULL_TREE, NULL_TREE);
5841 location_t loc = gimple_location (use_stmt);
5842 gimple_set_location (g, loc);
5843 gsi_insert_after (&gsi, g, GSI_NEW_STMT);
      edge etrue = make_edge (cond_bb, merge_bb, EDGE_TRUE_VALUE);
      etrue->probability = REG_BR_PROB_BASE / 2;
      etrue->count = cond_bb->count.apply_scale (1, 2);
      edge efalse = find_edge (cond_bb, then_bb);
      efalse->flags = EDGE_FALSE_VALUE;
      efalse->probability -= etrue->probability;
      efalse->count -= etrue->count;
      then_bb->count -= etrue->count;

      tree othervar = NULL_TREE;
      if (gimple_assign_rhs1 (use_stmt) == var)
        othervar = gimple_assign_rhs2 (use_stmt);
      else if (gimple_assign_rhs2 (use_stmt) == var)
        othervar = gimple_assign_rhs1 (use_stmt);
      else
        gcc_unreachable ();
      tree lhs = gimple_assign_lhs (use_stmt);
      gphi *phi = create_phi_node (lhs, merge_bb);
      add_phi_arg (phi, build_one_cst (TREE_TYPE (lhs)), etrue, loc);
      add_phi_arg (phi, othervar, single_succ_edge (then_bb), loc);
      gsi = gsi_for_stmt (use_stmt);
      gsi_remove (&gsi, true);

      set_immediate_dominator (CDI_DOMINATORS, merge_bb, cond_bb);
      set_immediate_dominator (CDI_POST_DOMINATORS, cond_bb, merge_bb);
    }

  reassoc_branch_fixups.release ();
}
void dump_ops_vector (FILE *file, vec<operand_entry *> ops);
void debug_ops_vector (vec<operand_entry *> ops);

/* Dump the operand entry vector OPS to FILE.  */

void
dump_ops_vector (FILE *file, vec<operand_entry *> ops)
{
  operand_entry *oe;
  unsigned int i;

  FOR_EACH_VEC_ELT (ops, i, oe)
    {
      fprintf (file, "Op %d -> rank: %d, tree: ", i, oe->rank);
      print_generic_expr (file, oe->op);
      fprintf (file, "\n");
    }
}
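/* For an operand list built from, say, x_1(D) + y_2(D) + 5, the dump
   might read (ranks here are made up for illustration):

     Op 0 -> rank: 4, tree: y_2(D)
     Op 1 -> rank: 3, tree: x_1(D)
     Op 2 -> rank: 0, tree: 5

   Constants have rank 0, so after sorting they appear at the end of
   the vector.  */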
/* Dump the operand entry vector OPS to STDERR.  */

DEBUG_FUNCTION void
debug_ops_vector (vec<operand_entry *> ops)
{
  dump_ops_vector (stderr, ops);
}
/* Walk the dominator tree from the entry block, breaking up subtracts,
   then reassociate over the post-dominator tree starting from the exit
   block, bubbling up the return status from reassociate_bb.  */

static bool
do_reassoc (void)
{
  break_up_subtract_bb (ENTRY_BLOCK_PTR_FOR_FN (cfun));
  return reassociate_bb (EXIT_BLOCK_PTR_FOR_FN (cfun));
}
/* Initialize the reassociation pass.  */

static void
init_reassoc (void)
{
  int i;
  long rank = 2;
  int *bbs = XNEWVEC (int, n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS);

  /* Find the loops, so that we can prevent moving calculations in
     them.  */
  loop_optimizer_init (AVOID_CFG_MODIFICATIONS);

  memset (&reassociate_stats, 0, sizeof (reassociate_stats));

  next_operand_entry_id = 0;

  /* Reverse RPO (Reverse Post Order) will give us something where
     deeper loops come later.  */
  pre_and_rev_post_order_compute (NULL, bbs, false);
  bb_rank = XCNEWVEC (long, last_basic_block_for_fn (cfun));
  operand_rank = new hash_map<tree, long>;

  /* Give each default definition a distinct rank.  This includes
     parameters and the static chain.  Walk backwards over all
     SSA names so that we get proper rank ordering according
     to tree_swap_operands_p.  */
  for (i = num_ssa_names - 1; i > 0; --i)
    {
      tree name = ssa_name (i);
      if (name && SSA_NAME_IS_DEFAULT_DEF (name))
        insert_operand_rank (name, ++rank);
    }
  /* Set up the rank for each BB.  */
  for (i = 0; i < n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS; i++)
    bb_rank[bbs[i]] = ++rank << 16;
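  /* Illustrative numbers (hypothetical): with two parameters, the loop
     over SSA names above leaves rank == 4, so the first block in RPO
     order gets bb_rank 5 << 16 == 327680, the next 6 << 16 == 393216,
     and so on.  Shifting by 16 keeps the low bits free for the ranks
     of values computed inside each block, so anything defined in a
     later block outranks everything from earlier blocks.  */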
  free (bbs);
  calculate_dominance_info (CDI_POST_DOMINATORS);
  plus_negates = vNULL;
}
/* Cleanup after the reassociation pass, and print stats if
   requested.  */

static void
fini_reassoc (void)
{
  statistics_counter_event (cfun, "Linearized",
                            reassociate_stats.linearized);
  statistics_counter_event (cfun, "Constants eliminated",
                            reassociate_stats.constants_eliminated);
  statistics_counter_event (cfun, "Ops eliminated",
                            reassociate_stats.ops_eliminated);
  statistics_counter_event (cfun, "Statements rewritten",
                            reassociate_stats.rewritten);
  statistics_counter_event (cfun, "Built-in pow[i] calls encountered",
                            reassociate_stats.pows_encountered);
  statistics_counter_event (cfun, "Built-in powi calls created",
                            reassociate_stats.pows_created);

  delete operand_rank;
  operand_entry_pool.release ();
  free (bb_rank);
  plus_negates.release ();
  free_dominance_info (CDI_POST_DOMINATORS);
  loop_optimizer_finalize ();
}
/* Gate and execute functions for Reassociation.  If INSERT_POWI_P,
   enable insertion of __builtin_powi calls.

   Returns TODO_cleanup_cfg if a CFG cleanup pass is desired due to
   optimization of a gimple conditional.  Otherwise returns zero.  */
static unsigned int
execute_reassoc (bool insert_powi_p)
{
  reassoc_insert_powi_p = insert_powi_p;

  init_reassoc ();

  bool cfg_cleanup_needed;
  cfg_cleanup_needed = do_reassoc ();
  repropagate_negates ();
  branch_fixup ();

  fini_reassoc ();
  return cfg_cleanup_needed ? TODO_cleanup_cfg : 0;
}
namespace {

const pass_data pass_data_reassoc =
{
  GIMPLE_PASS, /* type */
  "reassoc", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_REASSOC, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa_only_virtuals, /* todo_flags_finish */
};
class pass_reassoc : public gimple_opt_pass
{
public:
  pass_reassoc (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_reassoc, ctxt), insert_powi_p (false)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_reassoc (m_ctxt); }
  void set_pass_param (unsigned int n, bool param)
    {
      gcc_assert (n == 0);
      insert_powi_p = param;
    }
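  /* The pass is instantiated more than once; the pass manager feeds
     each copy its parameter from passes.def via set_pass_param,
     i.e. NEXT_PASS (pass_reassoc, true) for the early run with powi
     insertion enabled and NEXT_PASS (pass_reassoc, false) for the
     late run (see passes.def for the exact placement).  */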
  virtual bool gate (function *) { return flag_tree_reassoc != 0; }
  virtual unsigned int execute (function *)
    { return execute_reassoc (insert_powi_p); }

 private:
  /* Enable insertion of __builtin_powi calls during execute_reassoc.  See
     point 3a in the pass header comment.  */
  bool insert_powi_p;
}; // class pass_reassoc

} // anon namespace

gimple_opt_pass *
make_pass_reassoc (gcc::context *ctxt)
{
  return new pass_reassoc (ctxt);
}