/* Reassociation for trees.
   Copyright (C) 2005, 2007, 2008, 2009, 2010, 2011, 2012
   Free Software Foundation, Inc.
   Contributed by Daniel Berlin <dan@dberlin.org>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "basic-block.h"
#include "gimple-pretty-print.h"
#include "tree-inline.h"
#include "tree-flow.h"
#include "gimple.h"
#include "tree-iterator.h"
#include "tree-pass.h"
#include "alloc-pool.h"
#include "vec.h"
#include "langhooks.h"
#include "pointer-set.h"
#include "cfgloop.h"
#include "flags.h"
#include "target.h"
#include "params.h"
#include "diagnostic-core.h"
/*  This is a simple global reassociation pass.  It is, in part, based
    on the LLVM pass of the same name (They do some things more/less
    than we do, in different orders, etc).

    It consists of five steps:

    1. Breaking up subtract operations into addition + negate, where
    it would promote the reassociation of adds.

    2. Left linearization of the expression trees, so that (A+B)+(C+D)
    becomes (((A+B)+C)+D), which is easier for us to rewrite later.
    During linearization, we place the operands of the binary
    expressions into a vector of operand_entry_t

    3. Optimization of the operand lists, eliminating things like a +
    -a, a & a, etc.

    3a. Combine repeated factors with the same occurrence counts
    into a __builtin_powi call that will later be optimized into
    an optimal number of multiplies.

    4. Rewrite the expression trees we linearized and optimized so
    they are in proper rank order.

    5. Repropagate negates, as nothing else will clean it up ATM.

    A bit of theory on #4, since nobody seems to write anything down
    about why it makes sense to do it the way they do it:

    We could do this much nicer theoretically, but don't (for reasons
    explained after how to do it theoretically nice :P).

    In order to promote the most redundancy elimination, you want
    binary expressions whose operands are the same rank (or
    preferably, the same value) exposed to the redundancy eliminator,
    for possible elimination.

    So the way to do this if we really cared, is to build the new op
    tree from the leaves to the roots, merging as you go, and putting the
    new op on the end of the worklist, until you are left with one
    thing on the worklist.

    IE if you have to rewrite the following set of operands (listed with
    rank in parentheses), with opcode PLUS_EXPR:

    a (1),  b (1),  c (1),  d (2), e (2)

    We start with our merge worklist empty, and the ops list with all of
    those on it.

    You want to first merge all leaves of the same rank, as much as
    possible.

    So first build a binary op of

    mergetmp = a + b, and put "mergetmp" on the merge worklist.

    Because there is no three operand form of PLUS_EXPR, c is not going to
    be exposed to redundancy elimination as a rank 1 operand.

    So you might as well throw it on the merge worklist (you could also
    consider it to now be a rank two operand, and merge it with d and e,
    but in this case, you then have evicted e from a binary op. So at
    least in this situation, you can't win.)

    Then build a binary op of d + e
    mergetmp2 = d + e

    and put mergetmp2 on the merge worklist.

    so merge worklist = {mergetmp, c, mergetmp2}

    Continue building binary ops of these operations until you have only
    one operation left on the worklist.

    So we have

    build binary op
    mergetmp3 = mergetmp + c

    worklist = {mergetmp2, mergetmp3}

    mergetmp4 = mergetmp2 + mergetmp3

    worklist = {mergetmp4}

    because we have one operation left, we can now just set the original
    statement equal to the result of that operation.

    This will at least expose a + b and d + e to redundancy elimination
    as binary operations.

    For extra points, you can reuse the old statements to build the
    mergetmps, since you shouldn't run out.

    So why don't we do this?

    Because it's expensive, and rarely will help.  Most trees we are
    reassociating have 3 or less ops.  If they have 2 ops, they already
    will be written into a nice single binary op.  If you have 3 ops, a
    single simple check suffices to tell you whether the first two are of the
    same rank.  If so, you know to order it

    mergetmp = op1 + op2
    newstmt = mergetmp + op3

    instead of
    mergetmp = op2 + op3
    newstmt = mergetmp + op1

    If all three are of the same rank, you can't expose them all in a
    single binary operator anyway, so the above is *still* the best you
    can do.

    Thus, this is what we do.  When we have three ops left, we check to see
    what order to put them in, and call it a day.  As a nod to vector sum
    reduction, we check if any of the ops are really a phi node that is a
    destructive update for the associating op, and keep the destructive
    update together for vector sum reduction recognition.  */
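/* For illustration (a hand-written example with hypothetical names,
   not taken from the sources or a testcase): given straight-line code

     e = a - b;
     f = e + c;
     g = f - d;

   step 1 rewrites the subtracts so the whole computation becomes a
   single PLUS_EXPR chain

     e = a + (-b);
     f = e + c;
     g = f + (-d);

   which steps 2-4 can linearize into the operand list {a, -b, c, -d},
   sort by rank, and rewrite in rank order.  */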
/* Statistics */
static struct
{
  int linearized;
  int constants_eliminated;
  int ops_eliminated;
  int rewritten;
  int pows_encountered;
  int pows_created;
} reassociate_stats;

/* Operator, rank pair.  */
typedef struct operand_entry
{
  unsigned int rank;
  int id;
  tree op;
  unsigned int count;
} *operand_entry_t;

static alloc_pool operand_entry_pool;

/* This is used to assign a unique ID to each struct operand_entry
   so that qsort results are identical on different hosts.  */
static int next_operand_entry_id;
/* Starting rank number for a given basic block, so that we can rank
   operations using unmovable instructions in that BB based on the bb
   depth.  */
static long *bb_rank;

/* Operand->rank hashtable.  */
static struct pointer_map_t *operand_rank;

static long get_rank (tree);


/* Bias amount for loop-carried phis.  We want this to be larger than
   the depth of any reassociation tree we can see, but not larger than
   the rank difference between two blocks.  */
#define PHI_LOOP_BIAS (1 << 15)
/* Rank assigned to a phi statement.  If STMT is a loop-carried phi of
   an innermost loop, and the phi has only a single use which is inside
   the loop, then the rank is the block rank of the loop latch plus an
   extra bias for the loop-carried dependence.  This causes expressions
   calculated into an accumulator variable to be independent for each
   iteration of the loop.  If STMT is some other phi, the rank is the
   block rank of its containing block.  */
static long
phi_rank (gimple stmt)
{
  basic_block bb = gimple_bb (stmt);
  struct loop *father = bb->loop_father;
  tree res;
  unsigned i;
  use_operand_p use;
  gimple use_stmt;

  /* We only care about real loops (those with a latch).  */
  if (!father->latch)
    return bb_rank[bb->index];

  /* Interesting phis must be in headers of innermost loops.  */
  if (bb != father->header
      || father->inner)
    return bb_rank[bb->index];

  /* Ignore virtual SSA_NAMEs.  */
  res = gimple_phi_result (stmt);
  if (virtual_operand_p (res))
    return bb_rank[bb->index];

  /* The phi definition must have a single use, and that use must be
     within the loop.  Otherwise this isn't an accumulator pattern.  */
  if (!single_imm_use (res, &use, &use_stmt)
      || gimple_bb (use_stmt)->loop_father != father)
    return bb_rank[bb->index];

  /* Look for phi arguments from within the loop.  If found, bias this phi.  */
  for (i = 0; i < gimple_phi_num_args (stmt); i++)
    {
      tree arg = gimple_phi_arg_def (stmt, i);
      if (TREE_CODE (arg) == SSA_NAME
	  && !SSA_NAME_IS_DEFAULT_DEF (arg))
	{
	  gimple def_stmt = SSA_NAME_DEF_STMT (arg);
	  if (gimple_bb (def_stmt)->loop_father == father)
	    return bb_rank[father->latch->index] + PHI_LOOP_BIAS;
	}
    }

  /* Must be an uninteresting phi.  */
  return bb_rank[bb->index];
}
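/* As an illustration of the bias (hypothetical GIMPLE, simplified):

     <bb 3>:   (loop header == latch of an innermost loop)
     x_1 = PHI <x_0(2), x_2(3)>
     x_2 = x_1 + a_5;

   x_1 is a loop-carried phi whose single use is inside the loop, so
   phi_rank returns bb_rank[latch->index] + PHI_LOOP_BIAS instead of
   the plain block rank, pushing x_1 toward the end of the sorted
   operand list.  */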
/* If EXP is an SSA_NAME defined by a PHI statement that represents a
   loop-carried dependence of an innermost loop, return TRUE; else
   return FALSE.  */
static bool
loop_carried_phi (tree exp)
{
  gimple phi_stmt;
  long block_rank;

  if (TREE_CODE (exp) != SSA_NAME
      || SSA_NAME_IS_DEFAULT_DEF (exp))
    return false;

  phi_stmt = SSA_NAME_DEF_STMT (exp);

  if (gimple_code (SSA_NAME_DEF_STMT (exp)) != GIMPLE_PHI)
    return false;

  /* Non-loop-carried phis have block rank.  Loop-carried phis have
     an additional bias added in.  If this phi doesn't have block rank,
     it's biased and should not be propagated.  */
  block_rank = bb_rank[gimple_bb (phi_stmt)->index];

  if (phi_rank (phi_stmt) != block_rank)
    return true;

  return false;
}
/* Return the maximum of RANK and the rank that should be propagated
   from expression OP.  For most operands, this is just the rank of OP.
   For loop-carried phis, the value is zero to avoid undoing the bias
   in favor of the phi.  */
static long
propagate_rank (long rank, tree op)
{
  long op_rank;

  if (loop_carried_phi (op))
    return rank;

  op_rank = get_rank (op);

  return MAX (rank, op_rank);
}
/* Look up the operand rank structure for expression E.  */
static long
find_operand_rank (tree e)
{
  void **slot = pointer_map_contains (operand_rank, e);
  return slot ? (long) (intptr_t) *slot : -1;
}

/* Insert {E,RANK} into the operand rank hashtable.  */
static void
insert_operand_rank (tree e, long rank)
{
  void **slot;
  gcc_assert (rank > 0);
  slot = pointer_map_insert (operand_rank, e);
  gcc_assert (!*slot);
  *slot = (void *) (intptr_t) rank;
}
/* Given an expression E, return the rank of the expression.  */
static long
get_rank (tree e)
{
  /* Constants have rank 0.  */
  if (is_gimple_min_invariant (e))
    return 0;

  /* SSA_NAME's have the rank of the expression they are the result
     of.
     For globals and uninitialized values, the rank is 0.
     For function arguments, use the pre-setup rank.
     For PHI nodes, stores, asm statements, etc, we use the rank of
     the BB.
     For simple operations, the rank is the maximum rank of any of
     its operands, or the bb_rank, whichever is less.
     I make no claims that this is optimal, however, it gives good
     results.  */

  /* We make an exception to the normal ranking system to break
     dependences of accumulator variables in loops.  Suppose we
     have a simple one-block loop containing:

       x_1 = phi(x_0, x_2)
       b = a + x_1
       c = b + d
       x_2 = c + e

     As shown, each iteration of the calculation into x is fully
     dependent upon the iteration before it.  We would prefer to
     see this in the form:

       x_1 = phi(x_0, x_2)
       b = a + d
       c = b + e
       x_2 = c + x_1

     If the loop is unrolled, the calculations of b and c from
     different iterations can be interleaved.

     To obtain this result during reassociation, we bias the rank
     of the phi definition x_1 upward, when it is recognized as an
     accumulator pattern.  The artificial rank causes it to be
     added last, providing the desired independence.  */

  if (TREE_CODE (e) == SSA_NAME)
    {
      gimple stmt;
      long rank;
      int i, n;
      tree op;

      if (SSA_NAME_IS_DEFAULT_DEF (e))
	return find_operand_rank (e);

      stmt = SSA_NAME_DEF_STMT (e);
      if (gimple_code (stmt) == GIMPLE_PHI)
	return phi_rank (stmt);

      if (!is_gimple_assign (stmt)
	  || gimple_vdef (stmt))
	return bb_rank[gimple_bb (stmt)->index];

      /* If we already have a rank for this expression, use that.  */
      rank = find_operand_rank (e);
      if (rank != -1)
	return rank;

      /* Otherwise, find the maximum rank for the operands.  As an
	 exception, remove the bias from loop-carried phis when propagating
	 the rank so that dependent operations are not also biased.  */
      rank = 0;
      if (gimple_assign_single_p (stmt))
	{
	  tree rhs = gimple_assign_rhs1 (stmt);
	  n = TREE_OPERAND_LENGTH (rhs);
	  if (n == 0)
	    rank = propagate_rank (rank, rhs);
	  else
	    {
	      for (i = 0; i < n; i++)
		{
		  op = TREE_OPERAND (rhs, i);
		  if (op != NULL_TREE)
		    rank = propagate_rank (rank, op);
		}
	    }
	}
      else
	{
	  n = gimple_num_ops (stmt);
	  for (i = 1; i < n; i++)
	    {
	      op = gimple_op (stmt, i);
	      gcc_assert (op);
	      rank = propagate_rank (rank, op);
	    }
	}

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Rank for ");
	  print_generic_expr (dump_file, e, 0);
	  fprintf (dump_file, " is %ld\n", (rank + 1));
	}

      /* Note the rank in the hashtable so we don't recompute it.  */
      insert_operand_rank (e, (rank + 1));
      return (rank + 1);
    }

  /* Globals, etc,  are rank 0 */
  return 0;
}
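/* A small worked example of the rank computation (hypothetical SSA
   names): if a_1 and b_2 have rank 1, then for

     c_3 = a_1 + b_2;   rank = MAX (1, 1) + 1 = 2
     d_4 = c_3 + a_1;   rank = MAX (2, 1) + 1 = 3

   so values deeper in a dependence chain get strictly larger ranks.  */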
DEF_VEC_P(operand_entry_t);
DEF_VEC_ALLOC_P(operand_entry_t, heap);
/* We want integer ones to end up last no matter what, since they are
   the ones we can do the most with.  */
#define INTEGER_CONST_TYPE 1 << 3
#define FLOAT_CONST_TYPE 1 << 2
#define OTHER_CONST_TYPE 1 << 1

/* Classify an invariant tree into integer, float, or other, so that
   we can sort them to be near other constants of the same type.  */
static inline int
constant_type (tree t)
{
  if (INTEGRAL_TYPE_P (TREE_TYPE (t)))
    return INTEGER_CONST_TYPE;
  else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (t)))
    return FLOAT_CONST_TYPE;
  else
    return OTHER_CONST_TYPE;
}
/* qsort comparison function to sort operand entries PA and PB by rank
   so that the sorted array is ordered by rank in decreasing order.  */
static int
sort_by_operand_rank (const void *pa, const void *pb)
{
  const operand_entry_t oea = *(const operand_entry_t *)pa;
  const operand_entry_t oeb = *(const operand_entry_t *)pb;

  /* It's nicer for optimize_expression if constants that are likely
     to fold when added/multiplied//whatever are put next to each
     other.  Since all constants have rank 0, order them by type.  */
  if (oeb->rank == 0 && oea->rank == 0)
    {
      if (constant_type (oeb->op) != constant_type (oea->op))
	return constant_type (oeb->op) - constant_type (oea->op);
      else
	/* To make sorting result stable, we use unique IDs to determine
	   order.  */
	return oeb->id - oea->id;
    }

  /* Lastly, make sure the versions that are the same go next to each
     other.  We use SSA_NAME_VERSION because it's stable.  */
  if ((oeb->rank - oea->rank == 0)
      && TREE_CODE (oea->op) == SSA_NAME
      && TREE_CODE (oeb->op) == SSA_NAME)
    {
      if (SSA_NAME_VERSION (oeb->op) != SSA_NAME_VERSION (oea->op))
	return SSA_NAME_VERSION (oeb->op) - SSA_NAME_VERSION (oea->op);
      else
	return oeb->id - oea->id;
    }

  if (oeb->rank != oea->rank)
    return oeb->rank - oea->rank;
  else
    return oeb->id - oea->id;
}
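/* Example of the resulting order (hypothetical operands): sorting
   { 5 (integer cst, rank 0), x_7 (rank 2), 2.0 (float cst, rank 0),
     y_3 (rank 2) }
   with this comparator yields { x_7, y_3, 5, 2.0 }: descending rank
   first, SSA names of equal rank kept adjacent, and the rank-0
   constants at the end grouped by constant type.  */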
/* Add an operand entry to *OPS for the tree operand OP.  */
static void
add_to_ops_vec (VEC(operand_entry_t, heap) **ops, tree op)
{
  operand_entry_t oe = (operand_entry_t) pool_alloc (operand_entry_pool);

  oe->op = op;
  oe->rank = get_rank (op);
  oe->id = next_operand_entry_id++;
  oe->count = 1;
  VEC_safe_push (operand_entry_t, heap, *ops, oe);
}
/* Add an operand entry to *OPS for the tree operand OP with repeat
   count REPEAT.  */
static void
add_repeat_to_ops_vec (VEC(operand_entry_t, heap) **ops, tree op,
		       HOST_WIDE_INT repeat)
{
  operand_entry_t oe = (operand_entry_t) pool_alloc (operand_entry_pool);

  oe->op = op;
  oe->rank = get_rank (op);
  oe->id = next_operand_entry_id++;
  oe->count = repeat;
  VEC_safe_push (operand_entry_t, heap, *ops, oe);

  reassociate_stats.pows_encountered++;
}
/* Return true if STMT is a reassociable operation containing a binary
   operation with tree code CODE, and is inside LOOP.  */
static bool
is_reassociable_op (gimple stmt, enum tree_code code, struct loop *loop)
{
  basic_block bb = gimple_bb (stmt);

  if (gimple_bb (stmt) == NULL)
    return false;

  if (!flow_bb_inside_loop_p (loop, bb))
    return false;

  if (is_gimple_assign (stmt)
      && gimple_assign_rhs_code (stmt) == code
      && has_single_use (gimple_assign_lhs (stmt)))
    return true;

  return false;
}
/* Given NAME, if NAME is defined by a unary operation OPCODE, return the
   operand of the negate operation.  Otherwise, return NULL.  */
static tree
get_unary_op (tree name, enum tree_code opcode)
{
  gimple stmt = SSA_NAME_DEF_STMT (name);

  if (!is_gimple_assign (stmt))
    return NULL_TREE;

  if (gimple_assign_rhs_code (stmt) == opcode)
    return gimple_assign_rhs1 (stmt);
  return NULL_TREE;
}
/* If CURR and LAST are a pair of ops that OPCODE allows us to
   eliminate through equivalences, do so, remove them from OPS, and
   return true.  Otherwise, return false.  */
static bool
eliminate_duplicate_pair (enum tree_code opcode,
			  VEC (operand_entry_t, heap) **ops,
			  bool *all_done,
			  unsigned int i,
			  operand_entry_t curr,
			  operand_entry_t last)
{
  /* If we have two of the same op, and the opcode is & |, min, or max,
     we can eliminate one of them.
     If we have two of the same op, and the opcode is ^, we can
     eliminate both of them.  */

  if (last && last->op == curr->op)
    {
      switch (opcode)
	{
	case MAX_EXPR:
	case MIN_EXPR:
	case BIT_IOR_EXPR:
	case BIT_AND_EXPR:
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Equivalence: ");
	      print_generic_expr (dump_file, curr->op, 0);
	      fprintf (dump_file, " [&|minmax] ");
	      print_generic_expr (dump_file, last->op, 0);
	      fprintf (dump_file, " -> ");
	      print_generic_stmt (dump_file, last->op, 0);
	    }

	  VEC_ordered_remove (operand_entry_t, *ops, i);
	  reassociate_stats.ops_eliminated ++;

	  return true;

	case BIT_XOR_EXPR:
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Equivalence: ");
	      print_generic_expr (dump_file, curr->op, 0);
	      fprintf (dump_file, " ^ ");
	      print_generic_expr (dump_file, last->op, 0);
	      fprintf (dump_file, " -> nothing\n");
	    }

	  reassociate_stats.ops_eliminated += 2;

	  if (VEC_length (operand_entry_t, *ops) == 2)
	    {
	      VEC_free (operand_entry_t, heap, *ops);
	      *ops = NULL;
	      add_to_ops_vec (ops, build_zero_cst (TREE_TYPE (last->op)));
	      *all_done = true;
	    }
	  else
	    {
	      VEC_ordered_remove (operand_entry_t, *ops, i-1);
	      VEC_ordered_remove (operand_entry_t, *ops, i-1);
	    }

	  return true;

	default:
	  break;
	}
    }
  return false;
}
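/* Example (hypothetical operand lists): for t = x & y & x the two
   copies of x are adjacent after sorting, so one is dropped
   (x & x == x); for t = x ^ y ^ x both copies cancel (x ^ x == 0),
   and if only the two x's remained the whole list collapses to the
   zero constant.  */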
static VEC(tree, heap) *plus_negates;
/* If OPCODE is PLUS_EXPR, and CURR->OP is a negate expression or a bitwise
   not expression, look in OPS for a corresponding positive operation to
   cancel it out.  If we find one, remove the other from OPS, replace
   OPS[CURRINDEX] with 0 or -1, respectively, and return true.  Otherwise,
   return false.  */
static bool
eliminate_plus_minus_pair (enum tree_code opcode,
			   VEC (operand_entry_t, heap) **ops,
			   unsigned int currindex,
			   operand_entry_t curr)
{
  tree negateop;
  tree notop;
  unsigned int i;
  operand_entry_t oe;

  if (opcode != PLUS_EXPR || TREE_CODE (curr->op) != SSA_NAME)
    return false;

  negateop = get_unary_op (curr->op, NEGATE_EXPR);
  notop = get_unary_op (curr->op, BIT_NOT_EXPR);
  if (negateop == NULL_TREE && notop == NULL_TREE)
    return false;

  /* Any non-negated version will have a rank that is one less than
     the current rank.  So once we hit those ranks, if we don't find
     one, we can stop.  */
  for (i = currindex + 1;
       VEC_iterate (operand_entry_t, *ops, i, oe)
       && oe->rank >= curr->rank - 1 ;
       i++)
    {
      if (oe->op == negateop)
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Equivalence: ");
	      print_generic_expr (dump_file, negateop, 0);
	      fprintf (dump_file, " + -");
	      print_generic_expr (dump_file, oe->op, 0);
	      fprintf (dump_file, " -> 0\n");
	    }

	  VEC_ordered_remove (operand_entry_t, *ops, i);
	  add_to_ops_vec (ops, build_zero_cst (TREE_TYPE (oe->op)));
	  VEC_ordered_remove (operand_entry_t, *ops, currindex);
	  reassociate_stats.ops_eliminated ++;

	  return true;
	}
      else if (oe->op == notop)
	{
	  tree op_type = TREE_TYPE (oe->op);

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Equivalence: ");
	      print_generic_expr (dump_file, notop, 0);
	      fprintf (dump_file, " + ~");
	      print_generic_expr (dump_file, oe->op, 0);
	      fprintf (dump_file, " -> -1\n");
	    }

	  VEC_ordered_remove (operand_entry_t, *ops, i);
	  add_to_ops_vec (ops, build_int_cst_type (op_type, -1));
	  VEC_ordered_remove (operand_entry_t, *ops, currindex);
	  reassociate_stats.ops_eliminated ++;

	  return true;
	}
    }

  /* CURR->OP is a negate expr in a plus expr: save it for later
     inspection in repropagate_negates().  */
  if (negateop != NULL_TREE)
    VEC_safe_push (tree, heap, plus_negates, curr->op);

  return false;
}
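/* Example (hypothetical SSA names): with n_5 = -a_3, the PLUS_EXPR
   list {a_3, b_4, n_5} becomes {b_4, 0} since a_3 + -a_3 == 0;
   likewise with m_6 = ~a_3, the list {a_3, b_4, m_6} becomes
   {b_4, -1} since a_3 + ~a_3 == -1.  */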
/* If OPCODE is BIT_IOR_EXPR or BIT_AND_EXPR, and CURR->OP is really a
   bitwise not expression, look in OPS for a corresponding operand to
   cancel it out.  If we find one, remove the other from OPS, replace
   OPS[CURRINDEX] with 0, and return true.  Otherwise, return
   false. */
static bool
eliminate_not_pairs (enum tree_code opcode,
		     VEC (operand_entry_t, heap) **ops,
		     unsigned int currindex,
		     operand_entry_t curr)
{
  tree notop;
  unsigned int i;
  operand_entry_t oe;

  if ((opcode != BIT_IOR_EXPR && opcode != BIT_AND_EXPR)
      || TREE_CODE (curr->op) != SSA_NAME)
    return false;

  notop = get_unary_op (curr->op, BIT_NOT_EXPR);
  if (notop == NULL_TREE)
    return false;

  /* Any non-not version will have a rank that is one less than
     the current rank.  So once we hit those ranks, if we don't find
     one, we can stop.  */
  for (i = currindex + 1;
       VEC_iterate (operand_entry_t, *ops, i, oe)
       && oe->rank >= curr->rank - 1;
       i++)
    {
      if (oe->op == notop)
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Equivalence: ");
	      print_generic_expr (dump_file, notop, 0);
	      if (opcode == BIT_AND_EXPR)
		fprintf (dump_file, " & ~");
	      else if (opcode == BIT_IOR_EXPR)
		fprintf (dump_file, " | ~");
	      print_generic_expr (dump_file, oe->op, 0);
	      if (opcode == BIT_AND_EXPR)
		fprintf (dump_file, " -> 0\n");
	      else if (opcode == BIT_IOR_EXPR)
		fprintf (dump_file, " -> -1\n");
	    }

	  if (opcode == BIT_AND_EXPR)
	    oe->op = build_zero_cst (TREE_TYPE (oe->op));
	  else if (opcode == BIT_IOR_EXPR)
	    oe->op = build_low_bits_mask (TREE_TYPE (oe->op),
					  TYPE_PRECISION (TREE_TYPE (oe->op)));

	  reassociate_stats.ops_eliminated
	    += VEC_length (operand_entry_t, *ops) - 1;
	  VEC_free (operand_entry_t, heap, *ops);
	  *ops = NULL;
	  VEC_safe_push (operand_entry_t, heap, *ops, oe);
	  return true;
	}
    }

  return false;
}
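/* Example (hypothetical SSA names): with m_6 = ~a_3, the list
   {a_3, b_4, m_6} collapses to {0} for BIT_AND_EXPR (a & ~a == 0)
   and to {-1} for BIT_IOR_EXPR (a | ~a == all ones), so the other
   operands are discarded entirely.  */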
/* Use constant value that may be present in OPS to try to eliminate
   operands.  Note that this function is only really used when we've
   eliminated ops for other reasons, or merged constants.  Across
   single statements, fold already does all of this, plus more.  There
   is little point in duplicating logic, so I've only included the
   identities that I could ever construct testcases to trigger.  */
static void
eliminate_using_constants (enum tree_code opcode,
			   VEC(operand_entry_t, heap) **ops)
{
  operand_entry_t oelast = VEC_last (operand_entry_t, *ops);
  tree type = TREE_TYPE (oelast->op);

  if (oelast->rank == 0
      && (INTEGRAL_TYPE_P (type) || FLOAT_TYPE_P (type)))
    {
      switch (opcode)
	{
	case BIT_AND_EXPR:
	  if (integer_zerop (oelast->op))
	    {
	      if (VEC_length (operand_entry_t, *ops) != 1)
		{
		  if (dump_file && (dump_flags & TDF_DETAILS))
		    fprintf (dump_file, "Found & 0, removing all other ops\n");

		  reassociate_stats.ops_eliminated
		    += VEC_length (operand_entry_t, *ops) - 1;

		  VEC_free (operand_entry_t, heap, *ops);
		  *ops = NULL;
		  VEC_safe_push (operand_entry_t, heap, *ops, oelast);
		  return;
		}
	    }
	  else if (integer_all_onesp (oelast->op))
	    {
	      if (VEC_length (operand_entry_t, *ops) != 1)
		{
		  if (dump_file && (dump_flags & TDF_DETAILS))
		    fprintf (dump_file, "Found & -1, removing\n");
		  VEC_pop (operand_entry_t, *ops);
		  reassociate_stats.ops_eliminated++;
		}
	    }
	  break;
	case BIT_IOR_EXPR:
	  if (integer_all_onesp (oelast->op))
	    {
	      if (VEC_length (operand_entry_t, *ops) != 1)
		{
		  if (dump_file && (dump_flags & TDF_DETAILS))
		    fprintf (dump_file, "Found | -1, removing all other ops\n");

		  reassociate_stats.ops_eliminated
		    += VEC_length (operand_entry_t, *ops) - 1;

		  VEC_free (operand_entry_t, heap, *ops);
		  *ops = NULL;
		  VEC_safe_push (operand_entry_t, heap, *ops, oelast);
		  return;
		}
	    }
	  else if (integer_zerop (oelast->op))
	    {
	      if (VEC_length (operand_entry_t, *ops) != 1)
		{
		  if (dump_file && (dump_flags & TDF_DETAILS))
		    fprintf (dump_file, "Found | 0, removing\n");
		  VEC_pop (operand_entry_t, *ops);
		  reassociate_stats.ops_eliminated++;
		}
	    }
	  break;
	case MULT_EXPR:
	  if (integer_zerop (oelast->op)
	      || (FLOAT_TYPE_P (type)
		  && !HONOR_NANS (TYPE_MODE (type))
		  && !HONOR_SIGNED_ZEROS (TYPE_MODE (type))
		  && real_zerop (oelast->op)))
	    {
	      if (VEC_length (operand_entry_t, *ops) != 1)
		{
		  if (dump_file && (dump_flags & TDF_DETAILS))
		    fprintf (dump_file, "Found * 0, removing all other ops\n");

		  reassociate_stats.ops_eliminated
		    += VEC_length (operand_entry_t, *ops) - 1;
		  VEC_free (operand_entry_t, heap, *ops);
		  *ops = NULL;
		  VEC_safe_push (operand_entry_t, heap, *ops, oelast);
		  return;
		}
	    }
	  else if (integer_onep (oelast->op)
		   || (FLOAT_TYPE_P (type)
		       && !HONOR_SNANS (TYPE_MODE (type))
		       && real_onep (oelast->op)))
	    {
	      if (VEC_length (operand_entry_t, *ops) != 1)
		{
		  if (dump_file && (dump_flags & TDF_DETAILS))
		    fprintf (dump_file, "Found * 1, removing\n");
		  VEC_pop (operand_entry_t, *ops);
		  reassociate_stats.ops_eliminated++;
		  return;
		}
	    }
	  break;
	case BIT_XOR_EXPR:
	case PLUS_EXPR:
	case MINUS_EXPR:
	  if (integer_zerop (oelast->op)
	      || (FLOAT_TYPE_P (type)
		  && (opcode == PLUS_EXPR || opcode == MINUS_EXPR)
		  && fold_real_zero_addition_p (type, oelast->op,
						opcode == MINUS_EXPR)))
	    {
	      if (VEC_length (operand_entry_t, *ops) != 1)
		{
		  if (dump_file && (dump_flags & TDF_DETAILS))
		    fprintf (dump_file, "Found [|^+] 0, removing\n");
		  VEC_pop (operand_entry_t, *ops);
		  reassociate_stats.ops_eliminated++;
		  return;
		}
	    }
	  break;
	default:
	  break;
	}
    }
}
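/* Example (hypothetical operand list): after constant merging turns
   {x_2, 3, 5} for BIT_IOR_EXPR into {x_2, 7}, this function handles
   the special constants: a trailing -1 would collapse the whole list
   to {-1} (x | -1 == -1), while a trailing 0 is simply dropped
   ({x_2, 0} -> {x_2}).  */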
static void linearize_expr_tree (VEC(operand_entry_t, heap) **, gimple,
				 bool, bool);
/* Structure for tracking and counting operands.  */
typedef struct oecount_s {
  int cnt;
  int id;
  enum tree_code oecode;
  tree op;
} oecount;

DEF_VEC_O(oecount);
DEF_VEC_ALLOC_O(oecount,heap);

/* The heap for the oecount hashtable and the sorted list of operands.  */
static VEC (oecount, heap) *cvec;
/* Hash function for oecount.  */
static hashval_t
oecount_hash (const void *p)
{
  const oecount *c = &VEC_index (oecount, cvec, (size_t)p - 42);
  return htab_hash_pointer (c->op) ^ (hashval_t)c->oecode;
}
/* Comparison function for oecount.  */
static int
oecount_eq (const void *p1, const void *p2)
{
  const oecount *c1 = &VEC_index (oecount, cvec, (size_t)p1 - 42);
  const oecount *c2 = &VEC_index (oecount, cvec, (size_t)p2 - 42);
  return (c1->oecode == c2->oecode
	  && c1->op == c2->op);
}
/* Comparison function for qsort sorting oecount elements by count.  */
static int
oecount_cmp (const void *p1, const void *p2)
{
  const oecount *c1 = (const oecount *)p1;
  const oecount *c2 = (const oecount *)p2;
  if (c1->cnt != c2->cnt)
    return c1->cnt - c2->cnt;
  else
    /* If counts are identical, use unique IDs to stabilize qsort.  */
    return c1->id - c2->id;
}
/* Return TRUE iff STMT represents a builtin call that raises OP
   to some exponent.  */
static bool
stmt_is_power_of_op (gimple stmt, tree op)
{
  tree fndecl;

  if (!is_gimple_call (stmt))
    return false;

  fndecl = gimple_call_fndecl (stmt);

  if (!fndecl
      || DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_NORMAL)
    return false;

  switch (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt)))
    {
    CASE_FLT_FN (BUILT_IN_POW):
    CASE_FLT_FN (BUILT_IN_POWI):
      return (operand_equal_p (gimple_call_arg (stmt, 0), op, 0));

    default:
      return false;
    }
}
/* Given STMT which is a __builtin_pow* call, decrement its exponent
   in place and return the result.  Assumes that stmt_is_power_of_op
   was previously called for STMT and returned TRUE.  */

static HOST_WIDE_INT
decrement_power (gimple stmt)
{
  REAL_VALUE_TYPE c, cint;
  HOST_WIDE_INT power;
  tree arg1;

  switch (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt)))
    {
    CASE_FLT_FN (BUILT_IN_POW):
      arg1 = gimple_call_arg (stmt, 1);
      c = TREE_REAL_CST (arg1);
      power = real_to_integer (&c) - 1;
      real_from_integer (&cint, VOIDmode, power, 0, 0);
      gimple_call_set_arg (stmt, 1, build_real (TREE_TYPE (arg1), cint));
      return power;

    CASE_FLT_FN (BUILT_IN_POWI):
      arg1 = gimple_call_arg (stmt, 1);
      power = TREE_INT_CST_LOW (arg1) - 1;
      gimple_call_set_arg (stmt, 1, build_int_cst (TREE_TYPE (arg1), power));
      return power;

    default:
      gcc_unreachable ();
    }
}
/* Find the single immediate use of STMT's LHS, and replace it
   with OP.  Remove STMT.  If STMT's LHS is the same as *DEF,
   replace *DEF with OP as well.  */
static void
propagate_op_to_single_use (tree op, gimple stmt, tree *def)
{
  tree lhs;
  gimple use_stmt;
  use_operand_p use;
  gimple_stmt_iterator gsi;

  if (is_gimple_call (stmt))
    lhs = gimple_call_lhs (stmt);
  else
    lhs = gimple_assign_lhs (stmt);

  gcc_assert (has_single_use (lhs));
  single_imm_use (lhs, &use, &use_stmt);
  if (lhs == *def)
    *def = op;
  SET_USE (use, op);
  if (TREE_CODE (op) != SSA_NAME)
    update_stmt (use_stmt);
  gsi = gsi_for_stmt (stmt);
  gsi_remove (&gsi, true);
  release_defs (stmt);

  if (is_gimple_call (stmt))
    unlink_stmt_vdef (stmt);
}
/* Walks the linear chain with result *DEF searching for an operation
   with operand OP and code OPCODE removing that from the chain.  *DEF
   is updated if there is only one operand but no operation left.  */
static void
zero_one_operation (tree *def, enum tree_code opcode, tree op)
{
  gimple stmt = SSA_NAME_DEF_STMT (*def);

  do
    {
      tree name;

      if (opcode == MULT_EXPR
	  && stmt_is_power_of_op (stmt, op))
	{
	  if (decrement_power (stmt) == 1)
	    propagate_op_to_single_use (op, stmt, def);
	  return;
	}

      name = gimple_assign_rhs1 (stmt);

      /* If this is the operation we look for and one of the operands
	 is ours simply propagate the other operand into the stmts
	 single use.  */
      if (gimple_assign_rhs_code (stmt) == opcode
	  && (name == op
	      || gimple_assign_rhs2 (stmt) == op))
	{
	  if (name == op)
	    name = gimple_assign_rhs2 (stmt);
	  propagate_op_to_single_use (name, stmt, def);
	  return;
	}

      /* We might have a multiply of two __builtin_pow* calls, and
	 the operand might be hiding in the rightmost one.  */
      if (opcode == MULT_EXPR
	  && gimple_assign_rhs_code (stmt) == opcode
	  && TREE_CODE (gimple_assign_rhs2 (stmt)) == SSA_NAME)
	{
	  gimple stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs2 (stmt));
	  if (stmt_is_power_of_op (stmt2, op))
	    {
	      if (decrement_power (stmt2) == 1)
		propagate_op_to_single_use (op, stmt2, def);
	      return;
	    }
	}

      /* Continue walking the chain.  */
      gcc_assert (name != op
		  && TREE_CODE (name) == SSA_NAME);
      stmt = SSA_NAME_DEF_STMT (name);
    }
  while (1);
}
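/* Example (hypothetical SSA names): when un-distributing a_1 out of
   a_1*b_2 + a_1*c_3, this routine is asked to remove one factor a_1
   (OP) from each MULT_EXPR chain.  For the chain t_5 = a_1 * b_2 it
   propagates the remaining operand b_2 into t_5's single use; the
   caller then rebuilds (b + c) and re-applies the multiplication.  */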
/* Builds one statement performing OP1 OPCODE OP2 using TMPVAR for
   the result.  Places the statement after the definition of either
   OP1 or OP2.  Returns the new statement.  */

static gimple
build_and_add_sum (tree type, tree op1, tree op2, enum tree_code opcode)
{
  gimple op1def = NULL, op2def = NULL;
  gimple_stmt_iterator gsi;
  tree op;
  gimple sum;

  /* Create the addition statement.  */
  op = make_ssa_name (type, NULL);
  sum = gimple_build_assign_with_ops (opcode, op, op1, op2);

  /* Find an insertion place and insert.  */
  if (TREE_CODE (op1) == SSA_NAME)
    op1def = SSA_NAME_DEF_STMT (op1);
  if (TREE_CODE (op2) == SSA_NAME)
    op2def = SSA_NAME_DEF_STMT (op2);
  if ((!op1def || gimple_nop_p (op1def))
      && (!op2def || gimple_nop_p (op2def)))
    {
      gsi = gsi_after_labels (single_succ (ENTRY_BLOCK_PTR));
      gsi_insert_before (&gsi, sum, GSI_NEW_STMT);
    }
  else if ((!op1def || gimple_nop_p (op1def))
	   || (op2def && !gimple_nop_p (op2def)
	       && stmt_dominates_stmt_p (op1def, op2def)))
    {
      if (gimple_code (op2def) == GIMPLE_PHI)
	{
	  gsi = gsi_after_labels (gimple_bb (op2def));
	  gsi_insert_before (&gsi, sum, GSI_NEW_STMT);
	}
      else
	{
	  if (!stmt_ends_bb_p (op2def))
	    {
	      gsi = gsi_for_stmt (op2def);
	      gsi_insert_after (&gsi, sum, GSI_NEW_STMT);
	    }
	  else
	    {
	      edge e;
	      edge_iterator ei;

	      FOR_EACH_EDGE (e, ei, gimple_bb (op2def)->succs)
		if (e->flags & EDGE_FALLTHRU)
		  gsi_insert_on_edge_immediate (e, sum);
	    }
	}
    }
  else
    {
      if (gimple_code (op1def) == GIMPLE_PHI)
	{
	  gsi = gsi_after_labels (gimple_bb (op1def));
	  gsi_insert_before (&gsi, sum, GSI_NEW_STMT);
	}
      else
	{
	  if (!stmt_ends_bb_p (op1def))
	    {
	      gsi = gsi_for_stmt (op1def);
	      gsi_insert_after (&gsi, sum, GSI_NEW_STMT);
	    }
	  else
	    {
	      edge e;
	      edge_iterator ei;

	      FOR_EACH_EDGE (e, ei, gimple_bb (op1def)->succs)
		if (e->flags & EDGE_FALLTHRU)
		  gsi_insert_on_edge_immediate (e, sum);
	    }
	}
    }

  return sum;
}
/* Perform un-distribution of divisions and multiplications.
   A * X + B * X is transformed into (A + B) * X and A / X + B / X
   to (A + B) / X for real X.

   The algorithm is organized as follows.

    - First we walk the addition chain *OPS looking for summands that
      are defined by a multiplication or a real division.  This results
      in the candidates bitmap with relevant indices into *OPS.

    - Second we build the chains of multiplications or divisions for
      these candidates, counting the number of occurrences of (operand, code)
      pairs in all of the candidates chains.

    - Third we sort the (operand, code) pairs by number of occurrence and
      process them starting with the pair with the most uses.

      * For each such pair we walk the candidates again to build a
	second candidate bitmap noting all multiplication/division chains
	that have at least one occurrence of (operand, code).

      * We build an alternate addition chain only covering these
	candidates with one (operand, code) operation removed from their
	multiplication/division chain.

      * The first candidate gets replaced by the alternate addition chain
	multiplied/divided by the operand.

      * All candidate chains get disabled for further processing and
	processing of (operand, code) pairs continues.

   The alternate addition chains built are re-processed by the main
   reassociation algorithm which allows optimizing a * x * y + b * y * x
   to (a + b) * x * y in one invocation of the reassociation pass.  */

static bool
undistribute_ops_list (enum tree_code opcode,
		       VEC (operand_entry_t, heap) **ops, struct loop *loop)
{
  unsigned int length = VEC_length (operand_entry_t, *ops);
  operand_entry_t oe1;
  unsigned i, j;
  sbitmap candidates, candidates2;
  unsigned nr_candidates, nr_candidates2;
  sbitmap_iterator sbi0;
  VEC (operand_entry_t, heap) **subops;
  htab_t ctable;
  bool changed = false;
  int next_oecount_id = 0;

  if (length <= 1
      || opcode != PLUS_EXPR)
    return false;

  /* Build a list of candidates to process.  */
  candidates = sbitmap_alloc (length);
  bitmap_clear (candidates);
  nr_candidates = 0;
  FOR_EACH_VEC_ELT (operand_entry_t, *ops, i, oe1)
    {
      enum tree_code dcode;
      gimple oe1def;

      if (TREE_CODE (oe1->op) != SSA_NAME)
	continue;
      oe1def = SSA_NAME_DEF_STMT (oe1->op);
      if (!is_gimple_assign (oe1def))
	continue;
      dcode = gimple_assign_rhs_code (oe1def);
      if ((dcode != MULT_EXPR
	   && dcode != RDIV_EXPR)
	  || !is_reassociable_op (oe1def, dcode, loop))
	continue;

      bitmap_set_bit (candidates, i);
      nr_candidates++;
    }

  if (nr_candidates < 2)
    {
      sbitmap_free (candidates);
      return false;
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "searching for un-distribute opportunities ");
      print_generic_expr (dump_file,
	VEC_index (operand_entry_t, *ops,
		   bitmap_first_set_bit (candidates))->op, 0);
      fprintf (dump_file, " %d\n", nr_candidates);
    }

  /* Build linearized sub-operand lists and the counting table.  */
  cvec = NULL;
  ctable = htab_create (15, oecount_hash, oecount_eq, NULL);
  subops = XCNEWVEC (VEC (operand_entry_t, heap) *,
		     VEC_length (operand_entry_t, *ops));
  EXECUTE_IF_SET_IN_BITMAP (candidates, 0, i, sbi0)
    {
      gimple oedef;
      enum tree_code oecode;
      unsigned j;

      oedef = SSA_NAME_DEF_STMT (VEC_index (operand_entry_t, *ops, i)->op);
      oecode = gimple_assign_rhs_code (oedef);
      linearize_expr_tree (&subops[i], oedef,
			   associative_tree_code (oecode), false);

      FOR_EACH_VEC_ELT (operand_entry_t, subops[i], j, oe1)
	{
	  oecount c;
	  void **slot;
	  size_t idx;
	  c.oecode = oecode;
	  c.cnt = 1;
	  c.id = next_oecount_id++;
	  c.op = oe1->op;
	  VEC_safe_push (oecount, heap, cvec, c);
	  idx = VEC_length (oecount, cvec) + 41;
	  slot = htab_find_slot (ctable, (void *)idx, INSERT);
	  if (!*slot)
	    {
	      *slot = (void *)idx;
	    }
	  else
	    {
	      VEC_pop (oecount, cvec);
	      VEC_index (oecount, cvec, (size_t)*slot - 42).cnt++;
	    }
	}
    }
  htab_delete (ctable);

  /* Sort the counting table.  */
  VEC_qsort (oecount, cvec, oecount_cmp);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      oecount *c;
      fprintf (dump_file, "Candidates:\n");
      FOR_EACH_VEC_ELT (oecount, cvec, j, c)
	{
	  fprintf (dump_file, "  %u %s: ", c->cnt,
		   c->oecode == MULT_EXPR
		   ? "*" : c->oecode == RDIV_EXPR ? "/" : "?");
	  print_generic_expr (dump_file, c->op, 0);
	  fprintf (dump_file, "\n");
	}
    }

  /* Process the (operand, code) pairs in order of most occurence.  */
  candidates2 = sbitmap_alloc (length);
  while (!VEC_empty (oecount, cvec))
    {
      oecount *c = &VEC_last (oecount, cvec);
      if (c->cnt < 2)
	break;

      /* Now collect the operands in the outer chain that contain
	 the common operand in their inner chain.  */
      bitmap_clear (candidates2);
      nr_candidates2 = 0;
      EXECUTE_IF_SET_IN_BITMAP (candidates, 0, i, sbi0)
	{
	  gimple oedef;
	  enum tree_code oecode;
	  unsigned j;
	  tree op = VEC_index (operand_entry_t, *ops, i)->op;

	  /* If we undistributed in this chain already this may be
	     a constant.  */
	  if (TREE_CODE (op) != SSA_NAME)
	    continue;

	  oedef = SSA_NAME_DEF_STMT (op);
	  oecode = gimple_assign_rhs_code (oedef);
	  if (oecode != c->oecode)
	    continue;

	  FOR_EACH_VEC_ELT (operand_entry_t, subops[i], j, oe1)
	    {
	      if (oe1->op == c->op)
		{
		  bitmap_set_bit (candidates2, i);
		  ++nr_candidates2;
		  break;
		}
	    }
	}

      if (nr_candidates2 >= 2)
	{
	  operand_entry_t oe1, oe2;
	  gimple prod;
	  int first = bitmap_first_set_bit (candidates2);

	  /* Build the new addition chain.  */
	  oe1 = VEC_index (operand_entry_t, *ops, first);
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Building (");
	      print_generic_expr (dump_file, oe1->op, 0);
	    }
	  zero_one_operation (&oe1->op, c->oecode, c->op);
	  EXECUTE_IF_SET_IN_BITMAP (candidates2, first+1, i, sbi0)
	    {
	      gimple sum;
	      oe2 = VEC_index (operand_entry_t, *ops, i);
	      if (dump_file && (dump_flags & TDF_DETAILS))
		{
		  fprintf (dump_file, " + ");
		  print_generic_expr (dump_file, oe2->op, 0);
		}
	      zero_one_operation (&oe2->op, c->oecode, c->op);
	      sum = build_and_add_sum (TREE_TYPE (oe1->op),
				       oe1->op, oe2->op, opcode);
	      oe2->op = build_zero_cst (TREE_TYPE (oe2->op));
	      oe2->rank = 0;
	      oe1->op = gimple_get_lhs (sum);
	    }

	  /* Apply the multiplication/division.  */
	  prod = build_and_add_sum (TREE_TYPE (oe1->op),
				    oe1->op, c->op, c->oecode);
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, ") %s ", c->oecode == MULT_EXPR ? "*" : "/");
	      print_generic_expr (dump_file, c->op, 0);
	      fprintf (dump_file, "\n");
	    }

	  /* Record it in the addition chain and disable further
	     undistribution with this op.  */
	  oe1->op = gimple_assign_lhs (prod);
	  oe1->rank = get_rank (oe1->op);
	  VEC_free (operand_entry_t, heap, subops[first]);

	  changed = true;
	}

      VEC_pop (oecount, cvec);
    }

  for (i = 0; i < VEC_length (operand_entry_t, *ops); ++i)
    VEC_free (operand_entry_t, heap, subops[i]);
  free (subops);
  VEC_free (oecount, heap, cvec);
  sbitmap_free (candidates);
  sbitmap_free (candidates2);

  return changed;
}
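/* Worked example of the above (hypothetical SSA names, a sketch of
   the transformation rather than literal pass output): for

     x_5 = a_1 * c_3;  y_6 = b_2 * c_3;  s_7 = x_5 + y_6;

   the pair (c_3, MULT_EXPR) occurs in both chains, so the addition
   chain is rewritten to

     t_8 = a_1 + b_2;  s_9 = t_8 * c_3;

   with the displaced addition operands replaced by zero constants
   that later constant handling removes.  */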
/* If OPCODE is BIT_IOR_EXPR or BIT_AND_EXPR and CURR is a comparison
   expression, examine the other OPS to see if any of them are comparisons
   of the same values, which we may be able to combine or eliminate.
   For example, we can rewrite (a < b) | (a == b) as (a <= b).  */

static bool
eliminate_redundant_comparison (enum tree_code opcode,
				VEC (operand_entry_t, heap) **ops,
				unsigned int currindex,
				operand_entry_t curr)
{
  tree op1, op2;
  enum tree_code lcode, rcode;
  gimple def1, def2;
  int i;
  operand_entry_t oe;

  if (opcode != BIT_IOR_EXPR && opcode != BIT_AND_EXPR)
    return false;

  /* Check that CURR is a comparison.  */
  if (TREE_CODE (curr->op) != SSA_NAME)
    return false;
  def1 = SSA_NAME_DEF_STMT (curr->op);
  if (!is_gimple_assign (def1))
    return false;
  lcode = gimple_assign_rhs_code (def1);
  if (TREE_CODE_CLASS (lcode) != tcc_comparison)
    return false;
  op1 = gimple_assign_rhs1 (def1);
  op2 = gimple_assign_rhs2 (def1);

  /* Now look for a similar comparison in the remaining OPS.  */
  for (i = currindex + 1;
       VEC_iterate (operand_entry_t, *ops, i, oe);
       i++)
    {
      tree t;

      if (TREE_CODE (oe->op) != SSA_NAME)
	continue;
      def2 = SSA_NAME_DEF_STMT (oe->op);
      if (!is_gimple_assign (def2))
	continue;
      rcode = gimple_assign_rhs_code (def2);
      if (TREE_CODE_CLASS (rcode) != tcc_comparison)
	continue;

      /* If we got here, we have a match.  See if we can combine the
	 two comparisons.  */
      if (opcode == BIT_IOR_EXPR)
	t = maybe_fold_or_comparisons (lcode, op1, op2,
				       rcode, gimple_assign_rhs1 (def2),
				       gimple_assign_rhs2 (def2));
      else
	t = maybe_fold_and_comparisons (lcode, op1, op2,
					rcode, gimple_assign_rhs1 (def2),
					gimple_assign_rhs2 (def2));
      if (!t)
	continue;

      /* maybe_fold_and_comparisons and maybe_fold_or_comparisons
	 always give us a boolean_type_node value back.  If the original
	 BIT_AND_EXPR or BIT_IOR_EXPR was of a wider integer type,
	 we need to convert.  */
      if (!useless_type_conversion_p (TREE_TYPE (curr->op), TREE_TYPE (t)))
	t = fold_convert (TREE_TYPE (curr->op), t);

      if (TREE_CODE (t) != INTEGER_CST
	  && !operand_equal_p (t, curr->op, 0))
	{
	  enum tree_code subcode;
	  tree newop1, newop2;
	  if (!COMPARISON_CLASS_P (t))
	    continue;
	  extract_ops_from_tree (t, &subcode, &newop1, &newop2);
	  STRIP_USELESS_TYPE_CONVERSION (newop1);
	  STRIP_USELESS_TYPE_CONVERSION (newop2);
	  if (!is_gimple_val (newop1) || !is_gimple_val (newop2))
	    continue;
	}

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Equivalence: ");
	  print_generic_expr (dump_file, curr->op, 0);
	  fprintf (dump_file, " %s ", op_symbol_code (opcode));
	  print_generic_expr (dump_file, oe->op, 0);
	  fprintf (dump_file, " -> ");
	  print_generic_expr (dump_file, t, 0);
	  fprintf (dump_file, "\n");
	}

      /* Now we can delete oe, as it has been subsumed by the new combined
	 expression t.  */
      VEC_ordered_remove (operand_entry_t, *ops, i);
      reassociate_stats.ops_eliminated ++;

      /* If t is the same as curr->op, we're done.  Otherwise we must
	 replace curr->op with t.  Special case is if we got a constant
	 back, in which case we add it to the end instead of in place of
	 the current entry.  */
      if (TREE_CODE (t) == INTEGER_CST)
	{
	  VEC_ordered_remove (operand_entry_t, *ops, currindex);
	  add_to_ops_vec (ops, t);
	}
      else if (!operand_equal_p (t, curr->op, 0))
	{
	  gimple sum;
	  enum tree_code subcode;
	  tree newop1;
	  tree newop2;
	  gcc_assert (COMPARISON_CLASS_P (t));
	  extract_ops_from_tree (t, &subcode, &newop1, &newop2);
	  STRIP_USELESS_TYPE_CONVERSION (newop1);
	  STRIP_USELESS_TYPE_CONVERSION (newop2);
	  gcc_checking_assert (is_gimple_val (newop1)
			       && is_gimple_val (newop2));
	  sum = build_and_add_sum (TREE_TYPE (t), newop1, newop2, subcode);
	  curr->op = gimple_get_lhs (sum);
	}
      return true;
    }

  return false;
}
/* Perform various identities and other optimizations on the list of
   operand entries, stored in OPS.  The tree code for the binary
   operation between all the operands is OPCODE.  */

static void
optimize_ops_list (enum tree_code opcode,
		   VEC (operand_entry_t, heap) **ops)
{
  unsigned int length = VEC_length (operand_entry_t, *ops);
  unsigned int i;
  operand_entry_t oe;
  operand_entry_t oelast = NULL;
  bool iterate = false;

  if (length == 1)
    return;

  oelast = VEC_last (operand_entry_t, *ops);

  /* If the last two are constants, pop the constants off, merge them
     and try the next two.  */
  if (oelast->rank == 0 && is_gimple_min_invariant (oelast->op))
    {
      operand_entry_t oelm1 = VEC_index (operand_entry_t, *ops, length - 2);

      if (oelm1->rank == 0
	  && is_gimple_min_invariant (oelm1->op)
	  && useless_type_conversion_p (TREE_TYPE (oelm1->op),
					TREE_TYPE (oelast->op)))
	{
	  tree folded = fold_binary (opcode, TREE_TYPE (oelm1->op),
				     oelm1->op, oelast->op);

	  if (folded && is_gimple_min_invariant (folded))
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, "Merging constants\n");

	      VEC_pop (operand_entry_t, *ops);
	      VEC_pop (operand_entry_t, *ops);

	      add_to_ops_vec (ops, folded);
	      reassociate_stats.constants_eliminated++;

	      optimize_ops_list (opcode, ops);
	      return;
	    }
	}
    }

  eliminate_using_constants (opcode, ops);
  oelast = NULL;

  for (i = 0; VEC_iterate (operand_entry_t, *ops, i, oe);)
    {
      bool done = false;

      if (eliminate_not_pairs (opcode, ops, i, oe))
	return;
      if (eliminate_duplicate_pair (opcode, ops, &done, i, oe, oelast)
	  || (!done && eliminate_plus_minus_pair (opcode, ops, i, oe))
	  || (!done && eliminate_redundant_comparison (opcode, ops, i, oe)))
	{
	  if (done)
	    return;
	  iterate = true;
	  oelast = NULL;
	  continue;
	}
      oelast = oe;
      i++;
    }

  length = VEC_length (operand_entry_t, *ops);
  oelast = VEC_last (operand_entry_t, *ops);

  if (iterate)
    optimize_ops_list (opcode, ops);
}
/* The following functions are subroutines to optimize_range_tests and allow
   it to try to change a logical combination of comparisons into a range
   test.

   For example, both
	X == 2 || X == 5 || X == 3 || X == 4
   and
	X >= 2 && X <= 5
   are converted to
	(unsigned) (X - 2) <= 3

   For more information see comments above fold_test_range in fold-const.c,
   this implementation is for GIMPLE.  */
/* Structure describing a single range test.  */

struct range_entry
{
  tree exp;
  tree low;
  tree high;
  bool in_p;
  bool strict_overflow_p;
  unsigned int idx, next;
};

/* This is similar to make_range in fold-const.c, but on top of
   GIMPLE instead of trees.  If EXP is non-NULL, it should be
   an SSA_NAME and STMT argument is ignored, otherwise STMT
   argument should be a GIMPLE_COND.  */

static void
init_range_entry (struct range_entry *r, tree exp, gimple stmt)
{
  int in_p;
  tree low, high;
  bool is_bool, strict_overflow_p;

  r->exp = NULL_TREE;
  r->in_p = false;
  r->strict_overflow_p = false;
  r->low = NULL_TREE;
  r->high = NULL_TREE;
  if (exp != NULL_TREE
      && (TREE_CODE (exp) != SSA_NAME || !INTEGRAL_TYPE_P (TREE_TYPE (exp))))
    return;

  /* Start with simply saying "EXP != 0" and then look at the code of EXP
     and see if we can refine the range.  Some of the cases below may not
     happen, but it doesn't seem worth worrying about this.  We "continue"
     the outer loop when we've changed something; otherwise we "break"
     the switch, which will "break" the while.  */
  low = exp ? build_int_cst (TREE_TYPE (exp), 0) : boolean_false_node;
  high = low;
  in_p = 0;
  strict_overflow_p = false;
  is_bool = false;
  if (exp == NULL_TREE)
    is_bool = true;
  else if (TYPE_PRECISION (TREE_TYPE (exp)) == 1)
    {
      if (TYPE_UNSIGNED (TREE_TYPE (exp)))
	is_bool = true;
      else
	return;
    }
  else if (TREE_CODE (TREE_TYPE (exp)) == BOOLEAN_TYPE)
    is_bool = true;

  while (1)
    {
      enum tree_code code;
      tree arg0, arg1, exp_type;
      tree nexp;
      location_t loc;

      if (exp != NULL_TREE)
	{
	  if (TREE_CODE (exp) != SSA_NAME)
	    break;

	  stmt = SSA_NAME_DEF_STMT (exp);
	  if (!is_gimple_assign (stmt))
	    break;

	  code = gimple_assign_rhs_code (stmt);
	  arg0 = gimple_assign_rhs1 (stmt);
	  arg1 = gimple_assign_rhs2 (stmt);
	  exp_type = TREE_TYPE (exp);
	}
      else
	{
	  code = gimple_cond_code (stmt);
	  arg0 = gimple_cond_lhs (stmt);
	  arg1 = gimple_cond_rhs (stmt);
	  exp_type = boolean_type_node;
	}

      if (TREE_CODE (arg0) != SSA_NAME)
	break;
      loc = gimple_location (stmt);
      switch (code)
	{
	case BIT_NOT_EXPR:
	  if (TREE_CODE (TREE_TYPE (exp)) == BOOLEAN_TYPE)
	    {
	      in_p = !in_p;
	      exp = arg0;
	      continue;
	    }
	  break;
	case SSA_NAME:
	  exp = arg0;
	  continue;
	CASE_CONVERT:
	  if (is_bool)
	    goto do_default;
	  if (TYPE_PRECISION (TREE_TYPE (arg0)) == 1)
	    {
	      if (TYPE_UNSIGNED (TREE_TYPE (arg0)))
		is_bool = true;
	      else
		return;
	    }
	  else if (TREE_CODE (TREE_TYPE (arg0)) == BOOLEAN_TYPE)
	    is_bool = true;
	  goto do_default;
	case EQ_EXPR:
	case NE_EXPR:
	case LT_EXPR:
	case LE_EXPR:
	case GE_EXPR:
	case GT_EXPR:
	  is_bool = true;
	  /* FALLTHRU */
	default:
	  if (!is_bool)
	    return;
	do_default:
	  nexp = make_range_step (loc, code, arg0, arg1, exp_type,
				  &low, &high, &in_p,
				  &strict_overflow_p);
	  if (nexp != NULL_TREE)
	    {
	      exp = nexp;
	      gcc_assert (TREE_CODE (exp) == SSA_NAME);
	      continue;
	    }
	  break;
	}
      break;
    }

  r->exp = exp;
  r->in_p = in_p;
  r->low = low;
  r->high = high;
  r->strict_overflow_p = strict_overflow_p;
}
/* Comparison function for qsort.  Sort entries
   without SSA_NAME exp first, then with SSA_NAMEs sorted
   by increasing SSA_NAME_VERSION, and for the same SSA_NAMEs
   by increasing ->low and if ->low is the same, by increasing
   ->high.  ->low == NULL_TREE means minimum, ->high == NULL_TREE
   maximum.  */

static int
range_entry_cmp (const void *a, const void *b)
{
  const struct range_entry *p = (const struct range_entry *) a;
  const struct range_entry *q = (const struct range_entry *) b;

  if (p->exp != NULL_TREE && TREE_CODE (p->exp) == SSA_NAME)
    {
      if (q->exp != NULL_TREE && TREE_CODE (q->exp) == SSA_NAME)
	{
	  /* Group range_entries for the same SSA_NAME together.  */
	  if (SSA_NAME_VERSION (p->exp) < SSA_NAME_VERSION (q->exp))
	    return -1;
	  else if (SSA_NAME_VERSION (p->exp) > SSA_NAME_VERSION (q->exp))
	    return 1;
	  /* If ->low is different, NULL low goes first, then by
	     ascending low.  */
	  if (p->low != NULL_TREE)
	    {
	      if (q->low != NULL_TREE)
		{
		  tree tem = fold_binary (LT_EXPR, boolean_type_node,
					  p->low, q->low);
		  if (tem && integer_onep (tem))
		    return -1;
		  tem = fold_binary (GT_EXPR, boolean_type_node,
				     p->low, q->low);
		  if (tem && integer_onep (tem))
		    return 1;
		}
	      else
		return 1;
	    }
	  else if (q->low != NULL_TREE)
	    return -1;
	  /* If ->high is different, NULL high goes last, before that by
	     ascending high.  */
	  if (p->high != NULL_TREE)
	    {
	      if (q->high != NULL_TREE)
		{
		  tree tem = fold_binary (LT_EXPR, boolean_type_node,
					  p->high, q->high);
		  if (tem && integer_onep (tem))
		    return -1;
		  tem = fold_binary (GT_EXPR, boolean_type_node,
				     p->high, q->high);
		  if (tem && integer_onep (tem))
		    return 1;
		}
	      else
		return -1;
	    }
	  else if (q->high != NULL_TREE)
	    return 1;
	  /* If both ranges are the same, sort below by ascending idx.  */
	}
      else
	return 1;
    }
  else if (q->exp != NULL_TREE && TREE_CODE (q->exp) == SSA_NAME)
    return -1;

  if (p->idx < q->idx)
    return -1;
  else
    {
      gcc_checking_assert (p->idx > q->idx);
      return 1;
    }
}
/* Helper routine of optimize_range_tests.
   [EXP, IN_P, LOW, HIGH, STRICT_OVERFLOW_P] is a merged range for
   RANGE and OTHERRANGE through OTHERRANGE + COUNT - 1 ranges,
   OPCODE and OPS are arguments of optimize_range_tests.  Return
   true if the range merge has been successful.
   If OPCODE is ERROR_MARK, this is called from within
   maybe_optimize_range_tests and is performing inter-bb range optimization.
   Changes should be then performed right away, and whether an op is
   BIT_AND_EXPR or BIT_IOR_EXPR is found in oe->rank.  */

static bool
update_range_test (struct range_entry *range, struct range_entry *otherrange,
		   unsigned int count, enum tree_code opcode,
		   VEC (operand_entry_t, heap) **ops, tree exp, bool in_p,
		   tree low, tree high, bool strict_overflow_p)
{
  operand_entry_t oe = VEC_index (operand_entry_t, *ops, range->idx);
  tree op = oe->op;
  gimple stmt = op ? SSA_NAME_DEF_STMT (op) : last_stmt (BASIC_BLOCK (oe->id));
  location_t loc = gimple_location (stmt);
  tree optype = op ? TREE_TYPE (op) : boolean_type_node;
  tree tem = build_range_check (loc, optype, exp, in_p, low, high);
  enum warn_strict_overflow_code wc = WARN_STRICT_OVERFLOW_COMPARISON;
  gimple_stmt_iterator gsi;

  if (tem == NULL_TREE)
    return false;

  if (strict_overflow_p && issue_strict_overflow_warning (wc))
    warning_at (loc, OPT_Wstrict_overflow,
		"assuming signed overflow does not occur "
		"when simplifying range test");

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      struct range_entry *r;
      fprintf (dump_file, "Optimizing range tests ");
      print_generic_expr (dump_file, range->exp, 0);
      fprintf (dump_file, " %c[", range->in_p ? '+' : '-');
      print_generic_expr (dump_file, range->low, 0);
      fprintf (dump_file, ", ");
      print_generic_expr (dump_file, range->high, 0);
      fprintf (dump_file, "]");
      for (r = otherrange; r < otherrange + count; r++)
	{
	  fprintf (dump_file, " and %c[", r->in_p ? '+' : '-');
	  print_generic_expr (dump_file, r->low, 0);
	  fprintf (dump_file, ", ");
	  print_generic_expr (dump_file, r->high, 0);
	  fprintf (dump_file, "]");
	}
      fprintf (dump_file, "\n into ");
      print_generic_expr (dump_file, tem, 0);
      fprintf (dump_file, "\n");
    }

  if (opcode == BIT_IOR_EXPR
      || (opcode == ERROR_MARK && oe->rank == BIT_IOR_EXPR))
    tem = invert_truthvalue_loc (loc, tem);

  tem = fold_convert_loc (loc, optype, tem);
  gsi = gsi_for_stmt (stmt);
  tem = force_gimple_operand_gsi (&gsi, tem, true, NULL_TREE, true,
				  GSI_SAME_STMT);

  /* If doing inter-bb range test optimization, update the
     stmts immediately.  Start with changing the first range test
     immediate use to the new value (TEM), or, if the first range
     test is a GIMPLE_COND stmt, change that condition.  */
  if (opcode == ERROR_MARK)
    {
      if (op)
	{
	  imm_use_iterator iter;
	  use_operand_p use_p;
	  gimple use_stmt;

	  FOR_EACH_IMM_USE_STMT (use_stmt, iter, op)
	    {
	      if (is_gimple_debug (use_stmt))
		continue;
	      FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
		SET_USE (use_p, tem);
	      update_stmt (use_stmt);
	    }
	}
      else
	{
	  gimple_cond_set_code (stmt, NE_EXPR);
	  gimple_cond_set_lhs (stmt, tem);
	  gimple_cond_set_rhs (stmt, boolean_false_node);
	  update_stmt (stmt);
	}
    }
  oe->op = tem;
  range->exp = exp;
  range->low = low;
  range->high = high;
  range->in_p = in_p;
  range->strict_overflow_p = false;

  for (range = otherrange; range < otherrange + count; range++)
    {
      oe = VEC_index (operand_entry_t, *ops, range->idx);
      /* Now change all the other range test immediate uses, so that
	 those tests will be optimized away.  */
      if (opcode == ERROR_MARK)
	{
	  if (oe->op)
	    {
	      imm_use_iterator iter;
	      use_operand_p use_p;
	      gimple use_stmt;

	      FOR_EACH_IMM_USE_STMT (use_stmt, iter, oe->op)
		{
		  if (is_gimple_debug (use_stmt))
		    continue;
		  /* If imm use of _8 is a statement like _7 = _8 | _9;,
		     adjust it into _7 = _9;.  */
		  if (is_gimple_assign (use_stmt)
		      && gimple_assign_rhs_code (use_stmt) == oe->rank)
		    {
		      tree expr = NULL_TREE;
		      if (oe->op == gimple_assign_rhs1 (use_stmt))
			expr = gimple_assign_rhs2 (use_stmt);
		      else if (oe->op == gimple_assign_rhs2 (use_stmt))
			expr = gimple_assign_rhs1 (use_stmt);
		      if (expr
			  && TREE_CODE (expr) == SSA_NAME)
			{
			  gimple_stmt_iterator gsi2 = gsi_for_stmt (use_stmt);
			  gimple_assign_set_rhs_with_ops (&gsi2, SSA_NAME,
							  expr, NULL_TREE);
			  update_stmt (use_stmt);
			  continue;
			}
		    }
		  /* If imm use of _8 is a statement like _7 = (int) _8;,
		     adjust it into _7 = 0; or _7 = 1;.  */
		  if (gimple_assign_cast_p (use_stmt)
		      && oe->op == gimple_assign_rhs1 (use_stmt))
		    {
		      tree lhs = gimple_assign_lhs (use_stmt);
		      if (INTEGRAL_TYPE_P (TREE_TYPE (lhs)))
			{
			  gimple_stmt_iterator gsi2
			    = gsi_for_stmt (use_stmt);
			  tree expr = build_int_cst (TREE_TYPE (lhs),
						     oe->rank == BIT_IOR_EXPR
						     ? 0 : 1);
			  gimple_assign_set_rhs_with_ops (&gsi2,
							  INTEGER_CST,
							  expr, NULL_TREE);
			  update_stmt (use_stmt);
			  continue;
			}
		    }
		  /* Otherwise replace the use with 0 or 1.  */
		  FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
		    SET_USE (use_p,
			     build_int_cst (TREE_TYPE (oe->op),
					    oe->rank == BIT_IOR_EXPR
					    ? 0 : 1));
		  update_stmt (use_stmt);
		}
	    }
	  else
	    {
	      /* If range test was a GIMPLE_COND, simply change it
		 into an always false or always true condition.  */
	      stmt = last_stmt (BASIC_BLOCK (oe->id));
	      if (oe->rank == BIT_IOR_EXPR)
		gimple_cond_make_false (stmt);
	      else
		gimple_cond_make_true (stmt);
	      update_stmt (stmt);
	    }
	}
      oe->op = error_mark_node;
      range->exp = NULL_TREE;
    }
  return true;
}
2116 /* Optimize range tests, similarly how fold_range_test optimizes
2117 it on trees. The tree code for the binary
2118 operation between all the operands is OPCODE.
2119 If OPCODE is ERROR_MARK, optimize_range_tests is called from within
2120 maybe_optimize_range_tests for inter-bb range optimization.
2121 In that case if oe->op is NULL, oe->id is bb->index whose
2122 GIMPLE_COND is && or ||ed into the test, and oe->rank says
2123 the actual opcode. */
static void
optimize_range_tests (enum tree_code opcode,
                      VEC (operand_entry_t, heap) **ops)
{
  unsigned int length = VEC_length (operand_entry_t, *ops), i, j, first;
  operand_entry_t oe;
  struct range_entry *ranges;
  bool any_changes = false;

  if (length == 1)
    return;

  ranges = XNEWVEC (struct range_entry, length);
  for (i = 0; i < length; i++)
    {
      oe = VEC_index (operand_entry_t, *ops, i);
      init_range_entry (ranges + i, oe->op,
                        oe->op ? NULL : last_stmt (BASIC_BLOCK (oe->id)));
      /* For | invert it now, we will invert it again before emitting
         the optimized expression.  */
      if (opcode == BIT_IOR_EXPR
          || (opcode == ERROR_MARK && oe->rank == BIT_IOR_EXPR))
        ranges[i].in_p = !ranges[i].in_p;
    }

  qsort (ranges, length, sizeof (*ranges), range_entry_cmp);
  for (i = 0; i < length; i++)
    if (ranges[i].exp != NULL_TREE && TREE_CODE (ranges[i].exp) == SSA_NAME)
      break;

  /* Try to merge ranges.  */
  for (first = i; i < length; i++)
    {
      tree low = ranges[i].low;
      tree high = ranges[i].high;
      int in_p = ranges[i].in_p;
      bool strict_overflow_p = ranges[i].strict_overflow_p;
      int update_fail_count = 0;

      for (j = i + 1; j < length; j++)
        {
          if (ranges[i].exp != ranges[j].exp)
            break;
          if (!merge_ranges (&in_p, &low, &high, in_p, low, high,
                             ranges[j].in_p, ranges[j].low, ranges[j].high))
            break;
          strict_overflow_p |= ranges[j].strict_overflow_p;
        }

      if (j == i + 1)
        continue;

      if (update_range_test (ranges + i, ranges + i + 1, j - i - 1, opcode,
                             ops, ranges[i].exp, in_p, low, high,
                             strict_overflow_p))
        {
          i = j - 1;
          any_changes = true;
        }
      /* Avoid quadratic complexity if all merge_ranges calls would succeed,
         while update_range_test would fail.  */
      else if (update_fail_count == 64)
        i = j - 1;
      else
        ++update_fail_count;
    }

  /* Optimize X == CST1 || X == CST2
     if popcount (CST1 ^ CST2) == 1 into
     (X & ~(CST1 ^ CST2)) == (CST1 & ~(CST1 ^ CST2)).
     Similarly for ranges.  E.g.
     X != 2 && X != 3 && X != 10 && X != 11
     will be transformed by the above loop into
     (X - 2U) <= 1U && (X - 10U) <= 1U
     and this loop can transform that into
     ((X & ~8) - 2U) <= 1U.  */
  for (i = first; i < length; i++)
    {
      tree lowi, highi, lowj, highj, type, lowxor, highxor, tem, exp;

      if (ranges[i].exp == NULL_TREE || ranges[i].in_p)
        continue;
      type = TREE_TYPE (ranges[i].exp);
      if (!INTEGRAL_TYPE_P (type))
        continue;
      lowi = ranges[i].low;
      if (lowi == NULL_TREE)
        lowi = TYPE_MIN_VALUE (type);
      highi = ranges[i].high;
      if (highi == NULL_TREE)
        continue;
      for (j = i + 1; j < length && j < i + 64; j++)
        {
          if (ranges[j].exp == NULL_TREE)
            continue;
          if (ranges[i].exp != ranges[j].exp)
            break;
          lowj = ranges[j].low;
          if (lowj == NULL_TREE)
            continue;
          highj = ranges[j].high;
          if (highj == NULL_TREE)
            highj = TYPE_MAX_VALUE (type);
          /* The two ranges must not overlap: lowj > highi.  */
          tem = fold_binary (GT_EXPR, boolean_type_node,
                             lowj, highi);
          if (tem == NULL_TREE || !integer_onep (tem))
            continue;
          lowxor = fold_binary (BIT_XOR_EXPR, type, lowi, lowj);
          if (lowxor == NULL_TREE || TREE_CODE (lowxor) != INTEGER_CST)
            continue;
          gcc_checking_assert (!integer_zerop (lowxor));
          tem = fold_binary (MINUS_EXPR, type, lowxor,
                             build_int_cst (type, 1));
          if (tem == NULL_TREE)
            continue;
          /* lowi and lowj must differ in a single bit, i.e. lowxor
             must be a power of two: (x & (x - 1)) == 0.  */
          tem = fold_binary (BIT_AND_EXPR, type, lowxor, tem);
          if (tem == NULL_TREE || !integer_zerop (tem))
            continue;
          highxor = fold_binary (BIT_XOR_EXPR, type, highi, highj);
          if (!tree_int_cst_equal (lowxor, highxor))
            continue;
          tem = fold_build1 (BIT_NOT_EXPR, type, lowxor);
          exp = fold_build2 (BIT_AND_EXPR, type, ranges[i].exp, tem);
          lowj = fold_build2 (BIT_AND_EXPR, type, lowi, tem);
          highj = fold_build2 (BIT_AND_EXPR, type, highi, tem);
          if (update_range_test (ranges + i, ranges + j, 1, opcode, ops, exp,
                                 ranges[i].in_p, lowj, highj,
                                 ranges[i].strict_overflow_p
                                 || ranges[j].strict_overflow_p))
            {
              any_changes = true;
              break;
            }
        }
    }

  if (any_changes && opcode != ERROR_MARK)
    {
      j = 0;
      FOR_EACH_VEC_ELT (operand_entry_t, *ops, i, oe)
        {
          if (oe->op == error_mark_node)
            continue;
          else if (i != j)
            VEC_replace (operand_entry_t, *ops, j, oe);
          j++;
        }
      VEC_truncate (operand_entry_t, *ops, j);
    }

  XDELETEVEC (ranges);
}
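/* Illustrative sketch, not part of the pass (names hypothetical):
   the bit trick used by the second loop above can be checked in
   plain C.  Here 2 ^ 10 == 8 is a single bit, so the subranges
   {2,3} and {10,11} collapse onto [2,3] once bit 3 is masked away.  */

static int
in_set_naive_sketch (unsigned x)
{
  return x == 2 || x == 3 || x == 10 || x == 11;
}

static int
in_set_masked_sketch (unsigned x)
{
  /* (X & ~(CST1 ^ CST2)) folds both subranges onto [2, 3]; the
     unsigned subtraction then performs the usual range check.  */
  return ((x & ~8u) - 2u) <= 1u;
}

/* For every x the two sketches agree; a loop over 0 .. 63 with
   assert (in_set_naive_sketch (x) == in_set_masked_sketch (x))
   is enough to convince yourself.  */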
/* Return true if STMT is a cast like:

   <bb N>:
   ...
   _123 = (int) _234;

   <bb M>:
   # _345 = PHI <_123(N), 1(...), 1(...)>
   where _234 has bool type, _123 has single use and
   bb N has a single successor M.  This is commonly used in
   the last block of a range test.  */

static bool
final_range_test_p (gimple stmt)
{
  basic_block bb, rhs_bb;
  edge e;
  tree lhs, rhs;
  use_operand_p use_p;
  gimple use_stmt;

  if (!gimple_assign_cast_p (stmt))
    return false;
  bb = gimple_bb (stmt);
  if (!single_succ_p (bb))
    return false;
  e = single_succ_edge (bb);
  if (e->flags & EDGE_COMPLEX)
    return false;

  lhs = gimple_assign_lhs (stmt);
  rhs = gimple_assign_rhs1 (stmt);
  if (!INTEGRAL_TYPE_P (TREE_TYPE (lhs))
      || TREE_CODE (rhs) != SSA_NAME
      || TREE_CODE (TREE_TYPE (rhs)) != BOOLEAN_TYPE)
    return false;

  /* Test whether lhs is consumed only by a PHI in the only successor bb.  */
  if (!single_imm_use (lhs, &use_p, &use_stmt))
    return false;

  if (gimple_code (use_stmt) != GIMPLE_PHI
      || gimple_bb (use_stmt) != e->dest)
    return false;

  /* And that the rhs is defined in the same loop.  */
  rhs_bb = gimple_bb (SSA_NAME_DEF_STMT (rhs));
  if (rhs_bb == NULL
      || !flow_bb_inside_loop_p (loop_containing_stmt (stmt), rhs_bb))
    return false;

  return true;
}
/* Return true if BB is suitable basic block for inter-bb range test
   optimization.  If BACKWARD is true, BB should be the only predecessor
   of TEST_BB, and *OTHER_BB is either NULL and filled by the routine,
   or compared with to find a common basic block to which all conditions
   branch to if true resp. false.  If BACKWARD is false, TEST_BB should
   be the only predecessor of BB.  */

static bool
suitable_cond_bb (basic_block bb, basic_block test_bb, basic_block *other_bb,
                  bool backward)
{
  edge_iterator ei, ei2;
  edge e, e2;
  gimple stmt;
  gimple_stmt_iterator gsi;
  bool other_edge_seen = false;
  bool is_cond;

  if (test_bb == bb)
    return false;
  /* Check last stmt first.  */
  stmt = last_stmt (bb);
  if (stmt == NULL
      || (gimple_code (stmt) != GIMPLE_COND
          && (backward || !final_range_test_p (stmt)))
      || gimple_visited_p (stmt)
      || stmt_could_throw_p (stmt)
      || *other_bb == bb)
    return false;
  is_cond = gimple_code (stmt) == GIMPLE_COND;
  if (is_cond)
    {
      /* If last stmt is GIMPLE_COND, verify that one of the succ edges
         goes to the next bb (if BACKWARD, it is TEST_BB), and the other
         to *OTHER_BB (if not set yet, try to find it out).  */
      if (EDGE_COUNT (bb->succs) != 2)
        return false;
      FOR_EACH_EDGE (e, ei, bb->succs)
        {
          if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
            return false;
          if (e->dest == test_bb)
            {
              if (backward)
                continue;
              else
                return false;
            }
          if (e->dest == bb)
            return false;
          if (*other_bb == NULL)
            {
              FOR_EACH_EDGE (e2, ei2, test_bb->succs)
                if (!(e2->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
                  return false;
                else if (e->dest == e2->dest)
                  *other_bb = e->dest;
              if (*other_bb == NULL)
                return false;
            }
          if (e->dest == *other_bb)
            other_edge_seen = true;
          else if (backward)
            return false;
        }
      if (*other_bb == NULL || !other_edge_seen)
        return false;
    }
  else if (single_succ (bb) != *other_bb)
    return false;

  /* Now check all PHIs of *OTHER_BB.  */
  e = find_edge (bb, *other_bb);
  e2 = find_edge (test_bb, *other_bb);
  for (gsi = gsi_start_phis (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);
      /* If both BB and TEST_BB end with GIMPLE_COND, all PHI arguments
         corresponding to BB and TEST_BB predecessor must be the same.  */
      if (!operand_equal_p (gimple_phi_arg_def (phi, e->dest_idx),
                            gimple_phi_arg_def (phi, e2->dest_idx), 0))
        {
          /* Otherwise, if one of the blocks doesn't end with GIMPLE_COND,
             one of the PHIs should have the lhs of the last stmt in
             that block as PHI arg and that PHI should have 0 or 1
             corresponding to it in all other range test basic blocks
             considered.  */
          if (!is_cond)
            {
              if (gimple_phi_arg_def (phi, e->dest_idx)
                  == gimple_assign_lhs (stmt)
                  && (integer_zerop (gimple_phi_arg_def (phi, e2->dest_idx))
                      || integer_onep (gimple_phi_arg_def (phi,
                                                           e2->dest_idx))))
                continue;
            }
          else
            {
              gimple test_last = last_stmt (test_bb);
              if (gimple_code (test_last) != GIMPLE_COND
                  && gimple_phi_arg_def (phi, e2->dest_idx)
                     == gimple_assign_lhs (test_last)
                  && (integer_zerop (gimple_phi_arg_def (phi, e->dest_idx))
                      || integer_onep (gimple_phi_arg_def (phi,
                                                           e->dest_idx))))
                continue;
            }

          return false;
        }
    }
  return true;
}
/* Return true if BB doesn't have side-effects that would disallow
   range test optimization, all SSA_NAMEs set in the bb are consumed
   in the bb and there are no PHIs.  */

static bool
no_side_effect_bb (basic_block bb)
{
  gimple_stmt_iterator gsi;
  gimple last;

  if (!gimple_seq_empty_p (phi_nodes (bb)))
    return false;
  last = last_stmt (bb);
  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      tree lhs;
      imm_use_iterator imm_iter;
      use_operand_p use_p;

      if (is_gimple_debug (stmt))
        continue;
      if (gimple_has_side_effects (stmt))
        return false;
      if (stmt == last)
        return true;
      if (!is_gimple_assign (stmt))
        return false;
      lhs = gimple_assign_lhs (stmt);
      if (TREE_CODE (lhs) != SSA_NAME)
        return false;
      if (gimple_assign_rhs_could_trap_p (stmt))
        return false;
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
        {
          gimple use_stmt = USE_STMT (use_p);
          if (is_gimple_debug (use_stmt))
            continue;
          if (gimple_bb (use_stmt) != bb)
            return false;
        }
    }
  return false;
}
/* If VAR is set by CODE (BIT_{AND,IOR}_EXPR) which is reassociable,
   return true and fill in *OPS recursively.  */

static bool
get_ops (tree var, enum tree_code code, VEC(operand_entry_t, heap) **ops,
         struct loop *loop)
{
  gimple stmt = SSA_NAME_DEF_STMT (var);
  tree rhs[2];
  int i;

  if (!is_reassociable_op (stmt, code, loop))
    return false;

  rhs[0] = gimple_assign_rhs1 (stmt);
  rhs[1] = gimple_assign_rhs2 (stmt);
  gimple_set_visited (stmt, true);
  for (i = 0; i < 2; i++)
    if (TREE_CODE (rhs[i]) == SSA_NAME
        && !get_ops (rhs[i], code, ops, loop)
        && has_single_use (rhs[i]))
      {
        operand_entry_t oe = (operand_entry_t) pool_alloc (operand_entry_pool);

        oe->op = rhs[i];
        oe->rank = code;
        oe->id = 0;
        oe->count = 1;
        VEC_safe_push (operand_entry_t, heap, *ops, oe);
      }
  return true;
}
/* Inter-bb range test optimization.  */

static void
maybe_optimize_range_tests (gimple stmt)
{
  basic_block first_bb = gimple_bb (stmt);
  basic_block last_bb = first_bb;
  basic_block other_bb = NULL;
  basic_block bb;
  edge_iterator ei;
  edge e;
  VEC(operand_entry_t, heap) *ops = NULL;

  /* Consider only basic blocks that end with GIMPLE_COND or
     a cast statement satisfying final_range_test_p.  All
     but the last bb in the first_bb .. last_bb range
     should end with GIMPLE_COND.  */
  if (gimple_code (stmt) == GIMPLE_COND)
    {
      if (EDGE_COUNT (first_bb->succs) != 2)
        return;
    }
  else if (final_range_test_p (stmt))
    other_bb = single_succ (first_bb);
  else
    return;

  if (stmt_could_throw_p (stmt))
    return;

  /* As relative ordering of post-dominator sons isn't fixed,
     maybe_optimize_range_tests can be called first on any
     bb in the range we want to optimize.  So, start searching
     backwards, if first_bb can be set to a predecessor.  */
  while (single_pred_p (first_bb))
    {
      basic_block pred_bb = single_pred (first_bb);
      if (!suitable_cond_bb (pred_bb, first_bb, &other_bb, true))
        break;
      if (!no_side_effect_bb (first_bb))
        break;
      first_bb = pred_bb;
    }
  /* If first_bb is last_bb, other_bb hasn't been computed yet.
     Before starting forward search in last_bb successors, find
     out the other_bb.  */
  if (first_bb == last_bb)
    {
      other_bb = NULL;
      /* As non-GIMPLE_COND last stmt always terminates the range,
         if forward search didn't discover anything, just give up.  */
      if (gimple_code (stmt) != GIMPLE_COND)
        return;
      /* Look at both successors.  Either it ends with a GIMPLE_COND
         and satisfies suitable_cond_bb, or ends with a cast and
         other_bb is that cast's successor.  */
      FOR_EACH_EDGE (e, ei, first_bb->succs)
        {
          if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE))
              || e->dest == first_bb)
            return;
          if (single_pred_p (e->dest))
            {
              stmt = last_stmt (e->dest);
              if (stmt
                  && gimple_code (stmt) == GIMPLE_COND
                  && EDGE_COUNT (e->dest->succs) == 2)
                {
                  if (suitable_cond_bb (first_bb, e->dest, &other_bb, true))
                    break;
                  else
                    other_bb = NULL;
                }
              else if (stmt
                       && final_range_test_p (stmt)
                       && find_edge (first_bb, single_succ (e->dest)))
                {
                  other_bb = single_succ (e->dest);
                  if (other_bb == first_bb)
                    other_bb = NULL;
                }
            }
        }
      if (other_bb == NULL)
        return;
    }
  /* Now do the forward search, moving last_bb to successor bbs
     that aren't other_bb.  */
  while (EDGE_COUNT (last_bb->succs) == 2)
    {
      FOR_EACH_EDGE (e, ei, last_bb->succs)
        if (e->dest != other_bb)
          break;
      if (e == NULL)
        break;
      if (!single_pred_p (e->dest))
        break;
      if (!suitable_cond_bb (e->dest, last_bb, &other_bb, false))
        break;
      if (!no_side_effect_bb (e->dest))
        break;
      last_bb = e->dest;
    }
  if (first_bb == last_bb)
    return;
  /* Here basic blocks first_bb through last_bb's predecessor
     end with GIMPLE_COND, all of them have one of the edges to
     other_bb and another to another block in the range,
     all blocks except first_bb don't have side-effects and
     last_bb ends with either GIMPLE_COND, or cast satisfying
     final_range_test_p.  */
  for (bb = last_bb; ; bb = single_pred (bb))
    {
      enum tree_code code;
      tree lhs, rhs;

      e = find_edge (bb, other_bb);
      stmt = last_stmt (bb);
      gimple_set_visited (stmt, true);
      if (gimple_code (stmt) != GIMPLE_COND)
        {
          use_operand_p use_p;
          gimple phi;
          edge e2;
          unsigned int d;

          lhs = gimple_assign_lhs (stmt);
          rhs = gimple_assign_rhs1 (stmt);
          gcc_assert (bb == last_bb);

          /* stmt is
             _123 = (int) _234;

             followed by:
             <bb M>:
             # _345 = PHI <_123(N), 1(...), 1(...)>

             or 0 instead of 1.  If it is 0, the _234
             range test is anded together with all the
             other range tests, if it is 1, it is ored with
             them.  */
          single_imm_use (lhs, &use_p, &phi);
          gcc_assert (gimple_code (phi) == GIMPLE_PHI);
          e2 = find_edge (first_bb, other_bb);
          d = e2->dest_idx;
          gcc_assert (gimple_phi_arg_def (phi, e->dest_idx) == lhs);
          if (integer_zerop (gimple_phi_arg_def (phi, d)))
            code = BIT_AND_EXPR;
          else
            {
              gcc_checking_assert (integer_onep (gimple_phi_arg_def (phi, d)));
              code = BIT_IOR_EXPR;
            }

          /* If _234 SSA_NAME_DEF_STMT is
             _234 = _567 | _789;
             (or &, corresponding to 1/0 in the phi arguments,
             push into ops the individual range test arguments
             of the bitwise or resp. and, recursively.  */
          if (!get_ops (rhs, code, &ops,
                        loop_containing_stmt (stmt))
              && has_single_use (rhs))
            {
              /* Otherwise, push the _234 range test itself.  */
              operand_entry_t oe
                = (operand_entry_t) pool_alloc (operand_entry_pool);

              oe->op = rhs;
              oe->rank = code;
              oe->id = 0;
              oe->count = 1;
              VEC_safe_push (operand_entry_t, heap, ops, oe);
            }
          continue;
        }
      /* Otherwise stmt is GIMPLE_COND.  */
      code = gimple_cond_code (stmt);
      lhs = gimple_cond_lhs (stmt);
      rhs = gimple_cond_rhs (stmt);
      if (TREE_CODE (lhs) == SSA_NAME
          && INTEGRAL_TYPE_P (TREE_TYPE (lhs))
          && ((code != EQ_EXPR && code != NE_EXPR)
              || rhs != boolean_false_node
              /* Either push into ops the individual bitwise
                 or resp. and operands, depending on which
                 edge is other_bb.  */
              || !get_ops (lhs, (((e->flags & EDGE_TRUE_VALUE) == 0)
                                 ^ (code == EQ_EXPR))
                                ? BIT_AND_EXPR : BIT_IOR_EXPR, &ops,
                           loop_containing_stmt (stmt))))
        {
          /* Or push the GIMPLE_COND stmt itself.  */
          operand_entry_t oe
            = (operand_entry_t) pool_alloc (operand_entry_pool);

          oe->op = NULL;
          oe->rank = (e->flags & EDGE_TRUE_VALUE)
                     ? BIT_IOR_EXPR : BIT_AND_EXPR;
          /* oe->op = NULL signals that there is no SSA_NAME
             for the range test, and oe->id instead is the
             basic block number, at whose end the GIMPLE_COND
             is.  */
          oe->id = bb->index;
          oe->count = 1;
          VEC_safe_push (operand_entry_t, heap, ops, oe);
        }
      if (bb == first_bb)
        break;
    }
  if (VEC_length (operand_entry_t, ops) > 1)
    optimize_range_tests (ERROR_MARK, &ops);
  VEC_free (operand_entry_t, heap, ops);
}
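/* Illustrative sketch, not part of the pass (function name
   hypothetical): source whose range test was never a single
   expression.  Each comparison below ends its own basic block with a
   GIMPLE_COND branching to a common destination, so the walk above
   turns every block into an operand_entry with op == NULL and
   id == bb->index, and optimize_range_tests (ERROR_MARK, &ops) then
   merges them exactly as it would merge operands of one | chain.  */

static int
split_range_test_sketch (unsigned a)
{
  if (a == 2)
    return 1;
  if (a == 3)
    return 1;
  if (a == 10)
    return 1;
  if (a == 11)
    return 1;
  return 0;
}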
/* Return true if OPERAND is defined by a PHI node which uses the LHS
   of STMT in its operands.  This is also known as a "destructive
   update" operation.  */

static bool
is_phi_for_stmt (gimple stmt, tree operand)
{
  gimple def_stmt;
  tree lhs;
  use_operand_p arg_p;
  ssa_op_iter i;

  if (TREE_CODE (operand) != SSA_NAME)
    return false;

  lhs = gimple_assign_lhs (stmt);

  def_stmt = SSA_NAME_DEF_STMT (operand);
  if (gimple_code (def_stmt) != GIMPLE_PHI)
    return false;

  FOR_EACH_PHI_ARG (arg_p, def_stmt, i, SSA_OP_USE)
    if (lhs == USE_FROM_PTR (arg_p))
      return true;
  return false;
}
/* Remove def stmt of VAR if VAR has zero uses and recurse
   on rhs1 operand if so.  */

static void
remove_visited_stmt_chain (tree var)
{
  gimple stmt;
  gimple_stmt_iterator gsi;

  while (1)
    {
      if (TREE_CODE (var) != SSA_NAME || !has_zero_uses (var))
        return;
      stmt = SSA_NAME_DEF_STMT (var);
      if (is_gimple_assign (stmt) && gimple_visited_p (stmt))
        {
          var = gimple_assign_rhs1 (stmt);
          gsi = gsi_for_stmt (stmt);
          gsi_remove (&gsi, true);
          release_defs (stmt);
        }
      else
        return;
    }
}
/* This function checks three consecutive operands in
   passed operands vector OPS starting from OPINDEX and
   swaps two operands if it is profitable for binary operation
   consuming OPINDEX + 1 and OPINDEX + 2 operands.

   We pair ops with the same rank if possible.

   The alternative we try is to see if STMT is a destructive
   update style statement, which is like:
   b = phi (a, ...)
   a = c + d;
   In that case, we want to use the destructive update form to
   expose the possible vectorizer sum reduction opportunity
   (see the illustrative sketch after this function).
   In that case, the third operand will be the phi node.  This
   check is not performed if STMT is null.

   We could, of course, try to be better as noted above, and do a
   lot of work to try to find these opportunities in >3 operand
   cases, but it is unlikely to be worth it.  */
static void
swap_ops_for_binary_stmt (VEC(operand_entry_t, heap) * ops,
                          unsigned int opindex, gimple stmt)
{
  operand_entry_t oe1, oe2, oe3;

  oe1 = VEC_index (operand_entry_t, ops, opindex);
  oe2 = VEC_index (operand_entry_t, ops, opindex + 1);
  oe3 = VEC_index (operand_entry_t, ops, opindex + 2);

  if ((oe1->rank == oe2->rank
       && oe2->rank != oe3->rank)
      || (stmt && is_phi_for_stmt (stmt, oe3->op)
          && !is_phi_for_stmt (stmt, oe1->op)
          && !is_phi_for_stmt (stmt, oe2->op)))
    {
      struct operand_entry temp = *oe3;

      oe3->op = oe1->op;
      oe3->rank = oe1->rank;
      oe1->op = temp.op;
      oe1->rank = temp.rank;
    }
  else if ((oe1->rank == oe3->rank
            && oe2->rank != oe3->rank)
           || (stmt && is_phi_for_stmt (stmt, oe2->op)
               && !is_phi_for_stmt (stmt, oe1->op)
               && !is_phi_for_stmt (stmt, oe3->op)))
    {
      struct operand_entry temp = *oe2;

      oe2->op = oe1->op;
      oe2->rank = oe1->rank;
      oe1->op = temp.op;
      oe1->rank = temp.rank;
    }
}
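/* Illustrative sketch, not part of the pass (function name
   hypothetical): the destructive-update preference above keeps sum
   reductions in the shape the vectorizer recognizes.  The running
   sum is defined by a PHI, so it is swapped into the last position
   of the final three-operand group:  */

static double
sum_reduction_sketch (const double *a, const double *b,
                      const double *c, int n)
{
  double s = 0.0;
  int i;

  for (i = 0; i < n; i++)
    /* Associated so the loop-carried value s is the outermost
       addend, i.e. s_new = ((a[i] + b[i]) + c[i]) + s_phi.  */
    s = ((a[i] + b[i]) + c[i]) + s;
  return s;
}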
/* Recursively rewrite our linearized statements so that the operators
   match those in OPS[OPINDEX], putting the computation in rank
   order.  */

static void
rewrite_expr_tree (gimple stmt, unsigned int opindex,
                   VEC(operand_entry_t, heap) * ops, bool moved)
{
  tree rhs1 = gimple_assign_rhs1 (stmt);
  tree rhs2 = gimple_assign_rhs2 (stmt);
  operand_entry_t oe;

  /* If we have three operands left, then we want to make sure the ones
     that get the double binary op are chosen wisely.  */
  if (opindex + 3 == VEC_length (operand_entry_t, ops))
    swap_ops_for_binary_stmt (ops, opindex, stmt);

  /* The final recursion case for this function is that you have
     exactly two operations left.
     If we had exactly one op in the entire list to start with, we
     would have never called this function, and the tail recursion
     rewrites them one at a time.  */
  if (opindex + 2 == VEC_length (operand_entry_t, ops))
    {
      operand_entry_t oe1, oe2;

      oe1 = VEC_index (operand_entry_t, ops, opindex);
      oe2 = VEC_index (operand_entry_t, ops, opindex + 1);

      if (rhs1 != oe1->op || rhs2 != oe2->op)
        {
          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "Transforming ");
              print_gimple_stmt (dump_file, stmt, 0, 0);
            }

          gimple_assign_set_rhs1 (stmt, oe1->op);
          gimple_assign_set_rhs2 (stmt, oe2->op);
          update_stmt (stmt);
          if (rhs1 != oe1->op && rhs1 != oe2->op)
            remove_visited_stmt_chain (rhs1);

          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, " into ");
              print_gimple_stmt (dump_file, stmt, 0, 0);
            }
        }
      return;
    }

  /* If we hit here, we should have 3 or more ops left.  */
  gcc_assert (opindex + 2 < VEC_length (operand_entry_t, ops));

  /* Rewrite the next operator.  */
  oe = VEC_index (operand_entry_t, ops, opindex);

  if (oe->op != rhs2)
    {
      if (!moved)
        {
          gimple_stmt_iterator gsinow, gsirhs1;
          gimple stmt1 = stmt, stmt2;
          unsigned int count;

          gsinow = gsi_for_stmt (stmt);
          count = VEC_length (operand_entry_t, ops) - opindex - 2;
          while (count-- != 0)
            {
              stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt1));
              gsirhs1 = gsi_for_stmt (stmt2);
              gsi_move_before (&gsirhs1, &gsinow);
              gsi_prev (&gsinow);
              stmt1 = stmt2;
            }
          moved = true;
        }

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "Transforming ");
          print_gimple_stmt (dump_file, stmt, 0, 0);
        }

      gimple_assign_set_rhs2 (stmt, oe->op);
      update_stmt (stmt);

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, " into ");
          print_gimple_stmt (dump_file, stmt, 0, 0);
        }
    }
  /* Recurse on the LHS of the binary operator, which is guaranteed to
     be the non-leaf side.  */
  rewrite_expr_tree (SSA_NAME_DEF_STMT (rhs1), opindex + 1, ops, moved);
}
/* Find out how many cycles we need to compute statements chain.
   OPS_NUM holds number of statements in a chain.  CPU_WIDTH is a
   maximum number of independent statements we may execute per cycle.  */

static int
get_required_cycles (int ops_num, int cpu_width)
{
  int res;
  int elog;
  unsigned int rest;

  /* While we have more than 2 * cpu_width operands
     we may reduce number of operands by cpu_width
     per cycle.  */
  res = ops_num / (2 * cpu_width);

  /* Remained operands count may be reduced twice per cycle
     until we have only one operand.  */
  rest = (unsigned)(ops_num - res * cpu_width);
  elog = exact_log2 (rest);
  if (elog >= 0)
    res += elog;
  else
    res += floor_log2 (rest) + 1;

  return res;
}
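/* Illustrative sketch, not part of the pass (function name
   hypothetical): the same computation in plain C, with a portable
   floor-log2 standing in for GCC's exact_log2/floor_log2 helpers.
   For example, 10 operands at width 2 give 10 / (2 * 2) = 2
   full-width reduction cycles, leaving 10 - 2 * 2 = 6 values, which
   take ceil(log2(6)) = 3 halving cycles, so 5 cycles in total.  */

static int
required_cycles_sketch (int ops_num, int cpu_width)
{
  int res = ops_num / (2 * cpu_width);
  unsigned rest = (unsigned) (ops_num - res * cpu_width);
  int log = 0;
  int exact = (rest & (rest - 1)) == 0;   /* power of two (or zero)?  */

  while (rest >>= 1)
    log++;
  return res + (exact ? log : log + 1);
}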
/* Returns an optimal number of registers to use for computation of
   given statements.  */

static int
get_reassociation_width (int ops_num, enum tree_code opc,
                         enum machine_mode mode)
{
  int param_width = PARAM_VALUE (PARAM_TREE_REASSOC_WIDTH);
  int width;
  int width_min;
  int cycles_best;

  if (param_width > 0)
    width = param_width;
  else
    width = targetm.sched.reassociation_width (opc, mode);

  if (width == 1)
    return width;

  /* Get the minimal time required for sequence computation.  */
  cycles_best = get_required_cycles (ops_num, width);

  /* Check if we may use less width and still compute sequence for
     the same time.  It will allow us to reduce registers usage.
     get_required_cycles is monotonically increasing with lower width
     so we can perform a binary search for the minimal width that still
     results in the optimal cycle count.  */
  width_min = 1;
  while (width > width_min)
    {
      int width_mid = (width + width_min) / 2;

      if (get_required_cycles (ops_num, width_mid) == cycles_best)
        width = width_mid;
      else if (width_min < width_mid)
        width_min = width_mid;
      else
        break;
    }

  return width;
}
/* Recursively rewrite our linearized statements so that the operators
   match those in OPS[OPINDEX], putting the computation in rank
   order and trying to allow operations to be executed in
   parallel.  */

static void
rewrite_expr_tree_parallel (gimple stmt, int width,
                            VEC(operand_entry_t, heap) * ops)
{
  enum tree_code opcode = gimple_assign_rhs_code (stmt);
  int op_num = VEC_length (operand_entry_t, ops);
  int stmt_num = op_num - 1;
  gimple *stmts = XALLOCAVEC (gimple, stmt_num);
  int op_index = op_num - 1;
  int stmt_index = 0;
  int ready_stmts_end = 0;
  int i = 0;
  tree last_rhs1 = gimple_assign_rhs1 (stmt);

  /* We start expression rewriting from the top statements.
     So, in this loop we create a full list of statements
     we will work with.  */
  stmts[stmt_num - 1] = stmt;
  for (i = stmt_num - 2; i >= 0; i--)
    stmts[i] = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmts[i+1]));

  for (i = 0; i < stmt_num; i++)
    {
      tree op1, op2;

      /* Determine whether we should use results of
         already handled statements or not.  */
      if (ready_stmts_end == 0
          && (i - stmt_index >= width || op_index < 1))
        ready_stmts_end = i;

      /* Now we choose operands for the next statement.  A nonzero
         value in ready_stmts_end means here that we should use
         the result of already generated statements as new operand.  */
      if (ready_stmts_end > 0)
        {
          op1 = gimple_assign_lhs (stmts[stmt_index++]);
          if (ready_stmts_end > stmt_index)
            op2 = gimple_assign_lhs (stmts[stmt_index++]);
          else if (op_index >= 0)
            op2 = VEC_index (operand_entry_t, ops, op_index--)->op;
          else
            {
              gcc_assert (stmt_index < i);
              op2 = gimple_assign_lhs (stmts[stmt_index++]);
            }

          if (stmt_index >= ready_stmts_end)
            ready_stmts_end = 0;
        }
      else
        {
          if (op_index > 1)
            swap_ops_for_binary_stmt (ops, op_index - 2, NULL);
          op2 = VEC_index (operand_entry_t, ops, op_index--)->op;
          op1 = VEC_index (operand_entry_t, ops, op_index--)->op;
        }

      /* If we emit the last statement then we should put
         operands into the last statement.  It will also
         balance the tree.  */
      if (op_index < 0 && stmt_index == i)
        i = stmt_num - 1;

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "Transforming ");
          print_gimple_stmt (dump_file, stmts[i], 0, 0);
        }

      /* We keep original statement only for the last one.  All
         others are recreated.  */
      if (i == stmt_num - 1)
        {
          gimple_assign_set_rhs1 (stmts[i], op1);
          gimple_assign_set_rhs2 (stmts[i], op2);
          update_stmt (stmts[i]);
        }
      else
        stmts[i] = build_and_add_sum (TREE_TYPE (last_rhs1), op1, op2, opcode);

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, " into ");
          print_gimple_stmt (dump_file, stmts[i], 0, 0);
        }
    }

  remove_visited_stmt_chain (last_rhs1);
}
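/* Illustrative sketch, not part of the pass (function name
   hypothetical; doubles shown for concreteness, though the pass only
   reassociates floating point under -fassociative-math): with eight
   addends and width 4, the serial chain of depth 7 is rebuilt as a
   balanced form of depth 3 whose first four adds are independent.  */

static double
parallel_sum_sketch (double a, double b, double c, double d,
                     double e, double f, double g, double h)
{
  double t0 = a + b;    /* these four adds have no dependencies */
  double t1 = c + d;
  double t2 = e + f;
  double t3 = g + h;
  double u0 = t0 + t1;  /* second level: two independent adds */
  double u1 = t2 + t3;
  return u0 + u1;       /* versus ((((((a+b)+c)+d)+e)+f)+g)+h */
}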
/* Transform STMT, which is really (A + B) + (C + D) into the left
   linear form, ((A + B) + C) + D.
   Recurse on D if necessary.  */

static void
linearize_expr (gimple stmt)
{
  gimple_stmt_iterator gsinow, gsirhs;
  gimple binlhs = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
  gimple binrhs = SSA_NAME_DEF_STMT (gimple_assign_rhs2 (stmt));
  enum tree_code rhscode = gimple_assign_rhs_code (stmt);
  gimple newbinrhs = NULL;
  struct loop *loop = loop_containing_stmt (stmt);

  gcc_assert (is_reassociable_op (binlhs, rhscode, loop)
              && is_reassociable_op (binrhs, rhscode, loop));

  gsinow = gsi_for_stmt (stmt);
  gsirhs = gsi_for_stmt (binrhs);
  gsi_move_before (&gsirhs, &gsinow);

  gimple_assign_set_rhs2 (stmt, gimple_assign_rhs1 (binrhs));
  gimple_assign_set_rhs1 (binrhs, gimple_assign_lhs (binlhs));
  gimple_assign_set_rhs1 (stmt, gimple_assign_lhs (binrhs));

  if (TREE_CODE (gimple_assign_rhs2 (stmt)) == SSA_NAME)
    newbinrhs = SSA_NAME_DEF_STMT (gimple_assign_rhs2 (stmt));

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Linearized: ");
      print_gimple_stmt (dump_file, stmt, 0, 0);
    }

  reassociate_stats.linearized++;
  update_stmt (binrhs);
  update_stmt (binlhs);
  update_stmt (stmt);

  gimple_set_visited (stmt, true);
  gimple_set_visited (binlhs, true);
  gimple_set_visited (binrhs, true);

  /* Tail recurse on the new rhs if it still needs reassociation.  */
  if (newbinrhs && is_reassociable_op (newbinrhs, rhscode, loop))
    /* ??? This should probably be linearize_expr (newbinrhs) but I don't
       want to change the algorithm while converting to tuples.  */
    linearize_expr (stmt);
}
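/* Illustrative sketch, not part of the pass (function name
   hypothetical): the rotation above turns a balanced tree into a
   left-linear chain, which is the shape the operand-collection and
   rewriting steps expect.  */

static int
linearize_sketch (int a, int b, int c, int d)
{
  /* Before: (a + b) + (c + d), two independent subtrees.
     After:  ((a + b) + c) + d, one left-linear chain.  */
  return ((a + b) + c) + d;
}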
/* If LHS has a single immediate use that is a GIMPLE_ASSIGN statement, return
   it.  Otherwise, return NULL.  */

static gimple
get_single_immediate_use (tree lhs)
{
  use_operand_p immuse;
  gimple immusestmt;

  if (TREE_CODE (lhs) == SSA_NAME
      && single_imm_use (lhs, &immuse, &immusestmt)
      && is_gimple_assign (immusestmt))
    return immusestmt;

  return NULL;
}
/* Recursively negate the value of TONEGATE, and return the SSA_NAME
   representing the negated value.  Insertions of any necessary
   instructions go before GSI.
   This function is recursive in that, if you hand it "a_5" as the
   value to negate, and a_5 is defined by "a_5 = b_3 + b_4", it will
   transform b_3 + b_4 into a_5 = -b_3 + -b_4.  */

static tree
negate_value (tree tonegate, gimple_stmt_iterator *gsi)
{
  gimple negatedefstmt = NULL;
  tree resultofnegate;

  /* If we are trying to negate a name, defined by an add, negate the
     add operands instead.  */
  if (TREE_CODE (tonegate) == SSA_NAME)
    negatedefstmt = SSA_NAME_DEF_STMT (tonegate);
  if (TREE_CODE (tonegate) == SSA_NAME
      && is_gimple_assign (negatedefstmt)
      && TREE_CODE (gimple_assign_lhs (negatedefstmt)) == SSA_NAME
      && has_single_use (gimple_assign_lhs (negatedefstmt))
      && gimple_assign_rhs_code (negatedefstmt) == PLUS_EXPR)
    {
      gimple_stmt_iterator gsi;
      tree rhs1 = gimple_assign_rhs1 (negatedefstmt);
      tree rhs2 = gimple_assign_rhs2 (negatedefstmt);

      gsi = gsi_for_stmt (negatedefstmt);
      rhs1 = negate_value (rhs1, &gsi);
      gimple_assign_set_rhs1 (negatedefstmt, rhs1);

      gsi = gsi_for_stmt (negatedefstmt);
      rhs2 = negate_value (rhs2, &gsi);
      gimple_assign_set_rhs2 (negatedefstmt, rhs2);

      update_stmt (negatedefstmt);
      return gimple_assign_lhs (negatedefstmt);
    }

  tonegate = fold_build1 (NEGATE_EXPR, TREE_TYPE (tonegate), tonegate);
  resultofnegate = force_gimple_operand_gsi (gsi, tonegate, true,
                                             NULL_TREE, true, GSI_SAME_STMT);
  return resultofnegate;
}
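/* Illustrative sketch, not part of the pass (function name
   hypothetical): negation distributed over a single-use add, as
   negate_value does on GIMPLE.  Note the pass only reassociates
   integer types whose overflow wraps, for which the identity holds
   exactly.  */

static int
negate_sketch (int b3, int b4)
{
  int a5 = b3 + b4;
  int negated = -b3 + -b4;  /* what negate_value produces for -a_5 */

  return negated == -a5;    /* always 1 under wrapping arithmetic */
}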
/* Return true if we should break up the subtract in STMT into an add
   with negate.  This is true when the subtract operands are really
   adds, or the subtract itself is used in an add expression.  In
   either case, breaking up the subtract into an add with negate
   exposes the adds to reassociation.  */

static bool
should_break_up_subtract (gimple stmt)
{
  tree lhs = gimple_assign_lhs (stmt);
  tree binlhs = gimple_assign_rhs1 (stmt);
  tree binrhs = gimple_assign_rhs2 (stmt);
  gimple immusestmt;
  struct loop *loop = loop_containing_stmt (stmt);

  if (TREE_CODE (binlhs) == SSA_NAME
      && is_reassociable_op (SSA_NAME_DEF_STMT (binlhs), PLUS_EXPR, loop))
    return true;

  if (TREE_CODE (binrhs) == SSA_NAME
      && is_reassociable_op (SSA_NAME_DEF_STMT (binrhs), PLUS_EXPR, loop))
    return true;

  if (TREE_CODE (lhs) == SSA_NAME
      && (immusestmt = get_single_immediate_use (lhs))
      && is_gimple_assign (immusestmt)
      && (gimple_assign_rhs_code (immusestmt) == PLUS_EXPR
          || gimple_assign_rhs_code (immusestmt) == MULT_EXPR))
    return true;
  return false;
}
/* Transform STMT from A - B into A + -B.  */

static void
break_up_subtract (gimple stmt, gimple_stmt_iterator *gsip)
{
  tree rhs1 = gimple_assign_rhs1 (stmt);
  tree rhs2 = gimple_assign_rhs2 (stmt);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Breaking up subtract ");
      print_gimple_stmt (dump_file, stmt, 0, 0);
    }

  rhs2 = negate_value (rhs2, gsip);
  gimple_assign_set_rhs_with_ops (gsip, PLUS_EXPR, rhs1, rhs2);
  update_stmt (stmt);
}
/* Determine whether STMT is a builtin call that raises an SSA name
   to an integer power and has only one use.  If so, and this is early
   reassociation and unsafe math optimizations are permitted, place
   the SSA name in *BASE and the exponent in *EXPONENT, and return TRUE.
   If any of these conditions does not hold, return FALSE.  */

static bool
acceptable_pow_call (gimple stmt, tree *base, HOST_WIDE_INT *exponent)
{
  tree fndecl, arg1;
  REAL_VALUE_TYPE c, cint;

  if (!first_pass_instance
      || !flag_unsafe_math_optimizations
      || !is_gimple_call (stmt)
      || !has_single_use (gimple_call_lhs (stmt)))
    return false;

  fndecl = gimple_call_fndecl (stmt);

  if (!fndecl
      || DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_NORMAL)
    return false;

  switch (DECL_FUNCTION_CODE (fndecl))
    {
    CASE_FLT_FN (BUILT_IN_POW):
      *base = gimple_call_arg (stmt, 0);
      arg1 = gimple_call_arg (stmt, 1);

      if (TREE_CODE (arg1) != REAL_CST)
        return false;

      c = TREE_REAL_CST (arg1);

      if (REAL_EXP (&c) > HOST_BITS_PER_WIDE_INT)
        return false;

      *exponent = real_to_integer (&c);
      real_from_integer (&cint, VOIDmode, *exponent,
                         *exponent < 0 ? -1 : 0, 0);
      if (!real_identical (&c, &cint))
        return false;

      break;

    CASE_FLT_FN (BUILT_IN_POWI):
      *base = gimple_call_arg (stmt, 0);
      arg1 = gimple_call_arg (stmt, 1);

      if (!host_integerp (arg1, 0))
        return false;

      *exponent = TREE_INT_CST_LOW (arg1);
      break;

    default:
      return false;
    }

  /* Expanding negative exponents is generally unproductive, so we don't
     complicate matters with those.  Exponents of zero and one should
     have been handled by expression folding.  */
  if (*exponent < 2 || TREE_CODE (*base) != SSA_NAME)
    return false;

  return true;
}
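/* Illustrative sketch, not part of the pass (function name
   hypothetical): under -funsafe-math-optimizations, a call such as
   __builtin_pow (x, 4.0) passes the checks above because the real
   constant 4.0 converts exactly to the integer 4; the repeated-factor
   machinery can then expand it into two multiplies.  */

static double
pow4_sketch (double x)
{
  double x2 = x * x;
  return x2 * x2;   /* x^4 in 2 multiplies instead of a pow call */
}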
/* Recursively linearize a binary expression that is the RHS of STMT.
   Place the operands of the expression tree in the vector named OPS.  */

static void
linearize_expr_tree (VEC(operand_entry_t, heap) **ops, gimple stmt,
                     bool is_associative, bool set_visited)
{
  tree binlhs = gimple_assign_rhs1 (stmt);
  tree binrhs = gimple_assign_rhs2 (stmt);
  gimple binlhsdef = NULL, binrhsdef = NULL;
  bool binlhsisreassoc = false;
  bool binrhsisreassoc = false;
  enum tree_code rhscode = gimple_assign_rhs_code (stmt);
  struct loop *loop = loop_containing_stmt (stmt);
  tree base = NULL_TREE;
  HOST_WIDE_INT exponent = 0;

  if (set_visited)
    gimple_set_visited (stmt, true);

  if (TREE_CODE (binlhs) == SSA_NAME)
    {
      binlhsdef = SSA_NAME_DEF_STMT (binlhs);
      binlhsisreassoc = (is_reassociable_op (binlhsdef, rhscode, loop)
                         && !stmt_could_throw_p (binlhsdef));
    }

  if (TREE_CODE (binrhs) == SSA_NAME)
    {
      binrhsdef = SSA_NAME_DEF_STMT (binrhs);
      binrhsisreassoc = (is_reassociable_op (binrhsdef, rhscode, loop)
                         && !stmt_could_throw_p (binrhsdef));
    }

  /* If the LHS is not reassociable, but the RHS is, we need to swap
     them.  If neither is reassociable, there is nothing we can do, so
     just put them in the ops vector.  If the LHS is reassociable,
     linearize it.  If both are reassociable, then linearize the RHS
     and the LHS.  */

  if (!binlhsisreassoc)
    {
      tree temp;

      /* If this is not an associative operation like division, give up.  */
      if (!is_associative)
        {
          add_to_ops_vec (ops, binrhs);
          return;
        }

      if (!binrhsisreassoc)
        {
          if (rhscode == MULT_EXPR
              && TREE_CODE (binrhs) == SSA_NAME
              && acceptable_pow_call (binrhsdef, &base, &exponent))
            {
              add_repeat_to_ops_vec (ops, base, exponent);
              gimple_set_visited (binrhsdef, true);
            }
          else
            add_to_ops_vec (ops, binrhs);

          if (rhscode == MULT_EXPR
              && TREE_CODE (binlhs) == SSA_NAME
              && acceptable_pow_call (binlhsdef, &base, &exponent))
            {
              add_repeat_to_ops_vec (ops, base, exponent);
              gimple_set_visited (binlhsdef, true);
            }
          else
            add_to_ops_vec (ops, binlhs);

          return;
        }

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "swapping operands of ");
          print_gimple_stmt (dump_file, stmt, 0, 0);
        }

      swap_tree_operands (stmt,
                          gimple_assign_rhs1_ptr (stmt),
                          gimple_assign_rhs2_ptr (stmt));
      update_stmt (stmt);

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, " is now ");
          print_gimple_stmt (dump_file, stmt, 0, 0);
        }

      /* We want to make it so the lhs is always the reassociative op,
         so swap.  */
      temp = binlhs;
      binlhs = binrhs;
      binrhs = temp;
    }
  else if (binrhsisreassoc)
    {
      linearize_expr (stmt);
      binlhs = gimple_assign_rhs1 (stmt);
      binrhs = gimple_assign_rhs2 (stmt);
    }

  gcc_assert (TREE_CODE (binrhs) != SSA_NAME
              || !is_reassociable_op (SSA_NAME_DEF_STMT (binrhs),
                                      rhscode, loop));
  linearize_expr_tree (ops, SSA_NAME_DEF_STMT (binlhs),
                       is_associative, set_visited);

  if (rhscode == MULT_EXPR
      && TREE_CODE (binrhs) == SSA_NAME
      && acceptable_pow_call (SSA_NAME_DEF_STMT (binrhs), &base, &exponent))
    {
      add_repeat_to_ops_vec (ops, base, exponent);
      gimple_set_visited (SSA_NAME_DEF_STMT (binrhs), true);
    }
  else
    add_to_ops_vec (ops, binrhs);
}
/* Repropagate the negates back into subtracts, since no other pass
   currently does it.  */

static void
repropagate_negates (void)
{
  unsigned int i = 0;
  tree negate;

  FOR_EACH_VEC_ELT (tree, plus_negates, i, negate)
    {
      gimple user = get_single_immediate_use (negate);

      if (!user || !is_gimple_assign (user))
        continue;

      /* The negate operand can be either operand of a PLUS_EXPR
         (it can be the LHS if the RHS is a constant for example).

         Force the negate operand to the RHS of the PLUS_EXPR, then
         transform the PLUS_EXPR into a MINUS_EXPR.  */
      if (gimple_assign_rhs_code (user) == PLUS_EXPR)
        {
          /* If the negated operand appears on the LHS of the
             PLUS_EXPR, exchange the operands of the PLUS_EXPR
             to force the negated operand to the RHS of the PLUS_EXPR.  */
          if (gimple_assign_rhs1 (user) == negate)
            {
              swap_tree_operands (user,
                                  gimple_assign_rhs1_ptr (user),
                                  gimple_assign_rhs2_ptr (user));
            }

          /* Now transform the PLUS_EXPR into a MINUS_EXPR and replace
             the RHS of the PLUS_EXPR with the operand of the NEGATE_EXPR.  */
          if (gimple_assign_rhs2 (user) == negate)
            {
              tree rhs1 = gimple_assign_rhs1 (user);
              tree rhs2 = get_unary_op (negate, NEGATE_EXPR);
              gimple_stmt_iterator gsi = gsi_for_stmt (user);
              gimple_assign_set_rhs_with_ops (&gsi, MINUS_EXPR, rhs1, rhs2);
              update_stmt (user);
            }
        }
      else if (gimple_assign_rhs_code (user) == MINUS_EXPR)
        {
          if (gimple_assign_rhs1 (user) == negate)
            {
              /* We have
                   x = -a
                   y = x - b
                 which we transform into
                   x = a + b
                   y = -x .
                 This pushes down the negate which we possibly can merge
                 into some other operation, hence insert it into the
                 plus_negates vector.  */
              gimple feed = SSA_NAME_DEF_STMT (negate);
              tree a = gimple_assign_rhs1 (feed);
              tree rhs2 = gimple_assign_rhs2 (user);
              gimple_stmt_iterator gsi = gsi_for_stmt (feed), gsi2;
              gimple_replace_lhs (feed, negate);
              gimple_assign_set_rhs_with_ops (&gsi, PLUS_EXPR, a, rhs2);
              update_stmt (gsi_stmt (gsi));
              gsi2 = gsi_for_stmt (user);
              gimple_assign_set_rhs_with_ops (&gsi2, NEGATE_EXPR, negate, NULL);
              update_stmt (gsi_stmt (gsi2));
              gsi_move_before (&gsi, &gsi2);
              VEC_safe_push (tree, heap, plus_negates,
                             gimple_assign_lhs (gsi_stmt (gsi2)));
            }
          else
            {
              /* Transform "x = -a; y = b - x" into "y = b + a", getting
                 rid of one operation.  */
              gimple feed = SSA_NAME_DEF_STMT (negate);
              tree a = gimple_assign_rhs1 (feed);
              tree rhs1 = gimple_assign_rhs1 (user);
              gimple_stmt_iterator gsi = gsi_for_stmt (user);
              gimple_assign_set_rhs_with_ops (&gsi, PLUS_EXPR, rhs1, a);
              update_stmt (gsi_stmt (gsi));
            }
        }
    }
}
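/* Illustrative sketch, not part of the pass (function name
   hypothetical): both MINUS_EXPR cases above, in scalar form.  */

static int
repropagate_sketch (int a, int b)
{
  /* Case "x = -a; y = b - x" becomes "y = b + a".  */
  int y1 = b + a;
  /* Case "x = -a; y = x - b" becomes "x = a + b; y = -x", and the
     fresh negate is queued on plus_negates for another round.  */
  int x = a + b;
  int y2 = -x;

  return y1 + y2;   /* (b + a) + -(a + b) == 0 under wrapping */
}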
/* Returns true if OP is of a type for which we can do reassociation.
   That is for integral or non-saturating fixed-point types, and for
   floating point type when associative-math is enabled.  */

static bool
can_reassociate_p (tree op)
{
  tree type = TREE_TYPE (op);
  if ((INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_WRAPS (type))
      || NON_SAT_FIXED_POINT_TYPE_P (type)
      || (flag_associative_math && FLOAT_TYPE_P (type)))
    return true;
  return false;
}
/* Break up subtract operations in block BB.

   We do this top down because we don't know whether the subtract is
   part of a possible chain of reassociation except at the top.

   IE given
   d = f + g
   c = a + e
   b = c - d
   q = b - r
   k = t - q

   we want to break up k = t - q, but we won't until we've transformed q
   = b - r, which won't be broken up until we transform b = c - d.

   En passant, clear the GIMPLE visited flag on every statement.  */

static void
break_up_subtract_bb (basic_block bb)
{
  gimple_stmt_iterator gsi;
  basic_block son;

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      gimple_set_visited (stmt, false);

      if (!is_gimple_assign (stmt)
          || !can_reassociate_p (gimple_assign_lhs (stmt)))
        continue;

      /* Look for simple gimple subtract operations.  */
      if (gimple_assign_rhs_code (stmt) == MINUS_EXPR)
        {
          if (!can_reassociate_p (gimple_assign_rhs1 (stmt))
              || !can_reassociate_p (gimple_assign_rhs2 (stmt)))
            continue;

          /* Check for a subtract used only in an addition.  If this
             is the case, transform it into add of a negate for better
             reassociation.  IE transform C = A-B into C = A + -B if C
             is only used in an addition.  */
          if (should_break_up_subtract (stmt))
            break_up_subtract (stmt, &gsi);
        }
      else if (gimple_assign_rhs_code (stmt) == NEGATE_EXPR
               && can_reassociate_p (gimple_assign_rhs1 (stmt)))
        VEC_safe_push (tree, heap, plus_negates, gimple_assign_lhs (stmt));
    }
  for (son = first_dom_son (CDI_DOMINATORS, bb);
       son;
       son = next_dom_son (CDI_DOMINATORS, son))
    break_up_subtract_bb (son);
}
/* Used for repeated factor analysis.  */
struct repeat_factor_d
{
  /* An SSA name that occurs in a multiply chain.  */
  tree factor;

  /* Cached rank of the factor.  */
  unsigned rank;

  /* Number of occurrences of the factor in the chain.  */
  HOST_WIDE_INT count;

  /* An SSA name representing the product of this factor and
     all factors appearing later in the repeated factor vector.  */
  tree repr;
};

typedef struct repeat_factor_d repeat_factor, *repeat_factor_t;
typedef const struct repeat_factor_d *const_repeat_factor_t;

DEF_VEC_O (repeat_factor);
DEF_VEC_ALLOC_O (repeat_factor, heap);

static VEC (repeat_factor, heap) *repeat_factor_vec;

/* Used for sorting the repeat factor vector.  Sort primarily by
   ascending occurrence count, secondarily by descending rank.  */

static int
compare_repeat_factors (const void *x1, const void *x2)
{
  const_repeat_factor_t rf1 = (const_repeat_factor_t) x1;
  const_repeat_factor_t rf2 = (const_repeat_factor_t) x2;

  if (rf1->count != rf2->count)
    return rf1->count - rf2->count;

  return rf2->rank - rf1->rank;
}
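/* Illustrative sketch, not part of the pass (names hypothetical):
   the same ordering on a plain struct, usable with qsort.  For
   factors {count 2, rank 5}, {count 3, rank 4}, {count 3, rank 7},
   sorting yields count 2 first, then the count-3 pair with the
   higher rank (7) ahead of the lower one.  Unlike the comparator
   above, the sketch uses explicit comparisons rather than
   subtraction, which sidesteps truncation of wide counts to int.  */

struct rf_sketch { long count; unsigned rank; };

static int
compare_rf_sketch (const void *p1, const void *p2)
{
  const struct rf_sketch *a = (const struct rf_sketch *) p1;
  const struct rf_sketch *b = (const struct rf_sketch *) p2;

  if (a->count != b->count)
    return a->count < b->count ? -1 : 1;        /* ascending count */
  if (a->rank != b->rank)
    return b->rank < a->rank ? -1 : 1;          /* descending rank */
  return 0;
}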
/* Look for repeated operands in OPS in the multiply tree rooted at
   STMT.  Replace them with an optimal sequence of multiplies and powi
   builtin calls, and remove the used operands from OPS.  Return an
   SSA name representing the value of the replacement sequence.  */

static tree
attempt_builtin_powi (gimple stmt, VEC(operand_entry_t, heap) **ops)
{
  unsigned i, j, vec_len;
  int ii;
  operand_entry_t oe;
  repeat_factor_t rf1, rf2;
  repeat_factor rfnew;
  tree result = NULL_TREE;
  tree target_ssa, iter_result;
  tree type = TREE_TYPE (gimple_get_lhs (stmt));
  tree powi_fndecl = mathfn_built_in (type, BUILT_IN_POWI);
  gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
  gimple mul_stmt, pow_stmt;

  /* Nothing to do if BUILT_IN_POWI doesn't exist for this type and
     target.  */
  if (!powi_fndecl)
    return NULL_TREE;

  /* Allocate the repeated factor vector.  */
  repeat_factor_vec = VEC_alloc (repeat_factor, heap, 10);

  /* Scan the OPS vector for all SSA names in the product and build
     up a vector of occurrence counts for each factor.  */
  FOR_EACH_VEC_ELT (operand_entry_t, *ops, i, oe)
    {
      if (TREE_CODE (oe->op) == SSA_NAME)
        {
          FOR_EACH_VEC_ELT (repeat_factor, repeat_factor_vec, j, rf1)
            {
              if (rf1->factor == oe->op)
                {
                  rf1->count += oe->count;
                  break;
                }
            }

          if (j >= VEC_length (repeat_factor, repeat_factor_vec))
            {
              rfnew.factor = oe->op;
              rfnew.rank = oe->rank;
              rfnew.count = oe->count;
              rfnew.repr = NULL_TREE;
              VEC_safe_push (repeat_factor, heap, repeat_factor_vec, rfnew);
            }
        }
    }

  /* Sort the repeated factor vector by (a) increasing occurrence count,
     and (b) decreasing rank.  */
  VEC_qsort (repeat_factor, repeat_factor_vec, compare_repeat_factors);

  /* It is generally best to combine as many base factors as possible
     into a product before applying __builtin_powi to the result.
     However, the sort order chosen for the repeated factor vector
     allows us to cache partial results for the product of the base
     factors for subsequent use.  When we already have a cached partial
     result from a previous iteration, it is best to make use of it
     before looking for another __builtin_pow opportunity.

     As an example, consider x * x * y * y * y * z * z * z * z.
     We want to first compose the product x * y * z, raise it to the
     second power, then multiply this by y * z, and finally multiply
     by z.  This can be done in 5 multiplies provided we cache y * z
     for use in both expressions:

       t1 = y * z
       t2 = t1 * x
       t3 = t2 * t2
       t4 = t3 * t1
       result = t4 * z

     If we instead ignored the cached y * z and first multiplied by
     the __builtin_pow opportunity z * z, we would get the inferior:

       t1 = y * z
       t2 = t1 * x
       t3 = t2 * t2
       t4 = z * z
       t5 = t3 * t4
       result = t5 * y  */

  vec_len = VEC_length (repeat_factor, repeat_factor_vec);

  /* Repeatedly look for opportunities to create a builtin_powi call.  */
  while (true)
    {
      HOST_WIDE_INT power;

      /* First look for the largest cached product of factors from
         preceding iterations.  If found, create a builtin_powi for
         it if the minimum occurrence count for its factors is at
         least 2, or just use this cached product as our next
         multiplicand if the minimum occurrence count is 1.  */
      FOR_EACH_VEC_ELT (repeat_factor, repeat_factor_vec, j, rf1)
        {
          if (rf1->repr && rf1->count > 0)
            break;
        }

      if (j < vec_len)
        {
          power = rf1->count;

          if (power == 1)
            {
              iter_result = rf1->repr;

              if (dump_file && (dump_flags & TDF_DETAILS))
                {
                  unsigned elt;
                  repeat_factor_t rf;
                  fputs ("Multiplying by cached product ", dump_file);
                  for (elt = j; elt < vec_len; elt++)
                    {
                      rf = &VEC_index (repeat_factor, repeat_factor_vec, elt);
                      print_generic_expr (dump_file, rf->factor, 0);
                      if (elt < vec_len - 1)
                        fputs (" * ", dump_file);
                    }
                  fputs ("\n", dump_file);
                }
            }
          else
            {
              iter_result = make_temp_ssa_name (type, NULL, "reassocpow");
              pow_stmt = gimple_build_call (powi_fndecl, 2, rf1->repr,
                                            build_int_cst (integer_type_node,
                                                           power));
              gimple_call_set_lhs (pow_stmt, iter_result);
              gimple_set_location (pow_stmt, gimple_location (stmt));
              gsi_insert_before (&gsi, pow_stmt, GSI_SAME_STMT);

              if (dump_file && (dump_flags & TDF_DETAILS))
                {
                  unsigned elt;
                  repeat_factor_t rf;
                  fputs ("Building __builtin_pow call for cached product (",
                         dump_file);
                  for (elt = j; elt < vec_len; elt++)
                    {
                      rf = &VEC_index (repeat_factor, repeat_factor_vec, elt);
                      print_generic_expr (dump_file, rf->factor, 0);
                      if (elt < vec_len - 1)
                        fputs (" * ", dump_file);
                    }
                  fprintf (dump_file, ")^"HOST_WIDE_INT_PRINT_DEC"\n",
                           power);
                }
            }
        }
      else
        {
          /* Otherwise, find the first factor in the repeated factor
             vector whose occurrence count is at least 2.  If no such
             factor exists, there are no builtin_powi opportunities
             remaining.  */
          FOR_EACH_VEC_ELT (repeat_factor, repeat_factor_vec, j, rf1)
            {
              if (rf1->count >= 2)
                break;
            }

          if (j >= vec_len)
            break;

          power = rf1->count;

          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              unsigned elt;
              repeat_factor_t rf;
              fputs ("Building __builtin_pow call for (", dump_file);
              for (elt = j; elt < vec_len; elt++)
                {
                  rf = &VEC_index (repeat_factor, repeat_factor_vec, elt);
                  print_generic_expr (dump_file, rf->factor, 0);
                  if (elt < vec_len - 1)
                    fputs (" * ", dump_file);
                }
              fprintf (dump_file, ")^"HOST_WIDE_INT_PRINT_DEC"\n", power);
            }

          reassociate_stats.pows_created++;

          /* Visit each element of the vector in reverse order (so that
             high-occurrence elements are visited first, and within the
             same occurrence count, lower-ranked elements are visited
             first).  Form a linear product of all elements in this order
             whose occurrence count is at least that of element J.
             Record the SSA name representing the product of each element
             with all subsequent elements in the vector.  */
          if (j == vec_len - 1)
            rf1->repr = rf1->factor;
          else
            {
              for (ii = vec_len - 2; ii >= (int)j; ii--)
                {
                  tree op1, op2;

                  rf1 = &VEC_index (repeat_factor, repeat_factor_vec, ii);
                  rf2 = &VEC_index (repeat_factor, repeat_factor_vec, ii + 1);

                  /* Init the last factor's representative to be itself.  */
                  if (!rf2->repr)
                    rf2->repr = rf2->factor;

                  op1 = rf1->factor;
                  op2 = rf2->repr;

                  target_ssa = make_temp_ssa_name (type, NULL, "reassocpow");
                  mul_stmt = gimple_build_assign_with_ops (MULT_EXPR,
                                                           target_ssa,
                                                           op1, op2);
                  gimple_set_location (mul_stmt, gimple_location (stmt));
                  gsi_insert_before (&gsi, mul_stmt, GSI_SAME_STMT);
                  rf1->repr = target_ssa;

                  /* Don't reprocess the multiply we just introduced.  */
                  gimple_set_visited (mul_stmt, true);
                }
            }

          /* Form a call to __builtin_powi for the maximum product
             just formed, raised to the power obtained earlier.  */
          rf1 = &VEC_index (repeat_factor, repeat_factor_vec, j);
          iter_result = make_temp_ssa_name (type, NULL, "reassocpow");
          pow_stmt = gimple_build_call (powi_fndecl, 2, rf1->repr,
                                        build_int_cst (integer_type_node,
                                                       power));
          gimple_call_set_lhs (pow_stmt, iter_result);
          gimple_set_location (pow_stmt, gimple_location (stmt));
          gsi_insert_before (&gsi, pow_stmt, GSI_SAME_STMT);
        }

      /* If we previously formed at least one other builtin_powi call,
         form the product of this one and those others.  */
      if (result)
        {
          tree new_result = make_temp_ssa_name (type, NULL, "reassocpow");
          mul_stmt = gimple_build_assign_with_ops (MULT_EXPR, new_result,
                                                   result, iter_result);
          gimple_set_location (mul_stmt, gimple_location (stmt));
          gsi_insert_before (&gsi, mul_stmt, GSI_SAME_STMT);
          gimple_set_visited (mul_stmt, true);
          result = new_result;
        }
      else
        result = iter_result;

      /* Decrement the occurrence count of each element in the product
         by the count found above, and remove this many copies of each
         factor from OPS.  */
      for (i = j; i < vec_len; i++)
        {
          unsigned k = power;
          unsigned n;

          rf1 = &VEC_index (repeat_factor, repeat_factor_vec, i);
          rf1->count -= power;

          FOR_EACH_VEC_ELT_REVERSE (operand_entry_t, *ops, n, oe)
            {
              if (oe->op == rf1->factor)
                {
                  if (oe->count <= k)
                    {
                      VEC_ordered_remove (operand_entry_t, *ops, n);
                      k -= oe->count;

                      if (k == 0)
                        break;
                    }
                  else
                    {
                      oe->count -= k;
                      break;
                    }
                }
            }
        }
    }

  /* At this point all elements in the repeated factor vector have a
     remaining occurrence count of 0 or 1, and those with a count of 1
     don't have cached representatives.  Re-sort the ops vector and
     clean up.  */
  VEC_qsort (operand_entry_t, *ops, sort_by_operand_rank);
  VEC_free (repeat_factor, heap, repeat_factor_vec);

  /* Return the final product computed herein.  Note that there may
     still be some elements with single occurrence count left in OPS;
     those will be handled by the normal reassociation logic.  */
  return result;
}
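/* Illustrative sketch, not part of the pass (function name
   hypothetical): the caching strategy from the comment above, written
   out for x * x * y * y * y * z * z * z * z.  Five multiplies,
   matching the t1 .. t4 sequence, versus six when the cached y * z
   product is ignored.  */

static double
powi_cache_sketch (double x, double y, double z)
{
  double t1 = y * z;    /* cached partial product */
  double t2 = t1 * x;   /* x * y * z */
  double t3 = t2 * t2;  /* (x * y * z)^2 */
  double t4 = t3 * t1;  /* times cached y * z */
  return t4 * z;        /* times z: x^2 y^3 z^4 in 5 multiplies */
}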
/* Transform STMT at *GSI into a copy by replacing its rhs with NEW_RHS.  */

static void
transform_stmt_to_copy (gimple_stmt_iterator *gsi, gimple stmt, tree new_rhs)
{
  tree rhs1;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Transforming ");
      print_gimple_stmt (dump_file, stmt, 0, 0);
    }

  rhs1 = gimple_assign_rhs1 (stmt);
  gimple_assign_set_rhs_from_tree (gsi, new_rhs);
  update_stmt (gsi_stmt (*gsi));
  remove_visited_stmt_chain (rhs1);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, " into ");
      print_gimple_stmt (dump_file, stmt, 0, 0);
    }
}
/* Transform STMT at *GSI into a multiply of RHS1 and RHS2.  */

static void
transform_stmt_to_multiply (gimple_stmt_iterator *gsi, gimple stmt,
                            tree rhs1, tree rhs2)
{
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Transforming ");
      print_gimple_stmt (dump_file, stmt, 0, 0);
    }

  gimple_assign_set_rhs_with_ops (gsi, MULT_EXPR, rhs1, rhs2);
  update_stmt (gsi_stmt (*gsi));
  remove_visited_stmt_chain (rhs1);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, " into ");
      print_gimple_stmt (dump_file, stmt, 0, 0);
    }
}
/* Reassociate expressions in basic block BB and its post-dominator as
   children.  */

static void
reassociate_bb (basic_block bb)
{
  gimple_stmt_iterator gsi;
  basic_block son;
  gimple stmt = last_stmt (bb);

  if (stmt && !gimple_visited_p (stmt))
    maybe_optimize_range_tests (stmt);

  for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      stmt = gsi_stmt (gsi);

      if (is_gimple_assign (stmt)
          && !stmt_could_throw_p (stmt))
        {
          tree lhs, rhs1, rhs2;
          enum tree_code rhs_code = gimple_assign_rhs_code (stmt);

          /* If this is not a gimple binary expression, there is
             nothing for us to do with it.  */
          if (get_gimple_rhs_class (rhs_code) != GIMPLE_BINARY_RHS)
            continue;

          /* If this was part of an already processed statement,
             we don't need to touch it again.  */
          if (gimple_visited_p (stmt))
            {
              /* This statement might have become dead because of previous
                 reassociations.  */
              if (has_zero_uses (gimple_get_lhs (stmt)))
                {
                  gsi_remove (&gsi, true);
                  release_defs (stmt);
                  /* We might end up removing the last stmt above which
                     places the iterator to the end of the sequence.
                     Reset it to the last stmt in this case which might
                     be the end of the sequence as well if we removed
                     the last statement of the sequence.  In which case
                     we need to bail out.  */
                  if (gsi_end_p (gsi))
                    {
                      gsi = gsi_last_bb (bb);
                      if (gsi_end_p (gsi))
                        break;
                    }
                }
              continue;
            }

          lhs = gimple_assign_lhs (stmt);
          rhs1 = gimple_assign_rhs1 (stmt);
          rhs2 = gimple_assign_rhs2 (stmt);

          /* For non-bit or min/max operations we can't associate
             all types.  Verify that here.  */
          if (rhs_code != BIT_IOR_EXPR
              && rhs_code != BIT_AND_EXPR
              && rhs_code != BIT_XOR_EXPR
              && rhs_code != MIN_EXPR
              && rhs_code != MAX_EXPR
              && (!can_reassociate_p (lhs)
                  || !can_reassociate_p (rhs1)
                  || !can_reassociate_p (rhs2)))
            continue;

          if (associative_tree_code (rhs_code))
            {
              VEC(operand_entry_t, heap) *ops = NULL;
              tree powi_result = NULL_TREE;

              /* There may be no immediate uses left by the time we
                 get here because we may have eliminated them all.  */
              if (TREE_CODE (lhs) == SSA_NAME && has_zero_uses (lhs))
                continue;

              gimple_set_visited (stmt, true);
              linearize_expr_tree (&ops, stmt, true, true);
              VEC_qsort (operand_entry_t, ops, sort_by_operand_rank);
              optimize_ops_list (rhs_code, &ops);
              if (undistribute_ops_list (rhs_code, &ops,
                                         loop_containing_stmt (stmt)))
                {
                  VEC_qsort (operand_entry_t, ops, sort_by_operand_rank);
                  optimize_ops_list (rhs_code, &ops);
                }

              if (rhs_code == BIT_IOR_EXPR || rhs_code == BIT_AND_EXPR)
                optimize_range_tests (rhs_code, &ops);

              if (first_pass_instance
                  && rhs_code == MULT_EXPR
                  && flag_unsafe_math_optimizations)
                powi_result = attempt_builtin_powi (stmt, &ops);

              /* If the operand vector is now empty, all operands were
                 consumed by the __builtin_powi optimization.  */
              if (VEC_length (operand_entry_t, ops) == 0)
                transform_stmt_to_copy (&gsi, stmt, powi_result);
              else if (VEC_length (operand_entry_t, ops) == 1)
                {
                  tree last_op = VEC_last (operand_entry_t, ops)->op;

                  if (powi_result)
                    transform_stmt_to_multiply (&gsi, stmt, last_op,
                                                powi_result);
                  else
                    transform_stmt_to_copy (&gsi, stmt, last_op);
                }
              else
                {
                  enum machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
                  int ops_num = VEC_length (operand_entry_t, ops);
                  int width = get_reassociation_width (ops_num, rhs_code, mode);

                  if (dump_file && (dump_flags & TDF_DETAILS))
                    fprintf (dump_file,
                             "Width = %d was chosen for reassociation\n", width);

                  if (width > 1
                      && VEC_length (operand_entry_t, ops) > 3)
                    rewrite_expr_tree_parallel (stmt, width, ops);
                  else
                    rewrite_expr_tree (stmt, 0, ops, false);

                  /* If we combined some repeated factors into a
                     __builtin_powi call, multiply that result by the
                     reassociated operands.  */
                  if (powi_result)
                    {
                      gimple mul_stmt;
                      tree type = TREE_TYPE (gimple_get_lhs (stmt));
                      tree target_ssa = make_temp_ssa_name (type, NULL,
                                                            "reassocpow");
                      gimple_set_lhs (stmt, target_ssa);
                      update_stmt (stmt);
                      mul_stmt = gimple_build_assign_with_ops (MULT_EXPR, lhs,
                                                               powi_result,
                                                               target_ssa);
                      gimple_set_location (mul_stmt, gimple_location (stmt));
                      gsi_insert_after (&gsi, mul_stmt, GSI_NEW_STMT);
                    }
                }

              VEC_free (operand_entry_t, heap, ops);
            }
        }
    }
  for (son = first_dom_son (CDI_POST_DOMINATORS, bb);
       son;
       son = next_dom_son (CDI_POST_DOMINATORS, son))
    reassociate_bb (son);
}
void dump_ops_vector (FILE *file, VEC (operand_entry_t, heap) *ops);
void debug_ops_vector (VEC (operand_entry_t, heap) *ops);

/* Dump the operand entry vector OPS to FILE.  */

void
dump_ops_vector (FILE *file, VEC (operand_entry_t, heap) *ops)
{
  operand_entry_t oe;
  unsigned int i;

  FOR_EACH_VEC_ELT (operand_entry_t, ops, i, oe)
    {
      fprintf (file, "Op %d -> rank: %d, tree: ", i, oe->rank);
      print_generic_expr (file, oe->op, 0);
    }
}

/* Dump the operand entry vector OPS to STDERR.  */

DEBUG_FUNCTION void
debug_ops_vector (VEC (operand_entry_t, heap) *ops)
{
  dump_ops_vector (stderr, ops);
}

static void
do_reassoc (void)
{
  break_up_subtract_bb (ENTRY_BLOCK_PTR);
  reassociate_bb (EXIT_BLOCK_PTR);
}
/* Initialize the reassociation pass.  */

static void
init_reassoc (void)
{
  int i;
  long rank = 2;
  int *bbs = XNEWVEC (int, n_basic_blocks - NUM_FIXED_BLOCKS);

  /* Find the loops, so that we can prevent moving calculations in
     them.  */
  loop_optimizer_init (AVOID_CFG_MODIFICATIONS);

  memset (&reassociate_stats, 0, sizeof (reassociate_stats));

  operand_entry_pool = create_alloc_pool ("operand entry pool",
                                          sizeof (struct operand_entry), 30);
  next_operand_entry_id = 0;

  /* Reverse RPO (Reverse Post Order) will give us something where
     deeper loops come later.  */
  pre_and_rev_post_order_compute (NULL, bbs, false);
  bb_rank = XCNEWVEC (long, last_basic_block);
  operand_rank = pointer_map_create ();

  /* Give each default definition a distinct rank.  This includes
     parameters and the static chain.  Walk backwards over all
     SSA names so that we get proper rank ordering according
     to tree_swap_operands_p.  */
  for (i = num_ssa_names - 1; i > 0; --i)
    {
      tree name = ssa_name (i);
      if (name && SSA_NAME_IS_DEFAULT_DEF (name))
        insert_operand_rank (name, ++rank);
    }

  /* Set up rank for each BB  */
  for (i = 0; i < n_basic_blocks - NUM_FIXED_BLOCKS; i++)
    bb_rank[bbs[i]] = ++rank << 16;

  free (bbs);
  calculate_dominance_info (CDI_POST_DOMINATORS);
  plus_negates = NULL;
}

/* Cleanup after the reassociation pass, and print stats if
   requested.  */

static void
fini_reassoc (void)
{
  statistics_counter_event (cfun, "Linearized",
                            reassociate_stats.linearized);
  statistics_counter_event (cfun, "Constants eliminated",
                            reassociate_stats.constants_eliminated);
  statistics_counter_event (cfun, "Ops eliminated",
                            reassociate_stats.ops_eliminated);
  statistics_counter_event (cfun, "Statements rewritten",
                            reassociate_stats.rewritten);
  statistics_counter_event (cfun, "Built-in pow[i] calls encountered",
                            reassociate_stats.pows_encountered);
  statistics_counter_event (cfun, "Built-in powi calls created",
                            reassociate_stats.pows_created);

  pointer_map_destroy (operand_rank);
  free_alloc_pool (operand_entry_pool);
  free (bb_rank);
  VEC_free (tree, heap, plus_negates);
  free_dominance_info (CDI_POST_DOMINATORS);
  loop_optimizer_finalize ();
}
/* Gate and execute functions for Reassociation.  */

static unsigned int
execute_reassoc (void)
{
  init_reassoc ();

  do_reassoc ();
  repropagate_negates ();

  fini_reassoc ();
  return 0;
}

static bool
gate_tree_ssa_reassoc (void)
{
  return flag_tree_reassoc != 0;
}

struct gimple_opt_pass pass_reassoc =
{
 {
  GIMPLE_PASS,
  "reassoc",                            /* name */
  OPTGROUP_NONE,                        /* optinfo_flags */
  gate_tree_ssa_reassoc,                /* gate */
  execute_reassoc,                      /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_TREE_REASSOC,                      /* tv_id */
  PROP_cfg | PROP_ssa,                  /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_verify_ssa
    | TODO_verify_flow
    | TODO_ggc_collect                  /* todo_flags_finish */
 }
};