/* Reassociation for trees.
   Copyright (C) 2005-2013 Free Software Foundation, Inc.
   Contributed by Daniel Berlin <dan@dberlin.org>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "basic-block.h"
#include "gimple-pretty-print.h"
#include "tree-inline.h"
#include "tree-flow.h"
#include "gimple.h"
#include "tree-iterator.h"
#include "tree-pass.h"
#include "alloc-pool.h"
#include "vec.h"
#include "langhooks.h"
#include "pointer-set.h"
#include "cfgloop.h"
#include "flags.h"
#include "target.h"
#include "params.h"
#include "diagnostic-core.h"

/* This is a simple global reassociation pass.  It is, in part, based
   on the LLVM pass of the same name (They do some things more/less
   than we do, in different orders, etc).

   It consists of five steps:

   1. Breaking up subtract operations into addition + negate, where
   it would promote the reassociation of adds.

   2. Left linearization of the expression trees, so that (A+B)+(C+D)
   becomes (((A+B)+C)+D), which is easier for us to rewrite later.
   During linearization, we place the operands of the binary
   expressions into a vector of operand_entry_t

   3. Optimization of the operand lists, eliminating things like a +
   -a, a & a, etc.

   3a. Combine repeated factors with the same occurrence counts
   into a __builtin_powi call that will later be optimized into
   an optimal number of multiplies.

   4. Rewrite the expression trees we linearized and optimized so
   they are in proper rank order.

   5. Repropagate negates, as nothing else will clean it up ATM.

   A bit of theory on #4, since nobody seems to write anything down
   about why it makes sense to do it the way they do it:

   We could do this much nicer theoretically, but don't (for reasons
   explained after how to do it theoretically nice :P).

   In order to promote the most redundancy elimination, you want
   binary expressions whose operands are the same rank (or
   preferably, the same value) exposed to the redundancy eliminator,
   for possible elimination.

   So the way to do this if we really cared, is to build the new op
   tree from the leaves to the roots, merging as you go, and putting the
   new op on the end of the worklist, until you are left with one
   thing on the worklist.

   IE if you have to rewrite the following set of operands (listed with
   rank in parentheses), with opcode PLUS_EXPR:

   a (1),  b (1),  c (1),  d (2), e (2)

   We start with our merge worklist empty, and the ops list with all of
   those on it.

   You want to first merge all leaves of the same rank, as much as
   possible.

   So first build a binary op of

   mergetmp = a + b, and put "mergetmp" on the merge worklist.

   Because there is no three operand form of PLUS_EXPR, c is not going to
   be exposed to redundancy elimination as a rank 1 operand.

   So you might as well throw it on the merge worklist (you could also
   consider it to now be a rank two operand, and merge it with d and e,
   but in this case, you then have evicted e from a binary op. So at
   least in this situation, you can't win.)

   Then build a binary op of d + e
   mergetmp2 = d + e

   and put mergetmp2 on the merge worklist.

   so merge worklist = {mergetmp, c, mergetmp2}

   Continue building binary ops of these operations until you have only
   one operation left on the worklist.

   So we have

   build binary op
   mergetmp3 = mergetmp + c

   worklist = {mergetmp2, mergetmp3}

   mergetmp4 = mergetmp2 + mergetmp3

   worklist = {mergetmp4}

   because we have one operation left, we can now just set the original
   statement equal to the result of that operation.

   This will at least expose a + b and d + e to redundancy elimination
   as binary operations.

   For extra points, you can reuse the old statements to build the
   mergetmps, since you shouldn't run out.

   So why don't we do this?

   Because it's expensive, and rarely will help.  Most trees we are
   reassociating have 3 or less ops.  If they have 2 ops, they already
   will be written into a nice single binary op.  If you have 3 ops, a
   single simple check suffices to tell you whether the first two are of the
   same rank.  If so, you know to order it

   mergetmp = op1 + op2
   newstmt = mergetmp + op3

   instead of
   mergetmp = op2 + op3
   newstmt = mergetmp + op1

   If all three are of the same rank, you can't expose them all in a
   single binary operator anyway, so the above is *still* the best you
   can do.

   Thus, this is what we do.  When we have three ops left, we check to see
   what order to put them in, and call it a day.  As a nod to vector sum
   reduction, we check if any of the ops are really a phi node that is a
   destructive update for the associating op, and keep the destructive
   update together for vector sum reduction recognition.  */

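/* As a quick illustration of the steps above (a sketch, not an actual
   pass dump), a statement sequence like

     t1 = a - b;
     t2 = t1 + c;

   has its subtract broken into t1 = a + (-b) (step 1), is linearized
   into the operand list {a, -b, c} (step 2), has that list optimized
   and re-ranked (steps 3 and 4), and finally any surviving negate is
   repropagated back into a subtract (step 5).  */
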
/* Statistics */
static struct
{
  int linearized;
  int constants_eliminated;
  int ops_eliminated;
  int rewritten;
  int pows_encountered;
  int pows_created;
} reassociate_stats;

/* Operator, rank pair.  */
typedef struct operand_entry
{
  unsigned int rank;
  int id;
  tree op;
  unsigned int count;
} *operand_entry_t;

static alloc_pool operand_entry_pool;

/* This is used to assign a unique ID to each struct operand_entry
   so that qsort results are identical on different hosts.  */
static int next_operand_entry_id;

/* Starting rank number for a given basic block, so that we can rank
   operations using unmovable instructions in that BB based on the bb
   depth.  */
static long *bb_rank;

/* Operand->rank hashtable.  */
static struct pointer_map_t *operand_rank;

static long get_rank (tree);


/* Bias amount for loop-carried phis.  We want this to be larger than
   the depth of any reassociation tree we can see, but not larger than
   the rank difference between two blocks.  */
#define PHI_LOOP_BIAS (1 << 15)

/* Rank assigned to a phi statement.  If STMT is a loop-carried phi of
   an innermost loop, and the phi has only a single use which is inside
   the loop, then the rank is the block rank of the loop latch plus an
   extra bias for the loop-carried dependence.  This causes expressions
   calculated into an accumulator variable to be independent for each
   iteration of the loop.  If STMT is some other phi, the rank is the
   block rank of its containing block.  */
static long
phi_rank (gimple stmt)
{
  basic_block bb = gimple_bb (stmt);
  struct loop *father = bb->loop_father;
  tree res;
  unsigned i;
  use_operand_p use;
  gimple use_stmt;

  /* We only care about real loops (those with a latch).  */
  if (!father->latch)
    return bb_rank[bb->index];

  /* Interesting phis must be in headers of innermost loops.  */
  if (bb != father->header
      || father->inner)
    return bb_rank[bb->index];

  /* Ignore virtual SSA_NAMEs.  */
  res = gimple_phi_result (stmt);
  if (virtual_operand_p (res))
    return bb_rank[bb->index];

  /* The phi definition must have a single use, and that use must be
     within the loop.  Otherwise this isn't an accumulator pattern.  */
  if (!single_imm_use (res, &use, &use_stmt)
      || gimple_bb (use_stmt)->loop_father != father)
    return bb_rank[bb->index];

  /* Look for phi arguments from within the loop.  If found, bias this phi.  */
  for (i = 0; i < gimple_phi_num_args (stmt); i++)
    {
      tree arg = gimple_phi_arg_def (stmt, i);
      if (TREE_CODE (arg) == SSA_NAME
          && !SSA_NAME_IS_DEFAULT_DEF (arg))
        {
          gimple def_stmt = SSA_NAME_DEF_STMT (arg);
          if (gimple_bb (def_stmt)->loop_father == father)
            return bb_rank[father->latch->index] + PHI_LOOP_BIAS;
        }
    }

  /* Must be an uninteresting phi.  */
  return bb_rank[bb->index];
}

/* If EXP is an SSA_NAME defined by a PHI statement that represents a
   loop-carried dependence of an innermost loop, return TRUE; else
   return FALSE.  */
static bool
loop_carried_phi (tree exp)
{
  gimple phi_stmt;
  long block_rank;

  if (TREE_CODE (exp) != SSA_NAME
      || SSA_NAME_IS_DEFAULT_DEF (exp))
    return false;

  phi_stmt = SSA_NAME_DEF_STMT (exp);

  if (gimple_code (SSA_NAME_DEF_STMT (exp)) != GIMPLE_PHI)
    return false;

  /* Non-loop-carried phis have block rank.  Loop-carried phis have
     an additional bias added in.  If this phi doesn't have block rank,
     it's biased and should not be propagated.  */
  block_rank = bb_rank[gimple_bb (phi_stmt)->index];

  if (phi_rank (phi_stmt) != block_rank)
    return true;

  return false;
}

/* Return the maximum of RANK and the rank that should be propagated
   from expression OP.  For most operands, this is just the rank of OP.
   For loop-carried phis, the value is zero to avoid undoing the bias
   in favor of the phi.  */
static long
propagate_rank (long rank, tree op)
{
  long op_rank;

  if (loop_carried_phi (op))
    return rank;

  op_rank = get_rank (op);

  return MAX (rank, op_rank);
}

/* Look up the operand rank structure for expression E.  */

static long
find_operand_rank (tree e)
{
  void **slot = pointer_map_contains (operand_rank, e);
  return slot ? (long) (intptr_t) *slot : -1;
}

/* Insert {E,RANK} into the operand rank hashtable.  */

static void
insert_operand_rank (tree e, long rank)
{
  void **slot;
  gcc_assert (rank > 0);
  slot = pointer_map_insert (operand_rank, e);
  gcc_assert (!*slot);
  *slot = (void *) (intptr_t) rank;
}

/* Given an expression E, return the rank of the expression.  */

static long
get_rank (tree e)
{
  /* Constants have rank 0.  */
  if (is_gimple_min_invariant (e))
    return 0;

  /* SSA_NAME's have the rank of the expression they are the result
     of.
     For globals and uninitialized values, the rank is 0.
     For function arguments, use the pre-setup rank.
     For PHI nodes, stores, asm statements, etc, we use the rank of
     the BB.
     For simple operations, the rank is the maximum rank of any of
     its operands, or the bb_rank, whichever is less.
     I make no claims that this is optimal, however, it gives good
     results.  */

  /* We make an exception to the normal ranking system to break
     dependences of accumulator variables in loops.  Suppose we
     have a simple one-block loop containing:

       x_1 = phi(x_0, x_2)
       b = a + x_1
       c = b + d
       x_2 = c + e

     As shown, each iteration of the calculation into x is fully
     dependent upon the iteration before it.  We would prefer to
     see this in the form:

       x_1 = phi(x_0, x_2)
       b = a + d
       c = b + e
       x_2 = c + x_1

     If the loop is unrolled, the calculations of b and c from
     different iterations can be interleaved.

     To obtain this result during reassociation, we bias the rank
     of the phi definition x_1 upward, when it is recognized as an
     accumulator pattern.  The artificial rank causes it to be
     added last, providing the desired independence.  */

  if (TREE_CODE (e) == SSA_NAME)
    {
      gimple stmt;
      long rank;
      int i, n;
      tree op;

      if (SSA_NAME_IS_DEFAULT_DEF (e))
        return find_operand_rank (e);

      stmt = SSA_NAME_DEF_STMT (e);
      if (gimple_code (stmt) == GIMPLE_PHI)
        return phi_rank (stmt);

      if (!is_gimple_assign (stmt)
          || gimple_vdef (stmt))
        return bb_rank[gimple_bb (stmt)->index];

      /* If we already have a rank for this expression, use that.  */
      rank = find_operand_rank (e);
      if (rank != -1)
        return rank;

      /* Otherwise, find the maximum rank for the operands.  As an
         exception, remove the bias from loop-carried phis when propagating
         the rank so that dependent operations are not also biased.  */
      rank = 0;
      if (gimple_assign_single_p (stmt))
        {
          tree rhs = gimple_assign_rhs1 (stmt);
          n = TREE_OPERAND_LENGTH (rhs);
          if (n == 0)
            rank = propagate_rank (rank, rhs);
          else
            {
              for (i = 0; i < n; i++)
                {
                  op = TREE_OPERAND (rhs, i);

                  if (op != NULL_TREE)
                    rank = propagate_rank (rank, op);
                }
            }
        }
      else
        {
          n = gimple_num_ops (stmt);
          for (i = 1; i < n; i++)
            {
              op = gimple_op (stmt, i);
              gcc_assert (op);
              rank = propagate_rank (rank, op);
            }
        }

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "Rank for ");
          print_generic_expr (dump_file, e, 0);
          fprintf (dump_file, " is %ld\n", (rank + 1));
        }

      /* Note the rank in the hashtable so we don't recompute it.  */
      insert_operand_rank (e, (rank + 1));
      return (rank + 1);
    }

  /* Globals, etc,  are rank 0 */
  return 0;
}

/* We want integer ones to end up last no matter what, since they are
   the ones we can do the most with.  */
#define INTEGER_CONST_TYPE 1 << 3
#define FLOAT_CONST_TYPE 1 << 2
#define OTHER_CONST_TYPE 1 << 1

/* Classify an invariant tree into integer, float, or other, so that
   we can sort them to be near other constants of the same type.  */
static inline int
constant_type (tree t)
{
  if (INTEGRAL_TYPE_P (TREE_TYPE (t)))
    return INTEGER_CONST_TYPE;
  else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (t)))
    return FLOAT_CONST_TYPE;
  else
    return OTHER_CONST_TYPE;
}

/* qsort comparison function to sort operand entries PA and PB by rank
   so that the sorted array is ordered by rank in decreasing order.  */
static int
sort_by_operand_rank (const void *pa, const void *pb)
{
  const operand_entry_t oea = *(const operand_entry_t *)pa;
  const operand_entry_t oeb = *(const operand_entry_t *)pb;

  /* It's nicer for optimize_expression if constants that are likely
     to fold when added/multiplied//whatever are put next to each
     other.  Since all constants have rank 0, order them by type.  */
  if (oeb->rank == 0 && oea->rank == 0)
    {
      if (constant_type (oeb->op) != constant_type (oea->op))
        return constant_type (oeb->op) - constant_type (oea->op);
      else
        /* To make sorting result stable, we use unique IDs to determine
           order.  */
        return oeb->id - oea->id;
    }

  /* Lastly, make sure the versions that are the same go next to each
     other.  We use SSA_NAME_VERSION because it's stable.  */
  if ((oeb->rank - oea->rank == 0)
      && TREE_CODE (oea->op) == SSA_NAME
      && TREE_CODE (oeb->op) == SSA_NAME)
    {
      if (SSA_NAME_VERSION (oeb->op) != SSA_NAME_VERSION (oea->op))
        return SSA_NAME_VERSION (oeb->op) - SSA_NAME_VERSION (oea->op);
      else
        return oeb->id - oea->id;
    }

  if (oeb->rank != oea->rank)
    return oeb->rank - oea->rank;
  else
    return oeb->id - oea->id;
}

/* Add an operand entry to *OPS for the tree operand OP.  */

static void
add_to_ops_vec (vec<operand_entry_t> *ops, tree op)
{
  operand_entry_t oe = (operand_entry_t) pool_alloc (operand_entry_pool);

  oe->op = op;
  oe->rank = get_rank (op);
  oe->id = next_operand_entry_id++;
  oe->count = 1;
  ops->safe_push (oe);
}

/* Add an operand entry to *OPS for the tree operand OP with repeat
   count REPEAT.  */

static void
add_repeat_to_ops_vec (vec<operand_entry_t> *ops, tree op,
                       HOST_WIDE_INT repeat)
{
  operand_entry_t oe = (operand_entry_t) pool_alloc (operand_entry_pool);

  oe->op = op;
  oe->rank = get_rank (op);
  oe->id = next_operand_entry_id++;
  oe->count = repeat;
  ops->safe_push (oe);

  reassociate_stats.pows_encountered++;
}

/* Return true if STMT is a reassociable operation containing a binary
   operation with tree code CODE, and is inside LOOP.  */

static bool
is_reassociable_op (gimple stmt, enum tree_code code, struct loop *loop)
{
  basic_block bb = gimple_bb (stmt);

  if (gimple_bb (stmt) == NULL)
    return false;

  if (!flow_bb_inside_loop_p (loop, bb))
    return false;

  if (is_gimple_assign (stmt)
      && gimple_assign_rhs_code (stmt) == code
      && has_single_use (gimple_assign_lhs (stmt)))
    return true;

  return false;
}

/* Given NAME, if NAME is defined by a unary operation OPCODE, return the
   operand of that operation.  Otherwise, return NULL_TREE.  */

static tree
get_unary_op (tree name, enum tree_code opcode)
{
  gimple stmt = SSA_NAME_DEF_STMT (name);

  if (!is_gimple_assign (stmt))
    return NULL_TREE;

  if (gimple_assign_rhs_code (stmt) == opcode)
    return gimple_assign_rhs1 (stmt);
  return NULL_TREE;
}

/* If CURR and LAST are a pair of ops that OPCODE allows us to
   eliminate through equivalences, do so, remove them from OPS, and
   return true.  Otherwise, return false.  */

static bool
eliminate_duplicate_pair (enum tree_code opcode,
                          vec<operand_entry_t> *ops,
                          bool *all_done,
                          unsigned int i,
                          operand_entry_t curr,
                          operand_entry_t last)
{

  /* If we have two of the same op, and the opcode is & |, min, or max,
     we can eliminate one of them.
     If we have two of the same op, and the opcode is ^, we can
     eliminate both of them.  */

  if (last && last->op == curr->op)
    {
      switch (opcode)
        {
        case MAX_EXPR:
        case MIN_EXPR:
        case BIT_IOR_EXPR:
        case BIT_AND_EXPR:
          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "Equivalence: ");
              print_generic_expr (dump_file, curr->op, 0);
              fprintf (dump_file, " [&|minmax] ");
              print_generic_expr (dump_file, last->op, 0);
              fprintf (dump_file, " -> ");
              print_generic_stmt (dump_file, last->op, 0);
            }

          ops->ordered_remove (i);
          reassociate_stats.ops_eliminated ++;

          return true;

        case BIT_XOR_EXPR:
          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "Equivalence: ");
              print_generic_expr (dump_file, curr->op, 0);
              fprintf (dump_file, " ^ ");
              print_generic_expr (dump_file, last->op, 0);
              fprintf (dump_file, " -> nothing\n");
            }

          reassociate_stats.ops_eliminated += 2;

          if (ops->length () == 2)
            {
              ops->truncate (0);
              add_to_ops_vec (ops, build_zero_cst (TREE_TYPE (last->op)));
              *all_done = true;
            }
          else
            {
              ops->ordered_remove (i-1);
              ops->ordered_remove (i-1);
            }

          return true;

        default:
          break;
        }
    }
  return false;
}

static vec<tree> plus_negates;

/* If OPCODE is PLUS_EXPR, CURR->OP is a negate expression or a bitwise not
   expression, look in OPS for a corresponding positive operation to cancel
   it out.  If we find one, remove the other from OPS, replace
   OPS[CURRINDEX] with 0 or -1, respectively, and return true.  Otherwise,
   return false.  */

static bool
eliminate_plus_minus_pair (enum tree_code opcode,
                           vec<operand_entry_t> *ops,
                           unsigned int currindex,
                           operand_entry_t curr)
{
  tree negateop;
  tree notop;
  unsigned int i;
  operand_entry_t oe;

  if (opcode != PLUS_EXPR || TREE_CODE (curr->op) != SSA_NAME)
    return false;

  negateop = get_unary_op (curr->op, NEGATE_EXPR);
  notop = get_unary_op (curr->op, BIT_NOT_EXPR);
  if (negateop == NULL_TREE && notop == NULL_TREE)
    return false;

  /* Any non-negated version will have a rank that is one less than
     the current rank.  So once we hit those ranks, if we don't find
     one, we can stop.  */

  for (i = currindex + 1;
       ops->iterate (i, &oe)
       && oe->rank >= curr->rank - 1 ;
       i++)
    {
      if (oe->op == negateop)
        {

          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "Equivalence: ");
              print_generic_expr (dump_file, negateop, 0);
              fprintf (dump_file, " + -");
              print_generic_expr (dump_file, oe->op, 0);
              fprintf (dump_file, " -> 0\n");
            }

          ops->ordered_remove (i);
          add_to_ops_vec (ops, build_zero_cst (TREE_TYPE (oe->op)));
          ops->ordered_remove (currindex);
          reassociate_stats.ops_eliminated ++;

          return true;
        }
      else if (oe->op == notop)
        {
          tree op_type = TREE_TYPE (oe->op);

          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "Equivalence: ");
              print_generic_expr (dump_file, notop, 0);
              fprintf (dump_file, " + ~");
              print_generic_expr (dump_file, oe->op, 0);
              fprintf (dump_file, " -> -1\n");
            }

          ops->ordered_remove (i);
          add_to_ops_vec (ops, build_int_cst_type (op_type, -1));
          ops->ordered_remove (currindex);
          reassociate_stats.ops_eliminated ++;

          return true;
        }
    }

  /* CURR->OP is a negate expr in a plus expr: save it for later
     inspection in repropagate_negates().  */
  if (negateop != NULL_TREE)
    plus_negates.safe_push (curr->op);

  return false;
}

/* If OPCODE is BIT_IOR_EXPR or BIT_AND_EXPR, and CURR->OP is really a
   bitwise not expression, look in OPS for a corresponding operand to
   cancel it out.  If we find one, remove the other from OPS, replace
   OPS[CURRINDEX] with 0, and return true.  Otherwise, return
   false. */

static bool
eliminate_not_pairs (enum tree_code opcode,
                     vec<operand_entry_t> *ops,
                     unsigned int currindex,
                     operand_entry_t curr)
{
  tree notop;
  unsigned int i;
  operand_entry_t oe;

  if ((opcode != BIT_IOR_EXPR && opcode != BIT_AND_EXPR)
      || TREE_CODE (curr->op) != SSA_NAME)
    return false;

  notop = get_unary_op (curr->op, BIT_NOT_EXPR);
  if (notop == NULL_TREE)
    return false;

  /* Any non-not version will have a rank that is one less than
     the current rank.  So once we hit those ranks, if we don't find
     one, we can stop.  */

  for (i = currindex + 1;
       ops->iterate (i, &oe)
       && oe->rank >= curr->rank - 1;
       i++)
    {
      if (oe->op == notop)
        {
          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "Equivalence: ");
              print_generic_expr (dump_file, notop, 0);
              if (opcode == BIT_AND_EXPR)
                fprintf (dump_file, " & ~");
              else if (opcode == BIT_IOR_EXPR)
                fprintf (dump_file, " | ~");
              print_generic_expr (dump_file, oe->op, 0);
              if (opcode == BIT_AND_EXPR)
                fprintf (dump_file, " -> 0\n");
              else if (opcode == BIT_IOR_EXPR)
                fprintf (dump_file, " -> -1\n");
            }

          if (opcode == BIT_AND_EXPR)
            oe->op = build_zero_cst (TREE_TYPE (oe->op));
          else if (opcode == BIT_IOR_EXPR)
            oe->op = build_all_ones_cst (TREE_TYPE (oe->op));

          reassociate_stats.ops_eliminated += ops->length () - 1;
          ops->truncate (0);
          ops->quick_push (oe);
          return true;
        }
    }

  return false;
}

/* Use constant value that may be present in OPS to try to eliminate
   operands.  Note that this function is only really used when we've
   eliminated ops for other reasons, or merged constants.  Across
   single statements, fold already does all of this, plus more.  There
   is little point in duplicating logic, so I've only included the
   identities that I could ever construct testcases to trigger.  */

static void
eliminate_using_constants (enum tree_code opcode,
                           vec<operand_entry_t> *ops)
{
  operand_entry_t oelast = ops->last ();
  tree type = TREE_TYPE (oelast->op);

  if (oelast->rank == 0
      && (INTEGRAL_TYPE_P (type) || FLOAT_TYPE_P (type)))
    {
      switch (opcode)
        {
        case BIT_AND_EXPR:
          if (integer_zerop (oelast->op))
            {
              if (ops->length () != 1)
                {
                  if (dump_file && (dump_flags & TDF_DETAILS))
                    fprintf (dump_file, "Found & 0, removing all other ops\n");

                  reassociate_stats.ops_eliminated += ops->length () - 1;

                  ops->truncate (0);
                  ops->quick_push (oelast);
                  return;
                }
            }
          else if (integer_all_onesp (oelast->op))
            {
              if (ops->length () != 1)
                {
                  if (dump_file && (dump_flags & TDF_DETAILS))
                    fprintf (dump_file, "Found & -1, removing\n");
                  ops->pop ();
                  reassociate_stats.ops_eliminated++;
                }
            }
          break;
        case BIT_IOR_EXPR:
          if (integer_all_onesp (oelast->op))
            {
              if (ops->length () != 1)
                {
                  if (dump_file && (dump_flags & TDF_DETAILS))
                    fprintf (dump_file, "Found | -1, removing all other ops\n");

                  reassociate_stats.ops_eliminated += ops->length () - 1;

                  ops->truncate (0);
                  ops->quick_push (oelast);
                  return;
                }
            }
          else if (integer_zerop (oelast->op))
            {
              if (ops->length () != 1)
                {
                  if (dump_file && (dump_flags & TDF_DETAILS))
                    fprintf (dump_file, "Found | 0, removing\n");
                  ops->pop ();
                  reassociate_stats.ops_eliminated++;
                }
            }
          break;
        case MULT_EXPR:
          if (integer_zerop (oelast->op)
              || (FLOAT_TYPE_P (type)
                  && !HONOR_NANS (TYPE_MODE (type))
                  && !HONOR_SIGNED_ZEROS (TYPE_MODE (type))
                  && real_zerop (oelast->op)))
            {
              if (ops->length () != 1)
                {
                  if (dump_file && (dump_flags & TDF_DETAILS))
                    fprintf (dump_file, "Found * 0, removing all other ops\n");

                  reassociate_stats.ops_eliminated += ops->length () - 1;
                  ops->truncate (0);
                  ops->quick_push (oelast);
                  return;
                }
            }
          else if (integer_onep (oelast->op)
                   || (FLOAT_TYPE_P (type)
                       && !HONOR_SNANS (TYPE_MODE (type))
                       && real_onep (oelast->op)))
            {
              if (ops->length () != 1)
                {
                  if (dump_file && (dump_flags & TDF_DETAILS))
                    fprintf (dump_file, "Found * 1, removing\n");
                  ops->pop ();
                  reassociate_stats.ops_eliminated++;
                }
            }
          break;
        case BIT_XOR_EXPR:
        case PLUS_EXPR:
        case MINUS_EXPR:
          if (integer_zerop (oelast->op)
              || (FLOAT_TYPE_P (type)
                  && (opcode == PLUS_EXPR || opcode == MINUS_EXPR)
                  && fold_real_zero_addition_p (type, oelast->op,
                                                opcode == MINUS_EXPR)))
            {
              if (ops->length () != 1)
                {
                  if (dump_file && (dump_flags & TDF_DETAILS))
                    fprintf (dump_file, "Found [|^+] 0, removing\n");
                  ops->pop ();
                  reassociate_stats.ops_eliminated++;
                }
            }
          break;
        default:
          break;
        }
    }
}

static void linearize_expr_tree (vec<operand_entry_t> *, gimple,
                                 bool, bool);

/* Structure for tracking and counting operands.  */
typedef struct oecount_s {
  int cnt;
  int id;
  enum tree_code oecode;
  tree op;
} oecount;


/* The heap for the oecount hashtable and the sorted list of operands.  */
static vec<oecount> cvec;

/* Hash function for oecount.  */

static hashval_t
oecount_hash (const void *p)
{
  const oecount *c = &cvec[(size_t)p - 42];
  return htab_hash_pointer (c->op) ^ (hashval_t)c->oecode;
}

/* Comparison function for oecount.  */

static int
oecount_eq (const void *p1, const void *p2)
{
  const oecount *c1 = &cvec[(size_t)p1 - 42];
  const oecount *c2 = &cvec[(size_t)p2 - 42];
  return (c1->oecode == c2->oecode
          && c1->op == c2->op);
}

/* Comparison function for qsort sorting oecount elements by count.  */

static int
oecount_cmp (const void *p1, const void *p2)
{
  const oecount *c1 = (const oecount *)p1;
  const oecount *c2 = (const oecount *)p2;
  if (c1->cnt != c2->cnt)
    return c1->cnt - c2->cnt;
  else
    /* If counts are identical, use unique IDs to stabilize qsort.  */
    return c1->id - c2->id;
}

/* Return TRUE iff STMT represents a builtin call that raises OP
   to some exponent.  */

static bool
stmt_is_power_of_op (gimple stmt, tree op)
{
  tree fndecl;

  if (!is_gimple_call (stmt))
    return false;

  fndecl = gimple_call_fndecl (stmt);

  if (!fndecl
      || DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_NORMAL)
    return false;

  switch (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt)))
    {
    CASE_FLT_FN (BUILT_IN_POW):
    CASE_FLT_FN (BUILT_IN_POWI):
      return (operand_equal_p (gimple_call_arg (stmt, 0), op, 0));

    default:
      return false;
    }
}

/* Given STMT which is a __builtin_pow* call, decrement its exponent
   in place and return the result.  Assumes that stmt_is_power_of_op
   was previously called for STMT and returned TRUE.  */

static HOST_WIDE_INT
decrement_power (gimple stmt)
{
  REAL_VALUE_TYPE c, cint;
  HOST_WIDE_INT power;
  tree arg1;

  switch (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt)))
    {
    CASE_FLT_FN (BUILT_IN_POW):
      arg1 = gimple_call_arg (stmt, 1);
      c = TREE_REAL_CST (arg1);
      power = real_to_integer (&c) - 1;
      real_from_integer (&cint, VOIDmode, power, 0, 0);
      gimple_call_set_arg (stmt, 1, build_real (TREE_TYPE (arg1), cint));
      return power;

    CASE_FLT_FN (BUILT_IN_POWI):
      arg1 = gimple_call_arg (stmt, 1);
      power = TREE_INT_CST_LOW (arg1) - 1;
      gimple_call_set_arg (stmt, 1, build_int_cst (TREE_TYPE (arg1), power));
      return power;

    default:
      gcc_unreachable ();
    }
}

/* Find the single immediate use of STMT's LHS, and replace it
   with OP.  Remove STMT.  If STMT's LHS is the same as *DEF,
   replace *DEF with OP as well.  */

static void
propagate_op_to_single_use (tree op, gimple stmt, tree *def)
{
  tree lhs;
  gimple use_stmt;
  use_operand_p use;
  gimple_stmt_iterator gsi;

  if (is_gimple_call (stmt))
    lhs = gimple_call_lhs (stmt);
  else
    lhs = gimple_assign_lhs (stmt);

  gcc_assert (has_single_use (lhs));
  single_imm_use (lhs, &use, &use_stmt);
  if (lhs == *def)
    *def = op;
  SET_USE (use, op);
  if (TREE_CODE (op) != SSA_NAME)
    update_stmt (use_stmt);
  gsi = gsi_for_stmt (stmt);
  unlink_stmt_vdef (stmt);
  gsi_remove (&gsi, true);
  release_defs (stmt);
}

/* Walks the linear chain with result *DEF searching for an operation
   with operand OP and code OPCODE removing that from the chain.  *DEF
   is updated if there is only one operand but no operation left.  */

static void
zero_one_operation (tree *def, enum tree_code opcode, tree op)
{
  gimple stmt = SSA_NAME_DEF_STMT (*def);

  do
    {
      tree name;

      if (opcode == MULT_EXPR
          && stmt_is_power_of_op (stmt, op))
        {
          if (decrement_power (stmt) == 1)
            propagate_op_to_single_use (op, stmt, def);
          return;
        }

      name = gimple_assign_rhs1 (stmt);

      /* If this is the operation we look for and one of the operands
         is ours simply propagate the other operand into the stmts
         single use.  */
      if (gimple_assign_rhs_code (stmt) == opcode
          && (name == op
              || gimple_assign_rhs2 (stmt) == op))
        {
          if (name == op)
            name = gimple_assign_rhs2 (stmt);
          propagate_op_to_single_use (name, stmt, def);
          return;
        }

      /* We might have a multiply of two __builtin_pow* calls, and
         the operand might be hiding in the rightmost one.  */
      if (opcode == MULT_EXPR
          && gimple_assign_rhs_code (stmt) == opcode
          && TREE_CODE (gimple_assign_rhs2 (stmt)) == SSA_NAME
          && has_single_use (gimple_assign_rhs2 (stmt)))
        {
          gimple stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs2 (stmt));
          if (stmt_is_power_of_op (stmt2, op))
            {
              if (decrement_power (stmt2) == 1)
                propagate_op_to_single_use (op, stmt2, def);
              return;
            }
        }

      /* Continue walking the chain.  */
      gcc_assert (name != op
                  && TREE_CODE (name) == SSA_NAME);
      stmt = SSA_NAME_DEF_STMT (name);
    }
  while (1);
}

/* Builds one statement performing OP1 OPCODE OP2 using TMPVAR for
   the result.  Places the statement after the definition of either
   OP1 or OP2.  Returns the new statement.  */

static gimple
build_and_add_sum (tree type, tree op1, tree op2, enum tree_code opcode)
{
  gimple op1def = NULL, op2def = NULL;
  gimple_stmt_iterator gsi;
  tree op;
  gimple sum;

  /* Create the addition statement.  */
  op = make_ssa_name (type, NULL);
  sum = gimple_build_assign_with_ops (opcode, op, op1, op2);

  /* Find an insertion place and insert.  */
  if (TREE_CODE (op1) == SSA_NAME)
    op1def = SSA_NAME_DEF_STMT (op1);
  if (TREE_CODE (op2) == SSA_NAME)
    op2def = SSA_NAME_DEF_STMT (op2);
  if ((!op1def || gimple_nop_p (op1def))
      && (!op2def || gimple_nop_p (op2def)))
    {
      gsi = gsi_after_labels (single_succ (ENTRY_BLOCK_PTR));
      gsi_insert_before (&gsi, sum, GSI_NEW_STMT);
    }
  else if ((!op1def || gimple_nop_p (op1def))
           || (op2def && !gimple_nop_p (op2def)
               && stmt_dominates_stmt_p (op1def, op2def)))
    {
      if (gimple_code (op2def) == GIMPLE_PHI)
        {
          gsi = gsi_after_labels (gimple_bb (op2def));
          gsi_insert_before (&gsi, sum, GSI_NEW_STMT);
        }
      else
        {
          if (!stmt_ends_bb_p (op2def))
            {
              gsi = gsi_for_stmt (op2def);
              gsi_insert_after (&gsi, sum, GSI_NEW_STMT);
            }
          else
            {
              edge e;
              edge_iterator ei;

              FOR_EACH_EDGE (e, ei, gimple_bb (op2def)->succs)
                if (e->flags & EDGE_FALLTHRU)
                  gsi_insert_on_edge_immediate (e, sum);
            }
        }
    }
  else
    {
      if (gimple_code (op1def) == GIMPLE_PHI)
        {
          gsi = gsi_after_labels (gimple_bb (op1def));
          gsi_insert_before (&gsi, sum, GSI_NEW_STMT);
        }
      else
        {
          if (!stmt_ends_bb_p (op1def))
            {
              gsi = gsi_for_stmt (op1def);
              gsi_insert_after (&gsi, sum, GSI_NEW_STMT);
            }
          else
            {
              edge e;
              edge_iterator ei;

              FOR_EACH_EDGE (e, ei, gimple_bb (op1def)->succs)
                if (e->flags & EDGE_FALLTHRU)
                  gsi_insert_on_edge_immediate (e, sum);
            }
        }
    }

  update_stmt (sum);

  return sum;
}

/* Perform un-distribution of divisions and multiplications.
   A * X + B * X is transformed into (A + B) * X and A / X + B / X
   to (A + B) / X for real X.

   The algorithm is organized as follows.

    - First we walk the addition chain *OPS looking for summands that
      are defined by a multiplication or a real division.  This results
      in the candidates bitmap with relevant indices into *OPS.

    - Second we build the chains of multiplications or divisions for
      these candidates, counting the number of occurrences of (operand, code)
      pairs in all of the candidates chains.

    - Third we sort the (operand, code) pairs by number of occurrence and
      process them starting with the pair with the most uses.

      * For each such pair we walk the candidates again to build a
        second candidate bitmap noting all multiplication/division chains
        that have at least one occurrence of (operand, code).

      * We build an alternate addition chain only covering these
        candidates with one (operand, code) operation removed from their
        multiplication/division chain.

      * The first candidate gets replaced by the alternate addition chain
        multiplied/divided by the operand.

      * All candidate chains get disabled for further processing and
        processing of (operand, code) pairs continues.

  The alternate addition chains built are re-processed by the main
  reassociation algorithm which allows optimizing a * x * y + b * y * x
  to (a + b ) * x * y in one invocation of the reassociation pass.  */

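/* A small hand-written illustration (not pass output): for the operand
   list of  a * x + b * x + c  the candidates are a * x and b * x, and
   the pair (x, MULT_EXPR) is counted twice.  Both chains have the
   multiplication by x removed, the alternate addition chain a + b is
   built, the first candidate becomes (a + b) * x and the second is
   replaced by 0, leaving the list { (a + b) * x, 0, c } for the main
   algorithm to finish.  */
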
static bool
undistribute_ops_list (enum tree_code opcode,
                       vec<operand_entry_t> *ops, struct loop *loop)
{
  unsigned int length = ops->length ();
  operand_entry_t oe1;
  unsigned i, j;
  sbitmap candidates, candidates2;
  unsigned nr_candidates, nr_candidates2;
  sbitmap_iterator sbi0;
  vec<operand_entry_t> *subops;
  htab_t ctable;
  bool changed = false;
  int next_oecount_id = 0;

  if (length <= 1
      || opcode != PLUS_EXPR)
    return false;

  /* Build a list of candidates to process.  */
  candidates = sbitmap_alloc (length);
  bitmap_clear (candidates);
  nr_candidates = 0;
  FOR_EACH_VEC_ELT (*ops, i, oe1)
    {
      enum tree_code dcode;
      gimple oe1def;

      if (TREE_CODE (oe1->op) != SSA_NAME)
        continue;
      oe1def = SSA_NAME_DEF_STMT (oe1->op);
      if (!is_gimple_assign (oe1def))
        continue;
      dcode = gimple_assign_rhs_code (oe1def);
      if ((dcode != MULT_EXPR
           && dcode != RDIV_EXPR)
          || !is_reassociable_op (oe1def, dcode, loop))
        continue;

      bitmap_set_bit (candidates, i);
      nr_candidates++;
    }

  if (nr_candidates < 2)
    {
      sbitmap_free (candidates);
      return false;
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "searching for un-distribute opportunities ");
      print_generic_expr (dump_file,
                          (*ops)[bitmap_first_set_bit (candidates)]->op, 0);
      fprintf (dump_file, " %d\n", nr_candidates);
    }

  /* Build linearized sub-operand lists and the counting table.  */
  cvec.create (0);
  ctable = htab_create (15, oecount_hash, oecount_eq, NULL);
  /* ??? Macro arguments cannot have multi-argument template types in
     them.  This typedef is needed to workaround that limitation.  */
  typedef vec<operand_entry_t> vec_operand_entry_t_heap;
  subops = XCNEWVEC (vec_operand_entry_t_heap, ops->length ());
  EXECUTE_IF_SET_IN_BITMAP (candidates, 0, i, sbi0)
    {
      gimple oedef;
      enum tree_code oecode;
      unsigned j;

      oedef = SSA_NAME_DEF_STMT ((*ops)[i]->op);
      oecode = gimple_assign_rhs_code (oedef);
      linearize_expr_tree (&subops[i], oedef,
                           associative_tree_code (oecode), false);

      FOR_EACH_VEC_ELT (subops[i], j, oe1)
        {
          oecount c;
          void **slot;
          size_t idx;
          c.oecode = oecode;
          c.cnt = 1;
          c.id = next_oecount_id++;
          c.op = oe1->op;
          cvec.safe_push (c);
          idx = cvec.length () + 41;
          slot = htab_find_slot (ctable, (void *)idx, INSERT);
          if (!*slot)
            {
              *slot = (void *)idx;
            }
          else
            {
              cvec.pop ();
              cvec[(size_t)*slot - 42].cnt++;
            }
        }
    }
  htab_delete (ctable);

  /* Sort the counting table.  */
  cvec.qsort (oecount_cmp);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      oecount *c;
      fprintf (dump_file, "Candidates:\n");
      FOR_EACH_VEC_ELT (cvec, j, c)
        {
          fprintf (dump_file, "  %u %s: ", c->cnt,
                   c->oecode == MULT_EXPR
                   ? "*" : c->oecode == RDIV_EXPR ? "/" : "?");
          print_generic_expr (dump_file, c->op, 0);
          fprintf (dump_file, "\n");
        }
    }

  /* Process the (operand, code) pairs in order of most occurrence.  */
  candidates2 = sbitmap_alloc (length);
  while (!cvec.is_empty ())
    {
      oecount *c = &cvec.last ();
      if (c->cnt < 2)
        break;

      /* Now collect the operands in the outer chain that contain
         the common operand in their inner chain.  */
      bitmap_clear (candidates2);
      nr_candidates2 = 0;
      EXECUTE_IF_SET_IN_BITMAP (candidates, 0, i, sbi0)
        {
          gimple oedef;
          enum tree_code oecode;
          unsigned j;
          tree op = (*ops)[i]->op;

          /* If we undistributed in this chain already this may be
             a constant.  */
          if (TREE_CODE (op) != SSA_NAME)
            continue;

          oedef = SSA_NAME_DEF_STMT (op);
          oecode = gimple_assign_rhs_code (oedef);
          if (oecode != c->oecode)
            continue;

          FOR_EACH_VEC_ELT (subops[i], j, oe1)
            {
              if (oe1->op == c->op)
                {
                  bitmap_set_bit (candidates2, i);
                  ++nr_candidates2;
                  break;
                }
            }
        }

      if (nr_candidates2 >= 2)
        {
          operand_entry_t oe1, oe2;
          gimple prod;
          int first = bitmap_first_set_bit (candidates2);

          /* Build the new addition chain.  */
          oe1 = (*ops)[first];
          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "Building (");
              print_generic_expr (dump_file, oe1->op, 0);
            }
          zero_one_operation (&oe1->op, c->oecode, c->op);
          EXECUTE_IF_SET_IN_BITMAP (candidates2, first+1, i, sbi0)
            {
              gimple sum;
              oe2 = (*ops)[i];
              if (dump_file && (dump_flags & TDF_DETAILS))
                {
                  fprintf (dump_file, " + ");
                  print_generic_expr (dump_file, oe2->op, 0);
                }
              zero_one_operation (&oe2->op, c->oecode, c->op);
              sum = build_and_add_sum (TREE_TYPE (oe1->op),
                                       oe1->op, oe2->op, opcode);
              oe2->op = build_zero_cst (TREE_TYPE (oe2->op));
              oe2->rank = 0;
              oe1->op = gimple_get_lhs (sum);
            }

          /* Apply the multiplication/division.  */
          prod = build_and_add_sum (TREE_TYPE (oe1->op),
                                    oe1->op, c->op, c->oecode);
          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, ") %s ", c->oecode == MULT_EXPR ? "*" : "/");
              print_generic_expr (dump_file, c->op, 0);
              fprintf (dump_file, "\n");
            }

          /* Record it in the addition chain and disable further
             undistribution with this op.  */
          oe1->op = gimple_assign_lhs (prod);
          oe1->rank = get_rank (oe1->op);
          subops[first].release ();

          changed = true;
        }

      cvec.pop ();
    }

  for (i = 0; i < ops->length (); ++i)
    subops[i].release ();
  free (subops);
  cvec.release ();
  sbitmap_free (candidates);
  sbitmap_free (candidates2);

  return changed;
}

/* If OPCODE is BIT_IOR_EXPR or BIT_AND_EXPR and CURR is a comparison
   expression, examine the other OPS to see if any of them are comparisons
   of the same values, which we may be able to combine or eliminate.
   For example, we can rewrite (a < b) | (a == b) as (a <= b).  */

static bool
eliminate_redundant_comparison (enum tree_code opcode,
                                vec<operand_entry_t> *ops,
                                unsigned int currindex,
                                operand_entry_t curr)
{
  tree op1, op2;
  enum tree_code lcode, rcode;
  gimple def1, def2;
  unsigned int i;
  operand_entry_t oe;

  if (opcode != BIT_IOR_EXPR && opcode != BIT_AND_EXPR)
    return false;

  /* Check that CURR is a comparison.  */
  if (TREE_CODE (curr->op) != SSA_NAME)
    return false;
  def1 = SSA_NAME_DEF_STMT (curr->op);
  if (!is_gimple_assign (def1))
    return false;
  lcode = gimple_assign_rhs_code (def1);
  if (TREE_CODE_CLASS (lcode) != tcc_comparison)
    return false;
  op1 = gimple_assign_rhs1 (def1);
  op2 = gimple_assign_rhs2 (def1);

  /* Now look for a similar comparison in the remaining OPS.  */
  for (i = currindex + 1; ops->iterate (i, &oe); i++)
    {
      tree t;

      if (TREE_CODE (oe->op) != SSA_NAME)
        continue;
      def2 = SSA_NAME_DEF_STMT (oe->op);
      if (!is_gimple_assign (def2))
        continue;
      rcode = gimple_assign_rhs_code (def2);
      if (TREE_CODE_CLASS (rcode) != tcc_comparison)
        continue;

      /* If we got here, we have a match.  See if we can combine the
         two comparisons.  */
      if (opcode == BIT_IOR_EXPR)
        t = maybe_fold_or_comparisons (lcode, op1, op2,
                                       rcode, gimple_assign_rhs1 (def2),
                                       gimple_assign_rhs2 (def2));
      else
        t = maybe_fold_and_comparisons (lcode, op1, op2,
                                        rcode, gimple_assign_rhs1 (def2),
                                        gimple_assign_rhs2 (def2));
      if (!t)
        continue;

      /* maybe_fold_and_comparisons and maybe_fold_or_comparisons
         always give us a boolean_type_node value back.  If the original
         BIT_AND_EXPR or BIT_IOR_EXPR was of a wider integer type,
         we need to convert.  */
      if (!useless_type_conversion_p (TREE_TYPE (curr->op), TREE_TYPE (t)))
        t = fold_convert (TREE_TYPE (curr->op), t);

      if (TREE_CODE (t) != INTEGER_CST
          && !operand_equal_p (t, curr->op, 0))
        {
          enum tree_code subcode;
          tree newop1, newop2;
          if (!COMPARISON_CLASS_P (t))
            continue;
          extract_ops_from_tree (t, &subcode, &newop1, &newop2);
          STRIP_USELESS_TYPE_CONVERSION (newop1);
          STRIP_USELESS_TYPE_CONVERSION (newop2);
          if (!is_gimple_val (newop1) || !is_gimple_val (newop2))
            continue;
        }

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "Equivalence: ");
          print_generic_expr (dump_file, curr->op, 0);
          fprintf (dump_file, " %s ", op_symbol_code (opcode));
          print_generic_expr (dump_file, oe->op, 0);
          fprintf (dump_file, " -> ");
          print_generic_expr (dump_file, t, 0);
          fprintf (dump_file, "\n");
        }

      /* Now we can delete oe, as it has been subsumed by the new combined
         expression t.  */
      ops->ordered_remove (i);
      reassociate_stats.ops_eliminated ++;

      /* If t is the same as curr->op, we're done.  Otherwise we must
         replace curr->op with t.  Special case is if we got a constant
         back, in which case we add it to the end instead of in place of
         the current entry.  */
      if (TREE_CODE (t) == INTEGER_CST)
        {
          ops->ordered_remove (currindex);
          add_to_ops_vec (ops, t);
        }
      else if (!operand_equal_p (t, curr->op, 0))
        {
          gimple sum;
          enum tree_code subcode;
          tree newop1;
          tree newop2;
          gcc_assert (COMPARISON_CLASS_P (t));
          extract_ops_from_tree (t, &subcode, &newop1, &newop2);
          STRIP_USELESS_TYPE_CONVERSION (newop1);
          STRIP_USELESS_TYPE_CONVERSION (newop2);
          gcc_checking_assert (is_gimple_val (newop1)
                               && is_gimple_val (newop2));
          sum = build_and_add_sum (TREE_TYPE (t), newop1, newop2, subcode);
          curr->op = gimple_get_lhs (sum);
        }
      return true;
    }

  return false;
}

/* Perform various identities and other optimizations on the list of
   operand entries, stored in OPS.  The tree code for the binary
   operation between all the operands is OPCODE.  */

static void
optimize_ops_list (enum tree_code opcode,
                   vec<operand_entry_t> *ops)
{
  unsigned int length = ops->length ();
  unsigned int i;
  operand_entry_t oe;
  operand_entry_t oelast = NULL;
  bool iterate = false;

  if (length == 1)
    return;

  oelast = ops->last ();

  /* If the last two are constants, pop the constants off, merge them
     and try the next two.  */
  if (oelast->rank == 0 && is_gimple_min_invariant (oelast->op))
    {
      operand_entry_t oelm1 = (*ops)[length - 2];

      if (oelm1->rank == 0
          && is_gimple_min_invariant (oelm1->op)
          && useless_type_conversion_p (TREE_TYPE (oelm1->op),
                                        TREE_TYPE (oelast->op)))
        {
          tree folded = fold_binary (opcode, TREE_TYPE (oelm1->op),
                                     oelm1->op, oelast->op);

          if (folded && is_gimple_min_invariant (folded))
            {
              if (dump_file && (dump_flags & TDF_DETAILS))
                fprintf (dump_file, "Merging constants\n");

              ops->pop ();
              ops->pop ();

              add_to_ops_vec (ops, folded);
              reassociate_stats.constants_eliminated++;

              optimize_ops_list (opcode, ops);
              return;
            }
        }
    }

  eliminate_using_constants (opcode, ops);
  oelast = NULL;

  for (i = 0; ops->iterate (i, &oe);)
    {
      bool done = false;

      if (eliminate_not_pairs (opcode, ops, i, oe))
        return;
      if (eliminate_duplicate_pair (opcode, ops, &done, i, oe, oelast)
          || (!done && eliminate_plus_minus_pair (opcode, ops, i, oe))
          || (!done && eliminate_redundant_comparison (opcode, ops, i, oe)))
        {
          if (done)
            return;
          iterate = true;
          oelast = NULL;
          continue;
        }
      oelast = oe;
      i++;
    }

  length = ops->length ();
  oelast = ops->last ();

  if (iterate)
    optimize_ops_list (opcode, ops);
}

/* The following functions are subroutines to optimize_range_tests and allow
   it to try to change a logical combination of comparisons into a range
   test.

   For example, both
	X == 2 || X == 5 || X == 3 || X == 4
   and
	X >= 2 && X <= 5
   are converted to
	(unsigned) (X - 2) <= 3

   For more information see comments above fold_test_range in fold-const.c,
   this implementation is for GIMPLE.  */

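/* A note on the example above: because X - 2 is computed in an
   unsigned type, values of X below 2 wrap around to very large
   numbers, so (unsigned) (X - 2) <= 3 holds exactly for X in
   { 2, 3, 4, 5 }, which covers both the || chain of equalities and
   the X >= 2 && X <= 5 form.  */
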
struct range_entry
{
  tree exp;
  tree low;
  tree high;
  bool in_p;
  bool strict_overflow_p;
  unsigned int idx, next;
};

/* This is similar to make_range in fold-const.c, but on top of
   GIMPLE instead of trees.  If EXP is non-NULL, it should be
   an SSA_NAME and STMT argument is ignored, otherwise STMT
   argument should be a GIMPLE_COND.  */

static void
init_range_entry (struct range_entry *r, tree exp, gimple stmt)
{
  int in_p;
  tree low, high;
  bool is_bool, strict_overflow_p;

  r->exp = NULL_TREE;
  r->in_p = false;
  r->strict_overflow_p = false;
  r->low = NULL_TREE;
  r->high = NULL_TREE;
  if (exp != NULL_TREE
      && (TREE_CODE (exp) != SSA_NAME || !INTEGRAL_TYPE_P (TREE_TYPE (exp))))
    return;

  /* Start with simply saying "EXP != 0" and then look at the code of EXP
     and see if we can refine the range.  Some of the cases below may not
     happen, but it doesn't seem worth worrying about this.  We "continue"
     the outer loop when we've changed something; otherwise we "break"
     the switch, which will "break" the while.  */
  low = exp ? build_int_cst (TREE_TYPE (exp), 0) : boolean_false_node;
  high = low;
  in_p = 0;
  strict_overflow_p = false;
  is_bool = false;
  if (exp == NULL_TREE)
    is_bool = true;
  else if (TYPE_PRECISION (TREE_TYPE (exp)) == 1)
    {
      if (TYPE_UNSIGNED (TREE_TYPE (exp)))
        is_bool = true;
      else
        return;
    }
  else if (TREE_CODE (TREE_TYPE (exp)) == BOOLEAN_TYPE)
    is_bool = true;

  while (1)
    {
      enum tree_code code;
      tree arg0, arg1, exp_type;
      tree nexp;
      location_t loc;

      if (exp != NULL_TREE)
        {
          if (TREE_CODE (exp) != SSA_NAME)
            break;

          stmt = SSA_NAME_DEF_STMT (exp);
          if (!is_gimple_assign (stmt))
            break;

          code = gimple_assign_rhs_code (stmt);
          arg0 = gimple_assign_rhs1 (stmt);
          arg1 = gimple_assign_rhs2 (stmt);
          exp_type = TREE_TYPE (exp);
        }
      else
        {
          code = gimple_cond_code (stmt);
          arg0 = gimple_cond_lhs (stmt);
          arg1 = gimple_cond_rhs (stmt);
          exp_type = boolean_type_node;
        }

      if (TREE_CODE (arg0) != SSA_NAME)
        break;
      loc = gimple_location (stmt);
      switch (code)
        {
        case BIT_NOT_EXPR:
          if (TREE_CODE (TREE_TYPE (exp)) == BOOLEAN_TYPE
              /* Ensure the range is either +[-,0], +[0,0],
                 -[-,0], -[0,0] or +[1,-], +[1,1], -[1,-] or
                 -[1,1].  If it is e.g. +[-,-] or -[-,-]
                 or similar expression of unconditional true or
                 false, it should not be negated.  */
              && ((high && integer_zerop (high))
                  || (low && integer_onep (low))))
            {
              in_p = !in_p;
              exp = arg0;
              continue;
            }
          break;
        case SSA_NAME:
          exp = arg0;
          continue;
        CASE_CONVERT:
          if (is_bool)
            goto do_default;
          if (TYPE_PRECISION (TREE_TYPE (arg0)) == 1)
            {
              if (TYPE_UNSIGNED (TREE_TYPE (arg0)))
                is_bool = true;
              else
                return;
            }
          else if (TREE_CODE (TREE_TYPE (arg0)) == BOOLEAN_TYPE)
            is_bool = true;
          goto do_default;
        case EQ_EXPR:
        case NE_EXPR:
        case LT_EXPR:
        case LE_EXPR:
        case GE_EXPR:
        case GT_EXPR:
          is_bool = true;
          /* FALLTHRU */
        default:
          if (!is_bool)
            return;
        do_default:
          nexp = make_range_step (loc, code, arg0, arg1, exp_type,
                                  &low, &high, &in_p,
                                  &strict_overflow_p);
          if (nexp != NULL_TREE)
            {
              exp = nexp;
              gcc_assert (TREE_CODE (exp) == SSA_NAME);
              continue;
            }
          break;
        }
      break;
    }

  r->exp = exp;
  r->in_p = in_p;
  r->low = low;
  r->high = high;
  r->strict_overflow_p = strict_overflow_p;
}

/* Comparison function for qsort.  Sort entries
   without SSA_NAME exp first, then with SSA_NAMEs sorted
   by increasing SSA_NAME_VERSION, and for the same SSA_NAMEs
   by increasing ->low and if ->low is the same, by increasing
   ->high.  ->low == NULL_TREE means minimum, ->high == NULL_TREE
   maximum.  */

static int
range_entry_cmp (const void *a, const void *b)
{
  const struct range_entry *p = (const struct range_entry *) a;
  const struct range_entry *q = (const struct range_entry *) b;

  if (p->exp != NULL_TREE && TREE_CODE (p->exp) == SSA_NAME)
    {
      if (q->exp != NULL_TREE && TREE_CODE (q->exp) == SSA_NAME)
        {
          /* Group range_entries for the same SSA_NAME together.  */
          if (SSA_NAME_VERSION (p->exp) < SSA_NAME_VERSION (q->exp))
            return -1;
          else if (SSA_NAME_VERSION (p->exp) > SSA_NAME_VERSION (q->exp))
            return 1;
          /* If ->low is different, NULL low goes first, then by
             ascending low.  */
          if (p->low != NULL_TREE)
            {
              if (q->low != NULL_TREE)
                {
                  tree tem = fold_binary (LT_EXPR, boolean_type_node,
                                          p->low, q->low);
                  if (tem && integer_onep (tem))
                    return -1;
                  tem = fold_binary (GT_EXPR, boolean_type_node,
                                     p->low, q->low);
                  if (tem && integer_onep (tem))
                    return 1;
                }
              else
                return 1;
            }
          else if (q->low != NULL_TREE)
            return -1;
          /* If ->high is different, NULL high goes last, before that by
             ascending high.  */
          if (p->high != NULL_TREE)
            {
              if (q->high != NULL_TREE)
                {
                  tree tem = fold_binary (LT_EXPR, boolean_type_node,
                                          p->high, q->high);
                  if (tem && integer_onep (tem))
                    return -1;
                  tem = fold_binary (GT_EXPR, boolean_type_node,
                                     p->high, q->high);
                  if (tem && integer_onep (tem))
                    return 1;
                }
              else
                return -1;
            }
          else if (q->high != NULL_TREE)
            return 1;
          /* If both ranges are the same, sort below by ascending idx.  */
        }
      else
        return 1;
    }
  else if (q->exp != NULL_TREE && TREE_CODE (q->exp) == SSA_NAME)
    return -1;

  if (p->idx < q->idx)
    return -1;
  else
    {
      gcc_checking_assert (p->idx > q->idx);
      return 1;
    }
}

/* Helper routine of optimize_range_test.
   [EXP, IN_P, LOW, HIGH, STRICT_OVERFLOW_P] is a merged range for
   RANGE and OTHERRANGE through OTHERRANGE + COUNT - 1 ranges,
   OPCODE and OPS are arguments of optimize_range_tests.  Return
   true if the range merge has been successful.
   If OPCODE is ERROR_MARK, this is called from within
   maybe_optimize_range_tests and is performing inter-bb range optimization.
   Changes should be then performed right away, and whether an op is
   BIT_AND_EXPR or BIT_IOR_EXPR is found in oe->rank.  */

static bool
update_range_test (struct range_entry *range, struct range_entry *otherrange,
                   unsigned int count, enum tree_code opcode,
                   vec<operand_entry_t> *ops, tree exp, bool in_p,
                   tree low, tree high, bool strict_overflow_p)
{
  operand_entry_t oe = (*ops)[range->idx];
  tree op = oe->op;
  gimple stmt = op ? SSA_NAME_DEF_STMT (op) : last_stmt (BASIC_BLOCK (oe->id));
  location_t loc = gimple_location (stmt);
  tree optype = op ? TREE_TYPE (op) : boolean_type_node;
  tree tem = build_range_check (loc, optype, exp, in_p, low, high);
  enum warn_strict_overflow_code wc = WARN_STRICT_OVERFLOW_COMPARISON;
  gimple_stmt_iterator gsi;

  if (tem == NULL_TREE)
    return false;

  if (strict_overflow_p && issue_strict_overflow_warning (wc))
    warning_at (loc, OPT_Wstrict_overflow,
                "assuming signed overflow does not occur "
                "when simplifying range test");

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      struct range_entry *r;
      fprintf (dump_file, "Optimizing range tests ");
      print_generic_expr (dump_file, range->exp, 0);
      fprintf (dump_file, " %c[", range->in_p ? '+' : '-');
      print_generic_expr (dump_file, range->low, 0);
      fprintf (dump_file, ", ");
      print_generic_expr (dump_file, range->high, 0);
      fprintf (dump_file, "]");
      for (r = otherrange; r < otherrange + count; r++)
        {
          fprintf (dump_file, " and %c[", r->in_p ? '+' : '-');
          print_generic_expr (dump_file, r->low, 0);
          fprintf (dump_file, ", ");
          print_generic_expr (dump_file, r->high, 0);
          fprintf (dump_file, "]");
        }
      fprintf (dump_file, "\n into ");
      print_generic_expr (dump_file, tem, 0);
      fprintf (dump_file, "\n");
    }

  if (opcode == BIT_IOR_EXPR
      || (opcode == ERROR_MARK && oe->rank == BIT_IOR_EXPR))
    tem = invert_truthvalue_loc (loc, tem);

  tem = fold_convert_loc (loc, optype, tem);
  gsi = gsi_for_stmt (stmt);
  /* In rare cases range->exp can be equal to lhs of stmt.
     In that case we have to insert after the stmt rather then before
     it.  */
  if (op == range->exp)
    tem = force_gimple_operand_gsi (&gsi, tem, true, NULL_TREE, false,
                                    GSI_CONTINUE_LINKING);
  else
    tem = force_gimple_operand_gsi (&gsi, tem, true, NULL_TREE, true,
                                    GSI_SAME_STMT);

  /* If doing inter-bb range test optimization, update the
     stmts immediately.  Start with changing the first range test
     immediate use to the new value (TEM), or, if the first range
     test is a GIMPLE_COND stmt, change that condition.  */
  if (opcode == ERROR_MARK)
    {
      if (op)
        {
          imm_use_iterator iter;
          use_operand_p use_p;
          gimple use_stmt;

          FOR_EACH_IMM_USE_STMT (use_stmt, iter, op)
            {
              if (is_gimple_debug (use_stmt))
                continue;
              FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
                SET_USE (use_p, tem);
              update_stmt (use_stmt);
            }
        }
      else
        {
          gimple_cond_set_code (stmt, NE_EXPR);
          gimple_cond_set_lhs (stmt, tem);
          gimple_cond_set_rhs (stmt, boolean_false_node);
          update_stmt (stmt);
        }
    }
  oe->op = tem;
  range->exp = exp;
  range->low = low;
  range->high = high;
  range->in_p = in_p;
  range->strict_overflow_p = false;

  for (range = otherrange; range < otherrange + count; range++)
    {
      oe = (*ops)[range->idx];
      /* Now change all the other range test immediate uses, so that
         those tests will be optimized away.  */
      if (opcode == ERROR_MARK)
        {
          if (oe->op)
            {
              imm_use_iterator iter;
              use_operand_p use_p;
              gimple use_stmt;

              FOR_EACH_IMM_USE_STMT (use_stmt, iter, oe->op)
                {
                  if (is_gimple_debug (use_stmt))
                    continue;
                  /* If imm use of _8 is a statement like _7 = _8 | _9;,
                     adjust it into _7 = _9;.  */
                  if (is_gimple_assign (use_stmt)
                      && gimple_assign_rhs_code (use_stmt) == oe->rank)
                    {
                      tree expr = NULL_TREE;
                      if (oe->op == gimple_assign_rhs1 (use_stmt))
                        expr = gimple_assign_rhs2 (use_stmt);
                      else if (oe->op == gimple_assign_rhs2 (use_stmt))
                        expr = gimple_assign_rhs1 (use_stmt);
                      if (expr
                          && TREE_CODE (expr) == SSA_NAME)
                        {
                          gimple_stmt_iterator gsi2 = gsi_for_stmt (use_stmt);
                          gimple_assign_set_rhs_with_ops (&gsi2, SSA_NAME,
                                                          expr, NULL_TREE);
                          update_stmt (use_stmt);
                          continue;
                        }
                    }
                  /* If imm use of _8 is a statement like _7 = (int) _8;,
                     adjust it into _7 = 0; or _7 = 1;.  */
                  if (gimple_assign_cast_p (use_stmt)
                      && oe->op == gimple_assign_rhs1 (use_stmt))
                    {
                      tree lhs = gimple_assign_lhs (use_stmt);
                      if (INTEGRAL_TYPE_P (TREE_TYPE (lhs)))
                        {
                          gimple_stmt_iterator gsi2
                            = gsi_for_stmt (use_stmt);
                          tree expr = build_int_cst (TREE_TYPE (lhs),
                                                     oe->rank == BIT_IOR_EXPR
                                                     ? 0 : 1);
                          gimple_assign_set_rhs_with_ops (&gsi2,
                                                          INTEGER_CST,
                                                          expr, NULL_TREE);
                          update_stmt (use_stmt);
                          continue;
                        }
                    }
                  /* Otherwise replace the use with 0 or 1.  */
                  FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
                    SET_USE (use_p,
                             build_int_cst (TREE_TYPE (oe->op),
                                            oe->rank == BIT_IOR_EXPR
                                            ? 0 : 1));
                  update_stmt (use_stmt);
                }
            }
          else
            {
              /* If range test was a GIMPLE_COND, simply change it
                 into an always false or always true condition.  */
              stmt = last_stmt (BASIC_BLOCK (oe->id));
              if (oe->rank == BIT_IOR_EXPR)
                gimple_cond_make_false (stmt);
              else
                gimple_cond_make_true (stmt);
              update_stmt (stmt);
            }
        }
      oe->op = error_mark_node;
      range->exp = NULL_TREE;
    }
  return true;
}

/* Optimize range tests, similarly how fold_range_test optimizes
   it on trees.  The tree code for the binary
   operation between all the operands is OPCODE.
   If OPCODE is ERROR_MARK, optimize_range_tests is called from within
   maybe_optimize_range_tests for inter-bb range optimization.
   In that case if oe->op is NULL, oe->id is bb->index whose
   GIMPLE_COND is && or ||ed into the test, and oe->rank says
   the actual opcode.  */

static bool
optimize_range_tests (enum tree_code opcode,
                      vec<operand_entry_t> *ops)
{
  unsigned int length = ops->length (), i, j, first;
  operand_entry_t oe;
  struct range_entry *ranges;
  bool any_changes = false;

  if (length == 1)
    return false;

  ranges = XNEWVEC (struct range_entry, length);
  for (i = 0; i < length; i++)
    {
      oe = (*ops)[i];
      ranges[i].idx = i;
      init_range_entry (ranges + i, oe->op,
                        oe->op ? NULL : last_stmt (BASIC_BLOCK (oe->id)));
      /* For | invert it now, we will invert it again before emitting
         the optimized expression.  */
      if (opcode == BIT_IOR_EXPR
          || (opcode == ERROR_MARK && oe->rank == BIT_IOR_EXPR))
        ranges[i].in_p = !ranges[i].in_p;
    }

  qsort (ranges, length, sizeof (*ranges), range_entry_cmp);
  for (i = 0; i < length; i++)
    if (ranges[i].exp != NULL_TREE && TREE_CODE (ranges[i].exp) == SSA_NAME)
      break;

  /* Try to merge ranges.  */
  for (first = i; i < length; i++)
    {
      tree low = ranges[i].low;
      tree high = ranges[i].high;
      int in_p = ranges[i].in_p;
      bool strict_overflow_p = ranges[i].strict_overflow_p;
      int update_fail_count = 0;

      for (j = i + 1; j < length; j++)
        {
          if (ranges[i].exp != ranges[j].exp)
            break;
          if (!merge_ranges (&in_p, &low, &high, in_p, low, high,
                             ranges[j].in_p, ranges[j].low, ranges[j].high))
            break;
          strict_overflow_p |= ranges[j].strict_overflow_p;
        }

      if (j == i + 1)
        continue;

      if (update_range_test (ranges + i, ranges + i + 1, j - i - 1, opcode,
                             ops, ranges[i].exp, in_p, low, high,
                             strict_overflow_p))
        {
          i = j - 1;
          any_changes = true;
        }
      /* Avoid quadratic complexity if all merge_ranges calls would succeed,
         while update_range_test would fail.  */
      else if (update_fail_count == 64)
        i = j - 1;
      else
        ++update_fail_count;
    }

  /* Optimize X == CST1 || X == CST2
     if popcount (CST1 ^ CST2) == 1 into
     (X & ~(CST1 ^ CST2)) == (CST1 & ~(CST1 ^ CST2)).
     Similarly for ranges.  E.g.
     X != 2 && X != 3 && X != 10 && X != 11
     will be transformed by the above loop into
     (X - 2U) <= 1U && (X - 10U) <= 1U
     and this loop can transform that into
     ((X & ~8) - 2U) <= 1U.  */
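  /* The merge below works because lowxor == highxor with exactly one
     bit set means the two ranges have equal size and distance;
     clearing that one differing bit in X maps the higher range onto
     the lower one, so a single range check covers both.  In the
     example above, [2, 3] and [10, 11] differ only in the bit with
     value 8, hence the X & ~8.  */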
  for (i = first; i < length; i++)
    {
      tree lowi, highi, lowj, highj, type, lowxor, highxor, tem, exp;

      if (ranges[i].exp == NULL_TREE || ranges[i].in_p)
        continue;
      type = TREE_TYPE (ranges[i].exp);
      if (!INTEGRAL_TYPE_P (type))
        continue;
      lowi = ranges[i].low;
      if (lowi == NULL_TREE)
        lowi = TYPE_MIN_VALUE (type);
      highi = ranges[i].high;
      if (highi == NULL_TREE)
        continue;
      for (j = i + 1; j < length && j < i + 64; j++)
        {
          if (ranges[j].exp == NULL_TREE)
            continue;
          if (ranges[i].exp != ranges[j].exp)
            break;
          if (ranges[j].in_p)
            continue;
          lowj = ranges[j].low;
          if (lowj == NULL_TREE)
            continue;
          highj = ranges[j].high;
          if (highj == NULL_TREE)
            highj = TYPE_MAX_VALUE (type);
          tem = fold_binary (GT_EXPR, boolean_type_node,
                             lowj, highi);
          if (tem == NULL_TREE || !integer_onep (tem))
            continue;
          lowxor = fold_binary (BIT_XOR_EXPR, type, lowi, lowj);
          if (lowxor == NULL_TREE || TREE_CODE (lowxor) != INTEGER_CST)
            continue;
          gcc_checking_assert (!integer_zerop (lowxor));
          tem = fold_binary (MINUS_EXPR, type, lowxor,
                             build_int_cst (type, 1));
          if (tem == NULL_TREE)
            continue;
          tem = fold_binary (BIT_AND_EXPR, type, lowxor, tem);
          if (tem == NULL_TREE || !integer_zerop (tem))
            continue;
          highxor = fold_binary (BIT_XOR_EXPR, type, highi, highj);
          if (!tree_int_cst_equal (lowxor, highxor))
            continue;
          tem = fold_build1 (BIT_NOT_EXPR, type, lowxor);
          exp = fold_build2 (BIT_AND_EXPR, type, ranges[i].exp, tem);
          lowj = fold_build2 (BIT_AND_EXPR, type, lowi, tem);
          highj = fold_build2 (BIT_AND_EXPR, type, highi, tem);
          if (update_range_test (ranges + i, ranges + j, 1, opcode, ops, exp,
                                 ranges[i].in_p, lowj, highj,
                                 ranges[i].strict_overflow_p
                                 || ranges[j].strict_overflow_p))
            {
              any_changes = true;
              break;
            }
        }
    }

  if (any_changes && opcode != ERROR_MARK)
    {
      j = 0;
      FOR_EACH_VEC_ELT (*ops, i, oe)
        {
          if (oe->op == error_mark_node)
            continue;
          else if (i != j)
            (*ops)[j] = oe;
          j++;
        }
      ops->truncate (j);
    }

  XDELETEVEC (ranges);
  return any_changes;
}

/* Return true if STMT is a cast like:
   <bb N>:
   ...
   _123 = (int) _234;

   <bb M>:
   # _345 = PHI <_123(N), 1(...), 1(...)>
   where _234 has bool type, _123 has single use and
   bb N has a single successor M.  This is commonly used in
   the last block of a range test.  */

static bool
final_range_test_p (gimple stmt)
{
  basic_block bb, rhs_bb;
  edge e;
  tree lhs, rhs;
  use_operand_p use_p;
  gimple use_stmt;

  if (!gimple_assign_cast_p (stmt))
    return false;
  bb = gimple_bb (stmt);
  if (!single_succ_p (bb))
    return false;
  e = single_succ_edge (bb);
  if (e->flags & EDGE_COMPLEX)
    return false;

  lhs = gimple_assign_lhs (stmt);
  rhs = gimple_assign_rhs1 (stmt);
  if (!INTEGRAL_TYPE_P (TREE_TYPE (lhs))
      || TREE_CODE (rhs) != SSA_NAME
      || TREE_CODE (TREE_TYPE (rhs)) != BOOLEAN_TYPE)
    return false;

  /* Test whether lhs is consumed only by a PHI in the only successor bb.  */
  if (!single_imm_use (lhs, &use_p, &use_stmt))
    return false;

  if (gimple_code (use_stmt) != GIMPLE_PHI
      || gimple_bb (use_stmt) != e->dest)
    return false;

  /* And that the rhs is defined in the same loop.  */
  rhs_bb = gimple_bb (SSA_NAME_DEF_STMT (rhs));
  if (rhs_bb == NULL
      || !flow_bb_inside_loop_p (loop_containing_stmt (stmt), rhs_bb))
    return false;

  return true;
}

/* Return true if BB is suitable basic block for inter-bb range test
   optimization.  If BACKWARD is true, BB should be the only predecessor
   of TEST_BB, and *OTHER_BB is either NULL and filled by the routine,
   or compared with to find a common basic block to which all conditions
   branch to if true resp. false.  If BACKWARD is false, TEST_BB should
   be the only predecessor of BB.  */

static bool
suitable_cond_bb (basic_block bb, basic_block test_bb, basic_block *other_bb,
		  bool backward)
{
  edge_iterator ei, ei2;
  edge e, e2;
  gimple stmt;
  gimple_stmt_iterator gsi;
  bool other_edge_seen = false;
  bool is_cond;

  if (test_bb == bb)
    return false;
  /* Check last stmt first.  */
  stmt = last_stmt (bb);
  if (stmt == NULL
      || (gimple_code (stmt) != GIMPLE_COND
	  && (backward || !final_range_test_p (stmt)))
      || gimple_visited_p (stmt)
      || stmt_could_throw_p (stmt)
      || *other_bb == bb)
    return false;
  is_cond = gimple_code (stmt) == GIMPLE_COND;
  if (is_cond)
    {
      /* If last stmt is GIMPLE_COND, verify that one of the succ edges
	 goes to the next bb (if BACKWARD, it is TEST_BB), and the other
	 to *OTHER_BB (if not set yet, try to find it out).  */
      if (EDGE_COUNT (bb->succs) != 2)
	return false;
      FOR_EACH_EDGE (e, ei, bb->succs)
	{
	  if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
	    return false;
	  if (e->dest == test_bb)
	    {
	      if (backward)
		continue;
	      else
		return false;
	    }
	  if (e->dest == bb)
	    return false;
	  if (*other_bb == NULL)
	    {
	      FOR_EACH_EDGE (e2, ei2, test_bb->succs)
		if (!(e2->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
		  return false;
		else if (e->dest == e2->dest)
		  *other_bb = e->dest;
	      if (*other_bb == NULL)
		return false;
	    }
	  if (e->dest == *other_bb)
	    other_edge_seen = true;
	  else if (backward)
	    return false;
	}
      if (*other_bb == NULL || !other_edge_seen)
	return false;
    }
  else if (single_succ (bb) != *other_bb)
    return false;

  /* Now check all PHIs of *OTHER_BB.  */
  e = find_edge (bb, *other_bb);
  e2 = find_edge (test_bb, *other_bb);
  for (gsi = gsi_start_phis (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);
      /* If both BB and TEST_BB end with GIMPLE_COND, all PHI arguments
	 corresponding to BB and TEST_BB predecessor must be the same.  */
      if (!operand_equal_p (gimple_phi_arg_def (phi, e->dest_idx),
			    gimple_phi_arg_def (phi, e2->dest_idx), 0))
	{
	  /* Otherwise, if one of the blocks doesn't end with GIMPLE_COND,
	     one of the PHIs should have the lhs of the last stmt in
	     that block as PHI arg and that PHI should have 0 or 1
	     corresponding to it in all other range test basic blocks
	     considered.  */
	  if (!is_cond)
	    {
	      if (gimple_phi_arg_def (phi, e->dest_idx)
		  == gimple_assign_lhs (stmt)
		  && (integer_zerop (gimple_phi_arg_def (phi, e2->dest_idx))
		      || integer_onep (gimple_phi_arg_def (phi,
							   e2->dest_idx))))
		continue;
	    }
	  else
	    {
	      gimple test_last = last_stmt (test_bb);
	      if (gimple_code (test_last) != GIMPLE_COND
		  && gimple_phi_arg_def (phi, e2->dest_idx)
		     == gimple_assign_lhs (test_last)
		  && (integer_zerop (gimple_phi_arg_def (phi, e->dest_idx))
		      || integer_onep (gimple_phi_arg_def (phi, e->dest_idx))))
		continue;
	    }

	  return false;
	}
    }
  return true;
}
/* Return true if BB doesn't have side-effects that would disallow
   range test optimization, all SSA_NAMEs set in the bb are consumed
   in the bb and there are no PHIs.  */

static bool
no_side_effect_bb (basic_block bb)
{
  gimple_stmt_iterator gsi;
  gimple last;

  if (!gimple_seq_empty_p (phi_nodes (bb)))
    return false;
  last = last_stmt (bb);
  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      tree lhs;
      imm_use_iterator imm_iter;
      use_operand_p use_p;

      if (is_gimple_debug (stmt))
	continue;
      if (gimple_has_side_effects (stmt))
	return false;
      if (stmt == last)
	return true;
      if (!is_gimple_assign (stmt))
	return false;
      lhs = gimple_assign_lhs (stmt);
      if (TREE_CODE (lhs) != SSA_NAME)
	return false;
      if (gimple_assign_rhs_could_trap_p (stmt))
	return false;
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
	{
	  gimple use_stmt = USE_STMT (use_p);
	  if (is_gimple_debug (use_stmt))
	    continue;
	  if (gimple_bb (use_stmt) != bb)
	    return false;
	}
    }
  return false;
}
/* If VAR is set by CODE (BIT_{AND,IOR}_EXPR) which is reassociable,
   return true and fill in *OPS recursively.  */

static bool
get_ops (tree var, enum tree_code code, vec<operand_entry_t> *ops,
	 struct loop *loop)
{
  gimple stmt = SSA_NAME_DEF_STMT (var);
  tree rhs[2];
  int i;

  if (!is_reassociable_op (stmt, code, loop))
    return false;

  rhs[0] = gimple_assign_rhs1 (stmt);
  rhs[1] = gimple_assign_rhs2 (stmt);
  gimple_set_visited (stmt, true);
  for (i = 0; i < 2; i++)
    if (TREE_CODE (rhs[i]) == SSA_NAME
	&& !get_ops (rhs[i], code, ops, loop)
	&& has_single_use (rhs[i]))
      {
	operand_entry_t oe = (operand_entry_t) pool_alloc (operand_entry_pool);

	oe->op = rhs[i];
	oe->rank = code;
	oe->id = 0;
	oe->count = 1;
	ops->safe_push (oe);
      }
  return true;
}
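
/* E.g. given
     _5 = _1 | _2;
     _6 = _5 | _3;
   get_ops (_6, BIT_IOR_EXPR, ...) visits _5 recursively and pushes the
   leaf operands _1, _2 and _3 (assuming each has a single use), so the
   caller sees the flattened operand list of the whole bitwise chain.  */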
/* Inter-bb range test optimization.  */

static void
maybe_optimize_range_tests (gimple stmt)
{
  basic_block first_bb = gimple_bb (stmt);
  basic_block last_bb = first_bb;
  basic_block other_bb = NULL;
  basic_block bb;
  edge_iterator ei;
  edge e;
  vec<operand_entry_t> ops = vNULL;

  /* Consider only basic blocks that end with GIMPLE_COND or
     a cast statement satisfying final_range_test_p.  All
     but the last bb in the first_bb .. last_bb range
     should end with GIMPLE_COND.  */
  if (gimple_code (stmt) == GIMPLE_COND)
    {
      if (EDGE_COUNT (first_bb->succs) != 2)
	return;
    }
  else if (final_range_test_p (stmt))
    other_bb = single_succ (first_bb);
  else
    return;

  if (stmt_could_throw_p (stmt))
    return;

  /* As relative ordering of post-dominator sons isn't fixed,
     maybe_optimize_range_tests can be called first on any
     bb in the range we want to optimize.  So, start searching
     backwards, if first_bb can be set to a predecessor.  */
  while (single_pred_p (first_bb))
    {
      basic_block pred_bb = single_pred (first_bb);
      if (!suitable_cond_bb (pred_bb, first_bb, &other_bb, true))
	break;
      if (!no_side_effect_bb (first_bb))
	break;
      first_bb = pred_bb;
    }
  /* If first_bb is last_bb, other_bb hasn't been computed yet.
     Before starting forward search in last_bb successors, find
     out the other_bb.  */
  if (first_bb == last_bb)
    {
      other_bb = NULL;
      /* As non-GIMPLE_COND last stmt always terminates the range,
	 if forward search didn't discover anything, just give up.  */
      if (gimple_code (stmt) != GIMPLE_COND)
	return;
      /* Look at both successors.  Either it ends with a GIMPLE_COND
	 and satisfies suitable_cond_bb, or ends with a cast and
	 other_bb is that cast's successor.  */
      FOR_EACH_EDGE (e, ei, first_bb->succs)
	if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE))
	    || e->dest == first_bb)
	  return;
	else if (single_pred_p (e->dest))
	  {
	    stmt = last_stmt (e->dest);
	    if (stmt
		&& gimple_code (stmt) == GIMPLE_COND
		&& EDGE_COUNT (e->dest->succs) == 2)
	      {
		if (suitable_cond_bb (first_bb, e->dest, &other_bb, true))
		  break;
		else
		  other_bb = NULL;
	      }
	    else if (stmt
		     && final_range_test_p (stmt)
		     && find_edge (first_bb, single_succ (e->dest)))
	      {
		other_bb = single_succ (e->dest);
		if (other_bb == first_bb)
		  other_bb = NULL;
	      }
	  }
      if (other_bb == NULL)
	return;
    }
  /* Now do the forward search, moving last_bb to successor bbs
     that aren't other_bb.  */
  while (EDGE_COUNT (last_bb->succs) == 2)
    {
      FOR_EACH_EDGE (e, ei, last_bb->succs)
	if (e->dest != other_bb)
	  break;
      if (e == NULL)
	break;
      if (!single_pred_p (e->dest))
	break;
      if (!suitable_cond_bb (e->dest, last_bb, &other_bb, false))
	break;
      if (!no_side_effect_bb (e->dest))
	break;
      last_bb = e->dest;
    }
  if (first_bb == last_bb)
    return;
  /* Here basic blocks first_bb through last_bb's predecessor
     end with GIMPLE_COND, all of them have one of the edges to
     other_bb and another to another block in the range,
     all blocks except first_bb don't have side-effects and
     last_bb ends with either GIMPLE_COND, or cast satisfying
     final_range_test_p.  */
  for (bb = last_bb; ; bb = single_pred (bb))
    {
      enum tree_code code;
      tree lhs, rhs;

      e = find_edge (bb, other_bb);
      stmt = last_stmt (bb);
      gimple_set_visited (stmt, true);
      if (gimple_code (stmt) != GIMPLE_COND)
	{
	  use_operand_p use_p;
	  gimple phi;
	  edge e2;
	  unsigned int d;

	  lhs = gimple_assign_lhs (stmt);
	  rhs = gimple_assign_rhs1 (stmt);
	  gcc_assert (bb == last_bb);

	  /* stmt is
	     _123 = (int) _234;

	     followed by:
	     <bb M>:
	     # _345 = PHI <_123(N), 1(...), 1(...)>

	     or 0 instead of 1.  If it is 0, the _234
	     range test is anded together with all the
	     other range tests, if it is 1, it is ored with
	     them.  */
	  single_imm_use (lhs, &use_p, &phi);
	  gcc_assert (gimple_code (phi) == GIMPLE_PHI);
	  e2 = find_edge (first_bb, other_bb);
	  d = e2->dest_idx;
	  gcc_assert (gimple_phi_arg_def (phi, e->dest_idx) == lhs);
	  if (integer_zerop (gimple_phi_arg_def (phi, d)))
	    code = BIT_AND_EXPR;
	  else
	    {
	      gcc_checking_assert (integer_onep (gimple_phi_arg_def (phi, d)));
	      code = BIT_IOR_EXPR;
	    }

	  /* If _234 SSA_NAME_DEF_STMT is a bitwise or
	     (or &, corresponding to 1/0 in the phi arguments,
	     push into ops the individual range test arguments
	     of the bitwise or resp. and, recursively.  */
	  if (!get_ops (rhs, code, &ops,
			loop_containing_stmt (stmt))
	      && has_single_use (rhs))
	    {
	      /* Otherwise, push the _234 range test itself.  */
	      operand_entry_t oe
		= (operand_entry_t) pool_alloc (operand_entry_pool);

	      oe->op = rhs;
	      oe->rank = code;
	      oe->id = 0;
	      oe->count = 1;
	      ops.safe_push (oe);
	    }
	  continue;
	}
      /* Otherwise stmt is GIMPLE_COND.  */
      code = gimple_cond_code (stmt);
      lhs = gimple_cond_lhs (stmt);
      rhs = gimple_cond_rhs (stmt);
      if (TREE_CODE (lhs) == SSA_NAME
	  && INTEGRAL_TYPE_P (TREE_TYPE (lhs))
	  && ((code != EQ_EXPR && code != NE_EXPR)
	      || rhs != boolean_false_node
		 /* Either push into ops the individual bitwise
		    or resp. and operands, depending on which
		    edge is other_bb.  */
	      || !get_ops (lhs, (((e->flags & EDGE_TRUE_VALUE) == 0)
				 ^ (code == EQ_EXPR))
				? BIT_AND_EXPR : BIT_IOR_EXPR, &ops,
			   loop_containing_stmt (stmt))))
	{
	  /* Or push the GIMPLE_COND stmt itself.  */
	  operand_entry_t oe
	    = (operand_entry_t) pool_alloc (operand_entry_pool);

	  oe->op = NULL;
	  oe->rank = (e->flags & EDGE_TRUE_VALUE)
		     ? BIT_IOR_EXPR : BIT_AND_EXPR;
	  /* oe->op = NULL signs that there is no SSA_NAME
	     for the range test, and oe->id instead is the
	     basic block number, at whose end the GIMPLE_COND
	     is.  */
	  oe->id = bb->index;
	  oe->count = 1;
	  ops.safe_push (oe);
	}
      if (bb == first_bb)
	break;
    }
  if (ops.length () > 1)
    optimize_range_tests (ERROR_MARK, &ops);
  ops.release ();
}
/* Return true if OPERAND is defined by a PHI node which uses the LHS
   of STMT in its operands.  This is also known as a "destructive
   update" operation.  */

static bool
is_phi_for_stmt (gimple stmt, tree operand)
{
  gimple def_stmt;
  tree lhs;
  use_operand_p arg_p;
  ssa_op_iter i;

  if (TREE_CODE (operand) != SSA_NAME)
    return false;

  lhs = gimple_assign_lhs (stmt);

  def_stmt = SSA_NAME_DEF_STMT (operand);
  if (gimple_code (def_stmt) != GIMPLE_PHI)
    return false;

  FOR_EACH_PHI_ARG (arg_p, def_stmt, i, SSA_OP_USE)
    if (lhs == USE_FROM_PTR (arg_p))
      return true;
  return false;
}
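
/* E.g. for a reduction such as
     sum_1 = PHI <sum_0 (preheader), sum_2 (latch)>
     sum_2 = sum_1 + x_3;
   is_phi_for_stmt (the add, sum_1) returns true, because the PHI
   defining sum_1 uses sum_2, the lhs of the statement.  */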
/* Remove def stmt of VAR if VAR has zero uses and recurse
   on rhs1 operand if so.  */

static void
remove_visited_stmt_chain (tree var)
{
  gimple stmt;
  gimple_stmt_iterator gsi;

  while (1)
    {
      if (TREE_CODE (var) != SSA_NAME || !has_zero_uses (var))
	return;
      stmt = SSA_NAME_DEF_STMT (var);
      if (is_gimple_assign (stmt) && gimple_visited_p (stmt))
	{
	  var = gimple_assign_rhs1 (stmt);
	  gsi = gsi_for_stmt (stmt);
	  gsi_remove (&gsi, true);
	  release_defs (stmt);
	}
      else
	return;
    }
}
/* This function checks three consecutive operands in
   passed operands vector OPS starting from OPINDEX and
   swaps two operands if it is profitable for binary operation
   consuming OPINDEX + 1 and OPINDEX + 2 operands.

   We pair ops with the same rank if possible.

   The alternative we try is to see if STMT is a destructive
   update style statement, which is like:
   b = phi (a, ...)
   a = c + b;
   In that case, we want to use the destructive update form to
   expose the possible vectorizer sum reduction opportunity.
   In that case, the third operand will be the phi node.  This
   check is not performed if STMT is null.

   We could, of course, try to be better as noted above, and do a
   lot of work to try to find these opportunities in >3 operand
   cases, but it is unlikely to be worth it.  */

static void
swap_ops_for_binary_stmt (vec<operand_entry_t> ops,
			  unsigned int opindex, gimple stmt)
{
  operand_entry_t oe1, oe2, oe3;

  oe1 = ops[opindex];
  oe2 = ops[opindex + 1];
  oe3 = ops[opindex + 2];

  if ((oe1->rank == oe2->rank
       && oe2->rank != oe3->rank)
      || (stmt && is_phi_for_stmt (stmt, oe3->op)
	  && !is_phi_for_stmt (stmt, oe1->op)
	  && !is_phi_for_stmt (stmt, oe2->op)))
    {
      struct operand_entry temp = *oe3;
      oe3->op = oe1->op;
      oe3->rank = oe1->rank;
      oe1->op = temp.op;
      oe1->rank = temp.rank;
    }
  else if ((oe1->rank == oe3->rank
	    && oe2->rank != oe3->rank)
	   || (stmt && is_phi_for_stmt (stmt, oe2->op)
	       && !is_phi_for_stmt (stmt, oe1->op)
	       && !is_phi_for_stmt (stmt, oe3->op)))
    {
      struct operand_entry temp = *oe2;
      oe2->op = oe1->op;
      oe2->rank = oe1->rank;
      oe1->op = temp.op;
      oe1->rank = temp.rank;
    }
}
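
/* E.g. for ranks {3, 3, 5} the first condition holds, so the rank 5
   entry is swapped into OPS[OPINDEX] and the two rank 3 entries end up
   in the OPINDEX + 1 and OPINDEX + 2 slots that feed the same binary
   operation, giving the redundancy eliminator an equal-rank pair.  */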
/* Recursively rewrite our linearized statements so that the operators
   match those in OPS[OPINDEX], putting the computation in rank
   order.  */

static void
rewrite_expr_tree (gimple stmt, unsigned int opindex,
		   vec<operand_entry_t> ops, bool moved)
{
  tree rhs1 = gimple_assign_rhs1 (stmt);
  tree rhs2 = gimple_assign_rhs2 (stmt);
  operand_entry_t oe;

  /* If we have three operands left, then we want to make sure the ones
     that get the double binary op are chosen wisely.  */
  if (opindex + 3 == ops.length ())
    swap_ops_for_binary_stmt (ops, opindex, stmt);

  /* The final recursion case for this function is that you have
     exactly two operations left.
     If we had exactly one op in the entire list to start with, we
     would have never called this function, and the tail recursion
     rewrites them one at a time.  */
  if (opindex + 2 == ops.length ())
    {
      operand_entry_t oe1, oe2;

      oe1 = ops[opindex];
      oe2 = ops[opindex + 1];

      if (rhs1 != oe1->op || rhs2 != oe2->op)
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Transforming ");
	      print_gimple_stmt (dump_file, stmt, 0, 0);
	    }

	  gimple_assign_set_rhs1 (stmt, oe1->op);
	  gimple_assign_set_rhs2 (stmt, oe2->op);
	  update_stmt (stmt);
	  if (rhs1 != oe1->op && rhs1 != oe2->op)
	    remove_visited_stmt_chain (rhs1);

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, " into ");
	      print_gimple_stmt (dump_file, stmt, 0, 0);
	    }
	}
      return;
    }

  /* If we hit here, we should have 3 or more ops left.  */
  gcc_assert (opindex + 2 < ops.length ());

  /* Rewrite the next operator.  */
  oe = ops[opindex];

  if (oe->op != rhs2)
    {
      if (!moved)
	{
	  gimple_stmt_iterator gsinow, gsirhs1;
	  gimple stmt1 = stmt, stmt2;
	  unsigned int count;

	  gsinow = gsi_for_stmt (stmt);
	  count = ops.length () - opindex - 2;
	  while (count-- != 0)
	    {
	      stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt1));
	      gsirhs1 = gsi_for_stmt (stmt2);
	      gsi_move_before (&gsirhs1, &gsinow);
	      gsi_prev (&gsinow);
	      stmt1 = stmt2;
	    }
	  moved = true;
	}

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Transforming ");
	  print_gimple_stmt (dump_file, stmt, 0, 0);
	}

      gimple_assign_set_rhs2 (stmt, oe->op);
      update_stmt (stmt);

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, " into ");
	  print_gimple_stmt (dump_file, stmt, 0, 0);
	}
    }
  /* Recurse on the LHS of the binary operator, which is guaranteed to
     be the non-leaf side.  */
  rewrite_expr_tree (SSA_NAME_DEF_STMT (rhs1), opindex + 1, ops, moved);
}
/* Find out how many cycles we need to compute statements chain.
   OPS_NUM holds number of statements in a chain.  CPU_WIDTH is a
   maximum number of independent statements we may execute per cycle.  */

static int
get_required_cycles (int ops_num, int cpu_width)
{
  int res;
  int elog;
  unsigned int rest;

  /* While we have more than 2 * cpu_width operands
     we may reduce number of operands by cpu_width
     per cycle.  */
  res = ops_num / (2 * cpu_width);

  /* Remained operands count may be reduced twice per cycle
     until we have only one operand.  */
  rest = (unsigned)(ops_num - res * cpu_width);
  elog = exact_log2 (rest);
  if (elog >= 0)
    res += elog;
  else
    res += floor_log2 (rest) + 1;

  return res;
}
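
/* E.g. for ops_num == 10 and cpu_width == 2: res = 10 / 4 = 2 cycles
   shrink the list to rest = 10 - 2 * 2 = 6 operands; 6 is not a power
   of two, so floor_log2 (6) + 1 = 3 further cycles are added, for 5
   cycles in total.  */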
/* Returns an optimal number of registers to use for computation of
   given statements.  */

static int
get_reassociation_width (int ops_num, enum tree_code opc,
			 enum machine_mode mode)
{
  int param_width = PARAM_VALUE (PARAM_TREE_REASSOC_WIDTH);
  int width;
  int width_min;
  int cycles_best;

  if (param_width > 0)
    width = param_width;
  else
    width = targetm.sched.reassociation_width (opc, mode);

  if (width == 1)
    return width;

  /* Get the minimal time required for sequence computation.  */
  cycles_best = get_required_cycles (ops_num, width);

  /* Check if we may use less width and still compute sequence for
     the same time.  It will allow us to reduce registers usage.
     get_required_cycles is monotonically increasing with lower width
     so we can perform a binary search for the minimal width that still
     results in the optimal cycle count.  */
  width_min = 1;
  while (width > width_min)
    {
      int width_mid = (width + width_min) / 2;

      if (get_required_cycles (ops_num, width_mid) == cycles_best)
	width = width_mid;
      else if (width_min < width_mid)
	width_min = width_mid;
      else
	break;
    }

  return width;
}
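
/* E.g. for ops_num == 6, get_required_cycles returns 3 for both width 4
   and width 2, so the search above settles on width 2 and the narrower
   schedule uses fewer registers at no cost in cycles.  */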
/* Recursively rewrite our linearized statements so that the operators
   match those in OPS[OPINDEX], putting the computation in rank
   order and trying to allow operations to be executed in
   parallel.  */

static void
rewrite_expr_tree_parallel (gimple stmt, int width,
			    vec<operand_entry_t> ops)
{
  enum tree_code opcode = gimple_assign_rhs_code (stmt);
  int op_num = ops.length ();
  int stmt_num = op_num - 1;
  gimple *stmts = XALLOCAVEC (gimple, stmt_num);
  int op_index = op_num - 1;
  int stmt_index = 0;
  int ready_stmts_end = 0;
  int i = 0;
  tree last_rhs1 = gimple_assign_rhs1 (stmt);

  /* We start expression rewriting from the top statements.
     So, in this loop we create a full list of statements
     we will work with.  */
  stmts[stmt_num - 1] = stmt;
  for (i = stmt_num - 2; i >= 0; i--)
    stmts[i] = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmts[i+1]));

  for (i = 0; i < stmt_num; i++)
    {
      tree op1, op2;

      /* Determine whether we should use results of
	 already handled statements or not.  */
      if (ready_stmts_end == 0
	  && (i - stmt_index >= width || op_index < 1))
	ready_stmts_end = i;

      /* Now we choose operands for the next statement.  Non zero
	 value in ready_stmts_end means here that we should use
	 the result of already generated statements as new operand.  */
      if (ready_stmts_end > 0)
	{
	  op1 = gimple_assign_lhs (stmts[stmt_index++]);
	  if (ready_stmts_end > stmt_index)
	    op2 = gimple_assign_lhs (stmts[stmt_index++]);
	  else if (op_index >= 0)
	    op2 = ops[op_index--]->op;
	  else
	    {
	      gcc_assert (stmt_index < i);
	      op2 = gimple_assign_lhs (stmts[stmt_index++]);
	    }

	  if (stmt_index >= ready_stmts_end)
	    ready_stmts_end = 0;
	}
      else
	{
	  if (op_index > 1)
	    swap_ops_for_binary_stmt (ops, op_index - 2, NULL);
	  op2 = ops[op_index--]->op;
	  op1 = ops[op_index--]->op;
	}

      /* If we emit the last statement then we should put
	 operands into the last statement.  It will also
	 break the loop.  */
      if (op_index < 0 && stmt_index == i)
	i = stmt_num - 1;

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Transforming ");
	  print_gimple_stmt (dump_file, stmts[i], 0, 0);
	}

      /* We keep original statement only for the last one.  All
	 others are recreated.  */
      if (i == stmt_num - 1)
	{
	  gimple_assign_set_rhs1 (stmts[i], op1);
	  gimple_assign_set_rhs2 (stmts[i], op2);
	  update_stmt (stmts[i]);
	}
      else
	stmts[i] = build_and_add_sum (TREE_TYPE (last_rhs1), op1, op2, opcode);

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, " into ");
	  print_gimple_stmt (dump_file, stmts[i], 0, 0);
	}
    }

  remove_visited_stmt_chain (last_rhs1);
}
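
/* E.g. with four operands a, b, c, d and width 2 the serial chain
     t1 = a + b;  t2 = t1 + c;  t3 = t2 + d;
   is rewritten as
     t1 = a + b;  t2 = c + d;  t3 = t1 + t2;
   so the first two additions no longer depend on each other and can
   issue in the same cycle.  */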
/* Transform STMT, which is really (A + B) + (C + D) into the left
   linear form, ((A+B)+C)+D.
   Recurse on D if necessary.  */

static void
linearize_expr (gimple stmt)
{
  gimple_stmt_iterator gsinow, gsirhs;
  gimple binlhs = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
  gimple binrhs = SSA_NAME_DEF_STMT (gimple_assign_rhs2 (stmt));
  enum tree_code rhscode = gimple_assign_rhs_code (stmt);
  gimple newbinrhs = NULL;
  struct loop *loop = loop_containing_stmt (stmt);

  gcc_assert (is_reassociable_op (binlhs, rhscode, loop)
	      && is_reassociable_op (binrhs, rhscode, loop));

  gsinow = gsi_for_stmt (stmt);
  gsirhs = gsi_for_stmt (binrhs);
  gsi_move_before (&gsirhs, &gsinow);

  gimple_assign_set_rhs2 (stmt, gimple_assign_rhs1 (binrhs));
  gimple_assign_set_rhs1 (binrhs, gimple_assign_lhs (binlhs));
  gimple_assign_set_rhs1 (stmt, gimple_assign_lhs (binrhs));

  if (TREE_CODE (gimple_assign_rhs2 (stmt)) == SSA_NAME)
    newbinrhs = SSA_NAME_DEF_STMT (gimple_assign_rhs2 (stmt));

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Linearized: ");
      print_gimple_stmt (dump_file, stmt, 0, 0);
    }

  reassociate_stats.linearized++;
  update_stmt (binrhs);
  update_stmt (binlhs);
  update_stmt (stmt);

  gimple_set_visited (stmt, true);
  gimple_set_visited (binlhs, true);
  gimple_set_visited (binrhs, true);

  /* Tail recurse on the new rhs if it still needs reassociation.  */
  if (newbinrhs && is_reassociable_op (newbinrhs, rhscode, loop))
    /* ??? This should probably be linearize_expr (newbinrhs) but I don't
       want to change the algorithm while converting to tuples.  */
    linearize_expr (stmt);
}
/* If LHS has a single immediate use that is a GIMPLE_ASSIGN statement, return
   it.  Otherwise, return NULL.  */

static gimple
get_single_immediate_use (tree lhs)
{
  use_operand_p immuse;
  gimple immusestmt;

  if (TREE_CODE (lhs) == SSA_NAME
      && single_imm_use (lhs, &immuse, &immusestmt)
      && is_gimple_assign (immusestmt))
    return immusestmt;

  return NULL;
}
/* Recursively negate the value of TONEGATE, and return the SSA_NAME
   representing the negated value.  Insertions of any necessary
   instructions go before GSI.
   This function is recursive in that, if you hand it "a_5" as the
   value to negate, and a_5 is defined by "a_5 = b_3 + b_4", it will
   transform b_3 + b_4 into a_5 = -b_3 + -b_4.  */

static tree
negate_value (tree tonegate, gimple_stmt_iterator *gsi)
{
  gimple negatedefstmt = NULL;
  tree resultofnegate;

  /* If we are trying to negate a name, defined by an add, negate the
     add operands instead.  */
  if (TREE_CODE (tonegate) == SSA_NAME)
    negatedefstmt = SSA_NAME_DEF_STMT (tonegate);
  if (TREE_CODE (tonegate) == SSA_NAME
      && is_gimple_assign (negatedefstmt)
      && TREE_CODE (gimple_assign_lhs (negatedefstmt)) == SSA_NAME
      && has_single_use (gimple_assign_lhs (negatedefstmt))
      && gimple_assign_rhs_code (negatedefstmt) == PLUS_EXPR)
    {
      gimple_stmt_iterator gsi;
      tree rhs1 = gimple_assign_rhs1 (negatedefstmt);
      tree rhs2 = gimple_assign_rhs2 (negatedefstmt);

      gsi = gsi_for_stmt (negatedefstmt);
      rhs1 = negate_value (rhs1, &gsi);
      gimple_assign_set_rhs1 (negatedefstmt, rhs1);

      gsi = gsi_for_stmt (negatedefstmt);
      rhs2 = negate_value (rhs2, &gsi);
      gimple_assign_set_rhs2 (negatedefstmt, rhs2);

      update_stmt (negatedefstmt);
      return gimple_assign_lhs (negatedefstmt);
    }

  tonegate = fold_build1 (NEGATE_EXPR, TREE_TYPE (tonegate), tonegate);
  resultofnegate = force_gimple_operand_gsi (gsi, tonegate, true,
					     NULL_TREE, true, GSI_SAME_STMT);
  return resultofnegate;
}
/* Return true if we should break up the subtract in STMT into an add
   with negate.  This is true when the subtract operands are really
   adds, or the subtract itself is used in an add expression.  In
   either case, breaking up the subtract into an add with negate
   exposes the adds to reassociation.  */

static bool
should_break_up_subtract (gimple stmt)
{
  tree lhs = gimple_assign_lhs (stmt);
  tree binlhs = gimple_assign_rhs1 (stmt);
  tree binrhs = gimple_assign_rhs2 (stmt);
  gimple immusestmt;
  struct loop *loop = loop_containing_stmt (stmt);

  if (TREE_CODE (binlhs) == SSA_NAME
      && is_reassociable_op (SSA_NAME_DEF_STMT (binlhs), PLUS_EXPR, loop))
    return true;

  if (TREE_CODE (binrhs) == SSA_NAME
      && is_reassociable_op (SSA_NAME_DEF_STMT (binrhs), PLUS_EXPR, loop))
    return true;

  if (TREE_CODE (lhs) == SSA_NAME
      && (immusestmt = get_single_immediate_use (lhs))
      && is_gimple_assign (immusestmt)
      && (gimple_assign_rhs_code (immusestmt) == PLUS_EXPR
	  || gimple_assign_rhs_code (immusestmt) == MULT_EXPR))
    return true;
  return false;
}
/* Transform STMT from A - B into A + -B.  */

static void
break_up_subtract (gimple stmt, gimple_stmt_iterator *gsip)
{
  tree rhs1 = gimple_assign_rhs1 (stmt);
  tree rhs2 = gimple_assign_rhs2 (stmt);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Breaking up subtract ");
      print_gimple_stmt (dump_file, stmt, 0, 0);
    }

  rhs2 = negate_value (rhs2, gsip);
  gimple_assign_set_rhs_with_ops (gsip, PLUS_EXPR, rhs1, rhs2);
  update_stmt (stmt);
}
/* Determine whether STMT is a builtin call that raises an SSA name
   to an integer power and has only one use.  If so, and this is early
   reassociation and unsafe math optimizations are permitted, place
   the SSA name in *BASE and the exponent in *EXPONENT, and return TRUE.
   If any of these conditions does not hold, return FALSE.  */

static bool
acceptable_pow_call (gimple stmt, tree *base, HOST_WIDE_INT *exponent)
{
  tree fndecl, arg1;
  REAL_VALUE_TYPE c, cint;

  if (!first_pass_instance
      || !flag_unsafe_math_optimizations
      || !is_gimple_call (stmt)
      || !has_single_use (gimple_call_lhs (stmt)))
    return false;

  fndecl = gimple_call_fndecl (stmt);

  if (!fndecl
      || DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_NORMAL)
    return false;

  switch (DECL_FUNCTION_CODE (fndecl))
    {
    CASE_FLT_FN (BUILT_IN_POW):
      if (flag_errno_math)
	return false;

      *base = gimple_call_arg (stmt, 0);
      arg1 = gimple_call_arg (stmt, 1);

      if (TREE_CODE (arg1) != REAL_CST)
	return false;

      c = TREE_REAL_CST (arg1);

      if (REAL_EXP (&c) > HOST_BITS_PER_WIDE_INT)
	return false;

      *exponent = real_to_integer (&c);
      real_from_integer (&cint, VOIDmode, *exponent,
			 *exponent < 0 ? -1 : 0, 0);
      if (!real_identical (&c, &cint))
	return false;

      break;

    CASE_FLT_FN (BUILT_IN_POWI):
      *base = gimple_call_arg (stmt, 0);
      arg1 = gimple_call_arg (stmt, 1);

      if (!host_integerp (arg1, 0))
	return false;

      *exponent = TREE_INT_CST_LOW (arg1);
      break;

    default:
      return false;
    }

  /* Expanding negative exponents is generally unproductive, so we don't
     complicate matters with those.  Exponents of zero and one should
     have been handled by expression folding.  */
  if (*exponent < 2 || TREE_CODE (*base) != SSA_NAME)
    return false;

  return true;
}
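
/* E.g. a single-use __builtin_pow (x_1, 3.0) yields *BASE = x_1 and
   *EXPONENT = 3, while __builtin_pow (x_1, 2.5) is rejected because
   2.5 does not round-trip through real_to_integer unchanged.  */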
/* Recursively linearize a binary expression that is the RHS of STMT.
   Place the operands of the expression tree in the vector named OPS.  */

static void
linearize_expr_tree (vec<operand_entry_t> *ops, gimple stmt,
		     bool is_associative, bool set_visited)
{
  tree binlhs = gimple_assign_rhs1 (stmt);
  tree binrhs = gimple_assign_rhs2 (stmt);
  gimple binlhsdef = NULL, binrhsdef = NULL;
  bool binlhsisreassoc = false;
  bool binrhsisreassoc = false;
  enum tree_code rhscode = gimple_assign_rhs_code (stmt);
  struct loop *loop = loop_containing_stmt (stmt);
  tree base = NULL_TREE;
  HOST_WIDE_INT exponent = 0;

  if (set_visited)
    gimple_set_visited (stmt, true);

  if (TREE_CODE (binlhs) == SSA_NAME)
    {
      binlhsdef = SSA_NAME_DEF_STMT (binlhs);
      binlhsisreassoc = (is_reassociable_op (binlhsdef, rhscode, loop)
			 && !stmt_could_throw_p (binlhsdef));
    }

  if (TREE_CODE (binrhs) == SSA_NAME)
    {
      binrhsdef = SSA_NAME_DEF_STMT (binrhs);
      binrhsisreassoc = (is_reassociable_op (binrhsdef, rhscode, loop)
			 && !stmt_could_throw_p (binrhsdef));
    }

  /* If the LHS is not reassociable, but the RHS is, we need to swap
     them.  If neither is reassociable, there is nothing we can do, so
     just put them in the ops vector.  If the LHS is reassociable,
     linearize it.  If both are reassociable, then linearize the RHS
     and the LHS.  */

  if (!binlhsisreassoc)
    {
      tree temp;

      /* If this is not an associative operation like division, give up.  */
      if (!is_associative)
	{
	  add_to_ops_vec (ops, binrhs);
	  return;
	}

      if (!binrhsisreassoc)
	{
	  if (rhscode == MULT_EXPR
	      && TREE_CODE (binrhs) == SSA_NAME
	      && acceptable_pow_call (binrhsdef, &base, &exponent))
	    {
	      add_repeat_to_ops_vec (ops, base, exponent);
	      gimple_set_visited (binrhsdef, true);
	    }
	  else
	    add_to_ops_vec (ops, binrhs);

	  if (rhscode == MULT_EXPR
	      && TREE_CODE (binlhs) == SSA_NAME
	      && acceptable_pow_call (binlhsdef, &base, &exponent))
	    {
	      add_repeat_to_ops_vec (ops, base, exponent);
	      gimple_set_visited (binlhsdef, true);
	    }
	  else
	    add_to_ops_vec (ops, binlhs);

	  return;
	}

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "swapping operands of ");
	  print_gimple_stmt (dump_file, stmt, 0, 0);
	}

      swap_tree_operands (stmt,
			  gimple_assign_rhs1_ptr (stmt),
			  gimple_assign_rhs2_ptr (stmt));
      update_stmt (stmt);

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, " is now ");
	  print_gimple_stmt (dump_file, stmt, 0, 0);
	}

      /* We want to make it so the lhs is always the reassociative op,
	 so swap.  */
      temp = binlhs;
      binlhs = binrhs;
      binrhs = temp;
    }
  else if (binrhsisreassoc)
    {
      linearize_expr (stmt);
      binlhs = gimple_assign_rhs1 (stmt);
      binrhs = gimple_assign_rhs2 (stmt);
    }

  gcc_assert (TREE_CODE (binrhs) != SSA_NAME
	      || !is_reassociable_op (SSA_NAME_DEF_STMT (binrhs),
				      rhscode, loop));
  linearize_expr_tree (ops, SSA_NAME_DEF_STMT (binlhs),
		       is_associative, set_visited);

  if (rhscode == MULT_EXPR
      && TREE_CODE (binrhs) == SSA_NAME
      && acceptable_pow_call (SSA_NAME_DEF_STMT (binrhs), &base, &exponent))
    {
      add_repeat_to_ops_vec (ops, base, exponent);
      gimple_set_visited (SSA_NAME_DEF_STMT (binrhs), true);
    }
  else
    add_to_ops_vec (ops, binrhs);
}
/* Repropagate the negates back into subtracts, since no other pass
   currently does it.  */

static void
repropagate_negates (void)
{
  unsigned int i = 0;
  tree negate;

  FOR_EACH_VEC_ELT (plus_negates, i, negate)
    {
      gimple user = get_single_immediate_use (negate);

      if (!user || !is_gimple_assign (user))
	continue;

      /* The negate operand can be either operand of a PLUS_EXPR
	 (it can be the LHS if the RHS is a constant for example).

	 Force the negate operand to the RHS of the PLUS_EXPR, then
	 transform the PLUS_EXPR into a MINUS_EXPR.  */
      if (gimple_assign_rhs_code (user) == PLUS_EXPR)
	{
	  /* If the negated operand appears on the LHS of the
	     PLUS_EXPR, exchange the operands of the PLUS_EXPR
	     to force the negated operand to the RHS of the PLUS_EXPR.  */
	  if (gimple_assign_rhs1 (user) == negate)
	    {
	      swap_tree_operands (user,
				  gimple_assign_rhs1_ptr (user),
				  gimple_assign_rhs2_ptr (user));
	    }

	  /* Now transform the PLUS_EXPR into a MINUS_EXPR and replace
	     the RHS of the PLUS_EXPR with the operand of the NEGATE_EXPR.  */
	  if (gimple_assign_rhs2 (user) == negate)
	    {
	      tree rhs1 = gimple_assign_rhs1 (user);
	      tree rhs2 = get_unary_op (negate, NEGATE_EXPR);
	      gimple_stmt_iterator gsi = gsi_for_stmt (user);
	      gimple_assign_set_rhs_with_ops (&gsi, MINUS_EXPR, rhs1, rhs2);
	      update_stmt (user);
	    }
	}
      else if (gimple_assign_rhs_code (user) == MINUS_EXPR)
	{
	  if (gimple_assign_rhs1 (user) == negate)
	    {
	      /* We have
		   x = -a
		   y = x - b
		 which we transform into
		   x = a + b
		   y = -x .
		 This pushes down the negate which we possibly can merge
		 into some other operation, hence insert it into the
		 plus_negates vector.  */
	      gimple feed = SSA_NAME_DEF_STMT (negate);
	      tree a = gimple_assign_rhs1 (feed);
	      tree rhs2 = gimple_assign_rhs2 (user);
	      gimple_stmt_iterator gsi = gsi_for_stmt (feed), gsi2;
	      gimple_replace_lhs (feed, negate);
	      gimple_assign_set_rhs_with_ops (&gsi, PLUS_EXPR, a, rhs2);
	      update_stmt (gsi_stmt (gsi));
	      gsi2 = gsi_for_stmt (user);
	      gimple_assign_set_rhs_with_ops (&gsi2, NEGATE_EXPR, negate, NULL);
	      update_stmt (gsi_stmt (gsi2));
	      gsi_move_before (&gsi, &gsi2);
	      plus_negates.safe_push (gimple_assign_lhs (gsi_stmt (gsi2)));
	    }
	  else
	    {
	      /* Transform "x = -a; y = b - x" into "y = b + a", getting
		 rid of one operation.  */
	      gimple feed = SSA_NAME_DEF_STMT (negate);
	      tree a = gimple_assign_rhs1 (feed);
	      tree rhs1 = gimple_assign_rhs1 (user);
	      gimple_stmt_iterator gsi = gsi_for_stmt (user);
	      gimple_assign_set_rhs_with_ops (&gsi, PLUS_EXPR, rhs1, a);
	      update_stmt (gsi_stmt (gsi));
	    }
	}
    }
}
/* Returns true if OP is of a type for which we can do reassociation.
   That is for integral or non-saturating fixed-point types, and for
   floating point type when associative-math is enabled.  */

static bool
can_reassociate_p (tree op)
{
  tree type = TREE_TYPE (op);
  if ((INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_WRAPS (type))
      || NON_SAT_FIXED_POINT_TYPE_P (type)
      || (flag_associative_math && FLOAT_TYPE_P (type)))
    return true;
  return false;
}
/* Break up subtract operations in block BB.

   We do this top down because we don't know whether the subtract is
   part of a possible chain of reassociation except at the top.

   IE given
   d = f + g
   c = a + e
   b = c - d
   q = b - r
   k = t - q

   we want to break up k = t - q, but we won't until we've transformed q
   = b - r, which won't be broken up until we transform b = c - d.

   En passant, clear the GIMPLE visited flag on every statement.  */

static void
break_up_subtract_bb (basic_block bb)
{
  gimple_stmt_iterator gsi;
  basic_block son;

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      gimple_set_visited (stmt, false);

      if (!is_gimple_assign (stmt)
	  || !can_reassociate_p (gimple_assign_lhs (stmt)))
	continue;

      /* Look for simple gimple subtract operations.  */
      if (gimple_assign_rhs_code (stmt) == MINUS_EXPR)
	{
	  if (!can_reassociate_p (gimple_assign_rhs1 (stmt))
	      || !can_reassociate_p (gimple_assign_rhs2 (stmt)))
	    continue;

	  /* Check for a subtract used only in an addition.  If this
	     is the case, transform it into add of a negate for better
	     reassociation.  IE transform C = A-B into C = A + -B if C
	     is only used in an addition.  */
	  if (should_break_up_subtract (stmt))
	    break_up_subtract (stmt, &gsi);
	}
      else if (gimple_assign_rhs_code (stmt) == NEGATE_EXPR
	       && can_reassociate_p (gimple_assign_rhs1 (stmt)))
	plus_negates.safe_push (gimple_assign_lhs (stmt));
    }
  for (son = first_dom_son (CDI_DOMINATORS, bb);
       son;
       son = next_dom_son (CDI_DOMINATORS, son))
    break_up_subtract_bb (son);
}
/* Used for repeated factor analysis.  */
struct repeat_factor_d
{
  /* An SSA name that occurs in a multiply chain.  */
  tree factor;

  /* Cached rank of the factor.  */
  unsigned rank;

  /* Number of occurrences of the factor in the chain.  */
  HOST_WIDE_INT count;

  /* An SSA name representing the product of this factor and
     all factors appearing later in the repeated factor vector.  */
  tree repr;
};

typedef struct repeat_factor_d repeat_factor, *repeat_factor_t;
typedef const struct repeat_factor_d *const_repeat_factor_t;


static vec<repeat_factor> repeat_factor_vec;
/* Used for sorting the repeat factor vector.  Sort primarily by
   ascending occurrence count, secondarily by descending rank.  */

static int
compare_repeat_factors (const void *x1, const void *x2)
{
  const_repeat_factor_t rf1 = (const_repeat_factor_t) x1;
  const_repeat_factor_t rf2 = (const_repeat_factor_t) x2;

  if (rf1->count != rf2->count)
    return rf1->count - rf2->count;

  return rf2->rank - rf1->rank;
}
/* Look for repeated operands in OPS in the multiply tree rooted at
   STMT.  Replace them with an optimal sequence of multiplies and powi
   builtin calls, and remove the used operands from OPS.  Return an
   SSA name representing the value of the replacement sequence.  */

static tree
attempt_builtin_powi (gimple stmt, vec<operand_entry_t> *ops)
{
  unsigned i, j, vec_len;
  int ii;
  operand_entry_t oe;
  repeat_factor_t rf1, rf2;
  repeat_factor rfnew;
  tree result = NULL_TREE;
  tree target_ssa, iter_result;
  tree type = TREE_TYPE (gimple_get_lhs (stmt));
  tree powi_fndecl = mathfn_built_in (type, BUILT_IN_POWI);
  gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
  gimple mul_stmt, pow_stmt;

  /* Nothing to do if BUILT_IN_POWI doesn't exist for this type and
     target.  */
  if (!powi_fndecl)
    return NULL_TREE;

  /* Allocate the repeated factor vector.  */
  repeat_factor_vec.create (10);

  /* Scan the OPS vector for all SSA names in the product and build
     up a vector of occurrence counts for each factor.  */
  FOR_EACH_VEC_ELT (*ops, i, oe)
    {
      if (TREE_CODE (oe->op) == SSA_NAME)
	{
	  FOR_EACH_VEC_ELT (repeat_factor_vec, j, rf1)
	    {
	      if (rf1->factor == oe->op)
		{
		  rf1->count += oe->count;
		  break;
		}
	    }

	  if (j >= repeat_factor_vec.length ())
	    {
	      rfnew.factor = oe->op;
	      rfnew.rank = oe->rank;
	      rfnew.count = oe->count;
	      rfnew.repr = NULL_TREE;
	      repeat_factor_vec.safe_push (rfnew);
	    }
	}
    }

  /* Sort the repeated factor vector by (a) increasing occurrence count,
     and (b) decreasing rank.  */
  repeat_factor_vec.qsort (compare_repeat_factors);

  /* It is generally best to combine as many base factors as possible
     into a product before applying __builtin_powi to the result.
     However, the sort order chosen for the repeated factor vector
     allows us to cache partial results for the product of the base
     factors for subsequent use.  When we already have a cached partial
     result from a previous iteration, it is best to make use of it
     before looking for another __builtin_pow opportunity.

     As an example, consider x * x * y * y * y * z * z * z * z.
     We want to first compose the product x * y * z, raise it to the
     second power, then multiply this by y * z, and finally multiply
     by z.  This can be done in 5 multiplies provided we cache y * z
     for use in both expressions:

	t1 = y * z
	t2 = t1 * x
	t3 = t2 * t2
	t4 = t1 * t3
	result = t4 * z

     If we instead ignored the cached y * z and first multiplied by
     the __builtin_pow opportunity z * z, we would get the inferior:

	t1 = y * z
	t2 = t1 * x
	t3 = t2 * t2
	t4 = z * z
	t5 = t3 * t4
	result = t5 * y  */

  vec_len = repeat_factor_vec.length ();

  /* Repeatedly look for opportunities to create a builtin_powi call.  */
  while (true)
    {
      HOST_WIDE_INT power;

      /* First look for the largest cached product of factors from
	 preceding iterations.  If found, create a builtin_powi for
	 it if the minimum occurrence count for its factors is at
	 least 2, or just use this cached product as our next
	 multiplicand if the minimum occurrence count is 1.  */
      FOR_EACH_VEC_ELT (repeat_factor_vec, j, rf1)
	{
	  if (rf1->repr && rf1->count > 0)
	    break;
	}

      if (j < vec_len)
	{
	  power = rf1->count;

	  if (power == 1)
	    {
	      iter_result = rf1->repr;

	      if (dump_file && (dump_flags & TDF_DETAILS))
		{
		  unsigned elt;
		  repeat_factor_t rf;
		  fputs ("Multiplying by cached product ", dump_file);
		  for (elt = j; elt < vec_len; elt++)
		    {
		      rf = &repeat_factor_vec[elt];
		      print_generic_expr (dump_file, rf->factor, 0);
		      if (elt < vec_len - 1)
			fputs (" * ", dump_file);
		    }
		  fputs ("\n", dump_file);
		}
	    }
	  else
	    {
	      iter_result = make_temp_ssa_name (type, NULL, "reassocpow");
	      pow_stmt = gimple_build_call (powi_fndecl, 2, rf1->repr,
					    build_int_cst (integer_type_node,
							   power));
	      gimple_call_set_lhs (pow_stmt, iter_result);
	      gimple_set_location (pow_stmt, gimple_location (stmt));
	      gsi_insert_before (&gsi, pow_stmt, GSI_SAME_STMT);

	      if (dump_file && (dump_flags & TDF_DETAILS))
		{
		  unsigned elt;
		  repeat_factor_t rf;
		  fputs ("Building __builtin_pow call for cached product (",
			 dump_file);
		  for (elt = j; elt < vec_len; elt++)
		    {
		      rf = &repeat_factor_vec[elt];
		      print_generic_expr (dump_file, rf->factor, 0);
		      if (elt < vec_len - 1)
			fputs (" * ", dump_file);
		    }
		  fprintf (dump_file, ")^"HOST_WIDE_INT_PRINT_DEC"\n",
			   power);
		}
	    }
	}
      else
	{
	  /* Otherwise, find the first factor in the repeated factor
	     vector whose occurrence count is at least 2.  If no such
	     factor exists, there are no builtin_powi opportunities
	     remaining.  */
	  FOR_EACH_VEC_ELT (repeat_factor_vec, j, rf1)
	    {
	      if (rf1->count >= 2)
		break;
	    }

	  if (j >= vec_len)
	    break;

	  power = rf1->count;

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      unsigned elt;
	      repeat_factor_t rf;
	      fputs ("Building __builtin_pow call for (", dump_file);
	      for (elt = j; elt < vec_len; elt++)
		{
		  rf = &repeat_factor_vec[elt];
		  print_generic_expr (dump_file, rf->factor, 0);
		  if (elt < vec_len - 1)
		    fputs (" * ", dump_file);
		}
	      fprintf (dump_file, ")^"HOST_WIDE_INT_PRINT_DEC"\n", power);
	    }

	  reassociate_stats.pows_created++;

	  /* Visit each element of the vector in reverse order (so that
	     high-occurrence elements are visited first, and within the
	     same occurrence count, lower-ranked elements are visited
	     first).  Form a linear product of all elements in this order
	     whose occurrence count is at least that of element J.
	     Record the SSA name representing the product of each element
	     with all subsequent elements in the vector.  */
	  if (j == vec_len - 1)
	    rf1->repr = rf1->factor;
	  else
	    {
	      for (ii = vec_len - 2; ii >= (int)j; ii--)
		{
		  tree op1, op2;

		  rf1 = &repeat_factor_vec[ii];
		  rf2 = &repeat_factor_vec[ii + 1];

		  /* Init the last factor's representative to be itself.  */
		  if (!rf2->repr)
		    rf2->repr = rf2->factor;

		  op1 = rf1->factor;
		  op2 = rf2->repr;

		  target_ssa = make_temp_ssa_name (type, NULL, "reassocpow");
		  mul_stmt = gimple_build_assign_with_ops (MULT_EXPR,
							   target_ssa,
							   op1, op2);
		  gimple_set_location (mul_stmt, gimple_location (stmt));
		  gsi_insert_before (&gsi, mul_stmt, GSI_SAME_STMT);
		  rf1->repr = target_ssa;

		  /* Don't reprocess the multiply we just introduced.  */
		  gimple_set_visited (mul_stmt, true);
		}
	    }

	  /* Form a call to __builtin_powi for the maximum product
	     just formed, raised to the power obtained earlier.  */
	  rf1 = &repeat_factor_vec[j];
	  iter_result = make_temp_ssa_name (type, NULL, "reassocpow");
	  pow_stmt = gimple_build_call (powi_fndecl, 2, rf1->repr,
					build_int_cst (integer_type_node,
						       power));
	  gimple_call_set_lhs (pow_stmt, iter_result);
	  gimple_set_location (pow_stmt, gimple_location (stmt));
	  gsi_insert_before (&gsi, pow_stmt, GSI_SAME_STMT);
	}

      /* If we previously formed at least one other builtin_powi call,
	 form the product of this one and those others.  */
      if (result)
	{
	  tree new_result = make_temp_ssa_name (type, NULL, "reassocpow");
	  mul_stmt = gimple_build_assign_with_ops (MULT_EXPR, new_result,
						   result, iter_result);
	  gimple_set_location (mul_stmt, gimple_location (stmt));
	  gsi_insert_before (&gsi, mul_stmt, GSI_SAME_STMT);
	  gimple_set_visited (mul_stmt, true);
	  result = new_result;
	}
      else
	result = iter_result;

      /* Decrement the occurrence count of each element in the product
	 by the count found above, and remove this many copies of each
	 factor from OPS.  */
      for (i = j; i < vec_len; i++)
	{
	  unsigned k = power;
	  unsigned n;

	  rf1 = &repeat_factor_vec[i];
	  rf1->count -= power;

	  FOR_EACH_VEC_ELT_REVERSE (*ops, n, oe)
	    {
	      if (oe->op == rf1->factor)
		{
		  if (oe->count <= k)
		    {
		      ops->ordered_remove (n);
		      k -= oe->count;

		      if (k == 0)
			break;
		    }
		  else
		    {
		      oe->count -= k;
		      break;
		    }
		}
	    }
	}
    }

  /* At this point all elements in the repeated factor vector have a
     remaining occurrence count of 0 or 1, and those with a count of 1
     don't have cached representatives.  Re-sort the ops vector and
     clean up.  */
  ops->qsort (sort_by_operand_rank);
  repeat_factor_vec.release ();

  /* Return the final product computed herein.  Note that there may
     still be some elements with single occurrence count left in OPS;
     those will be handled by the normal reassociation logic.  */
  return result;
}
/* Transform STMT at *GSI into a copy by replacing its rhs with NEW_RHS.  */

static void
transform_stmt_to_copy (gimple_stmt_iterator *gsi, gimple stmt, tree new_rhs)
{
  tree rhs1;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Transforming ");
      print_gimple_stmt (dump_file, stmt, 0, 0);
    }

  rhs1 = gimple_assign_rhs1 (stmt);
  gimple_assign_set_rhs_from_tree (gsi, new_rhs);
  update_stmt (gsi_stmt (*gsi));
  remove_visited_stmt_chain (rhs1);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, " into ");
      print_gimple_stmt (dump_file, stmt, 0, 0);
    }
}
/* Transform STMT at *GSI into a multiply of RHS1 and RHS2.  */

static void
transform_stmt_to_multiply (gimple_stmt_iterator *gsi, gimple stmt,
			    tree rhs1, tree rhs2)
{
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Transforming ");
      print_gimple_stmt (dump_file, stmt, 0, 0);
    }

  gimple_assign_set_rhs_with_ops (gsi, MULT_EXPR, rhs1, rhs2);
  update_stmt (gsi_stmt (*gsi));
  remove_visited_stmt_chain (rhs1);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, " into ");
      print_gimple_stmt (dump_file, stmt, 0, 0);
    }
}
/* Reassociate expressions in basic block BB and its post-dominator as
   children.  */

static void
reassociate_bb (basic_block bb)
{
  gimple_stmt_iterator gsi;
  basic_block son;
  gimple stmt = last_stmt (bb);

  if (stmt && !gimple_visited_p (stmt))
    maybe_optimize_range_tests (stmt);

  for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      stmt = gsi_stmt (gsi);

      if (is_gimple_assign (stmt)
	  && !stmt_could_throw_p (stmt))
	{
	  tree lhs, rhs1, rhs2;
	  enum tree_code rhs_code = gimple_assign_rhs_code (stmt);

	  /* If this is not a gimple binary expression, there is
	     nothing for us to do with it.  */
	  if (get_gimple_rhs_class (rhs_code) != GIMPLE_BINARY_RHS)
	    continue;

	  /* If this was part of an already processed statement,
	     we don't need to touch it again.  */
	  if (gimple_visited_p (stmt))
	    {
	      /* This statement might have become dead because of previous
		 reassociations.  */
	      if (has_zero_uses (gimple_get_lhs (stmt)))
		{
		  gsi_remove (&gsi, true);
		  release_defs (stmt);
		  /* We might end up removing the last stmt above which
		     places the iterator to the end of the sequence.
		     Reset it to the last stmt in this case which might
		     be the end of the sequence as well if we removed
		     the last statement of the sequence.  In which case
		     we need to bail out.  */
		  if (gsi_end_p (gsi))
		    {
		      gsi = gsi_last_bb (bb);
		      if (gsi_end_p (gsi))
			break;
		    }
		}
	      continue;
	    }

	  lhs = gimple_assign_lhs (stmt);
	  rhs1 = gimple_assign_rhs1 (stmt);
	  rhs2 = gimple_assign_rhs2 (stmt);

	  /* For non-bit or min/max operations we can't associate
	     all types.  Verify that here.  */
	  if (rhs_code != BIT_IOR_EXPR
	      && rhs_code != BIT_AND_EXPR
	      && rhs_code != BIT_XOR_EXPR
	      && rhs_code != MIN_EXPR
	      && rhs_code != MAX_EXPR
	      && (!can_reassociate_p (lhs)
		  || !can_reassociate_p (rhs1)
		  || !can_reassociate_p (rhs2)))
	    continue;

	  if (associative_tree_code (rhs_code))
	    {
	      vec<operand_entry_t> ops = vNULL;
	      tree powi_result = NULL_TREE;

	      /* There may be no immediate uses left by the time we
		 get here because we may have eliminated them all.  */
	      if (TREE_CODE (lhs) == SSA_NAME && has_zero_uses (lhs))
		continue;

	      gimple_set_visited (stmt, true);
	      linearize_expr_tree (&ops, stmt, true, true);
	      ops.qsort (sort_by_operand_rank);
	      optimize_ops_list (rhs_code, &ops);
	      if (undistribute_ops_list (rhs_code, &ops,
					 loop_containing_stmt (stmt)))
		{
		  ops.qsort (sort_by_operand_rank);
		  optimize_ops_list (rhs_code, &ops);
		}

	      if (rhs_code == BIT_IOR_EXPR || rhs_code == BIT_AND_EXPR)
		optimize_range_tests (rhs_code, &ops);

	      if (first_pass_instance
		  && rhs_code == MULT_EXPR
		  && flag_unsafe_math_optimizations)
		powi_result = attempt_builtin_powi (stmt, &ops);

	      /* If the operand vector is now empty, all operands were
		 consumed by the __builtin_powi optimization.  */
	      if (ops.length () == 0)
		transform_stmt_to_copy (&gsi, stmt, powi_result);
	      else if (ops.length () == 1)
		{
		  tree last_op = ops.last ()->op;

		  if (powi_result)
		    transform_stmt_to_multiply (&gsi, stmt, last_op,
						powi_result);
		  else
		    transform_stmt_to_copy (&gsi, stmt, last_op);
		}
	      else
		{
		  enum machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
		  int ops_num = ops.length ();
		  int width = get_reassociation_width (ops_num, rhs_code, mode);

		  if (dump_file && (dump_flags & TDF_DETAILS))
		    fprintf (dump_file,
			     "Width = %d was chosen for reassociation\n", width);

		  if (width > 1
		      && ops.length () > 3)
		    rewrite_expr_tree_parallel (stmt, width, ops);
		  else
		    rewrite_expr_tree (stmt, 0, ops, false);

		  /* If we combined some repeated factors into a
		     __builtin_powi call, multiply that result by the
		     reassociated operands.  */
		  if (powi_result)
		    {
		      gimple mul_stmt;
		      tree type = TREE_TYPE (gimple_get_lhs (stmt));
		      tree target_ssa = make_temp_ssa_name (type, NULL,
							    "reassocpow");
		      gimple_set_lhs (stmt, target_ssa);
		      update_stmt (stmt);
		      mul_stmt = gimple_build_assign_with_ops (MULT_EXPR, lhs,
							       powi_result,
							       target_ssa);
		      gimple_set_location (mul_stmt, gimple_location (stmt));
		      gsi_insert_after (&gsi, mul_stmt, GSI_NEW_STMT);
		    }
		}

	      ops.release ();
	    }
	}
    }
  for (son = first_dom_son (CDI_POST_DOMINATORS, bb);
       son;
       son = next_dom_son (CDI_POST_DOMINATORS, son))
    reassociate_bb (son);
}
void dump_ops_vector (FILE *file, vec<operand_entry_t> ops);
void debug_ops_vector (vec<operand_entry_t> ops);

/* Dump the operand entry vector OPS to FILE.  */

void
dump_ops_vector (FILE *file, vec<operand_entry_t> ops)
{
  operand_entry_t oe;
  unsigned int i;

  FOR_EACH_VEC_ELT (ops, i, oe)
    {
      fprintf (file, "Op %d -> rank: %d, tree: ", i, oe->rank);
      print_generic_expr (file, oe->op, 0);
    }
}

/* Dump the operand entry vector OPS to STDERR.  */

DEBUG_FUNCTION void
debug_ops_vector (vec<operand_entry_t> ops)
{
  dump_ops_vector (stderr, ops);
}

static void
do_reassoc (void)
{
  break_up_subtract_bb (ENTRY_BLOCK_PTR);
  reassociate_bb (EXIT_BLOCK_PTR);
}
/* Initialize the reassociation pass.  */

static void
init_reassoc (void)
{
  int i;
  long rank = 2;
  int *bbs = XNEWVEC (int, n_basic_blocks - NUM_FIXED_BLOCKS);

  /* Find the loops, so that we can prevent moving calculations in
     them.  */
  loop_optimizer_init (AVOID_CFG_MODIFICATIONS);

  memset (&reassociate_stats, 0, sizeof (reassociate_stats));

  operand_entry_pool = create_alloc_pool ("operand entry pool",
					  sizeof (struct operand_entry), 30);
  next_operand_entry_id = 0;

  /* Reverse RPO (Reverse Post Order) will give us something where
     deeper loops come later.  */
  pre_and_rev_post_order_compute (NULL, bbs, false);
  bb_rank = XCNEWVEC (long, last_basic_block);
  operand_rank = pointer_map_create ();

  /* Give each default definition a distinct rank.  This includes
     parameters and the static chain.  Walk backwards over all
     SSA names so that we get proper rank ordering according
     to tree_swap_operands_p.  */
  for (i = num_ssa_names - 1; i > 0; --i)
    {
      tree name = ssa_name (i);
      if (name && SSA_NAME_IS_DEFAULT_DEF (name))
	insert_operand_rank (name, ++rank);
    }

  /* Set up rank for each BB  */
  for (i = 0; i < n_basic_blocks - NUM_FIXED_BLOCKS; i++)
    bb_rank[bbs[i]] = ++rank << 16;

  free (bbs);
  calculate_dominance_info (CDI_POST_DOMINATORS);
  plus_negates = vNULL;
}
/* Cleanup after the reassociation pass, and print stats if
   requested.  */

static void
fini_reassoc (void)
{
  statistics_counter_event (cfun, "Linearized",
			    reassociate_stats.linearized);
  statistics_counter_event (cfun, "Constants eliminated",
			    reassociate_stats.constants_eliminated);
  statistics_counter_event (cfun, "Ops eliminated",
			    reassociate_stats.ops_eliminated);
  statistics_counter_event (cfun, "Statements rewritten",
			    reassociate_stats.rewritten);
  statistics_counter_event (cfun, "Built-in pow[i] calls encountered",
			    reassociate_stats.pows_encountered);
  statistics_counter_event (cfun, "Built-in powi calls created",
			    reassociate_stats.pows_created);

  pointer_map_destroy (operand_rank);
  free_alloc_pool (operand_entry_pool);
  free (bb_rank);
  plus_negates.release ();
  free_dominance_info (CDI_POST_DOMINATORS);
  loop_optimizer_finalize ();
}
4278 execute_reassoc (void)
4283 repropagate_negates ();
4290 gate_tree_ssa_reassoc (void)
4292 return flag_tree_reassoc
!= 0;
4295 struct gimple_opt_pass pass_reassoc
=
4299 "reassoc", /* name */
4300 OPTGROUP_NONE
, /* optinfo_flags */
4301 gate_tree_ssa_reassoc
, /* gate */
4302 execute_reassoc
, /* execute */
4305 0, /* static_pass_number */
4306 TV_TREE_REASSOC
, /* tv_id */
4307 PROP_cfg
| PROP_ssa
, /* properties_required */
4308 0, /* properties_provided */
4309 0, /* properties_destroyed */
4310 0, /* todo_flags_start */
4312 | TODO_update_ssa_only_virtuals
4314 | TODO_ggc_collect
/* todo_flags_finish */