/* Reassociation for trees.
   Copyright (C) 2005-2015 Free Software Foundation, Inc.
   Contributed by Daniel Berlin <dan@dberlin.org>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "alloc-pool.h"
#include "tree-pass.h"
#include "optabs-tree.h"
#include "gimple-pretty-print.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "gimple-fold.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "tree-ssa-loop.h"
#include "langhooks.h"
#include "case-cfn-macros.h"
/* This is a simple global reassociation pass.  It is, in part, based
   on the LLVM pass of the same name (They do some things more/less
   than we do, in different orders, etc).

   It consists of five steps:

   1. Breaking up subtract operations into addition + negate, where
   it would promote the reassociation of adds.

   2. Left linearization of the expression trees, so that (A+B)+(C+D)
   becomes (((A+B)+C)+D), which is easier for us to rewrite later.
   During linearization, we place the operands of the binary
   expressions into a vector of operand_entry_*

   3. Optimization of the operand lists, eliminating things like a +
   -a, a & a, etc.

   3a. Combine repeated factors with the same occurrence counts
   into a __builtin_powi call that will later be optimized into
   an optimal number of multiplies.

   4. Rewrite the expression trees we linearized and optimized so
   they are in proper rank order.

   5. Repropagate negates, as nothing else will clean it up ATM.

   A bit of theory on #4, since nobody seems to write anything down
   about why it makes sense to do it the way they do it:

   We could do this much nicer theoretically, but don't (for reasons
   explained after how to do it theoretically nice :P).

   In order to promote the most redundancy elimination, you want
   binary expressions whose operands are the same rank (or
   preferably, the same value) exposed to the redundancy eliminator,
   for possible elimination.

   So the way to do this if we really cared, is to build the new op
   tree from the leaves to the roots, merging as you go, and putting the
   new op on the end of the worklist, until you are left with one
   thing on the worklist.

   IE if you have to rewrite the following set of operands (listed with
   rank in parentheses), with opcode PLUS_EXPR:

   a (1), b (1), c (1), d (2), e (2)

   We start with our merge worklist empty, and the ops list with all of
   those on it.

   You want to first merge all leaves of the same rank, as much as
   possible.

   So first build a binary op of

   mergetmp = a + b, and put "mergetmp" on the merge worklist.

   Because there is no three operand form of PLUS_EXPR, c is not going to
   be exposed to redundancy elimination as a rank 1 operand.

   So you might as well throw it on the merge worklist (you could also
   consider it to now be a rank two operand, and merge it with d and e,
   but in this case, you then have evicted e from a binary op.  So at
   least in this situation, you can't win.)

   Then build a binary op of d + e
   mergetmp2 = d + e

   and put mergetmp2 on the merge worklist.

   so merge worklist = {mergetmp, c, mergetmp2}

   Continue building binary ops of these operations until you have only
   one operation left on the worklist.

   In the above example,

   mergetmp3 = mergetmp + c

   worklist = {mergetmp2, mergetmp3}

   mergetmp4 = mergetmp2 + mergetmp3

   worklist = {mergetmp4}

   because we have one operation left, we can now just set the original
   statement equal to the result of that operation.

   This will at least expose a + b and d + e to redundancy elimination
   as binary operations.

   For extra points, you can reuse the old statements to build the
   mergetmps, since you shouldn't run out.

   So why don't we do this?

   Because it's expensive, and rarely will help.  Most trees we are
   reassociating have 3 or less ops.  If they have 2 ops, they already
   will be written into a nice single binary op.  If you have 3 ops, a
   single simple check suffices to tell you whether the first two are of the
   same rank.  If so, you know to order it

   mergetmp = op1 + op2
   newstmt = mergetmp + op3

   instead of
   mergetmp = op2 + op3
   newstmt = mergetmp + op1

   If all three are of the same rank, you can't expose them all in a
   single binary operator anyway, so the above is *still* the best you
   can do.

   Thus, this is what we do.  When we have three ops left, we check to see
   what order to put them in, and call it a day.  As a nod to vector sum
   reduction, we check if any of the ops are really a phi node that is a
   destructive update for the associating op, and keep the destructive
   update together for vector sum reduction recognition.  */
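/* A minimal illustration of steps 1 and 2 (illustrative only, not taken
   from the sources above): x = (a + b) - c is first broken up into
   x = (a + b) + (-c), then linearized into the operand list {a, b, -c},
   which steps 3-5 optimize and rewrite back into ranked binary
   statements.  */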
/* Enable insertion of __builtin_powi calls during execute_reassoc.  See
   point 3a in the pass header comment.  */
static bool reassoc_insert_powi_p;

/* Statistics */
static struct
{
  int linearized;
  int constants_eliminated;
  int ops_eliminated;
  int rewritten;
  int pows_encountered;
  int pows_created;
} reassociate_stats;
/* Operator, rank pair.  */
struct operand_entry
{
  unsigned int rank;
  int id;
  tree op;
  unsigned int count;
};

static object_allocator<operand_entry> operand_entry_pool
  ("operand entry pool");

/* This is used to assign a unique ID to each struct operand_entry
   so that qsort results are identical on different hosts.  */
static int next_operand_entry_id;
/* Starting rank number for a given basic block, so that we can rank
   operations using unmovable instructions in that BB based on the bb
   depth.  */
static long *bb_rank;
/* Operand->rank hashtable.  */
static hash_map<tree, long> *operand_rank;
/* Vector of SSA_NAMEs on which after reassociate_bb is done with
   all basic blocks the CFG should be adjusted - basic blocks
   split right after that SSA_NAME's definition statement and before
   the only use, which must be a bit ior.  */
static vec<tree> reassoc_branch_fixups;
static long get_rank (tree);
static bool reassoc_stmt_dominates_stmt_p (gimple *, gimple *);
/* Wrapper around gsi_remove, which adjusts gimple_uid of debug stmts
   possibly added by gsi_remove.  */

static bool
reassoc_remove_stmt (gimple_stmt_iterator *gsi)
{
  gimple *stmt = gsi_stmt (*gsi);

  if (!MAY_HAVE_DEBUG_STMTS || gimple_code (stmt) == GIMPLE_PHI)
    return gsi_remove (gsi, true);

  gimple_stmt_iterator prev = *gsi;
  gsi_prev (&prev);
  unsigned uid = gimple_uid (stmt);
  basic_block bb = gimple_bb (stmt);
  bool ret = gsi_remove (gsi, true);
  if (!gsi_end_p (prev))
    gsi_next (&prev);
  else
    prev = gsi_start_bb (bb);
  gimple *end_stmt = gsi_stmt (*gsi);
  while ((stmt = gsi_stmt (prev)) != end_stmt)
    {
      gcc_assert (stmt && is_gimple_debug (stmt) && gimple_uid (stmt) == 0);
      gimple_set_uid (stmt, uid);
      gsi_next (&prev);
    }
  return ret;
}
/* Bias amount for loop-carried phis.  We want this to be larger than
   the depth of any reassociation tree we can see, but not larger than
   the rank difference between two blocks.  */
#define PHI_LOOP_BIAS (1 << 15)
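/* Illustrative note (an assumption drawn from the constraint above, not
   from this file): as long as basic-block ranks are spaced further apart
   than 1 << 15, adding PHI_LOOP_BIAS to a loop-carried phi pushes it past
   every operand computed in its own block without ever reaching the rank
   of the next block.  */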
/* Rank assigned to a phi statement.  If STMT is a loop-carried phi of
   an innermost loop, and the phi has only a single use which is inside
   the loop, then the rank is the block rank of the loop latch plus an
   extra bias for the loop-carried dependence.  This causes expressions
   calculated into an accumulator variable to be independent for each
   iteration of the loop.  If STMT is some other phi, the rank is the
   block rank of its containing block.  */

static long
phi_rank (gimple *stmt)
{
  basic_block bb = gimple_bb (stmt);
  struct loop *father = bb->loop_father;
  tree res;
  unsigned i;
  use_operand_p use;
  gimple *use_stmt;

  /* We only care about real loops (those with a latch).  */
  if (!father->latch)
    return bb_rank[bb->index];

  /* Interesting phis must be in headers of innermost loops.  */
  if (bb != father->header
      || father->inner)
    return bb_rank[bb->index];

  /* Ignore virtual SSA_NAMEs.  */
  res = gimple_phi_result (stmt);
  if (virtual_operand_p (res))
    return bb_rank[bb->index];

  /* The phi definition must have a single use, and that use must be
     within the loop.  Otherwise this isn't an accumulator pattern.  */
  if (!single_imm_use (res, &use, &use_stmt)
      || gimple_bb (use_stmt)->loop_father != father)
    return bb_rank[bb->index];

  /* Look for phi arguments from within the loop.  If found, bias this phi.  */
  for (i = 0; i < gimple_phi_num_args (stmt); i++)
    {
      tree arg = gimple_phi_arg_def (stmt, i);
      if (TREE_CODE (arg) == SSA_NAME
	  && !SSA_NAME_IS_DEFAULT_DEF (arg))
	{
	  gimple *def_stmt = SSA_NAME_DEF_STMT (arg);
	  if (gimple_bb (def_stmt)->loop_father == father)
	    return bb_rank[father->latch->index] + PHI_LOOP_BIAS;
	}
    }

  /* Must be an uninteresting phi.  */
  return bb_rank[bb->index];
}
/* If EXP is an SSA_NAME defined by a PHI statement that represents a
   loop-carried dependence of an innermost loop, return TRUE; else
   return FALSE.  */

static bool
loop_carried_phi (tree exp)
{
  gimple *phi_stmt;
  long block_rank;

  if (TREE_CODE (exp) != SSA_NAME
      || SSA_NAME_IS_DEFAULT_DEF (exp))
    return false;

  phi_stmt = SSA_NAME_DEF_STMT (exp);

  if (gimple_code (SSA_NAME_DEF_STMT (exp)) != GIMPLE_PHI)
    return false;

  /* Non-loop-carried phis have block rank.  Loop-carried phis have
     an additional bias added in.  If this phi doesn't have block rank,
     it's biased and should not be propagated.  */
  block_rank = bb_rank[gimple_bb (phi_stmt)->index];

  if (phi_rank (phi_stmt) != block_rank)
    return true;

  return false;
}
/* Return the maximum of RANK and the rank that should be propagated
   from expression OP.  For most operands, this is just the rank of OP.
   For loop-carried phis, the value is zero to avoid undoing the bias
   in favor of the phi.  */

static long
propagate_rank (long rank, tree op)
{
  long op_rank;

  if (loop_carried_phi (op))
    return rank;

  op_rank = get_rank (op);

  return MAX (rank, op_rank);
}
/* Look up the operand rank structure for expression E.  */

static long
find_operand_rank (tree e)
{
  long *slot = operand_rank->get (e);
  return slot ? *slot : -1;
}
/* Insert {E,RANK} into the operand rank hashtable.  */

static void
insert_operand_rank (tree e, long rank)
{
  gcc_assert (rank > 0);
  gcc_assert (!operand_rank->put (e, rank));
}
/* Given an expression E, return the rank of the expression.  */

static long
get_rank (tree e)
{
  /* SSA_NAME's have the rank of the expression they are the result
     of.
     For globals and uninitialized values, the rank is 0.
     For function arguments, use the pre-setup rank.
     For PHI nodes, stores, asm statements, etc, we use the rank of
     the BB.
     For simple operations, the rank is the maximum rank of any of
     its operands, or the bb_rank, whichever is less.
     I make no claims that this is optimal, however, it gives good
     results.  */

  /* We make an exception to the normal ranking system to break
     dependences of accumulator variables in loops.  Suppose we
     have a simple one-block loop containing:

       x_1 = phi(x_0, x_2)
       b = a + x_1
       c = b + d
       x_2 = c + e

     As shown, each iteration of the calculation into x is fully
     dependent upon the iteration before it.  We would prefer to
     see this in the form:

       x_1 = phi(x_0, x_2)
       b = a + d
       c = b + e
       x_2 = c + x_1

     If the loop is unrolled, the calculations of b and c from
     different iterations can be interleaved.

     To obtain this result during reassociation, we bias the rank
     of the phi definition x_1 upward, when it is recognized as an
     accumulator pattern.  The artificial rank causes it to be
     added last, providing the desired independence.  */

  if (TREE_CODE (e) == SSA_NAME)
    {
      ssa_op_iter iter;
      gimple *stmt;
      long rank;
      tree op;

      if (SSA_NAME_IS_DEFAULT_DEF (e))
	return find_operand_rank (e);

      stmt = SSA_NAME_DEF_STMT (e);
      if (gimple_code (stmt) == GIMPLE_PHI)
	return phi_rank (stmt);

      if (!is_gimple_assign (stmt))
	return bb_rank[gimple_bb (stmt)->index];

      /* If we already have a rank for this expression, use that.  */
      rank = find_operand_rank (e);
      if (rank != -1)
	return rank;

      /* Otherwise, find the maximum rank for the operands.  As an
	 exception, remove the bias from loop-carried phis when propagating
	 the rank so that dependent operations are not also biased.  */
      /* Simply walk over all SSA uses - this takes advantage of the
	 fact that non-SSA operands satisfy is_gimple_min_invariant and
	 thus have rank 0.  */
      rank = 0;
      FOR_EACH_SSA_TREE_OPERAND (op, stmt, iter, SSA_OP_USE)
	rank = propagate_rank (rank, op);

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Rank for ");
	  print_generic_expr (dump_file, e, 0);
	  fprintf (dump_file, " is %ld\n", (rank + 1));
	}

      /* Note the rank in the hashtable so we don't recompute it.  */
      insert_operand_rank (e, (rank + 1));
      return (rank + 1);
    }

  /* Constants, globals, etc., are rank 0 */
  return 0;
}
/* We want integer ones to end up last no matter what, since they are
   the ones we can do the most with.  */
#define INTEGER_CONST_TYPE 1 << 3
#define FLOAT_CONST_TYPE 1 << 2
#define OTHER_CONST_TYPE 1 << 1

/* Classify an invariant tree into integer, float, or other, so that
   we can sort them to be near other constants of the same type.  */

static inline int
constant_type (tree t)
{
  if (INTEGRAL_TYPE_P (TREE_TYPE (t)))
    return INTEGER_CONST_TYPE;
  else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (t)))
    return FLOAT_CONST_TYPE;
  else
    return OTHER_CONST_TYPE;
}
/* qsort comparison function to sort operand entries PA and PB by rank
   so that the sorted array is ordered by rank in decreasing order.  */

static int
sort_by_operand_rank (const void *pa, const void *pb)
{
  const operand_entry *oea = *(const operand_entry *const *)pa;
  const operand_entry *oeb = *(const operand_entry *const *)pb;

  /* It's nicer for optimize_expression if constants that are likely
     to fold when added/multiplied/whatever are put next to each
     other.  Since all constants have rank 0, order them by type.  */
  if (oeb->rank == 0 && oea->rank == 0)
    {
      if (constant_type (oeb->op) != constant_type (oea->op))
	return constant_type (oeb->op) - constant_type (oea->op);
      else
	/* To make sorting result stable, we use unique IDs to determine
	   order.  */
	return oeb->id - oea->id;
    }

  /* Lastly, make sure the versions that are the same go next to each
     other.  */
  if ((oeb->rank - oea->rank == 0)
      && TREE_CODE (oea->op) == SSA_NAME
      && TREE_CODE (oeb->op) == SSA_NAME)
    {
      /* As SSA_NAME_VERSION is assigned pretty randomly, because we reuse
	 versions of removed SSA_NAMEs, so if possible, prefer to sort
	 based on basic block and gimple_uid of the SSA_NAME_DEF_STMT.
	 See PR60418.  */
      if (!SSA_NAME_IS_DEFAULT_DEF (oea->op)
	  && !SSA_NAME_IS_DEFAULT_DEF (oeb->op)
	  && SSA_NAME_VERSION (oeb->op) != SSA_NAME_VERSION (oea->op))
	{
	  gimple *stmta = SSA_NAME_DEF_STMT (oea->op);
	  gimple *stmtb = SSA_NAME_DEF_STMT (oeb->op);
	  basic_block bba = gimple_bb (stmta);
	  basic_block bbb = gimple_bb (stmtb);
	  if (bbb != bba)
	    {
	      if (bb_rank[bbb->index] != bb_rank[bba->index])
		return bb_rank[bbb->index] - bb_rank[bba->index];
	    }
	  else
	    {
	      bool da = reassoc_stmt_dominates_stmt_p (stmta, stmtb);
	      bool db = reassoc_stmt_dominates_stmt_p (stmtb, stmta);
	      if (da != db)
		return da ? 1 : -1;
	    }
	}

      if (SSA_NAME_VERSION (oeb->op) != SSA_NAME_VERSION (oea->op))
	return SSA_NAME_VERSION (oeb->op) - SSA_NAME_VERSION (oea->op);
      else
	return oeb->id - oea->id;
    }

  if (oeb->rank != oea->rank)
    return oeb->rank - oea->rank;
  else
    return oeb->id - oea->id;
}
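/* For example (illustrative only): with ops {b (rank 1), 3, a (rank 2)},
   the comparator above sorts the vector to {a, b, 3} - decreasing rank,
   with the rank-0 constant at the end and constants of the same type
   grouped together so they are likely to fold.  */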
/* Add an operand entry to *OPS for the tree operand OP.  */

static void
add_to_ops_vec (vec<operand_entry *> *ops, tree op)
{
  operand_entry *oe = operand_entry_pool.allocate ();

  oe->op = op;
  oe->rank = get_rank (op);
  oe->id = next_operand_entry_id++;
  oe->count = 1;
  ops->safe_push (oe);
}
/* Add an operand entry to *OPS for the tree operand OP with repeat
   count REPEAT.  */

static void
add_repeat_to_ops_vec (vec<operand_entry *> *ops, tree op,
		       HOST_WIDE_INT repeat)
{
  operand_entry *oe = operand_entry_pool.allocate ();

  oe->op = op;
  oe->rank = get_rank (op);
  oe->id = next_operand_entry_id++;
  oe->count = repeat;
  ops->safe_push (oe);

  reassociate_stats.pows_encountered++;
}
/* Return true if STMT is a reassociable operation containing a binary
   operation with tree code CODE, and is inside LOOP.  */

static bool
is_reassociable_op (gimple *stmt, enum tree_code code, struct loop *loop)
{
  basic_block bb = gimple_bb (stmt);

  if (gimple_bb (stmt) == NULL)
    return false;

  if (!flow_bb_inside_loop_p (loop, bb))
    return false;

  if (is_gimple_assign (stmt)
      && gimple_assign_rhs_code (stmt) == code
      && has_single_use (gimple_assign_lhs (stmt)))
    return true;

  return false;
}
/* Given NAME, if NAME is defined by a unary operation OPCODE, return the
   operand of that operation.  Otherwise, return NULL.  */

static tree
get_unary_op (tree name, enum tree_code opcode)
{
  gimple *stmt = SSA_NAME_DEF_STMT (name);

  if (!is_gimple_assign (stmt))
    return NULL_TREE;

  if (gimple_assign_rhs_code (stmt) == opcode)
    return gimple_assign_rhs1 (stmt);
  return NULL_TREE;
}
/* If CURR and LAST are a pair of ops that OPCODE allows us to
   eliminate through equivalences, do so, remove them from OPS, and
   return true.  Otherwise, return false.  */

static bool
eliminate_duplicate_pair (enum tree_code opcode,
			  vec<operand_entry *> *ops,
			  bool *all_done,
			  unsigned int i,
			  operand_entry *curr,
			  operand_entry *last)
{
  /* If we have two of the same op, and the opcode is & |, min, or max,
     we can eliminate one of them.
     If we have two of the same op, and the opcode is ^, we can
     eliminate both of them.  */

  if (last && last->op == curr->op)
    {
      switch (opcode)
	{
	case MAX_EXPR:
	case MIN_EXPR:
	case BIT_IOR_EXPR:
	case BIT_AND_EXPR:
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Equivalence: ");
	      print_generic_expr (dump_file, curr->op, 0);
	      fprintf (dump_file, " [&|minmax] ");
	      print_generic_expr (dump_file, last->op, 0);
	      fprintf (dump_file, " -> ");
	      print_generic_stmt (dump_file, last->op, 0);
	    }

	  ops->ordered_remove (i);
	  reassociate_stats.ops_eliminated ++;

	  return true;

	case BIT_XOR_EXPR:
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Equivalence: ");
	      print_generic_expr (dump_file, curr->op, 0);
	      fprintf (dump_file, " ^ ");
	      print_generic_expr (dump_file, last->op, 0);
	      fprintf (dump_file, " -> nothing\n");
	    }

	  reassociate_stats.ops_eliminated += 2;

	  if (ops->length () == 2)
	    {
	      ops->truncate (0);
	      add_to_ops_vec (ops, build_zero_cst (TREE_TYPE (last->op)));
	      *all_done = true;
	    }
	  else
	    {
	      ops->ordered_remove (i-1);
	      ops->ordered_remove (i-1);
	    }

	  return true;

	default:
	  break;
	}
    }
  return false;
}
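/* For instance (an illustrative restatement of the cases handled above):
   with opcode BIT_AND_EXPR the ops list {a, a, b} becomes {a, b}, while
   with BIT_XOR_EXPR the duplicate pair cancels entirely, leaving a zero
   constant if nothing else remains.  */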
static vec<tree> plus_negates;
/* If OPCODE is PLUS_EXPR, CURR->OP is a negate expression or a bitwise not
   expression, look in OPS for a corresponding positive operation to cancel
   it out.  If we find one, remove the other from OPS, replace
   OPS[CURRINDEX] with 0 or -1, respectively, and return true.  Otherwise,
   return false.  */

static bool
eliminate_plus_minus_pair (enum tree_code opcode,
			   vec<operand_entry *> *ops,
			   unsigned int currindex,
			   operand_entry *curr)
{
  tree negateop, notop;
  unsigned int i;
  operand_entry *oe;

  if (opcode != PLUS_EXPR || TREE_CODE (curr->op) != SSA_NAME)
    return false;

  negateop = get_unary_op (curr->op, NEGATE_EXPR);
  notop = get_unary_op (curr->op, BIT_NOT_EXPR);
  if (negateop == NULL_TREE && notop == NULL_TREE)
    return false;

  /* Any non-negated version will have a rank that is one less than
     the current rank.  So once we hit those ranks, if we don't find
     one, we can stop.  */

  for (i = currindex + 1;
       ops->iterate (i, &oe)
       && oe->rank >= curr->rank - 1 ;
       i++)
    {
      if (oe->op == negateop)
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Equivalence: ");
	      print_generic_expr (dump_file, negateop, 0);
	      fprintf (dump_file, " + -");
	      print_generic_expr (dump_file, oe->op, 0);
	      fprintf (dump_file, " -> 0\n");
	    }

	  ops->ordered_remove (i);
	  add_to_ops_vec (ops, build_zero_cst (TREE_TYPE (oe->op)));
	  ops->ordered_remove (currindex);
	  reassociate_stats.ops_eliminated ++;

	  return true;
	}
      else if (oe->op == notop)
	{
	  tree op_type = TREE_TYPE (oe->op);

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Equivalence: ");
	      print_generic_expr (dump_file, notop, 0);
	      fprintf (dump_file, " + ~");
	      print_generic_expr (dump_file, oe->op, 0);
	      fprintf (dump_file, " -> -1\n");
	    }

	  ops->ordered_remove (i);
	  add_to_ops_vec (ops, build_int_cst_type (op_type, -1));
	  ops->ordered_remove (currindex);
	  reassociate_stats.ops_eliminated ++;

	  return true;
	}
    }

  /* CURR->OP is a negate expr in a plus expr: save it for later
     inspection in repropagate_negates().  */
  if (negateop != NULL_TREE)
    plus_negates.safe_push (curr->op);

  return false;
}
/* If OPCODE is BIT_IOR_EXPR or BIT_AND_EXPR and CURR->OP is really a
   bitwise not expression, look in OPS for a corresponding operand to
   cancel it out.  If we find one, remove the other from OPS, replace
   OPS[CURRINDEX] with 0 or -1, respectively, and return true.  Otherwise,
   return false.  */

static bool
eliminate_not_pairs (enum tree_code opcode,
		     vec<operand_entry *> *ops,
		     unsigned int currindex,
		     operand_entry *curr)
{
  tree notop;
  unsigned int i;
  operand_entry *oe;

  if ((opcode != BIT_IOR_EXPR && opcode != BIT_AND_EXPR)
      || TREE_CODE (curr->op) != SSA_NAME)
    return false;

  notop = get_unary_op (curr->op, BIT_NOT_EXPR);
  if (notop == NULL_TREE)
    return false;

  /* Any non-not version will have a rank that is one less than
     the current rank.  So once we hit those ranks, if we don't find
     one, we can stop.  */

  for (i = currindex + 1;
       ops->iterate (i, &oe)
       && oe->rank >= curr->rank - 1;
       i++)
    {
      if (oe->op == notop)
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Equivalence: ");
	      print_generic_expr (dump_file, notop, 0);
	      if (opcode == BIT_AND_EXPR)
		fprintf (dump_file, " & ~");
	      else if (opcode == BIT_IOR_EXPR)
		fprintf (dump_file, " | ~");
	      print_generic_expr (dump_file, oe->op, 0);
	      if (opcode == BIT_AND_EXPR)
		fprintf (dump_file, " -> 0\n");
	      else if (opcode == BIT_IOR_EXPR)
		fprintf (dump_file, " -> -1\n");
	    }

	  if (opcode == BIT_AND_EXPR)
	    oe->op = build_zero_cst (TREE_TYPE (oe->op));
	  else if (opcode == BIT_IOR_EXPR)
	    oe->op = build_all_ones_cst (TREE_TYPE (oe->op));

	  reassociate_stats.ops_eliminated += ops->length () - 1;
	  ops->truncate (0);
	  ops->quick_push (oe);
	  return true;
	}
    }

  return false;
}
/* Use constant value that may be present in OPS to try to eliminate
   operands.  Note that this function is only really used when we've
   eliminated ops for other reasons, or merged constants.  Across
   single statements, fold already does all of this, plus more.  There
   is little point in duplicating logic, so I've only included the
   identities that I could ever construct testcases to trigger.  */

static void
eliminate_using_constants (enum tree_code opcode,
			   vec<operand_entry *> *ops)
{
  operand_entry *oelast = ops->last ();
  tree type = TREE_TYPE (oelast->op);

  if (oelast->rank == 0
      && (INTEGRAL_TYPE_P (type) || FLOAT_TYPE_P (type)))
    {
      switch (opcode)
	{
	case BIT_AND_EXPR:
	  if (integer_zerop (oelast->op))
	    {
	      if (ops->length () != 1)
		{
		  if (dump_file && (dump_flags & TDF_DETAILS))
		    fprintf (dump_file, "Found & 0, removing all other ops\n");

		  reassociate_stats.ops_eliminated += ops->length () - 1;

		  ops->truncate (0);
		  ops->quick_push (oelast);
		  return;
		}
	    }
	  else if (integer_all_onesp (oelast->op))
	    {
	      if (ops->length () != 1)
		{
		  if (dump_file && (dump_flags & TDF_DETAILS))
		    fprintf (dump_file, "Found & -1, removing\n");
		  ops->pop ();
		  reassociate_stats.ops_eliminated++;
		}
	    }
	  break;
	case BIT_IOR_EXPR:
	  if (integer_all_onesp (oelast->op))
	    {
	      if (ops->length () != 1)
		{
		  if (dump_file && (dump_flags & TDF_DETAILS))
		    fprintf (dump_file, "Found | -1, removing all other ops\n");

		  reassociate_stats.ops_eliminated += ops->length () - 1;

		  ops->truncate (0);
		  ops->quick_push (oelast);
		  return;
		}
	    }
	  else if (integer_zerop (oelast->op))
	    {
	      if (ops->length () != 1)
		{
		  if (dump_file && (dump_flags & TDF_DETAILS))
		    fprintf (dump_file, "Found | 0, removing\n");
		  ops->pop ();
		  reassociate_stats.ops_eliminated++;
		}
	    }
	  break;
	case MULT_EXPR:
	  if (integer_zerop (oelast->op)
	      || (FLOAT_TYPE_P (type)
		  && !HONOR_NANS (type)
		  && !HONOR_SIGNED_ZEROS (type)
		  && real_zerop (oelast->op)))
	    {
	      if (ops->length () != 1)
		{
		  if (dump_file && (dump_flags & TDF_DETAILS))
		    fprintf (dump_file, "Found * 0, removing all other ops\n");

		  reassociate_stats.ops_eliminated += ops->length () - 1;
		  ops->truncate (0);
		  ops->quick_push (oelast);
		  return;
		}
	    }
	  else if (integer_onep (oelast->op)
		   || (FLOAT_TYPE_P (type)
		       && !HONOR_SNANS (type)
		       && real_onep (oelast->op)))
	    {
	      if (ops->length () != 1)
		{
		  if (dump_file && (dump_flags & TDF_DETAILS))
		    fprintf (dump_file, "Found * 1, removing\n");
		  ops->pop ();
		  reassociate_stats.ops_eliminated++;
		  return;
		}
	    }
	  break;
	case BIT_XOR_EXPR:
	case PLUS_EXPR:
	case MINUS_EXPR:
	  if (integer_zerop (oelast->op)
	      || (FLOAT_TYPE_P (type)
		  && (opcode == PLUS_EXPR || opcode == MINUS_EXPR)
		  && fold_real_zero_addition_p (type, oelast->op,
						opcode == MINUS_EXPR)))
	    {
	      if (ops->length () != 1)
		{
		  if (dump_file && (dump_flags & TDF_DETAILS))
		    fprintf (dump_file, "Found [|^+] 0, removing\n");
		  ops->pop ();
		  reassociate_stats.ops_eliminated++;
		  return;
		}
	    }
	  break;
	default:
	  break;
	}
    }
}
static void linearize_expr_tree (vec<operand_entry *> *, gimple *,
				 bool, bool);

/* Structure for tracking and counting operands.  */
struct oecount {
  int cnt;
  int id;
  enum tree_code oecode;
  tree op;
};


/* The heap for the oecount hashtable and the sorted list of operands.  */
static vec<oecount> cvec;
/* Oecount hashtable helpers.  */

struct oecount_hasher : int_hash <int, 0, 1>
{
  static inline hashval_t hash (int);
  static inline bool equal (int, int);
};

/* Hash function for oecount.  */

inline hashval_t
oecount_hasher::hash (int p)
{
  const oecount *c = &cvec[p - 42];
  return htab_hash_pointer (c->op) ^ (hashval_t)c->oecode;
}

/* Comparison function for oecount.  */

inline bool
oecount_hasher::equal (int p1, int p2)
{
  const oecount *c1 = &cvec[p1 - 42];
  const oecount *c2 = &cvec[p2 - 42];
  return (c1->oecode == c2->oecode
	  && c1->op == c2->op);
}
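/* Note on the magic numbers above: int_hash <int, 0, 1> reserves 0 as the
   empty slot value and 1 as the deleted value, so hash table slots store a
   cvec index biased by 42 (the first element is inserted as
   cvec.length () + 41 == 42) to stay clear of both reserved values.  */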
/* Comparison function for qsort sorting oecount elements by count.  */

static int
oecount_cmp (const void *p1, const void *p2)
{
  const oecount *c1 = (const oecount *)p1;
  const oecount *c2 = (const oecount *)p2;
  if (c1->cnt != c2->cnt)
    return c1->cnt - c2->cnt;
  else
    /* If counts are identical, use unique IDs to stabilize qsort.  */
    return c1->id - c2->id;
}
/* Return TRUE iff STMT represents a builtin call that raises OP
   to some exponent.  */

static bool
stmt_is_power_of_op (gimple *stmt, tree op)
{
  if (!is_gimple_call (stmt))
    return false;

  switch (gimple_call_combined_fn (stmt))
    {
    CASE_CFN_POW:
    CASE_CFN_POWI:
      return (operand_equal_p (gimple_call_arg (stmt, 0), op, 0));

    default:
      return false;
    }
}
/* Given STMT which is a __builtin_pow* call, decrement its exponent
   in place and return the result.  Assumes that stmt_is_power_of_op
   was previously called for STMT and returned TRUE.  */

static HOST_WIDE_INT
decrement_power (gimple *stmt)
{
  REAL_VALUE_TYPE c, cint;
  HOST_WIDE_INT power;
  tree arg1;

  switch (gimple_call_combined_fn (stmt))
    {
    CASE_CFN_POW:
      arg1 = gimple_call_arg (stmt, 1);
      c = TREE_REAL_CST (arg1);
      power = real_to_integer (&c) - 1;
      real_from_integer (&cint, VOIDmode, power, SIGNED);
      gimple_call_set_arg (stmt, 1, build_real (TREE_TYPE (arg1), cint));
      return power;

    CASE_CFN_POWI:
      arg1 = gimple_call_arg (stmt, 1);
      power = TREE_INT_CST_LOW (arg1) - 1;
      gimple_call_set_arg (stmt, 1, build_int_cst (TREE_TYPE (arg1), power));
      return power;

    default:
      gcc_unreachable ();
    }
}
/* Find the single immediate use of STMT's LHS, and replace it
   with OP.  Remove STMT.  If STMT's LHS is the same as *DEF,
   replace *DEF with OP as well.  */

static void
propagate_op_to_single_use (tree op, gimple *stmt, tree *def)
{
  tree lhs;
  gimple *use_stmt;
  use_operand_p use;
  gimple_stmt_iterator gsi;

  if (is_gimple_call (stmt))
    lhs = gimple_call_lhs (stmt);
  else
    lhs = gimple_assign_lhs (stmt);

  gcc_assert (has_single_use (lhs));
  single_imm_use (lhs, &use, &use_stmt);
  if (lhs == *def)
    *def = op;
  SET_USE (use, op);
  if (TREE_CODE (op) != SSA_NAME)
    update_stmt (use_stmt);
  gsi = gsi_for_stmt (stmt);
  unlink_stmt_vdef (stmt);
  reassoc_remove_stmt (&gsi);
  release_defs (stmt);
}
/* Walks the linear chain with result *DEF searching for an operation
   with operand OP and code OPCODE removing that from the chain.  *DEF
   is updated if there is only one operand but no operation left.  */

static void
zero_one_operation (tree *def, enum tree_code opcode, tree op)
{
  gimple *stmt = SSA_NAME_DEF_STMT (*def);

  do
    {
      tree name;

      if (opcode == MULT_EXPR
	  && stmt_is_power_of_op (stmt, op))
	{
	  if (decrement_power (stmt) == 1)
	    propagate_op_to_single_use (op, stmt, def);
	  return;
	}

      name = gimple_assign_rhs1 (stmt);

      /* If this is the operation we look for and one of the operands
	 is ours simply propagate the other operand into the stmts
	 single use.  */
      if (gimple_assign_rhs_code (stmt) == opcode
	  && (name == op
	      || gimple_assign_rhs2 (stmt) == op))
	{
	  if (name == op)
	    name = gimple_assign_rhs2 (stmt);
	  propagate_op_to_single_use (name, stmt, def);
	  return;
	}

      /* We might have a multiply of two __builtin_pow* calls, and
	 the operand might be hiding in the rightmost one.  */
      if (opcode == MULT_EXPR
	  && gimple_assign_rhs_code (stmt) == opcode
	  && TREE_CODE (gimple_assign_rhs2 (stmt)) == SSA_NAME
	  && has_single_use (gimple_assign_rhs2 (stmt)))
	{
	  gimple *stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs2 (stmt));
	  if (stmt_is_power_of_op (stmt2, op))
	    {
	      if (decrement_power (stmt2) == 1)
		propagate_op_to_single_use (op, stmt2, def);
	      return;
	    }
	}

      /* Continue walking the chain.  */
      gcc_assert (name != op
		  && TREE_CODE (name) == SSA_NAME);
      stmt = SSA_NAME_DEF_STMT (name);
    }
  while (1);
}
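/* For example (illustrative only): with *DEF naming t2 in the chain
   t1 = a * x;  t2 = t1 * b;  calling zero_one_operation (&t2, MULT_EXPR, x)
   finds the multiplication by x at t1 and propagates a into t1's single
   use, leaving the chain computing a * b.  */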
/* Returns true if statement S1 dominates statement S2.  Like
   stmt_dominates_stmt_p, but uses stmt UIDs to optimize.  */

static bool
reassoc_stmt_dominates_stmt_p (gimple *s1, gimple *s2)
{
  basic_block bb1 = gimple_bb (s1), bb2 = gimple_bb (s2);

  /* If bb1 is NULL, it should be a GIMPLE_NOP def stmt of an (D)
     SSA_NAME.  Assume it lives at the beginning of function and
     thus dominates everything.  */
  if (!bb1 || s1 == s2)
    return true;

  /* If bb2 is NULL, it doesn't dominate any stmt with a bb.  */
  if (!bb2)
    return false;

  if (bb1 == bb2)
    {
      /* PHIs in the same basic block are assumed to be
	 executed all in parallel, if only one stmt is a PHI,
	 it dominates the other stmt in the same basic block.  */
      if (gimple_code (s1) == GIMPLE_PHI)
	return true;

      if (gimple_code (s2) == GIMPLE_PHI)
	return false;

      gcc_assert (gimple_uid (s1) && gimple_uid (s2));

      if (gimple_uid (s1) < gimple_uid (s2))
	return true;

      if (gimple_uid (s1) > gimple_uid (s2))
	return false;

      gimple_stmt_iterator gsi = gsi_for_stmt (s1);
      unsigned int uid = gimple_uid (s1);
      for (gsi_next (&gsi); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple *s = gsi_stmt (gsi);
	  if (gimple_uid (s) != uid)
	    break;
	  if (s == s2)
	    return true;
	}

      return false;
    }

  return dominated_by_p (CDI_DOMINATORS, bb2, bb1);
}
/* Insert STMT after INSERT_POINT.  */

static void
insert_stmt_after (gimple *stmt, gimple *insert_point)
{
  gimple_stmt_iterator gsi;
  basic_block bb;

  if (gimple_code (insert_point) == GIMPLE_PHI)
    bb = gimple_bb (insert_point);
  else if (!stmt_ends_bb_p (insert_point))
    {
      gsi = gsi_for_stmt (insert_point);
      gimple_set_uid (stmt, gimple_uid (insert_point));
      gsi_insert_after (&gsi, stmt, GSI_NEW_STMT);
      return;
    }
  else
    /* We assume INSERT_POINT is a SSA_NAME_DEF_STMT of some SSA_NAME,
       thus if it must end a basic block, it should be a call that can
       throw, or some assignment that can throw.  If it throws, the LHS
       of it will not be initialized though, so only valid places using
       the SSA_NAME should be dominated by the fallthru edge.  */
    bb = find_fallthru_edge (gimple_bb (insert_point)->succs)->dest;
  gsi = gsi_after_labels (bb);
  if (gsi_end_p (gsi))
    {
      gimple_stmt_iterator gsi2 = gsi_last_bb (bb);
      gimple_set_uid (stmt,
		      gsi_end_p (gsi2) ? 1 : gimple_uid (gsi_stmt (gsi2)));
    }
  else
    gimple_set_uid (stmt, gimple_uid (gsi_stmt (gsi)));
  gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
}
/* Builds one statement performing OP1 OPCODE OP2, creating a new SSA name
   for the result.  Places the statement after the definition of either
   OP1 or OP2.  Returns the new statement.  */

static gimple *
build_and_add_sum (tree type, tree op1, tree op2, enum tree_code opcode)
{
  gimple *op1def = NULL, *op2def = NULL;
  gimple_stmt_iterator gsi;
  tree op;
  gimple *sum;

  /* Create the addition statement.  */
  op = make_ssa_name (type);
  sum = gimple_build_assign (op, opcode, op1, op2);

  /* Find an insertion place and insert.  */
  if (TREE_CODE (op1) == SSA_NAME)
    op1def = SSA_NAME_DEF_STMT (op1);
  if (TREE_CODE (op2) == SSA_NAME)
    op2def = SSA_NAME_DEF_STMT (op2);
  if ((!op1def || gimple_nop_p (op1def))
      && (!op2def || gimple_nop_p (op2def)))
    {
      gsi = gsi_after_labels (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
      if (gsi_end_p (gsi))
	{
	  gimple_stmt_iterator gsi2
	    = gsi_last_bb (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
	  gimple_set_uid (sum,
			  gsi_end_p (gsi2) ? 1 : gimple_uid (gsi_stmt (gsi2)));
	}
      else
	gimple_set_uid (sum, gimple_uid (gsi_stmt (gsi)));
      gsi_insert_before (&gsi, sum, GSI_NEW_STMT);
    }
  else
    {
      gimple *insert_point;
      if ((!op1def || gimple_nop_p (op1def))
	  || (op2def && !gimple_nop_p (op2def)
	      && reassoc_stmt_dominates_stmt_p (op1def, op2def)))
	insert_point = op2def;
      else
	insert_point = op1def;
      insert_stmt_after (sum, insert_point);
    }
  update_stmt (sum);

  return sum;
}
/* Perform un-distribution of divisions and multiplications.
   A * X + B * X is transformed into (A + B) * X and A / X + B / X
   to (A + B) / X for real X.

   The algorithm is organized as follows.

    - First we walk the addition chain *OPS looking for summands that
      are defined by a multiplication or a real division.  This results
      in the candidates bitmap with relevant indices into *OPS.

    - Second we build the chains of multiplications or divisions for
      these candidates, counting the number of occurrences of (operand, code)
      pairs in all of the candidates chains.

    - Third we sort the (operand, code) pairs by number of occurrence and
      process them starting with the pair with the most uses.

      * For each such pair we walk the candidates again to build a
	second candidate bitmap noting all multiplication/division chains
	that have at least one occurrence of (operand, code).

      * We build an alternate addition chain only covering these
	candidates with one (operand, code) operation removed from their
	multiplication/division chain.

      * The first candidate gets replaced by the alternate addition chain
	multiplied/divided by the operand.

      * All candidate chains get disabled for further processing and
	processing of (operand, code) pairs continues.

   The alternate addition chains built are re-processed by the main
   reassociation algorithm which allows optimizing a * x * y + b * y * x
   to (a + b) * x * y in one invocation of the reassociation pass.  */
static bool
undistribute_ops_list (enum tree_code opcode,
		       vec<operand_entry *> *ops, struct loop *loop)
{
  unsigned int length = ops->length ();
  operand_entry *oe1;
  unsigned i, j;
  sbitmap candidates, candidates2;
  unsigned nr_candidates, nr_candidates2;
  sbitmap_iterator sbi0;
  vec<operand_entry *> *subops;
  bool changed = false;
  int next_oecount_id = 0;

  if (length <= 1
      || opcode != PLUS_EXPR)
    return false;

  /* Build a list of candidates to process.  */
  candidates = sbitmap_alloc (length);
  bitmap_clear (candidates);
  nr_candidates = 0;
  FOR_EACH_VEC_ELT (*ops, i, oe1)
    {
      enum tree_code dcode;
      gimple *oe1def;

      if (TREE_CODE (oe1->op) != SSA_NAME)
	continue;
      oe1def = SSA_NAME_DEF_STMT (oe1->op);
      if (!is_gimple_assign (oe1def))
	continue;
      dcode = gimple_assign_rhs_code (oe1def);
      if ((dcode != MULT_EXPR
	   && dcode != RDIV_EXPR)
	  || !is_reassociable_op (oe1def, dcode, loop))
	continue;

      bitmap_set_bit (candidates, i);
      nr_candidates++;
    }

  if (nr_candidates < 2)
    {
      sbitmap_free (candidates);
      return false;
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "searching for un-distribute opportunities ");
      print_generic_expr (dump_file,
	(*ops)[bitmap_first_set_bit (candidates)]->op, 0);
      fprintf (dump_file, " %d\n", nr_candidates);
    }

  /* Build linearized sub-operand lists and the counting table.  */
  cvec.create (0);

  hash_table<oecount_hasher> ctable (15);

  /* ??? Macro arguments cannot have multi-argument template types in
     them.  This typedef is needed to workaround that limitation.  */
  typedef vec<operand_entry *> vec_operand_entry_t_heap;
  subops = XCNEWVEC (vec_operand_entry_t_heap, ops->length ());
  EXECUTE_IF_SET_IN_BITMAP (candidates, 0, i, sbi0)
    {
      gimple *oedef;
      enum tree_code oecode;
      unsigned j;

      oedef = SSA_NAME_DEF_STMT ((*ops)[i]->op);
      oecode = gimple_assign_rhs_code (oedef);
      linearize_expr_tree (&subops[i], oedef,
			   associative_tree_code (oecode), false);

      FOR_EACH_VEC_ELT (subops[i], j, oe1)
	{
	  oecount c;
	  int *slot;
	  int idx;
	  c.oecode = oecode;
	  c.cnt = 1;
	  c.id = next_oecount_id++;
	  c.op = oe1->op;
	  cvec.safe_push (c);
	  idx = cvec.length () + 41;
	  slot = ctable.find_slot (idx, INSERT);
	  if (!*slot)
	    *slot = idx;
	  else
	    {
	      cvec.pop ();
	      cvec[*slot - 42].cnt++;
	    }
	}
    }

  /* Sort the counting table.  */
  cvec.qsort (oecount_cmp);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      oecount *c;
      fprintf (dump_file, "Candidates:\n");
      FOR_EACH_VEC_ELT (cvec, j, c)
	{
	  fprintf (dump_file, "  %u %s: ", c->cnt,
		   c->oecode == MULT_EXPR
		   ? "*" : c->oecode == RDIV_EXPR ? "/" : "?");
	  print_generic_expr (dump_file, c->op, 0);
	  fprintf (dump_file, "\n");
	}
    }

  /* Process the (operand, code) pairs in order of most occurrence.  */
  candidates2 = sbitmap_alloc (length);
  while (!cvec.is_empty ())
    {
      oecount *c = &cvec.last ();
      if (c->cnt < 2)
	break;

      /* Now collect the operands in the outer chain that contain
	 the common operand in their inner chain.  */
      bitmap_clear (candidates2);
      nr_candidates2 = 0;
      EXECUTE_IF_SET_IN_BITMAP (candidates, 0, i, sbi0)
	{
	  gimple *oedef;
	  enum tree_code oecode;
	  unsigned j;
	  tree op = (*ops)[i]->op;

	  /* If we undistributed in this chain already this may be
	     a constant.  */
	  if (TREE_CODE (op) != SSA_NAME)
	    continue;

	  oedef = SSA_NAME_DEF_STMT (op);
	  oecode = gimple_assign_rhs_code (oedef);
	  if (oecode != c->oecode)
	    continue;

	  FOR_EACH_VEC_ELT (subops[i], j, oe1)
	    {
	      if (oe1->op == c->op)
		{
		  bitmap_set_bit (candidates2, i);
		  ++nr_candidates2;
		  break;
		}
	    }
	}

      if (nr_candidates2 >= 2)
	{
	  operand_entry *oe1, *oe2;
	  gimple *prod;
	  int first = bitmap_first_set_bit (candidates2);

	  /* Build the new addition chain.  */
	  oe1 = (*ops)[first];
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Building (");
	      print_generic_expr (dump_file, oe1->op, 0);
	    }
	  zero_one_operation (&oe1->op, c->oecode, c->op);
	  EXECUTE_IF_SET_IN_BITMAP (candidates2, first+1, i, sbi0)
	    {
	      gimple *sum;
	      oe2 = (*ops)[i];
	      if (dump_file && (dump_flags & TDF_DETAILS))
		{
		  fprintf (dump_file, " + ");
		  print_generic_expr (dump_file, oe2->op, 0);
		}
	      zero_one_operation (&oe2->op, c->oecode, c->op);
	      sum = build_and_add_sum (TREE_TYPE (oe1->op),
				       oe1->op, oe2->op, opcode);
	      oe2->op = build_zero_cst (TREE_TYPE (oe2->op));
	      oe2->rank = 0;
	      oe1->op = gimple_get_lhs (sum);
	    }

	  /* Apply the multiplication/division.  */
	  prod = build_and_add_sum (TREE_TYPE (oe1->op),
				    oe1->op, c->op, c->oecode);
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, ") %s ", c->oecode == MULT_EXPR ? "*" : "/");
	      print_generic_expr (dump_file, c->op, 0);
	      fprintf (dump_file, "\n");
	    }

	  /* Record it in the addition chain and disable further
	     undistribution with this op.  */
	  oe1->op = gimple_assign_lhs (prod);
	  oe1->rank = get_rank (oe1->op);
	  subops[first].release ();

	  changed = true;
	}

      cvec.pop ();
    }

  for (i = 0; i < ops->length (); ++i)
    subops[i].release ();
  free (subops);
  cvec.release ();
  sbitmap_free (candidates);
  sbitmap_free (candidates2);

  return changed;
}
/* If OPCODE is BIT_IOR_EXPR or BIT_AND_EXPR and CURR is a comparison
   expression, examine the other OPS to see if any of them are comparisons
   of the same values, which we may be able to combine or eliminate.
   For example, we can rewrite (a < b) | (a == b) as (a <= b).  */
static bool
eliminate_redundant_comparison (enum tree_code opcode,
				vec<operand_entry *> *ops,
				unsigned int currindex,
				operand_entry *curr)
{
  tree op1, op2;
  enum tree_code lcode, rcode;
  gimple *def1, *def2;
  unsigned int i;
  operand_entry *oe;

  if (opcode != BIT_IOR_EXPR && opcode != BIT_AND_EXPR)
    return false;

  /* Check that CURR is a comparison.  */
  if (TREE_CODE (curr->op) != SSA_NAME)
    return false;
  def1 = SSA_NAME_DEF_STMT (curr->op);
  if (!is_gimple_assign (def1))
    return false;
  lcode = gimple_assign_rhs_code (def1);
  if (TREE_CODE_CLASS (lcode) != tcc_comparison)
    return false;
  op1 = gimple_assign_rhs1 (def1);
  op2 = gimple_assign_rhs2 (def1);

  /* Now look for a similar comparison in the remaining OPS.  */
  for (i = currindex + 1; ops->iterate (i, &oe); i++)
    {
      tree t;

      if (TREE_CODE (oe->op) != SSA_NAME)
	continue;
      def2 = SSA_NAME_DEF_STMT (oe->op);
      if (!is_gimple_assign (def2))
	continue;
      rcode = gimple_assign_rhs_code (def2);
      if (TREE_CODE_CLASS (rcode) != tcc_comparison)
	continue;

      /* If we got here, we have a match.  See if we can combine the
	 two comparisons.  */
      if (opcode == BIT_IOR_EXPR)
	t = maybe_fold_or_comparisons (lcode, op1, op2,
				       rcode, gimple_assign_rhs1 (def2),
				       gimple_assign_rhs2 (def2));
      else
	t = maybe_fold_and_comparisons (lcode, op1, op2,
					rcode, gimple_assign_rhs1 (def2),
					gimple_assign_rhs2 (def2));
      if (!t)
	continue;

      /* maybe_fold_and_comparisons and maybe_fold_or_comparisons
	 always give us a boolean_type_node value back.  If the original
	 BIT_AND_EXPR or BIT_IOR_EXPR was of a wider integer type,
	 we need to convert.  */
      if (!useless_type_conversion_p (TREE_TYPE (curr->op), TREE_TYPE (t)))
	t = fold_convert (TREE_TYPE (curr->op), t);

      if (TREE_CODE (t) != INTEGER_CST
	  && !operand_equal_p (t, curr->op, 0))
	{
	  enum tree_code subcode;
	  tree newop1, newop2;
	  if (!COMPARISON_CLASS_P (t))
	    continue;
	  extract_ops_from_tree (t, &subcode, &newop1, &newop2);
	  STRIP_USELESS_TYPE_CONVERSION (newop1);
	  STRIP_USELESS_TYPE_CONVERSION (newop2);
	  if (!is_gimple_val (newop1) || !is_gimple_val (newop2))
	    continue;
	}

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Equivalence: ");
	  print_generic_expr (dump_file, curr->op, 0);
	  fprintf (dump_file, " %s ", op_symbol_code (opcode));
	  print_generic_expr (dump_file, oe->op, 0);
	  fprintf (dump_file, " -> ");
	  print_generic_expr (dump_file, t, 0);
	  fprintf (dump_file, "\n");
	}

      /* Now we can delete oe, as it has been subsumed by the new combined
	 expression t.  */
      ops->ordered_remove (i);
      reassociate_stats.ops_eliminated ++;

      /* If t is the same as curr->op, we're done.  Otherwise we must
	 replace curr->op with t.  Special case is if we got a constant
	 back, in which case we add it to the end instead of in place of
	 the current entry.  */
      if (TREE_CODE (t) == INTEGER_CST)
	{
	  ops->ordered_remove (currindex);
	  add_to_ops_vec (ops, t);
	}
      else if (!operand_equal_p (t, curr->op, 0))
	{
	  gimple *sum;
	  enum tree_code subcode;
	  tree newop1;
	  tree newop2;
	  gcc_assert (COMPARISON_CLASS_P (t));
	  extract_ops_from_tree (t, &subcode, &newop1, &newop2);
	  STRIP_USELESS_TYPE_CONVERSION (newop1);
	  STRIP_USELESS_TYPE_CONVERSION (newop2);
	  gcc_checking_assert (is_gimple_val (newop1)
			       && is_gimple_val (newop2));
	  sum = build_and_add_sum (TREE_TYPE (t), newop1, newop2, subcode);
	  curr->op = gimple_get_lhs (sum);
	}
      return true;
    }

  return false;
}
/* Perform various identities and other optimizations on the list of
   operand entries, stored in OPS.  The tree code for the binary
   operation between all the operands is OPCODE.  */

static void
optimize_ops_list (enum tree_code opcode,
		   vec<operand_entry *> *ops)
{
  unsigned int length = ops->length ();
  unsigned int i;
  operand_entry *oe;
  operand_entry *oelast = NULL;
  bool iterate = false;

  if (length == 1)
    return;

  oelast = ops->last ();

  /* If the last two are constants, pop the constants off, merge them
     and try the next two.  */
  if (oelast->rank == 0 && is_gimple_min_invariant (oelast->op))
    {
      operand_entry *oelm1 = (*ops)[length - 2];

      if (oelm1->rank == 0
	  && is_gimple_min_invariant (oelm1->op)
	  && useless_type_conversion_p (TREE_TYPE (oelm1->op),
					TREE_TYPE (oelast->op)))
	{
	  tree folded = fold_binary (opcode, TREE_TYPE (oelm1->op),
				     oelm1->op, oelast->op);

	  if (folded && is_gimple_min_invariant (folded))
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, "Merging constants\n");

	      ops->pop ();
	      ops->pop ();

	      add_to_ops_vec (ops, folded);
	      reassociate_stats.constants_eliminated++;

	      optimize_ops_list (opcode, ops);
	      return;
	    }
	}
    }

  eliminate_using_constants (opcode, ops);
  oelast = NULL;

  for (i = 0; ops->iterate (i, &oe);)
    {
      bool done = false;

      if (eliminate_not_pairs (opcode, ops, i, oe))
	return;
      if (eliminate_duplicate_pair (opcode, ops, &done, i, oe, oelast)
	  || (!done && eliminate_plus_minus_pair (opcode, ops, i, oe))
	  || (!done && eliminate_redundant_comparison (opcode, ops, i, oe)))
	{
	  if (done)
	    return;
	  iterate = true;
	  oelast = NULL;
	  continue;
	}
      oelast = oe;
      i++;
    }

  length = ops->length ();
  oelast = ops->last ();

  if (iterate)
    optimize_ops_list (opcode, ops);
}
/* The following functions are subroutines to optimize_range_tests and allow
   it to try to change a logical combination of comparisons into a range
   test.

   For example, both
	X == 2 || X == 5 || X == 3 || X == 4
   and
	X >= 2 && X <= 5
   are converted to
	(unsigned) (X - 2) <= 3

   For more information see comments above fold_test_range in fold-const.c,
   this implementation is for GIMPLE.  */
struct range_entry
{
  tree exp;
  tree low;
  tree high;
  bool in_p;
  bool strict_overflow_p;
  unsigned int idx, next;
};
/* This is similar to make_range in fold-const.c, but on top of
   GIMPLE instead of trees.  If EXP is non-NULL, it should be
   an SSA_NAME and STMT argument is ignored, otherwise STMT
   argument should be a GIMPLE_COND.  */

static void
init_range_entry (struct range_entry *r, tree exp, gimple *stmt)
{
  int in_p;
  tree low, high;
  bool is_bool, strict_overflow_p;

  r->exp = NULL_TREE;
  r->in_p = false;
  r->strict_overflow_p = false;
  r->low = NULL_TREE;
  r->high = NULL_TREE;
  if (exp != NULL_TREE
      && (TREE_CODE (exp) != SSA_NAME || !INTEGRAL_TYPE_P (TREE_TYPE (exp))))
    return;

  /* Start with simply saying "EXP != 0" and then look at the code of EXP
     and see if we can refine the range.  Some of the cases below may not
     happen, but it doesn't seem worth worrying about this.  We "continue"
     the outer loop when we've changed something; otherwise we "break"
     the switch, which will "break" the while.  */
  low = exp ? build_int_cst (TREE_TYPE (exp), 0) : boolean_false_node;
  high = low;
  in_p = 0;
  strict_overflow_p = false;
  is_bool = false;
  if (exp == NULL_TREE)
    is_bool = true;
  else if (TYPE_PRECISION (TREE_TYPE (exp)) == 1)
    {
      if (TYPE_UNSIGNED (TREE_TYPE (exp)))
	is_bool = true;
      else
	return;
    }
  else if (TREE_CODE (TREE_TYPE (exp)) == BOOLEAN_TYPE)
    is_bool = true;

  while (1)
    {
      enum tree_code code;
      tree arg0, arg1, exp_type;
      tree nexp;
      location_t loc;

      if (exp != NULL_TREE)
	{
	  if (TREE_CODE (exp) != SSA_NAME
	      || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (exp))
	    break;

	  stmt = SSA_NAME_DEF_STMT (exp);
	  if (!is_gimple_assign (stmt))
	    break;

	  code = gimple_assign_rhs_code (stmt);
	  arg0 = gimple_assign_rhs1 (stmt);
	  arg1 = gimple_assign_rhs2 (stmt);
	  exp_type = TREE_TYPE (exp);
	}
      else
	{
	  code = gimple_cond_code (stmt);
	  arg0 = gimple_cond_lhs (stmt);
	  arg1 = gimple_cond_rhs (stmt);
	  exp_type = boolean_type_node;
	}

      if (TREE_CODE (arg0) != SSA_NAME)
	break;
      loc = gimple_location (stmt);
      switch (code)
	{
	case BIT_NOT_EXPR:
	  if (TREE_CODE (TREE_TYPE (exp)) == BOOLEAN_TYPE
	      /* Ensure the range is either +[-,0], +[0,0],
		 -[-,0], -[0,0] or +[1,-], +[1,1], -[1,-] or
		 -[1,1].  If it is e.g. +[-,-] or -[-,-]
		 or similar expression of unconditional true or
		 false, it should not be negated.  */
	      && ((high && integer_zerop (high))
		  || (low && integer_onep (low))))
	    {
	      in_p = !in_p;
	      exp = arg0;
	      continue;
	    }
	  break;
	case SSA_NAME:
	  exp = arg0;
	  continue;
	CASE_CONVERT:
	  if (is_bool)
	    goto do_default;
	  if (TYPE_PRECISION (TREE_TYPE (arg0)) == 1)
	    {
	      if (TYPE_UNSIGNED (TREE_TYPE (arg0)))
		is_bool = true;
	      else
		return;
	    }
	  else if (TREE_CODE (TREE_TYPE (arg0)) == BOOLEAN_TYPE)
	    is_bool = true;
	  goto do_default;
	case EQ_EXPR:
	case NE_EXPR:
	case LT_EXPR:
	case LE_EXPR:
	case GE_EXPR:
	case GT_EXPR:
	  is_bool = true;
	  /* FALLTHRU */
	default:
	  if (!is_bool)
	    return;
	do_default:
	  nexp = make_range_step (loc, code, arg0, arg1, exp_type,
				  &low, &high, &in_p,
				  &strict_overflow_p);
	  if (nexp != NULL_TREE)
	    {
	      exp = nexp;
	      gcc_assert (TREE_CODE (exp) == SSA_NAME);
	      continue;
	    }
	  break;
	}
      break;
    }

  if (is_bool)
    {
      r->exp = exp;
      r->in_p = !!in_p;
      r->low = low;
      r->high = high;
      r->strict_overflow_p = strict_overflow_p;
    }
}
/* Comparison function for qsort.  Sort entries
   without SSA_NAME exp first, then with SSA_NAMEs sorted
   by increasing SSA_NAME_VERSION, and for the same SSA_NAMEs
   by increasing ->low and if ->low is the same, by increasing
   ->high.  ->low == NULL_TREE means minimum, ->high == NULL_TREE
   maximum.  */

static int
range_entry_cmp (const void *a, const void *b)
{
  const struct range_entry *p = (const struct range_entry *) a;
  const struct range_entry *q = (const struct range_entry *) b;

  if (p->exp != NULL_TREE && TREE_CODE (p->exp) == SSA_NAME)
    {
      if (q->exp != NULL_TREE && TREE_CODE (q->exp) == SSA_NAME)
	{
	  /* Group range_entries for the same SSA_NAME together.  */
	  if (SSA_NAME_VERSION (p->exp) < SSA_NAME_VERSION (q->exp))
	    return -1;
	  else if (SSA_NAME_VERSION (p->exp) > SSA_NAME_VERSION (q->exp))
	    return 1;
	  /* If ->low is different, NULL low goes first, then by
	     ascending low.  */
	  if (p->low != NULL_TREE)
	    {
	      if (q->low != NULL_TREE)
		{
		  tree tem = fold_binary (LT_EXPR, boolean_type_node,
					  p->low, q->low);
		  if (tem && integer_onep (tem))
		    return -1;
		  tem = fold_binary (GT_EXPR, boolean_type_node,
				     p->low, q->low);
		  if (tem && integer_onep (tem))
		    return 1;
		}
	      else
		return 1;
	    }
	  else if (q->low != NULL_TREE)
	    return -1;
	  /* If ->high is different, NULL high goes last, before that by
	     ascending high.  */
	  if (p->high != NULL_TREE)
	    {
	      if (q->high != NULL_TREE)
		{
		  tree tem = fold_binary (LT_EXPR, boolean_type_node,
					  p->high, q->high);
		  if (tem && integer_onep (tem))
		    return -1;
		  tem = fold_binary (GT_EXPR, boolean_type_node,
				     p->high, q->high);
		  if (tem && integer_onep (tem))
		    return 1;
		}
	      else
		return -1;
	    }
	  else if (q->high != NULL_TREE)
	    return 1;
	  /* If both ranges are the same, sort below by ascending idx.  */
	}
      else
	return 1;
    }
  else if (q->exp != NULL_TREE && TREE_CODE (q->exp) == SSA_NAME)
    return -1;

  if (p->idx < q->idx)
    return -1;
  else
    {
      gcc_checking_assert (p->idx > q->idx);
      return 1;
    }
}
/* Helper routine of optimize_range_test.
   [EXP, IN_P, LOW, HIGH, STRICT_OVERFLOW_P] is a merged range for
   RANGE and OTHERRANGE through OTHERRANGE + COUNT - 1 ranges,
   OPCODE and OPS are arguments of optimize_range_tests.  If OTHERRANGE
   is NULL, OTHERRANGEP should not be and then OTHERRANGEP points to
   an array of COUNT pointers to other ranges.  Return
   true if the range merge has been successful.
   If OPCODE is ERROR_MARK, this is called from within
   maybe_optimize_range_tests and is performing inter-bb range optimization.
   In that case, whether an op is BIT_AND_EXPR or BIT_IOR_EXPR is found in
   oe->rank.  */

static bool
update_range_test (struct range_entry *range, struct range_entry *otherrange,
		   struct range_entry **otherrangep,
		   unsigned int count, enum tree_code opcode,
		   vec<operand_entry *> *ops, tree exp, gimple_seq seq,
		   bool in_p, tree low, tree high, bool strict_overflow_p)
{
  operand_entry *oe = (*ops)[range->idx];
  tree op = oe->op;
  gimple *stmt = op ? SSA_NAME_DEF_STMT (op) :
    last_stmt (BASIC_BLOCK_FOR_FN (cfun, oe->id));
  location_t loc = gimple_location (stmt);
  tree optype = op ? TREE_TYPE (op) : boolean_type_node;
  tree tem = build_range_check (loc, optype, unshare_expr (exp),
				in_p, low, high);
  enum warn_strict_overflow_code wc = WARN_STRICT_OVERFLOW_COMPARISON;
  gimple_stmt_iterator gsi;
  unsigned int i;

  if (tem == NULL_TREE)
    return false;

  if (strict_overflow_p && issue_strict_overflow_warning (wc))
    warning_at (loc, OPT_Wstrict_overflow,
		"assuming signed overflow does not occur "
		"when simplifying range test");

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      struct range_entry *r;
      fprintf (dump_file, "Optimizing range tests ");
      print_generic_expr (dump_file, range->exp, 0);
      fprintf (dump_file, " %c[", range->in_p ? '+' : '-');
      print_generic_expr (dump_file, range->low, 0);
      fprintf (dump_file, ", ");
      print_generic_expr (dump_file, range->high, 0);
      fprintf (dump_file, "]");
      for (i = 0; i < count; i++)
	{
	  if (otherrange)
	    r = otherrange + i;
	  else
	    r = otherrangep[i];
	  fprintf (dump_file, " and %c[", r->in_p ? '+' : '-');
	  print_generic_expr (dump_file, r->low, 0);
	  fprintf (dump_file, ", ");
	  print_generic_expr (dump_file, r->high, 0);
	  fprintf (dump_file, "]");
	}
      fprintf (dump_file, "\n into ");
      print_generic_expr (dump_file, tem, 0);
      fprintf (dump_file, "\n");
    }

  if (opcode == BIT_IOR_EXPR
      || (opcode == ERROR_MARK && oe->rank == BIT_IOR_EXPR))
    tem = invert_truthvalue_loc (loc, tem);

  tem = fold_convert_loc (loc, optype, tem);
  gsi = gsi_for_stmt (stmt);
  unsigned int uid = gimple_uid (stmt);
  /* In rare cases range->exp can be equal to lhs of stmt.
     In that case we have to insert after the stmt rather then before
     it.  If stmt is a PHI, insert it at the start of the basic block.  */
  if (op != range->exp)
    {
      gsi_insert_seq_before (&gsi, seq, GSI_SAME_STMT);
      tem = force_gimple_operand_gsi (&gsi, tem, true, NULL_TREE, true,
				      GSI_SAME_STMT);
      gsi_prev (&gsi);
    }
  else if (gimple_code (stmt) != GIMPLE_PHI)
    {
      gsi_insert_seq_after (&gsi, seq, GSI_CONTINUE_LINKING);
      tem = force_gimple_operand_gsi (&gsi, tem, true, NULL_TREE, false,
				      GSI_CONTINUE_LINKING);
    }
  else
    {
      gsi = gsi_after_labels (gimple_bb (stmt));
      if (!gsi_end_p (gsi))
	uid = gimple_uid (gsi_stmt (gsi));
      else
	{
	  gsi = gsi_start_bb (gimple_bb (stmt));
	  uid = 1;
	  while (!gsi_end_p (gsi))
	    {
	      uid = gimple_uid (gsi_stmt (gsi));
	      gsi_next (&gsi);
	    }
	}
      gsi_insert_seq_before (&gsi, seq, GSI_SAME_STMT);
      tem = force_gimple_operand_gsi (&gsi, tem, true, NULL_TREE, true,
				      GSI_SAME_STMT);
      if (gsi_end_p (gsi))
	gsi = gsi_last_bb (gimple_bb (stmt));
      else
	gsi_prev (&gsi);
    }
  for (; !gsi_end_p (gsi); gsi_prev (&gsi))
    if (gimple_uid (gsi_stmt (gsi)))
      break;
    else
      gimple_set_uid (gsi_stmt (gsi), uid);

  oe->op = tem;
  range->exp = exp;
  range->low = low;
  range->high = high;
  range->in_p = in_p;
  range->strict_overflow_p = false;

  for (i = 0; i < count; i++)
    {
      if (otherrange)
	range = otherrange + i;
      else
	range = otherrangep[i];
      oe = (*ops)[range->idx];
      /* Now change all the other range test immediate uses, so that
	 those tests will be optimized away.  */
      if (opcode == ERROR_MARK)
	{
	  if (oe->op)
	    oe->op = build_int_cst (TREE_TYPE (oe->op),
				    oe->rank == BIT_IOR_EXPR ? 0 : 1);
	  else
	    oe->op = (oe->rank == BIT_IOR_EXPR
		      ? boolean_false_node : boolean_true_node);
	}
      else
	oe->op = error_mark_node;
      range->exp = NULL_TREE;
    }
  return true;
}
/* Optimize X == CST1 || X == CST2
   if popcount (CST1 ^ CST2) == 1 into
   (X & ~(CST1 ^ CST2)) == (CST1 & ~(CST1 ^ CST2)).
   Similarly for ranges.  E.g.
   X != 2 && X != 3 && X != 10 && X != 11
   will be transformed by the previous optimization into
   !((X - 2U) <= 1U || (X - 10U) <= 1U)
   and this loop can transform that into
   !(((X & ~8) - 2U) <= 1U).  */

static bool
optimize_range_tests_xor (enum tree_code opcode, tree type,
			  tree lowi, tree lowj, tree highi, tree highj,
			  vec<operand_entry *> *ops,
			  struct range_entry *rangei,
			  struct range_entry *rangej)
{
  tree lowxor, highxor, tem, exp;
  /* Check lowi ^ lowj == highi ^ highj and
     popcount (lowi ^ lowj) == 1.  */
  lowxor = fold_binary (BIT_XOR_EXPR, type, lowi, lowj);
  if (lowxor == NULL_TREE || TREE_CODE (lowxor) != INTEGER_CST)
    return false;
  if (!integer_pow2p (lowxor))
    return false;
  highxor = fold_binary (BIT_XOR_EXPR, type, highi, highj);
  if (!tree_int_cst_equal (lowxor, highxor))
    return false;

  tem = fold_build1 (BIT_NOT_EXPR, type, lowxor);
  exp = fold_build2 (BIT_AND_EXPR, type, rangei->exp, tem);
  lowj = fold_build2 (BIT_AND_EXPR, type, lowi, tem);
  highj = fold_build2 (BIT_AND_EXPR, type, highi, tem);
  if (update_range_test (rangei, rangej, NULL, 1, opcode, ops, exp,
			 NULL, rangei->in_p, lowj, highj,
			 rangei->strict_overflow_p
			 || rangej->strict_overflow_p))
    return true;
  return false;
}
/* Optimize X == CST1 || X == CST2
   if popcount (CST2 - CST1) == 1 into
   ((X - CST1) & ~(CST2 - CST1)) == 0.
   Similarly for ranges.  E.g.
   X == 43 || X == 76 || X == 44 || X == 78 || X == 77 || X == 46
   || X == 75 || X == 45
   will be transformed by the previous optimization into
   (X - 43U) <= 3U || (X - 75U) <= 3U
   and this loop can transform that into
   ((X - 43U) & ~(75U - 43U)) <= 3U.  */

static bool
optimize_range_tests_diff (enum tree_code opcode, tree type,
			   tree lowi, tree lowj, tree highi, tree highj,
			   vec<operand_entry *> *ops,
			   struct range_entry *rangei,
			   struct range_entry *rangej)
{
  tree tem1, tem2, mask;
  /* Check highi - lowi == highj - lowj.  */
  tem1 = fold_binary (MINUS_EXPR, type, highi, lowi);
  if (tem1 == NULL_TREE || TREE_CODE (tem1) != INTEGER_CST)
    return false;
  tem2 = fold_binary (MINUS_EXPR, type, highj, lowj);
  if (!tree_int_cst_equal (tem1, tem2))
    return false;
  /* Check popcount (lowj - lowi) == 1.  */
  tem1 = fold_binary (MINUS_EXPR, type, lowj, lowi);
  if (tem1 == NULL_TREE || TREE_CODE (tem1) != INTEGER_CST)
    return false;
  if (!integer_pow2p (tem1))
    return false;

  type = unsigned_type_for (type);
  tem1 = fold_convert (type, tem1);
  tem2 = fold_convert (type, tem2);
  lowi = fold_convert (type, lowi);
  mask = fold_build1 (BIT_NOT_EXPR, type, tem1);
  tem1 = fold_binary (MINUS_EXPR, type,
		      fold_convert (type, rangei->exp), lowi);
  tem1 = fold_build2 (BIT_AND_EXPR, type, tem1, mask);
  lowj = build_int_cst (type, 0);
  if (update_range_test (rangei, rangej, NULL, 1, opcode, ops, tem1,
			 NULL, rangei->in_p, lowj, tem2,
			 rangei->strict_overflow_p
			 || rangej->strict_overflow_p))
    return true;
  return false;
}
/* Perform the common checks shared by optimize_range_tests_xor and
   optimize_range_tests_diff.
   If OPTIMIZE_XOR is TRUE, call optimize_range_tests_xor.
   Otherwise call optimize_range_tests_diff.  */
static bool
optimize_range_tests_1 (enum tree_code opcode, int first, int length,
			bool optimize_xor, vec<operand_entry *> *ops,
			struct range_entry *ranges)
{
  int i, j;
  bool any_changes = false;
  for (i = first; i < length; i++)
    {
      tree lowi, highi, lowj, highj, type, tem;

      if (ranges[i].exp == NULL_TREE || ranges[i].in_p)
	continue;
      type = TREE_TYPE (ranges[i].exp);
      if (!INTEGRAL_TYPE_P (type))
	continue;
      lowi = ranges[i].low;
      if (lowi == NULL_TREE)
	lowi = TYPE_MIN_VALUE (type);
      highi = ranges[i].high;
      if (highi == NULL_TREE)
	continue;
      for (j = i + 1; j < length && j < i + 64; j++)
	{
	  bool changes;
	  if (ranges[i].exp != ranges[j].exp || ranges[j].in_p)
	    continue;
	  lowj = ranges[j].low;
	  if (lowj == NULL_TREE)
	    continue;
	  highj = ranges[j].high;
	  if (highj == NULL_TREE)
	    highj = TYPE_MAX_VALUE (type);
	  /* Check lowj > highi.  */
	  tem = fold_binary (GT_EXPR, boolean_type_node,
			     lowj, highi);
	  if (tem == NULL_TREE || !integer_onep (tem))
	    continue;
	  if (optimize_xor)
	    changes = optimize_range_tests_xor (opcode, type, lowi, lowj,
						highi, highj, ops,
						ranges + i, ranges + j);
	  else
	    changes = optimize_range_tests_diff (opcode, type, lowi, lowj,
						 highi, highj, ops,
						 ranges + i, ranges + j);
	  if (changes)
	    {
	      any_changes = true;
	      break;
	    }
	}
    }
  return any_changes;
}
/* Helper function of optimize_range_tests_to_bit_test.  Handle a single
   range, EXP, LOW, HIGH, compute bit mask of bits to test and return
   EXP on success, NULL otherwise.  */
static tree
extract_bit_test_mask (tree exp, int prec, tree totallow, tree low, tree high,
		       wide_int *mask, tree *totallowp)
{
  tree tem = int_const_binop (MINUS_EXPR, high, low);
  if (tem == NULL_TREE
      || TREE_CODE (tem) != INTEGER_CST
      || TREE_OVERFLOW (tem)
      || tree_int_cst_sgn (tem) == -1
      || compare_tree_int (tem, prec) != -1)
    return NULL_TREE;

  unsigned HOST_WIDE_INT max = tree_to_uhwi (tem) + 1;
  *mask = wi::shifted_mask (0, max, false, prec);
  if (TREE_CODE (exp) == BIT_AND_EXPR
      && TREE_CODE (TREE_OPERAND (exp, 1)) == INTEGER_CST)
    {
      widest_int msk = wi::to_widest (TREE_OPERAND (exp, 1));
      msk = wi::zext (~msk, TYPE_PRECISION (TREE_TYPE (exp)));
      if (wi::popcount (msk) == 1
	  && wi::ltu_p (msk, prec - max))
	{
	  *mask |= wi::shifted_mask (msk.to_uhwi (), max, false, prec);
	  max += msk.to_uhwi ();
	  exp = TREE_OPERAND (exp, 0);
	  if (integer_zerop (low)
	      && TREE_CODE (exp) == PLUS_EXPR
	      && TREE_CODE (TREE_OPERAND (exp, 1)) == INTEGER_CST)
	    {
	      tree ret = TREE_OPERAND (exp, 0);
	      widest_int bias
		= wi::neg (wi::sext (wi::to_widest (TREE_OPERAND (exp, 1)),
				     TYPE_PRECISION (TREE_TYPE (low))));
	      tree tbias = wide_int_to_tree (TREE_TYPE (ret), bias);
	      if (totallowp)
		{
		  *totallowp = tbias;
		  return ret;
		}
	      else if (!tree_int_cst_lt (totallow, tbias))
		return NULL_TREE;
	      bias = wi::to_widest (tbias);
	      bias -= wi::to_widest (totallow);
	      if (wi::ges_p (bias, 0) && wi::lts_p (bias, prec - max))
		{
		  *mask = wi::lshift (*mask, bias);
		  return ret;
		}
	    }
	}
    }
  if (totallowp)
    {
      *totallowp = low;
      return exp;
    }
  if (!tree_int_cst_lt (totallow, low))
    return NULL_TREE;
  tem = int_const_binop (MINUS_EXPR, low, totallow);
  if (tem == NULL_TREE
      || TREE_CODE (tem) != INTEGER_CST
      || TREE_OVERFLOW (tem)
      || compare_tree_int (tem, prec - max) == 1)
    return NULL_TREE;

  *mask = wi::lshift (*mask, wi::to_widest (tem));
  return exp;
}
/* Attempt to optimize small range tests using bit test.
   E.g.
   X != 43 && X != 76 && X != 44 && X != 78 && X != 49
   && X != 77 && X != 46 && X != 75 && X != 45 && X != 82
   has been optimized by the earlier optimizations into:
   ((X - 43U) & ~32U) > 3U && X != 49 && X != 82
   As the whole 43 through 82 range spans fewer than 64 values,
   for 64-bit word targets optimize that into:
   (X - 43U) > 40U && ((1 << (X - 43U)) & 0x8F0000004FULL) == 0  */
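/* An illustrative standalone sketch, not part of the pass: membership
   of X in { 43, 44, 45, 46, 49, 75, 76, 77, 78, 82 } as one subtract,
   one shift, one AND and one compare.  The mask has bit (value - 43)
   set for each member, which is exactly the 0x8F0000004FULL constant
   above; the function name is hypothetical and assumes a 64-bit
   unsigned long long:

     static int
     bit_test_example (unsigned int x)
     {
       return (x - 43U) <= 39U
	      && ((1ULL << (x - 43U)) & 0x8F0000004FULL) != 0;
     }
*/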
static bool
optimize_range_tests_to_bit_test (enum tree_code opcode, int first, int length,
				  vec<operand_entry *> *ops,
				  struct range_entry *ranges)
{
  int i, j;
  bool any_changes = false;
  int prec = GET_MODE_BITSIZE (word_mode);
  auto_vec<struct range_entry *, 64> candidates;

  for (i = first; i < length - 2; i++)
    {
      tree lowi, highi, lowj, highj, type;

      if (ranges[i].exp == NULL_TREE || ranges[i].in_p)
	continue;
      type = TREE_TYPE (ranges[i].exp);
      if (!INTEGRAL_TYPE_P (type))
	continue;
      lowi = ranges[i].low;
      if (lowi == NULL_TREE)
	lowi = TYPE_MIN_VALUE (type);
      highi = ranges[i].high;
      if (highi == NULL_TREE)
	continue;
      wide_int mask;
      tree exp = extract_bit_test_mask (ranges[i].exp, prec, lowi, lowi,
					highi, &mask, &lowi);
      if (exp == NULL_TREE)
	continue;
      bool strict_overflow_p = ranges[i].strict_overflow_p;
      candidates.truncate (0);
      int end = MIN (i + 64, length);
      for (j = i + 1; j < end; j++)
	{
	  tree exp2;
	  if (ranges[j].exp == NULL_TREE || ranges[j].in_p)
	    continue;
	  if (ranges[j].exp == exp)
	    ;
	  else if (TREE_CODE (ranges[j].exp) == BIT_AND_EXPR)
	    {
	      exp2 = TREE_OPERAND (ranges[j].exp, 0);
	      if (exp2 == exp)
		;
	      else if (TREE_CODE (exp2) == PLUS_EXPR)
		{
		  exp2 = TREE_OPERAND (exp2, 0);
		  if (exp2 != exp)
		    continue;
		}
	      else
		continue;
	    }
	  else
	    continue;
	  lowj = ranges[j].low;
	  if (lowj == NULL_TREE)
	    continue;
	  highj = ranges[j].high;
	  if (highj == NULL_TREE)
	    highj = TYPE_MAX_VALUE (type);
	  wide_int mask2;
	  exp2 = extract_bit_test_mask (ranges[j].exp, prec, lowi, lowj,
					highj, &mask2, NULL);
	  if (exp2 != exp)
	    continue;
	  mask |= mask2;
	  strict_overflow_p |= ranges[j].strict_overflow_p;
	  candidates.safe_push (&ranges[j]);
	}
      /* If we would otherwise need 3 or more comparisons, use a bit test.  */
      if (candidates.length () >= 2)
	{
	  tree high = wide_int_to_tree (TREE_TYPE (lowi),
					wi::to_widest (lowi)
					+ prec - 1 - wi::clz (mask));
	  operand_entry *oe = (*ops)[ranges[i].idx];
	  tree op = oe->op;
	  gimple *stmt = op ? SSA_NAME_DEF_STMT (op)
			    : last_stmt (BASIC_BLOCK_FOR_FN (cfun, oe->id));
	  location_t loc = gimple_location (stmt);
	  tree optype = op ? TREE_TYPE (op) : boolean_type_node;

	  /* See if it isn't cheaper to pretend the minimum value of the
	     range is 0, if the maximum value is small enough.
	     We can then avoid the subtraction of the minimum value, but
	     the mask constant could perhaps be more expensive.  */
	  if (compare_tree_int (lowi, 0) > 0
	      && compare_tree_int (high, prec) < 0)
	    {
	      int cost_diff;
	      HOST_WIDE_INT m = tree_to_uhwi (lowi);
	      rtx reg = gen_raw_REG (word_mode, 10000);
	      bool speed_p = optimize_bb_for_speed_p (gimple_bb (stmt));
	      cost_diff = set_rtx_cost (gen_rtx_PLUS (word_mode, reg,
						      GEN_INT (-m)), speed_p);
	      rtx r = immed_wide_int_const (mask, word_mode);
	      cost_diff += set_src_cost (gen_rtx_AND (word_mode, reg, r),
					 word_mode, speed_p);
	      r = immed_wide_int_const (wi::lshift (mask, m), word_mode);
	      cost_diff -= set_src_cost (gen_rtx_AND (word_mode, reg, r),
					 word_mode, speed_p);
	      if (cost_diff > 0)
		{
		  mask = wi::lshift (mask, m);
		  lowi = build_zero_cst (TREE_TYPE (lowi));
		}
	    }

	  tree tem = build_range_check (loc, optype, unshare_expr (exp),
					false, lowi, high);
	  if (tem == NULL_TREE || is_gimple_val (tem))
	    continue;
	  tree etype = unsigned_type_for (TREE_TYPE (exp));
	  exp = fold_build2_loc (loc, MINUS_EXPR, etype,
				 fold_convert_loc (loc, etype, exp),
				 fold_convert_loc (loc, etype, lowi));
	  exp = fold_convert_loc (loc, integer_type_node, exp);
	  tree word_type = lang_hooks.types.type_for_mode (word_mode, 1);
	  exp = fold_build2_loc (loc, LSHIFT_EXPR, word_type,
				 build_int_cst (word_type, 1), exp);
	  exp = fold_build2_loc (loc, BIT_AND_EXPR, word_type, exp,
				 wide_int_to_tree (word_type, mask));
	  exp = fold_build2_loc (loc, EQ_EXPR, optype, exp,
				 build_zero_cst (word_type));
	  if (is_gimple_val (exp))
	    continue;

	  /* The shift might have undefined behavior if TEM is true,
	     but reassociate_bb isn't prepared to have basic blocks
	     split when it is running.  So, temporarily emit a code
	     with BIT_IOR_EXPR instead of &&, and fix it up in
	     branch_fixup.  */
	  gimple_seq seq;
	  tem = force_gimple_operand (tem, &seq, true, NULL_TREE);
	  gcc_assert (TREE_CODE (tem) == SSA_NAME);
	  gimple_set_visited (SSA_NAME_DEF_STMT (tem), true);
	  gimple_seq seq2;
	  exp = force_gimple_operand (exp, &seq2, true, NULL_TREE);
	  gimple_seq_add_seq_without_update (&seq, seq2);
	  gcc_assert (TREE_CODE (exp) == SSA_NAME);
	  gimple_set_visited (SSA_NAME_DEF_STMT (exp), true);
	  gimple *g = gimple_build_assign (make_ssa_name (optype),
					   BIT_IOR_EXPR, tem, exp);
	  gimple_set_location (g, loc);
	  gimple_seq_add_stmt_without_update (&seq, g);
	  exp = gimple_assign_lhs (g);
	  tree val = build_zero_cst (optype);
	  if (update_range_test (&ranges[i], NULL, candidates.address (),
				 candidates.length (), opcode, ops, exp,
				 seq, false, val, val, strict_overflow_p))
	    {
	      any_changes = true;
	      reassoc_branch_fixups.safe_push (tem);
	    }
	  else
	    gimple_seq_discard (seq);
	}
    }
  return any_changes;
}
/* Optimize range tests, similarly to how fold_range_test optimizes
   it on trees.  The tree code for the binary
   operation between all the operands is OPCODE.
   If OPCODE is ERROR_MARK, optimize_range_tests is called from within
   maybe_optimize_range_tests for inter-bb range optimization.
   In that case, if oe->op is NULL, oe->id is bb->index whose
   GIMPLE_COND is && or ||ed into the test, and oe->rank says
   the actual opcode.  */
static bool
optimize_range_tests (enum tree_code opcode,
		      vec<operand_entry *> *ops)
{
  unsigned int length = ops->length (), i, j, first;
  operand_entry *oe;
  struct range_entry *ranges;
  bool any_changes = false;

  if (length == 1)
    return false;

  ranges = XNEWVEC (struct range_entry, length);
  for (i = 0; i < length; i++)
    {
      oe = (*ops)[i];
      ranges[i].idx = i;
      init_range_entry (ranges + i, oe->op,
			oe->op ? NULL :
			last_stmt (BASIC_BLOCK_FOR_FN (cfun, oe->id)));
      /* For | invert it now, we will invert it again before emitting
	 the optimized expression.  */
      if (opcode == BIT_IOR_EXPR
	  || (opcode == ERROR_MARK && oe->rank == BIT_IOR_EXPR))
	ranges[i].in_p = !ranges[i].in_p;
    }

  qsort (ranges, length, sizeof (*ranges), range_entry_cmp);
  for (i = 0; i < length; i++)
    if (ranges[i].exp != NULL_TREE && TREE_CODE (ranges[i].exp) == SSA_NAME)
      break;

  /* Try to merge ranges.  */
  for (first = i; i < length; i++)
    {
      tree low = ranges[i].low;
      tree high = ranges[i].high;
      int in_p = ranges[i].in_p;
      bool strict_overflow_p = ranges[i].strict_overflow_p;
      int update_fail_count = 0;

      for (j = i + 1; j < length; j++)
	{
	  if (ranges[i].exp != ranges[j].exp)
	    break;
	  if (!merge_ranges (&in_p, &low, &high, in_p, low, high,
			     ranges[j].in_p, ranges[j].low, ranges[j].high))
	    break;
	  strict_overflow_p |= ranges[j].strict_overflow_p;
	}

      if (j == i + 1)
	continue;

      if (update_range_test (ranges + i, ranges + i + 1, NULL, j - i - 1,
			     opcode, ops, ranges[i].exp, NULL, in_p,
			     low, high, strict_overflow_p))
	{
	  i = j - 1;
	  any_changes = true;
	}
      /* Avoid quadratic complexity if all merge_ranges calls would succeed,
	 while update_range_test would fail.  */
      else if (update_fail_count == 64)
	break;
      else
	++update_fail_count;
    }

  any_changes |= optimize_range_tests_1 (opcode, first, length, true,
					 ops, ranges);

  if (BRANCH_COST (optimize_function_for_speed_p (cfun), false) >= 2)
    any_changes |= optimize_range_tests_1 (opcode, first, length, false,
					   ops, ranges);
  if (lshift_cheap_p (optimize_function_for_speed_p (cfun)))
    any_changes |= optimize_range_tests_to_bit_test (opcode, first, length,
						     ops, ranges);

  if (any_changes && opcode != ERROR_MARK)
    {
      j = 0;
      FOR_EACH_VEC_ELT (*ops, i, oe)
	{
	  if (oe->op == error_mark_node)
	    continue;
	  else if (i != j)
	    (*ops)[j] = oe;
	  j++;
	}
      ops->truncate (j);
    }

  XDELETEVEC (ranges);
  return any_changes;
}
/* Return true if STMT is a cast like:
   <bb N>:
   ...
   _123 = (int) _234;

   <bb M>:
   # _345 = PHI <_123(N), 1(...), 1(...)>
   where _234 has bool type, _123 has a single use and
   bb N has a single successor M.  This is commonly used in
   the last block of a range test.  */
static bool
final_range_test_p (gimple *stmt)
{
  basic_block bb, rhs_bb;
  edge e;
  tree lhs, rhs;
  use_operand_p use_p;
  gimple *use_stmt;

  if (!gimple_assign_cast_p (stmt))
    return false;
  bb = gimple_bb (stmt);
  if (!single_succ_p (bb))
    return false;
  e = single_succ_edge (bb);
  if (e->flags & EDGE_COMPLEX)
    return false;

  lhs = gimple_assign_lhs (stmt);
  rhs = gimple_assign_rhs1 (stmt);
  if (!INTEGRAL_TYPE_P (TREE_TYPE (lhs))
      || TREE_CODE (rhs) != SSA_NAME
      || TREE_CODE (TREE_TYPE (rhs)) != BOOLEAN_TYPE)
    return false;

  /* Test whether lhs is consumed only by a PHI in the only successor bb.  */
  if (!single_imm_use (lhs, &use_p, &use_stmt))
    return false;

  if (gimple_code (use_stmt) != GIMPLE_PHI
      || gimple_bb (use_stmt) != e->dest)
    return false;

  /* And that the rhs is defined in the same loop.  */
  rhs_bb = gimple_bb (SSA_NAME_DEF_STMT (rhs));
  if (rhs_bb == NULL
      || !flow_bb_inside_loop_p (loop_containing_stmt (stmt), rhs_bb))
    return false;

  return true;
}
/* Return true if BB is a suitable basic block for inter-bb range test
   optimization.  If BACKWARD is true, BB should be the only predecessor
   of TEST_BB, and *OTHER_BB is either NULL and filled by the routine,
   or compared with to find a common basic block to which all conditions
   branch if true resp. false.  If BACKWARD is false, TEST_BB should
   be the only predecessor of BB.  */
static bool
suitable_cond_bb (basic_block bb, basic_block test_bb, basic_block *other_bb,
		  bool backward)
{
  edge_iterator ei, ei2;
  edge e, e2;
  gimple *stmt;
  gphi_iterator gsi;
  bool other_edge_seen = false;
  bool is_cond;

  if (test_bb == bb)
    return false;
  /* Check last stmt first.  */
  stmt = last_stmt (bb);
  if (stmt == NULL
      || (gimple_code (stmt) != GIMPLE_COND
	  && (backward || !final_range_test_p (stmt)))
      || gimple_visited_p (stmt)
      || stmt_could_throw_p (stmt)
      || *other_bb == bb)
    return false;
  is_cond = gimple_code (stmt) == GIMPLE_COND;
  if (is_cond)
    {
      /* If last stmt is GIMPLE_COND, verify that one of the succ edges
	 goes to the next bb (if BACKWARD, it is TEST_BB), and the other
	 to *OTHER_BB (if not set yet, try to find it out).  */
      if (EDGE_COUNT (bb->succs) != 2)
	return false;
      FOR_EACH_EDGE (e, ei, bb->succs)
	{
	  if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
	    return false;
	  if (e->dest == test_bb)
	    {
	      if (backward)
		continue;
	      else
		return false;
	    }
	  if (e->dest == bb)
	    return false;
	  if (*other_bb == NULL)
	    {
	      FOR_EACH_EDGE (e2, ei2, test_bb->succs)
		if (!(e2->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
		  return false;
		else if (e->dest == e2->dest)
		  *other_bb = e->dest;
	      if (*other_bb == NULL)
		return false;
	    }
	  if (e->dest == *other_bb)
	    other_edge_seen = true;
	  else if (backward)
	    return false;
	}
      if (*other_bb == NULL || !other_edge_seen)
	return false;
    }
  else if (single_succ (bb) != *other_bb)
    return false;

  /* Now check all PHIs of *OTHER_BB.  */
  e = find_edge (bb, *other_bb);
  e2 = find_edge (test_bb, *other_bb);
  for (gsi = gsi_start_phis (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gphi *phi = gsi.phi ();
      /* If both BB and TEST_BB end with GIMPLE_COND, all PHI arguments
	 corresponding to BB and TEST_BB predecessor must be the same.  */
      if (!operand_equal_p (gimple_phi_arg_def (phi, e->dest_idx),
			    gimple_phi_arg_def (phi, e2->dest_idx), 0))
	{
	  /* Otherwise, if one of the blocks doesn't end with GIMPLE_COND,
	     one of the PHIs should have the lhs of the last stmt in
	     that block as PHI arg and that PHI should have 0 or 1
	     corresponding to it in all other range test basic blocks
	     considered.  */
	  if (!is_cond)
	    {
	      if (gimple_phi_arg_def (phi, e->dest_idx)
		  == gimple_assign_lhs (stmt)
		  && (integer_zerop (gimple_phi_arg_def (phi, e2->dest_idx))
		      || integer_onep (gimple_phi_arg_def (phi,
							   e2->dest_idx))))
		continue;
	    }
	  else
	    {
	      gimple *test_last = last_stmt (test_bb);
	      if (gimple_code (test_last) != GIMPLE_COND
		  && gimple_phi_arg_def (phi, e2->dest_idx)
		     == gimple_assign_lhs (test_last)
		  && (integer_zerop (gimple_phi_arg_def (phi, e->dest_idx))
		      || integer_onep (gimple_phi_arg_def (phi, e->dest_idx))))
		continue;
	    }
	  return false;
	}
    }
  return true;
}
/* Return true if BB doesn't have side-effects that would disallow
   range test optimization, all SSA_NAMEs set in the bb are consumed
   in the bb and there are no PHIs.  */
static bool
no_side_effect_bb (basic_block bb)
{
  gimple_stmt_iterator gsi;
  gimple *last;

  if (!gimple_seq_empty_p (phi_nodes (bb)))
    return false;
  last = last_stmt (bb);
  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple *stmt = gsi_stmt (gsi);
      tree lhs;
      imm_use_iterator imm_iter;
      use_operand_p use_p;

      if (is_gimple_debug (stmt))
	continue;
      if (gimple_has_side_effects (stmt))
	return false;
      if (stmt == last)
	return true;
      if (!is_gimple_assign (stmt))
	return false;
      lhs = gimple_assign_lhs (stmt);
      if (TREE_CODE (lhs) != SSA_NAME)
	return false;
      if (gimple_assign_rhs_could_trap_p (stmt))
	return false;
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
	{
	  gimple *use_stmt = USE_STMT (use_p);
	  if (is_gimple_debug (use_stmt))
	    continue;
	  if (gimple_bb (use_stmt) != bb)
	    return false;
	}
    }
  return false;
}
/* If VAR is set by CODE (BIT_{AND,IOR}_EXPR) which is reassociable,
   return true and fill in *OPS recursively.  */
static bool
get_ops (tree var, enum tree_code code, vec<operand_entry *> *ops,
	 struct loop *loop)
{
  gimple *stmt = SSA_NAME_DEF_STMT (var);
  tree rhs[2];
  int i;

  if (!is_reassociable_op (stmt, code, loop))
    return false;

  rhs[0] = gimple_assign_rhs1 (stmt);
  rhs[1] = gimple_assign_rhs2 (stmt);
  gimple_set_visited (stmt, true);
  for (i = 0; i < 2; i++)
    if (TREE_CODE (rhs[i]) == SSA_NAME
	&& !get_ops (rhs[i], code, ops, loop)
	&& has_single_use (rhs[i]))
      {
	operand_entry *oe = operand_entry_pool.allocate ();

	oe->op = rhs[i];
	oe->rank = code;
	oe->id = 0;
	oe->count = 1;
	ops->safe_push (oe);
      }
  return true;
}
/* Find the ops that were added by get_ops starting from VAR, see if
   they were changed during update_range_test and if yes, create new
   stmts.  */
static tree
update_ops (tree var, enum tree_code code, vec<operand_entry *> ops,
	    unsigned int *pidx, struct loop *loop)
{
  gimple *stmt = SSA_NAME_DEF_STMT (var);
  tree rhs[4];
  int i;

  if (!is_reassociable_op (stmt, code, loop))
    return NULL;

  rhs[0] = gimple_assign_rhs1 (stmt);
  rhs[1] = gimple_assign_rhs2 (stmt);
  rhs[2] = rhs[0];
  rhs[3] = rhs[1];
  for (i = 0; i < 2; i++)
    if (TREE_CODE (rhs[i]) == SSA_NAME)
      {
	rhs[2 + i] = update_ops (rhs[i], code, ops, pidx, loop);
	if (rhs[2 + i] == NULL_TREE)
	  {
	    if (has_single_use (rhs[i]))
	      rhs[2 + i] = ops[(*pidx)++]->op;
	    else
	      rhs[2 + i] = rhs[i];
	  }
      }
  if ((rhs[2] != rhs[0] || rhs[3] != rhs[1])
      && (rhs[2] != rhs[1] || rhs[3] != rhs[0]))
    {
      gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
      var = make_ssa_name (TREE_TYPE (var));
      gassign *g = gimple_build_assign (var, gimple_assign_rhs_code (stmt),
					rhs[2], rhs[3]);
      gimple_set_uid (g, gimple_uid (stmt));
      gimple_set_visited (g, true);
      gsi_insert_before (&gsi, g, GSI_SAME_STMT);
    }
  return var;
}
/* Structure to track the initial value passed to get_ops and
   the range in the ops vector for each basic block.  */

struct inter_bb_range_test_entry
{
  tree op;
  unsigned int first_idx, last_idx;
};
/* Inter-bb range test optimization.  */

static void
maybe_optimize_range_tests (gimple *stmt)
{
  basic_block first_bb = gimple_bb (stmt);
  basic_block last_bb = first_bb;
  basic_block other_bb = NULL;
  basic_block bb;
  edge_iterator ei;
  edge e;
  auto_vec<operand_entry *> ops;
  auto_vec<inter_bb_range_test_entry> bbinfo;
  bool any_changes = false;

  /* Consider only basic blocks that end with GIMPLE_COND or
     a cast statement satisfying final_range_test_p.  All
     but the last bb in the first_bb .. last_bb range
     should end with GIMPLE_COND.  */
  if (gimple_code (stmt) == GIMPLE_COND)
    {
      if (EDGE_COUNT (first_bb->succs) != 2)
	return;
    }
  else if (final_range_test_p (stmt))
    other_bb = single_succ (first_bb);
  else
    return;

  if (stmt_could_throw_p (stmt))
    return;

  /* As relative ordering of post-dominator sons isn't fixed,
     maybe_optimize_range_tests can be called first on any
     bb in the range we want to optimize.  So, start searching
     backwards, if first_bb can be set to a predecessor.  */
  while (single_pred_p (first_bb))
    {
      basic_block pred_bb = single_pred (first_bb);
      if (!suitable_cond_bb (pred_bb, first_bb, &other_bb, true))
	break;
      if (!no_side_effect_bb (first_bb))
	break;
      first_bb = pred_bb;
    }
  /* If first_bb is last_bb, other_bb hasn't been computed yet.
     Before starting forward search in last_bb successors, find
     out the other_bb.  */
  if (first_bb == last_bb)
    {
      other_bb = NULL;
      /* As a non-GIMPLE_COND last stmt always terminates the range,
	 if forward search didn't discover anything, just give up.  */
      if (gimple_code (stmt) != GIMPLE_COND)
	return;
      /* Look at both successors.  Either it ends with a GIMPLE_COND
	 and satisfies suitable_cond_bb, or ends with a cast and
	 other_bb is that cast's successor.  */
      FOR_EACH_EDGE (e, ei, first_bb->succs)
	if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE))
	    || e->dest == first_bb)
	  return;
	else if (single_pred_p (e->dest))
	  {
	    stmt = last_stmt (e->dest);
	    if (stmt
		&& gimple_code (stmt) == GIMPLE_COND
		&& EDGE_COUNT (e->dest->succs) == 2)
	      {
		if (suitable_cond_bb (first_bb, e->dest, &other_bb, true))
		  break;
		else
		  other_bb = NULL;
	      }
	    else if (stmt
		     && final_range_test_p (stmt)
		     && find_edge (first_bb, single_succ (e->dest)))
	      {
		other_bb = single_succ (e->dest);
		if (other_bb == first_bb)
		  other_bb = NULL;
	      }
	  }
      if (other_bb == NULL)
	return;
    }
  /* Now do the forward search, moving last_bb to successor bbs
     that aren't other_bb.  */
  while (EDGE_COUNT (last_bb->succs) == 2)
    {
      FOR_EACH_EDGE (e, ei, last_bb->succs)
	if (e->dest != other_bb)
	  break;
      if (e == NULL)
	break;
      if (!single_pred_p (e->dest))
	break;
      if (!suitable_cond_bb (e->dest, last_bb, &other_bb, false))
	break;
      if (!no_side_effect_bb (e->dest))
	break;
      last_bb = e->dest;
    }
  if (first_bb == last_bb)
    return;
  /* Here basic blocks first_bb through last_bb's predecessor
     end with GIMPLE_COND, all of them have one of the edges to
     other_bb and another to another block in the range,
     all blocks except first_bb don't have side-effects and
     last_bb ends with either GIMPLE_COND, or cast satisfying
     final_range_test_p.  */
  for (bb = last_bb; ; bb = single_pred (bb))
    {
      enum tree_code code;
      tree lhs, rhs;
      inter_bb_range_test_entry bb_ent;

      bb_ent.op = NULL_TREE;
      bb_ent.first_idx = ops.length ();
      bb_ent.last_idx = bb_ent.first_idx;
      e = find_edge (bb, other_bb);
      stmt = last_stmt (bb);
      gimple_set_visited (stmt, true);
      if (gimple_code (stmt) != GIMPLE_COND)
	{
	  use_operand_p use_p;
	  gimple *phi;
	  edge e2;
	  unsigned int d;

	  lhs = gimple_assign_lhs (stmt);
	  rhs = gimple_assign_rhs1 (stmt);
	  gcc_assert (bb == last_bb);

	  /* stmt is
	     _123 = (int) _234;

	     followed by:
	     <bb M>:
	     # _345 = PHI <_123(N), 1(...), 1(...)>

	     or 0 instead of 1.  If it is 0, the _234
	     range test is anded together with all the
	     other range tests, if it is 1, it is ored with
	     them.  */
	  single_imm_use (lhs, &use_p, &phi);
	  gcc_assert (gimple_code (phi) == GIMPLE_PHI);
	  e2 = find_edge (first_bb, other_bb);
	  d = e2->dest_idx;
	  gcc_assert (gimple_phi_arg_def (phi, e->dest_idx) == lhs);
	  if (integer_zerop (gimple_phi_arg_def (phi, d)))
	    code = BIT_AND_EXPR;
	  else
	    {
	      gcc_checking_assert (integer_onep (gimple_phi_arg_def (phi, d)));
	      code = BIT_IOR_EXPR;
	    }

	  /* If _234 SSA_NAME_DEF_STMT is
	     _234 = _567 | _789;
	     (or &, corresponding to 1/0 in the phi arguments,
	     push into ops the individual range test arguments
	     of the bitwise or resp. and, recursively.  */
	  if (!get_ops (rhs, code, &ops,
			loop_containing_stmt (stmt))
	      && has_single_use (rhs))
	    {
	      /* Otherwise, push the _234 range test itself.  */
	      operand_entry *oe = operand_entry_pool.allocate ();

	      oe->op = rhs;
	      oe->rank = code;
	      oe->id = 0;
	      oe->count = 1;
	      ops.safe_push (oe);
	      bb_ent.last_idx++;
	    }
	  else
	    bb_ent.last_idx = ops.length ();
	  bb_ent.op = rhs;
	  bbinfo.safe_push (bb_ent);
	  continue;
	}
      /* Otherwise stmt is GIMPLE_COND.  */
      code = gimple_cond_code (stmt);
      lhs = gimple_cond_lhs (stmt);
      rhs = gimple_cond_rhs (stmt);
      if (TREE_CODE (lhs) == SSA_NAME
	  && INTEGRAL_TYPE_P (TREE_TYPE (lhs))
	  && ((code != EQ_EXPR && code != NE_EXPR)
	      || rhs != boolean_false_node
	      /* Either push into ops the individual bitwise
		 or resp. and operands, depending on which
		 edge is other_bb.  */
	      || !get_ops (lhs, (((e->flags & EDGE_TRUE_VALUE) == 0)
				 ^ (code == EQ_EXPR))
			   ? BIT_AND_EXPR : BIT_IOR_EXPR, &ops,
			   loop_containing_stmt (stmt))))
	{
	  /* Or push the GIMPLE_COND stmt itself.  */
	  operand_entry *oe = operand_entry_pool.allocate ();

	  oe->op = NULL;
	  oe->rank = (e->flags & EDGE_TRUE_VALUE)
		     ? BIT_IOR_EXPR : BIT_AND_EXPR;
	  /* oe->op = NULL signs that there is no SSA_NAME
	     for the range test, and oe->id instead is the
	     basic block number, at whose end the GIMPLE_COND
	     is.  */
	  oe->id = bb->index;
	  oe->count = 1;
	  ops.safe_push (oe);
	  bb_ent.op = NULL;
	  bb_ent.last_idx++;
	}
      else if (ops.length () > bb_ent.first_idx)
	{
	  bb_ent.op = lhs;
	  bb_ent.last_idx = ops.length ();
	}
      bbinfo.safe_push (bb_ent);
      if (bb == first_bb)
	break;
    }
  if (ops.length () > 1)
    any_changes = optimize_range_tests (ERROR_MARK, &ops);
  if (any_changes)
    {
      unsigned int idx;
      /* update_ops relies on has_single_use predicates returning the
	 same values as it did during get_ops earlier.  Additionally it
	 never removes statements, only adds new ones and it should walk
	 from the single imm use and check the predicate already before
	 making those changes.
	 On the other side, the handling of GIMPLE_COND directly can turn
	 previously multiply used SSA_NAMEs into single use SSA_NAMEs, so
	 it needs to be done in a separate loop afterwards.  */
      for (bb = last_bb, idx = 0; ; bb = single_pred (bb), idx++)
	{
	  if (bbinfo[idx].first_idx < bbinfo[idx].last_idx
	      && bbinfo[idx].op != NULL_TREE)
	    {
	      tree new_op;

	      stmt = last_stmt (bb);
	      new_op = update_ops (bbinfo[idx].op,
				   (enum tree_code)
				   ops[bbinfo[idx].first_idx]->rank,
				   ops, &bbinfo[idx].first_idx,
				   loop_containing_stmt (stmt));
	      if (new_op == NULL_TREE)
		{
		  gcc_assert (bb == last_bb);
		  new_op = ops[bbinfo[idx].first_idx++]->op;
		}
	      if (bbinfo[idx].op != new_op)
		{
		  imm_use_iterator iter;
		  use_operand_p use_p;
		  gimple *use_stmt, *cast_stmt = NULL;

		  FOR_EACH_IMM_USE_STMT (use_stmt, iter, bbinfo[idx].op)
		    if (is_gimple_debug (use_stmt))
		      continue;
		    else if (gimple_code (use_stmt) == GIMPLE_COND
			     || gimple_code (use_stmt) == GIMPLE_PHI)
		      FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
			SET_USE (use_p, new_op);
		    else if (gimple_assign_cast_p (use_stmt))
		      cast_stmt = use_stmt;
		    else
		      gcc_unreachable ();
		  if (cast_stmt)
		    {
		      gcc_assert (bb == last_bb);
		      tree lhs = gimple_assign_lhs (cast_stmt);
		      tree new_lhs = make_ssa_name (TREE_TYPE (lhs));
		      enum tree_code rhs_code
			= gimple_assign_rhs_code (cast_stmt);
		      gassign *g;
		      if (is_gimple_min_invariant (new_op))
			{
			  new_op = fold_convert (TREE_TYPE (lhs), new_op);
			  g = gimple_build_assign (new_lhs, new_op);
			}
		      else
			g = gimple_build_assign (new_lhs, rhs_code, new_op);
		      gimple_stmt_iterator gsi = gsi_for_stmt (cast_stmt);
		      gimple_set_uid (g, gimple_uid (cast_stmt));
		      gimple_set_visited (g, true);
		      gsi_insert_before (&gsi, g, GSI_SAME_STMT);
		      FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
			if (is_gimple_debug (use_stmt))
			  continue;
			else if (gimple_code (use_stmt) == GIMPLE_COND
				 || gimple_code (use_stmt) == GIMPLE_PHI)
			  FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
			    SET_USE (use_p, new_lhs);
			else
			  gcc_unreachable ();
		    }
		}
	    }
	  if (bb == first_bb)
	    break;
	}
      for (bb = last_bb, idx = 0; ; bb = single_pred (bb), idx++)
	{
	  if (bbinfo[idx].first_idx < bbinfo[idx].last_idx
	      && bbinfo[idx].op == NULL_TREE
	      && ops[bbinfo[idx].first_idx]->op != NULL_TREE)
	    {
	      gcond *cond_stmt = as_a <gcond *> (last_stmt (bb));
	      if (integer_zerop (ops[bbinfo[idx].first_idx]->op))
		gimple_cond_make_false (cond_stmt);
	      else if (integer_onep (ops[bbinfo[idx].first_idx]->op))
		gimple_cond_make_true (cond_stmt);
	      else
		{
		  gimple_cond_set_code (cond_stmt, NE_EXPR);
		  gimple_cond_set_lhs (cond_stmt,
				       ops[bbinfo[idx].first_idx]->op);
		  gimple_cond_set_rhs (cond_stmt, boolean_false_node);
		}
	      update_stmt (cond_stmt);
	    }
	  if (bb == first_bb)
	    break;
	}
    }
}
/* Return true if OPERAND is defined by a PHI node which uses the LHS
   of STMT in its operands.  This is also known as a "destructive
   update" operation.  */
static bool
is_phi_for_stmt (gimple *stmt, tree operand)
{
  gimple *def_stmt;
  gphi *def_phi;
  tree lhs;
  use_operand_p arg_p;
  ssa_op_iter i;

  if (TREE_CODE (operand) != SSA_NAME)
    return false;

  lhs = gimple_assign_lhs (stmt);

  def_stmt = SSA_NAME_DEF_STMT (operand);
  def_phi = dyn_cast <gphi *> (def_stmt);
  if (!def_phi)
    return false;

  FOR_EACH_PHI_ARG (arg_p, def_phi, i, SSA_OP_USE)
    if (lhs == USE_FROM_PTR (arg_p))
      return true;
  return false;
}
/* Remove def stmt of VAR if VAR has zero uses and recurse
   on rhs1 operand if so.  */
static void
remove_visited_stmt_chain (tree var)
{
  gimple *stmt;
  gimple_stmt_iterator gsi;

  while (1)
    {
      if (TREE_CODE (var) != SSA_NAME || !has_zero_uses (var))
	return;
      stmt = SSA_NAME_DEF_STMT (var);
      if (is_gimple_assign (stmt) && gimple_visited_p (stmt))
	{
	  var = gimple_assign_rhs1 (stmt);
	  gsi = gsi_for_stmt (stmt);
	  reassoc_remove_stmt (&gsi);
	  release_defs (stmt);
	}
      else
	return;
    }
}
/* This function checks three consecutive operands in
   passed operands vector OPS starting from OPINDEX and
   swaps two operands if it is profitable for binary operation
   consuming OPINDEX + 1 and OPINDEX + 2 operands.

   We pair ops with the same rank if possible.

   The alternative we try is to see if STMT is a destructive
   update style statement, which is like:
   b = phi (a, ...)
   a = c + b;
   In that case, we want to use the destructive update form to
   expose the possible vectorizer sum reduction opportunity.
   In that case, the third operand will be the phi node.  This
   check is not performed if STMT is null.

   We could, of course, try to be better as noted above, and do a
   lot of work to try to find these opportunities in >3 operand
   cases, but it is unlikely to be worth it.  */
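/* For instance (an illustrative case, not from the sources): with
   OPS = { a (rank 1), b (rank 1), c (rank 2) } the first and third
   entries are exchanged, giving { c, b, a }, so the binary operation
   consuming the last two operands pairs the equal-rank a and b.  */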
static void
swap_ops_for_binary_stmt (vec<operand_entry *> ops,
			  unsigned int opindex, gimple *stmt)
{
  operand_entry *oe1, *oe2, *oe3;

  oe1 = ops[opindex];
  oe2 = ops[opindex + 1];
  oe3 = ops[opindex + 2];

  if ((oe1->rank == oe2->rank
       && oe2->rank != oe3->rank)
      || (stmt && is_phi_for_stmt (stmt, oe3->op)
	  && !is_phi_for_stmt (stmt, oe1->op)
	  && !is_phi_for_stmt (stmt, oe2->op)))
    {
      operand_entry temp = *oe3;
      oe3->op = oe1->op;
      oe3->rank = oe1->rank;
      oe1->op = temp.op;
      oe1->rank = temp.rank;
    }
  else if ((oe1->rank == oe3->rank
	    && oe2->rank != oe3->rank)
	   || (stmt && is_phi_for_stmt (stmt, oe2->op)
	       && !is_phi_for_stmt (stmt, oe1->op)
	       && !is_phi_for_stmt (stmt, oe3->op)))
    {
      operand_entry temp = *oe2;
      oe2->op = oe1->op;
      oe2->rank = oe1->rank;
      oe1->op = temp.op;
      oe1->rank = temp.rank;
    }
}
/* If definition of RHS1 or RHS2 dominates STMT, return the later of those
   two definitions, otherwise return STMT.  */
static inline gimple *
find_insert_point (gimple *stmt, tree rhs1, tree rhs2)
{
  if (TREE_CODE (rhs1) == SSA_NAME
      && reassoc_stmt_dominates_stmt_p (stmt, SSA_NAME_DEF_STMT (rhs1)))
    stmt = SSA_NAME_DEF_STMT (rhs1);
  if (TREE_CODE (rhs2) == SSA_NAME
      && reassoc_stmt_dominates_stmt_p (stmt, SSA_NAME_DEF_STMT (rhs2)))
    stmt = SSA_NAME_DEF_STMT (rhs2);
  return stmt;
}
/* Recursively rewrite our linearized statements so that the operators
   match those in OPS[OPINDEX], putting the computation in rank
   order.  Return new lhs.  */
static tree
rewrite_expr_tree (gimple *stmt, unsigned int opindex,
		   vec<operand_entry *> ops, bool changed)
{
  tree rhs1 = gimple_assign_rhs1 (stmt);
  tree rhs2 = gimple_assign_rhs2 (stmt);
  tree lhs = gimple_assign_lhs (stmt);
  operand_entry *oe;

  /* The final recursion case for this function is that you have
     exactly two operations left.
     If we had exactly one op in the entire list to start with, we
     would have never called this function, and the tail recursion
     rewrites them one at a time.  */
  if (opindex + 2 == ops.length ())
    {
      operand_entry *oe1, *oe2;

      oe1 = ops[opindex];
      oe2 = ops[opindex + 1];

      if (rhs1 != oe1->op || rhs2 != oe2->op)
	{
	  gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
	  unsigned int uid = gimple_uid (stmt);

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Transforming ");
	      print_gimple_stmt (dump_file, stmt, 0, 0);
	    }

	  /* Even when changed is false, reassociation could have e.g. removed
	     some redundant operations, so unless we are just swapping the
	     arguments or unless there is no change at all (then we just
	     return lhs), force creation of a new SSA_NAME.  */
	  if (changed || ((rhs1 != oe2->op || rhs2 != oe1->op) && opindex))
	    {
	      gimple *insert_point
		= find_insert_point (stmt, oe1->op, oe2->op);
	      lhs = make_ssa_name (TREE_TYPE (lhs));
	      stmt
		= gimple_build_assign (lhs, gimple_assign_rhs_code (stmt),
				       oe1->op, oe2->op);
	      gimple_set_uid (stmt, uid);
	      gimple_set_visited (stmt, true);
	      if (insert_point == gsi_stmt (gsi))
		gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
	      else
		insert_stmt_after (stmt, insert_point);
	    }
	  else
	    {
	      gcc_checking_assert (find_insert_point (stmt, oe1->op, oe2->op)
				   == stmt);
	      gimple_assign_set_rhs1 (stmt, oe1->op);
	      gimple_assign_set_rhs2 (stmt, oe2->op);
	      update_stmt (stmt);
	    }

	  if (rhs1 != oe1->op && rhs1 != oe2->op)
	    remove_visited_stmt_chain (rhs1);

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, " into ");
	      print_gimple_stmt (dump_file, stmt, 0, 0);
	    }
	}
      return lhs;
    }

  /* If we hit here, we should have 3 or more ops left.  */
  gcc_assert (opindex + 2 < ops.length ());

  /* Rewrite the next operator.  */
  oe = ops[opindex];

  /* Recurse on the LHS of the binary operator, which is guaranteed to
     be the non-leaf side.  */
  tree new_rhs1
    = rewrite_expr_tree (SSA_NAME_DEF_STMT (rhs1), opindex + 1, ops,
			 changed || oe->op != rhs2);

  if (oe->op != rhs2 || new_rhs1 != rhs1)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Transforming ");
	  print_gimple_stmt (dump_file, stmt, 0, 0);
	}

      /* If changed is false, this is either opindex == 0
	 or all outer rhs2's were equal to corresponding oe->op,
	 and powi_result is NULL.
	 That means lhs is equivalent before and after reassociation.
	 Otherwise ensure the old lhs SSA_NAME is not reused and
	 create a new stmt as well, so that any debug stmts will be
	 properly adjusted.  */
      if (changed)
	{
	  gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
	  unsigned int uid = gimple_uid (stmt);
	  gimple *insert_point = find_insert_point (stmt, new_rhs1, oe->op);

	  lhs = make_ssa_name (TREE_TYPE (lhs));
	  stmt = gimple_build_assign (lhs, gimple_assign_rhs_code (stmt),
				      new_rhs1, oe->op);
	  gimple_set_uid (stmt, uid);
	  gimple_set_visited (stmt, true);
	  if (insert_point == gsi_stmt (gsi))
	    gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
	  else
	    insert_stmt_after (stmt, insert_point);
	}
      else
	{
	  gcc_checking_assert (find_insert_point (stmt, new_rhs1, oe->op)
			       == stmt);
	  gimple_assign_set_rhs1 (stmt, new_rhs1);
	  gimple_assign_set_rhs2 (stmt, oe->op);
	  update_stmt (stmt);
	}

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, " into ");
	  print_gimple_stmt (dump_file, stmt, 0, 0);
	}
    }
  return lhs;
}
/* Find out how many cycles we need to compute statements chain.
   OPS_NUM holds number of statements in a chain.  CPU_WIDTH is a
   maximum number of independent statements we may execute per cycle.  */
static int
get_required_cycles (int ops_num, int cpu_width)
{
  int res;
  int elog;
  unsigned int rest;

  /* While we have more than 2 * cpu_width operands
     we may reduce number of operands by cpu_width
     per cycle.  */
  res = ops_num / (2 * cpu_width);

  /* The remaining operand count may be halved each cycle
     until we have only one operand.  */
  rest = (unsigned) (ops_num - res * cpu_width);
  elog = exact_log2 (rest);
  if (elog >= 0)
    res += elog;
  else
    res += floor_log2 (rest) + 1;

  return res;
}
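/* An illustrative worked example, not part of the pass: for a chain of
   10 operands on a machine issuing 2 independent operations per cycle,
   res = 10 / 4 = 2 reduction cycles are accounted first, leaving
   rest = 10 - 2 * 2 = 6 operands; 6 is not a power of two, so
   floor_log2 (6) + 1 = 3 further cycles are added, 5 cycles total.  */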
/* Returns an optimal number of registers to use for computation of
   given statements.  */
static int
get_reassociation_width (int ops_num, enum tree_code opc,
			 machine_mode mode)
{
  int param_width = PARAM_VALUE (PARAM_TREE_REASSOC_WIDTH);
  int width;
  int width_min;
  int cycles_best;

  if (param_width > 0)
    width = param_width;
  else
    width = targetm.sched.reassociation_width (opc, mode);

  if (width == 1)
    return width;

  /* Get the minimal time required for sequence computation.  */
  cycles_best = get_required_cycles (ops_num, width);

  /* Check if we may use less width and still compute sequence for
     the same time.  It will allow us to reduce registers usage.
     get_required_cycles is monotonically increasing with lower width
     so we can perform a binary search for the minimal width that still
     results in the optimal cycle count.  */
  width_min = 1;
  while (width > width_min)
    {
      int width_mid = (width + width_min) / 2;

      if (get_required_cycles (ops_num, width_mid) == cycles_best)
	width = width_mid;
      else if (width_min < width_mid)
	width_min = width_mid;
      else
	break;
    }

  return width;
}
/* Recursively rewrite our linearized statements so that the operators
   match those in OPS[OPINDEX], putting the computation in rank
   order and trying to allow operations to be executed in
   parallel.  */
static void
rewrite_expr_tree_parallel (gassign *stmt, int width,
			    vec<operand_entry *> ops)
{
  enum tree_code opcode = gimple_assign_rhs_code (stmt);
  int op_num = ops.length ();
  int stmt_num = op_num - 1;
  gimple **stmts = XALLOCAVEC (gimple *, stmt_num);
  int op_index = op_num - 1;
  int stmt_index = 0;
  int ready_stmts_end = 0;
  int i = 0;
  tree last_rhs1 = gimple_assign_rhs1 (stmt);

  /* We start expression rewriting from the top statements.
     So, in this loop we create a full list of statements
     we will work with.  */
  stmts[stmt_num - 1] = stmt;
  for (i = stmt_num - 2; i >= 0; i--)
    stmts[i] = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmts[i+1]));

  for (i = 0; i < stmt_num; i++)
    {
      tree op1, op2;

      /* Determine whether we should use results of
	 already handled statements or not.  */
      if (ready_stmts_end == 0
	  && (i - stmt_index >= width || op_index < 1))
	ready_stmts_end = i;

      /* Now we choose operands for the next statement.  A nonzero
	 value of ready_stmts_end means that we should use
	 the result of already generated statements as new operand.  */
      if (ready_stmts_end > 0)
	{
	  op1 = gimple_assign_lhs (stmts[stmt_index++]);
	  if (ready_stmts_end > stmt_index)
	    op2 = gimple_assign_lhs (stmts[stmt_index++]);
	  else if (op_index >= 0)
	    op2 = ops[op_index--]->op;
	  else
	    {
	      gcc_assert (stmt_index < i);
	      op2 = gimple_assign_lhs (stmts[stmt_index++]);
	    }

	  if (stmt_index >= ready_stmts_end)
	    ready_stmts_end = 0;
	}
      else
	{
	  if (op_index > 1)
	    swap_ops_for_binary_stmt (ops, op_index - 2, NULL);
	  op2 = ops[op_index--]->op;
	  op1 = ops[op_index--]->op;
	}

      /* If we emit the last statement then we should put
	 operands into the last statement.  It will also
	 close the expression.  */
      if (op_index < 0 && stmt_index == i)
	i = stmt_num - 1;

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Transforming ");
	  print_gimple_stmt (dump_file, stmts[i], 0, 0);
	}

      /* We keep original statement only for the last one.  All
	 others are recreated.  */
      if (i == stmt_num - 1)
	{
	  gimple_assign_set_rhs1 (stmts[i], op1);
	  gimple_assign_set_rhs2 (stmts[i], op2);
	  update_stmt (stmts[i]);
	}
      else
	stmts[i] = build_and_add_sum (TREE_TYPE (last_rhs1), op1, op2, opcode);

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, " into ");
	  print_gimple_stmt (dump_file, stmts[i], 0, 0);
	}
    }

  remove_visited_stmt_chain (last_rhs1);
}
/* Transform STMT, which is really (A + B) + (C + D) into the left
   linear form, ((A+B)+C)+D.
   Recurse on D if necessary.  */
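/* For instance (illustrative SSA names only): starting from
     t1 = a + b;  t2 = c + d;  t3 = t1 + t2;
   a new statement t4 = t1 + d is emitted and t3 is rewritten into
   t3 = t4 + c, i.e. the tree becomes the left-linear form
   ((a + b) + d) + c.  */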
static void
linearize_expr (gimple *stmt)
{
  gimple_stmt_iterator gsi;
  gimple *binlhs = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
  gimple *binrhs = SSA_NAME_DEF_STMT (gimple_assign_rhs2 (stmt));
  gimple *oldbinrhs = binrhs;
  enum tree_code rhscode = gimple_assign_rhs_code (stmt);
  gimple *newbinrhs = NULL;
  struct loop *loop = loop_containing_stmt (stmt);
  tree lhs = gimple_assign_lhs (stmt);

  gcc_assert (is_reassociable_op (binlhs, rhscode, loop)
	      && is_reassociable_op (binrhs, rhscode, loop));

  gsi = gsi_for_stmt (stmt);

  gimple_assign_set_rhs2 (stmt, gimple_assign_rhs1 (binrhs));
  binrhs = gimple_build_assign (make_ssa_name (TREE_TYPE (lhs)),
				gimple_assign_rhs_code (binrhs),
				gimple_assign_lhs (binlhs),
				gimple_assign_rhs2 (binrhs));
  gimple_assign_set_rhs1 (stmt, gimple_assign_lhs (binrhs));
  gsi_insert_before (&gsi, binrhs, GSI_SAME_STMT);
  gimple_set_uid (binrhs, gimple_uid (stmt));

  if (TREE_CODE (gimple_assign_rhs2 (stmt)) == SSA_NAME)
    newbinrhs = SSA_NAME_DEF_STMT (gimple_assign_rhs2 (stmt));

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Linearized: ");
      print_gimple_stmt (dump_file, stmt, 0, 0);
    }

  reassociate_stats.linearized++;
  update_stmt (stmt);

  gsi = gsi_for_stmt (oldbinrhs);
  reassoc_remove_stmt (&gsi);
  release_defs (oldbinrhs);

  gimple_set_visited (stmt, true);
  gimple_set_visited (binlhs, true);
  gimple_set_visited (binrhs, true);

  /* Tail recurse on the new rhs if it still needs reassociation.  */
  if (newbinrhs && is_reassociable_op (newbinrhs, rhscode, loop))
    /* ??? This should probably be linearize_expr (newbinrhs) but I don't
       want to change the algorithm while converting to tuples.  */
    linearize_expr (stmt);
}
/* If LHS has a single immediate use that is a GIMPLE_ASSIGN statement, return
   it.  Otherwise, return NULL.  */
static gimple *
get_single_immediate_use (tree lhs)
{
  use_operand_p immuse;
  gimple *immusestmt;

  if (TREE_CODE (lhs) == SSA_NAME
      && single_imm_use (lhs, &immuse, &immusestmt)
      && is_gimple_assign (immusestmt))
    return immusestmt;

  return NULL;
}
/* Recursively negate the value of TONEGATE, and return the SSA_NAME
   representing the negated value.  Insertions of any necessary
   instructions go before GSI.
   This function is recursive in that, if you hand it "a_5" as the
   value to negate, and a_5 is defined by "a_5 = b_3 + b_4", it will
   transform b_3 + b_4 into a_5 = -b_3 + -b_4.  */
static tree
negate_value (tree tonegate, gimple_stmt_iterator *gsip)
{
  gimple *negatedefstmt = NULL;
  tree resultofnegate;
  gimple_stmt_iterator gsi;
  unsigned int uid;

  /* If we are trying to negate a name, defined by an add, negate the
     add operands instead.  */
  if (TREE_CODE (tonegate) == SSA_NAME)
    negatedefstmt = SSA_NAME_DEF_STMT (tonegate);
  if (TREE_CODE (tonegate) == SSA_NAME
      && is_gimple_assign (negatedefstmt)
      && TREE_CODE (gimple_assign_lhs (negatedefstmt)) == SSA_NAME
      && has_single_use (gimple_assign_lhs (negatedefstmt))
      && gimple_assign_rhs_code (negatedefstmt) == PLUS_EXPR)
    {
      tree rhs1 = gimple_assign_rhs1 (negatedefstmt);
      tree rhs2 = gimple_assign_rhs2 (negatedefstmt);
      tree lhs = gimple_assign_lhs (negatedefstmt);
      gimple *g;

      gsi = gsi_for_stmt (negatedefstmt);
      rhs1 = negate_value (rhs1, &gsi);

      gsi = gsi_for_stmt (negatedefstmt);
      rhs2 = negate_value (rhs2, &gsi);

      gsi = gsi_for_stmt (negatedefstmt);
      lhs = make_ssa_name (TREE_TYPE (lhs));
      gimple_set_visited (negatedefstmt, true);
      g = gimple_build_assign (lhs, PLUS_EXPR, rhs1, rhs2);
      gimple_set_uid (g, gimple_uid (negatedefstmt));
      gsi_insert_before (&gsi, g, GSI_SAME_STMT);
      return lhs;
    }

  tonegate = fold_build1 (NEGATE_EXPR, TREE_TYPE (tonegate), tonegate);
  resultofnegate = force_gimple_operand_gsi (gsip, tonegate, true,
					     NULL_TREE, true, GSI_SAME_STMT);
  gsi = *gsip;
  uid = gimple_uid (gsi_stmt (gsi));
  for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      gimple *stmt = gsi_stmt (gsi);
      if (gimple_uid (stmt) != 0)
	break;
      gimple_set_uid (stmt, uid);
    }
  return resultofnegate;
}
/* Return true if we should break up the subtract in STMT into an add
   with negate.  This is true when the subtract operands are really
   adds, or the subtract itself is used in an add expression.  In
   either case, breaking up the subtract into an add with negate
   exposes the adds to reassociation.  */
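/* An illustrative sketch, not part of the pass: at the source level the
   rewrite turns

     w = x - (y + z);

   into

     t = -(y + z);
     w = x + t;

   and negate_value then pushes the negate into the addition, so the sum
   x + -y + -z is exposed to reassociation as one linear chain.  */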
static bool
should_break_up_subtract (gimple *stmt)
{
  tree lhs = gimple_assign_lhs (stmt);
  tree binlhs = gimple_assign_rhs1 (stmt);
  tree binrhs = gimple_assign_rhs2 (stmt);
  gimple *immusestmt;
  struct loop *loop = loop_containing_stmt (stmt);

  if (TREE_CODE (binlhs) == SSA_NAME
      && is_reassociable_op (SSA_NAME_DEF_STMT (binlhs), PLUS_EXPR, loop))
    return true;

  if (TREE_CODE (binrhs) == SSA_NAME
      && is_reassociable_op (SSA_NAME_DEF_STMT (binrhs), PLUS_EXPR, loop))
    return true;

  if (TREE_CODE (lhs) == SSA_NAME
      && (immusestmt = get_single_immediate_use (lhs))
      && is_gimple_assign (immusestmt)
      && (gimple_assign_rhs_code (immusestmt) == PLUS_EXPR
	  || gimple_assign_rhs_code (immusestmt) == MULT_EXPR))
    return true;
  return false;
}
/* Transform STMT from A - B into A + -B.  */

static void
break_up_subtract (gimple *stmt, gimple_stmt_iterator *gsip)
{
  tree rhs1 = gimple_assign_rhs1 (stmt);
  tree rhs2 = gimple_assign_rhs2 (stmt);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Breaking up subtract ");
      print_gimple_stmt (dump_file, stmt, 0, 0);
    }

  rhs2 = negate_value (rhs2, gsip);
  gimple_assign_set_rhs_with_ops (gsip, PLUS_EXPR, rhs1, rhs2);
  update_stmt (stmt);
}
/* Determine whether STMT is a builtin call that raises an SSA name
   to an integer power and has only one use.  If so, and this is early
   reassociation and unsafe math optimizations are permitted, place
   the SSA name in *BASE and the exponent in *EXPONENT, and return TRUE.
   If any of these conditions does not hold, return FALSE.  */
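/* For instance (illustrative only): for a call
     t_2 = __builtin_pow (x_1, 3.0);
   where t_2 has a single use, *BASE becomes x_1 and *EXPONENT becomes 3,
   while pow (x_1, 2.5) is rejected because 2.5 is not exactly an
   integer.  */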
static bool
acceptable_pow_call (gimple *stmt, tree *base, HOST_WIDE_INT *exponent)
{
  tree arg1;
  REAL_VALUE_TYPE c, cint;

  if (!reassoc_insert_powi_p
      || !flag_unsafe_math_optimizations
      || !is_gimple_call (stmt)
      || !has_single_use (gimple_call_lhs (stmt)))
    return false;

  switch (gimple_call_combined_fn (stmt))
    {
    CASE_CFN_POW:
      if (flag_errno_math)
	return false;

      *base = gimple_call_arg (stmt, 0);
      arg1 = gimple_call_arg (stmt, 1);

      if (TREE_CODE (arg1) != REAL_CST)
	return false;

      c = TREE_REAL_CST (arg1);

      if (REAL_EXP (&c) > HOST_BITS_PER_WIDE_INT)
	return false;

      *exponent = real_to_integer (&c);
      real_from_integer (&cint, VOIDmode, *exponent, SIGNED);
      if (!real_identical (&c, &cint))
	return false;

      break;

    CASE_CFN_POWI:
      *base = gimple_call_arg (stmt, 0);
      arg1 = gimple_call_arg (stmt, 1);

      if (!tree_fits_shwi_p (arg1))
	return false;

      *exponent = tree_to_shwi (arg1);
      break;

    default:
      return false;
    }

  /* Expanding negative exponents is generally unproductive, so we don't
     complicate matters with those.  Exponents of zero and one should
     have been handled by expression folding.  */
  if (*exponent < 2 || TREE_CODE (*base) != SSA_NAME)
    return false;

  return true;
}
/* Recursively linearize a binary expression that is the RHS of STMT.
   Place the operands of the expression tree in the vector named OPS.  */
static void
linearize_expr_tree (vec<operand_entry *> *ops, gimple *stmt,
		     bool is_associative, bool set_visited)
{
  tree binlhs = gimple_assign_rhs1 (stmt);
  tree binrhs = gimple_assign_rhs2 (stmt);
  gimple *binlhsdef = NULL, *binrhsdef = NULL;
  bool binlhsisreassoc = false;
  bool binrhsisreassoc = false;
  enum tree_code rhscode = gimple_assign_rhs_code (stmt);
  struct loop *loop = loop_containing_stmt (stmt);
  tree base = NULL_TREE;
  HOST_WIDE_INT exponent = 0;

  if (set_visited)
    gimple_set_visited (stmt, true);

  if (TREE_CODE (binlhs) == SSA_NAME)
    {
      binlhsdef = SSA_NAME_DEF_STMT (binlhs);
      binlhsisreassoc = (is_reassociable_op (binlhsdef, rhscode, loop)
			 && !stmt_could_throw_p (binlhsdef));
    }

  if (TREE_CODE (binrhs) == SSA_NAME)
    {
      binrhsdef = SSA_NAME_DEF_STMT (binrhs);
      binrhsisreassoc = (is_reassociable_op (binrhsdef, rhscode, loop)
			 && !stmt_could_throw_p (binrhsdef));
    }

  /* If the LHS is not reassociable, but the RHS is, we need to swap
     them.  If neither is reassociable, there is nothing we can do, so
     just put them in the ops vector.  If the LHS is reassociable,
     linearize it.  If both are reassociable, then linearize the RHS
     and the LHS.  */
  if (!binlhsisreassoc)
    {
      /* If this is not an associative operation like division, give up.  */
      if (!is_associative)
	{
	  add_to_ops_vec (ops, binrhs);
	  return;
	}

      if (!binrhsisreassoc)
	{
	  if (rhscode == MULT_EXPR
	      && TREE_CODE (binrhs) == SSA_NAME
	      && acceptable_pow_call (binrhsdef, &base, &exponent))
	    {
	      add_repeat_to_ops_vec (ops, base, exponent);
	      gimple_set_visited (binrhsdef, true);
	    }
	  else
	    add_to_ops_vec (ops, binrhs);

	  if (rhscode == MULT_EXPR
	      && TREE_CODE (binlhs) == SSA_NAME
	      && acceptable_pow_call (binlhsdef, &base, &exponent))
	    {
	      add_repeat_to_ops_vec (ops, base, exponent);
	      gimple_set_visited (binlhsdef, true);
	    }
	  else
	    add_to_ops_vec (ops, binlhs);

	  return;
	}

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "swapping operands of ");
	  print_gimple_stmt (dump_file, stmt, 0, 0);
	}

      swap_ssa_operands (stmt,
			 gimple_assign_rhs1_ptr (stmt),
			 gimple_assign_rhs2_ptr (stmt));

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, " is now ");
	  print_gimple_stmt (dump_file, stmt, 0, 0);
	}

      /* We want to make it so the lhs is always the reassociative op,
	 so swap.  */
      std::swap (binlhs, binrhs);
    }
  else if (binrhsisreassoc)
    {
      linearize_expr (stmt);
      binlhs = gimple_assign_rhs1 (stmt);
      binrhs = gimple_assign_rhs2 (stmt);
    }

  gcc_assert (TREE_CODE (binrhs) != SSA_NAME
	      || !is_reassociable_op (SSA_NAME_DEF_STMT (binrhs),
				      rhscode, loop));
  linearize_expr_tree (ops, SSA_NAME_DEF_STMT (binlhs),
		       is_associative, set_visited);

  if (rhscode == MULT_EXPR
      && TREE_CODE (binrhs) == SSA_NAME
      && acceptable_pow_call (SSA_NAME_DEF_STMT (binrhs), &base, &exponent))
    {
      add_repeat_to_ops_vec (ops, base, exponent);
      gimple_set_visited (SSA_NAME_DEF_STMT (binrhs), true);
    }
  else
    add_to_ops_vec (ops, binrhs);
}
/* Repropagate the negates back into subtracts, since no other pass
   currently does it.  */
4120 FOR_EACH_VEC_ELT (plus_negates
, i
, negate
)
4122 gimple
*user
= get_single_immediate_use (negate
);
4124 if (!user
|| !is_gimple_assign (user
))
4127 /* The negate operand can be either operand of a PLUS_EXPR
4128 (it can be the LHS if the RHS is a constant for example).
4130 Force the negate operand to the RHS of the PLUS_EXPR, then
4131 transform the PLUS_EXPR into a MINUS_EXPR. */
4132 if (gimple_assign_rhs_code (user
) == PLUS_EXPR
)
4134 /* If the negated operand appears on the LHS of the
4135 PLUS_EXPR, exchange the operands of the PLUS_EXPR
4136 to force the negated operand to the RHS of the PLUS_EXPR. */
4137 if (gimple_assign_rhs1 (user
) == negate
)
4139 swap_ssa_operands (user
,
4140 gimple_assign_rhs1_ptr (user
),
4141 gimple_assign_rhs2_ptr (user
));
4144 /* Now transform the PLUS_EXPR into a MINUS_EXPR and replace
4145 the RHS of the PLUS_EXPR with the operand of the NEGATE_EXPR. */
4146 if (gimple_assign_rhs2 (user
) == negate
)
4148 tree rhs1
= gimple_assign_rhs1 (user
);
4149 tree rhs2
= get_unary_op (negate
, NEGATE_EXPR
);
4150 gimple_stmt_iterator gsi
= gsi_for_stmt (user
);
4151 gimple_assign_set_rhs_with_ops (&gsi
, MINUS_EXPR
, rhs1
, rhs2
);
4155 else if (gimple_assign_rhs_code (user
) == MINUS_EXPR
)
4157 if (gimple_assign_rhs1 (user
) == negate
)
4162 which we transform into
4165 This pushes down the negate which we possibly can merge
4166 into some other operation, hence insert it into the
4167 plus_negates vector. */
4168 gimple
*feed
= SSA_NAME_DEF_STMT (negate
);
4169 tree a
= gimple_assign_rhs1 (feed
);
4170 tree b
= gimple_assign_rhs2 (user
);
4171 gimple_stmt_iterator gsi
= gsi_for_stmt (feed
);
4172 gimple_stmt_iterator gsi2
= gsi_for_stmt (user
);
4173 tree x
= make_ssa_name (TREE_TYPE (gimple_assign_lhs (feed
)));
4174 gimple
*g
= gimple_build_assign (x
, PLUS_EXPR
, a
, b
);
4175 gsi_insert_before (&gsi2
, g
, GSI_SAME_STMT
);
4176 gimple_assign_set_rhs_with_ops (&gsi2
, NEGATE_EXPR
, x
);
4177 user
= gsi_stmt (gsi2
);
4179 reassoc_remove_stmt (&gsi
);
4180 release_defs (feed
);
4181 plus_negates
.safe_push (gimple_assign_lhs (user
));
4185 /* Transform "x = -a; y = b - x" into "y = b + a", getting
4186 rid of one operation. */
4187 gimple
*feed
= SSA_NAME_DEF_STMT (negate
);
4188 tree a
= gimple_assign_rhs1 (feed
);
4189 tree rhs1
= gimple_assign_rhs1 (user
);
4190 gimple_stmt_iterator gsi
= gsi_for_stmt (user
);
4191 gimple_assign_set_rhs_with_ops (&gsi
, PLUS_EXPR
, rhs1
, a
);
4192 update_stmt (gsi_stmt (gsi
));
/* Returns true if OP is of a type for which we can do reassociation.
   That is for integral or non-saturating fixed-point types, and for
   floating point type when associative-math is enabled.  */
static bool
can_reassociate_p (tree op)
{
  tree type = TREE_TYPE (op);
  if ((INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_WRAPS (type))
      || NON_SAT_FIXED_POINT_TYPE_P (type)
      || (flag_associative_math && FLOAT_TYPE_P (type)))
    return true;
  return false;
}
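/* An illustrative standalone check, not part of the pass: the floating
   point restriction matters because FP addition is not associative.
   With IEEE doubles, (1e16 + -1e16) + 1.0 evaluates to 1.0 while
   1e16 + (-1e16 + 1.0) evaluates to 0.0, because 1.0 is absorbed when
   added to -1e16.  The hypothetical function below returns 0:

     static int
     fp_assoc_example (void)
     {
       double a = 1e16, b = -1e16, c = 1.0;
       return (a + b) + c == a + (b + c);
     }
*/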
/* Break up subtract operations in block BB.

   We do this top down because we don't know whether the subtract is
   part of a possible chain of reassociation except at the top.

   IE given
   d = f + g
   c = a + e
   b = c - d
   q = b - r
   k = t - q

   we want to break up k = t - q, but we won't until we've transformed q
   = b - r, which won't be broken up until we transform b = c - d.

   En passant, clear the GIMPLE visited flag on every statement
   and set UIDs within each basic block.  */
static void
break_up_subtract_bb (basic_block bb)
{
  gimple_stmt_iterator gsi;
  basic_block son;
  unsigned int uid = 1;

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple *stmt = gsi_stmt (gsi);
      gimple_set_visited (stmt, false);
      gimple_set_uid (stmt, uid++);

      if (!is_gimple_assign (stmt)
	  || !can_reassociate_p (gimple_assign_lhs (stmt)))
	continue;

      /* Look for simple gimple subtract operations.  */
      if (gimple_assign_rhs_code (stmt) == MINUS_EXPR)
	{
	  if (!can_reassociate_p (gimple_assign_rhs1 (stmt))
	      || !can_reassociate_p (gimple_assign_rhs2 (stmt)))
	    continue;

	  /* Check for a subtract used only in an addition.  If this
	     is the case, transform it into add of a negate for better
	     reassociation.  IE transform C = A-B into C = A + -B if C
	     is only used in an addition.  */
	  if (should_break_up_subtract (stmt))
	    break_up_subtract (stmt, &gsi);
	}
      else if (gimple_assign_rhs_code (stmt) == NEGATE_EXPR
	       && can_reassociate_p (gimple_assign_rhs1 (stmt)))
	plus_negates.safe_push (gimple_assign_lhs (stmt));
    }
  for (son = first_dom_son (CDI_DOMINATORS, bb);
       son;
       son = next_dom_son (CDI_DOMINATORS, son))
    break_up_subtract_bb (son);
}
/* Used for repeated factor analysis.  */
struct repeat_factor
{
  /* An SSA name that occurs in a multiply chain.  */
  tree factor;

  /* Cached rank of the factor.  */
  unsigned rank;

  /* Number of occurrences of the factor in the chain.  */
  HOST_WIDE_INT count;

  /* An SSA name representing the product of this factor and
     all factors appearing later in the repeated factor vector.  */
  tree repr;
};


static vec<repeat_factor> repeat_factor_vec;
/* Used for sorting the repeat factor vector.  Sort primarily by
   ascending occurrence count, secondarily by descending rank.  */
static int
compare_repeat_factors (const void *x1, const void *x2)
{
  const repeat_factor *rf1 = (const repeat_factor *) x1;
  const repeat_factor *rf2 = (const repeat_factor *) x2;

  if (rf1->count != rf2->count)
    return rf1->count - rf2->count;

  return rf2->rank - rf1->rank;
}
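/* For instance, for x * x * y * y * y * z * z * z * z the vector sorts
   to { x (count 2), y (count 3), z (count 4) }: ascending occurrence
   count, with ties broken by descending rank.  */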
/* Look for repeated operands in OPS in the multiply tree rooted at
   STMT.  Replace them with an optimal sequence of multiplies and powi
   builtin calls, and remove the used operands from OPS.  Return an
   SSA name representing the value of the replacement sequence.  */
4313 attempt_builtin_powi (gimple
*stmt
, vec
<operand_entry
*> *ops
)
4315 unsigned i
, j
, vec_len
;
4318 repeat_factor
*rf1
, *rf2
;
4319 repeat_factor rfnew
;
4320 tree result
= NULL_TREE
;
4321 tree target_ssa
, iter_result
;
4322 tree type
= TREE_TYPE (gimple_get_lhs (stmt
));
4323 tree powi_fndecl
= mathfn_built_in (type
, BUILT_IN_POWI
);
4324 gimple_stmt_iterator gsi
= gsi_for_stmt (stmt
);
4325 gimple
*mul_stmt
, *pow_stmt
;
4327 /* Nothing to do if BUILT_IN_POWI doesn't exist for this type and
4332 /* Allocate the repeated factor vector. */
4333 repeat_factor_vec
.create (10);

  /* Scan the OPS vector for all SSA names in the product and build
     up a vector of occurrence counts for each factor.  */
  FOR_EACH_VEC_ELT (*ops, i, oe)
    {
      if (TREE_CODE (oe->op) == SSA_NAME)
        {
          FOR_EACH_VEC_ELT (repeat_factor_vec, j, rf1)
            {
              if (rf1->factor == oe->op)
                {
                  rf1->count += oe->count;
                  break;
                }
            }

          if (j >= repeat_factor_vec.length ())
            {
              rfnew.factor = oe->op;
              rfnew.rank = oe->rank;
              rfnew.count = oe->count;
              rfnew.repr = NULL_TREE;
              repeat_factor_vec.safe_push (rfnew);
            }
        }
    }

  /* Sort the repeated factor vector by (a) increasing occurrence count,
     and (b) decreasing rank.  */
  repeat_factor_vec.qsort (compare_repeat_factors);

  /* It is generally best to combine as many base factors as possible
     into a product before applying __builtin_powi to the result.
     However, the sort order chosen for the repeated factor vector
     allows us to cache partial results for the product of the base
     factors for subsequent use.  When we already have a cached partial
     result from a previous iteration, it is best to make use of it
     before looking for another __builtin_pow opportunity.

     As an example, consider x * x * y * y * y * z * z * z * z.
     We want to first compose the product x * y * z, raise it to the
     second power, then multiply this by y * z, and finally multiply
     by z.  This can be done in 5 multiplies provided we cache y * z
     for use in both expressions:

       t1 = y * z
       t2 = t1 * x
       t3 = t2 * t2
       t4 = t1 * t3
       result = t4 * z

     If we instead ignored the cached y * z and first multiplied by
     the __builtin_pow opportunity z * z, we would get the inferior:

       t1 = y * z
       t2 = t1 * x
       t3 = t2 * t2
       t4 = z * z
       t5 = t3 * t4
       result = t5 * y  */

  vec_len = repeat_factor_vec.length ();
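
  /* For the example above the sorted vector is x (2), y (3), z (4):
     the first iteration forms the full product x * y * z and raises
     it to the power 2, caching y * z and z for later iterations.  */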

  /* Repeatedly look for opportunities to create a builtin_powi call.  */
  while (true)
    {
      HOST_WIDE_INT power;

      /* First look for the largest cached product of factors from
         preceding iterations.  If found, create a builtin_powi for
         it if the minimum occurrence count for its factors is at
         least 2, or just use this cached product as our next
         multiplicand if the minimum occurrence count is 1.  */
      FOR_EACH_VEC_ELT (repeat_factor_vec, j, rf1)
        {
          if (rf1->repr && rf1->count > 0)
            break;
        }

      if (j < vec_len)
        {
          power = rf1->count;

          if (power == 1)
            {
              iter_result = rf1->repr;

              if (dump_file && (dump_flags & TDF_DETAILS))
                {
                  unsigned elt;
                  repeat_factor *rf;
                  fputs ("Multiplying by cached product ", dump_file);
                  for (elt = j; elt < vec_len; elt++)
                    {
                      rf = &repeat_factor_vec[elt];
                      print_generic_expr (dump_file, rf->factor, 0);
                      if (elt < vec_len - 1)
                        fputs (" * ", dump_file);
                    }
                  fputs ("\n", dump_file);
                }
            }
          else
            {
              iter_result = make_temp_ssa_name (type, NULL, "reassocpow");
              pow_stmt = gimple_build_call (powi_fndecl, 2, rf1->repr,
                                            build_int_cst (integer_type_node,
                                                           power));
              gimple_call_set_lhs (pow_stmt, iter_result);
              gimple_set_location (pow_stmt, gimple_location (stmt));
              gimple_set_uid (pow_stmt, gimple_uid (stmt));
              gsi_insert_before (&gsi, pow_stmt, GSI_SAME_STMT);

              if (dump_file && (dump_flags & TDF_DETAILS))
                {
                  unsigned elt;
                  repeat_factor *rf;
                  fputs ("Building __builtin_pow call for cached product (",
                         dump_file);
                  for (elt = j; elt < vec_len; elt++)
                    {
                      rf = &repeat_factor_vec[elt];
                      print_generic_expr (dump_file, rf->factor, 0);
                      if (elt < vec_len - 1)
                        fputs (" * ", dump_file);
                    }
                  fprintf (dump_file, ")^" HOST_WIDE_INT_PRINT_DEC "\n",
                           power);
                }
            }
        }
      else
        {
          /* Otherwise, find the first factor in the repeated factor
             vector whose occurrence count is at least 2.  If no such
             factor exists, there are no builtin_powi opportunities
             remaining.  */
          FOR_EACH_VEC_ELT (repeat_factor_vec, j, rf1)
            {
              if (rf1->count >= 2)
                break;
            }

          if (j >= vec_len)
            break;

          power = rf1->count;

          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              unsigned elt;
              repeat_factor *rf;
              fputs ("Building __builtin_pow call for (", dump_file);
              for (elt = j; elt < vec_len; elt++)
                {
                  rf = &repeat_factor_vec[elt];
                  print_generic_expr (dump_file, rf->factor, 0);
                  if (elt < vec_len - 1)
                    fputs (" * ", dump_file);
                }
              fprintf (dump_file, ")^" HOST_WIDE_INT_PRINT_DEC "\n", power);
            }

          reassociate_stats.pows_created++;

          /* Visit each element of the vector in reverse order (so that
             high-occurrence elements are visited first, and within the
             same occurrence count, lower-ranked elements are visited
             first).  Form a linear product of all elements in this order
             whose occurrence count is at least that of element J.
             Record the SSA name representing the product of each element
             with all subsequent elements in the vector.  */
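          /* Illustrative example (hypothetical values): for the sorted
             vector x (2), y (3), z (4) with J at x, this leaves
             repr (z) = z, repr (y) = y * z and repr (x) = x * y * z,
             ready for reuse by later iterations.  */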
          if (j == vec_len - 1)
            rf1->repr = rf1->factor;
          else
            {
              for (ii = vec_len - 2; ii >= (int) j; ii--)
                {
                  tree op1, op2;

                  rf1 = &repeat_factor_vec[ii];
                  rf2 = &repeat_factor_vec[ii + 1];

                  /* Init the last factor's representative to be itself.  */
                  if (!rf2->repr)
                    rf2->repr = rf2->factor;

                  op1 = rf1->factor;
                  op2 = rf2->repr;

                  target_ssa = make_temp_ssa_name (type, NULL, "reassocpow");
                  mul_stmt = gimple_build_assign (target_ssa, MULT_EXPR,
                                                  op1, op2);
                  gimple_set_location (mul_stmt, gimple_location (stmt));
                  gimple_set_uid (mul_stmt, gimple_uid (stmt));
                  gsi_insert_before (&gsi, mul_stmt, GSI_SAME_STMT);
                  rf1->repr = target_ssa;

                  /* Don't reprocess the multiply we just introduced.  */
                  gimple_set_visited (mul_stmt, true);
                }
            }

          /* Form a call to __builtin_powi for the maximum product
             just formed, raised to the power obtained earlier.  */
          rf1 = &repeat_factor_vec[j];
          iter_result = make_temp_ssa_name (type, NULL, "reassocpow");
          pow_stmt = gimple_build_call (powi_fndecl, 2, rf1->repr,
                                        build_int_cst (integer_type_node,
                                                       power));
          gimple_call_set_lhs (pow_stmt, iter_result);
          gimple_set_location (pow_stmt, gimple_location (stmt));
          gimple_set_uid (pow_stmt, gimple_uid (stmt));
          gsi_insert_before (&gsi, pow_stmt, GSI_SAME_STMT);
        }

      /* If we previously formed at least one other builtin_powi call,
         form the product of this one and those others.  */
      if (result)
        {
          tree new_result = make_temp_ssa_name (type, NULL, "reassocpow");
          mul_stmt = gimple_build_assign (new_result, MULT_EXPR,
                                          result, iter_result);
          gimple_set_location (mul_stmt, gimple_location (stmt));
          gimple_set_uid (mul_stmt, gimple_uid (stmt));
          gsi_insert_before (&gsi, mul_stmt, GSI_SAME_STMT);
          gimple_set_visited (mul_stmt, true);
          result = new_result;
        }
      else
        result = iter_result;
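
      /* RESULT now holds the product of every powi call and cached
         multiplicand formed so far; each iteration of the loop folds
         one more factor group into it.  */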

      /* Decrement the occurrence count of each element in the product
         by the count found above, and remove this many copies of each
         factor from OPS.  */
      for (i = j; i < vec_len; i++)
        {
          unsigned k = power;
          unsigned n;

          rf1 = &repeat_factor_vec[i];
          rf1->count -= power;

          FOR_EACH_VEC_ELT_REVERSE (*ops, n, oe)
            {
              if (oe->op == rf1->factor)
                {
                  if (oe->count <= k)
                    {
                      ops->ordered_remove (n);
                      k -= oe->count;

                      if (k == 0)
                        break;
                    }
                  else
                    {
                      oe->count -= k;
                      break;
                    }
                }
            }
        }
    }

  /* At this point all elements in the repeated factor vector have a
     remaining occurrence count of 0 or 1, and those with a count of 1
     don't have cached representatives.  Re-sort the ops vector and
     clean up.  */
  ops->qsort (sort_by_operand_rank);
  repeat_factor_vec.release ();

  /* Return the final product computed herein.  Note that there may
     still be some elements with single occurrence count left in OPS;
     those will be handled by the normal reassociation logic.  */
  return result;
}
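
/* Illustrative trace of attempt_builtin_powi (hypothetical SSA names)
   for the running example x * x * y * y * y * z * z * z * z:

     t1 = y * z
     t2 = t1 * x
     p  = __builtin_powi (t2, 2)
     r1 = p * t1       // reuse of the cached y * z
     r2 = r1 * z       // reuse of the cached z

   Every copy of x, y and z is removed from OPS, so the caller rewrites
   the original statement as a copy of the returned result r2.  */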

/* Attempt to optimize
   CST1 * copysign (CST2, y) -> copysign (CST1 * CST2, y) if CST1 > 0, or
   CST1 * copysign (CST2, y) -> -copysign (CST1 * CST2, y) if CST1 < 0.  */

static void
attempt_builtin_copysign (vec<operand_entry *> *ops)
{
  operand_entry *oe;
  unsigned int i;
  unsigned int length = ops->length ();
  tree cst = ops->last ()->op;

  if (length == 1 || TREE_CODE (cst) != REAL_CST)
    return;

  FOR_EACH_VEC_ELT (*ops, i, oe)
    {
      if (TREE_CODE (oe->op) == SSA_NAME
          && has_single_use (oe->op))
        {
          gimple *def_stmt = SSA_NAME_DEF_STMT (oe->op);
          if (gcall *old_call = dyn_cast <gcall *> (def_stmt))
            {
              tree arg0, arg1;
              switch (gimple_call_combined_fn (old_call))
                {
                CASE_CFN_COPYSIGN:
                  arg0 = gimple_call_arg (old_call, 0);
                  arg1 = gimple_call_arg (old_call, 1);
                  /* The first argument of copysign must be a constant,
                     otherwise there's nothing to do.  */
                  if (TREE_CODE (arg0) == REAL_CST)
                    {
                      tree type = TREE_TYPE (arg0);
                      tree mul = const_binop (MULT_EXPR, type, cst, arg0);
                      /* If we couldn't fold to a single constant, skip it.
                         That happens e.g. for inexact multiplication when
                         -frounding-math.  */
                      if (mul == NULL_TREE)
                        break;
                      /* Instead of adjusting OLD_CALL, let's build a new
                         call to not leak the LHS and prevent keeping bogus
                         debug statements.  DCE will clean up the old call.  */
                      gcall *new_call;
                      if (gimple_call_internal_p (old_call))
                        new_call = gimple_build_call_internal
                          (IFN_COPYSIGN, 2, mul, arg1);
                      else
                        new_call = gimple_build_call
                          (gimple_call_fndecl (old_call), 2, mul, arg1);
                      tree lhs = make_ssa_name (type);
                      gimple_call_set_lhs (new_call, lhs);
                      gimple_set_location (new_call,
                                           gimple_location (old_call));
                      insert_stmt_after (new_call, old_call);
                      /* We've used the constant, get rid of it.  */
                      ops->pop ();

                      bool cst1_neg = real_isneg (TREE_REAL_CST_PTR (cst));
                      /* Handle the CST1 < 0 case by negating the result.  */
                      if (cst1_neg)
                        {
                          tree negrhs = make_ssa_name (TREE_TYPE (lhs));
                          gimple *negate_stmt
                            = gimple_build_assign (negrhs, NEGATE_EXPR, lhs);
                          insert_stmt_after (negate_stmt, new_call);
                          oe->op = negrhs;
                        }
                      else
                        oe->op = lhs;

                      if (dump_file && (dump_flags & TDF_DETAILS))
                        {
                          fprintf (dump_file, "Optimizing copysign: ");
                          print_generic_expr (dump_file, cst, 0);
                          fprintf (dump_file, " * COPYSIGN (");
                          print_generic_expr (dump_file, arg0, 0);
                          fprintf (dump_file, ", ");
                          print_generic_expr (dump_file, arg1, 0);
                          fprintf (dump_file, ") into %sCOPYSIGN (",
                                   cst1_neg ? "-" : "");
                          print_generic_expr (dump_file, mul, 0);
                          fprintf (dump_file, ", ");
                          print_generic_expr (dump_file, arg1, 0);
                          fprintf (dump_file, ")\n");
                        }
                      return;
                    }
                  break;
                default:
                  break;
                }
            }
        }
    }
}
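
/* Illustrative example for attempt_builtin_copysign (hypothetical
   constants): 2.0 * copysign (3.0, y_1) folds the constant product to
   6.0 and becomes copysign (6.0, y_1); a negative CST1 additionally
   negates the result.  */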

/* Transform STMT at *GSI into a copy by replacing its rhs with NEW_RHS.  */

static void
transform_stmt_to_copy (gimple_stmt_iterator *gsi, gimple *stmt, tree new_rhs)
{
  tree rhs1;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Transforming ");
      print_gimple_stmt (dump_file, stmt, 0, 0);
    }

  rhs1 = gimple_assign_rhs1 (stmt);
  gimple_assign_set_rhs_from_tree (gsi, new_rhs);
  update_stmt (gsi_stmt (*gsi));
  remove_visited_stmt_chain (rhs1);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, " into ");
      print_gimple_stmt (dump_file, stmt, 0, 0);
    }
}

/* Transform STMT at *GSI into a multiply of RHS1 and RHS2.  */

static void
transform_stmt_to_multiply (gimple_stmt_iterator *gsi, gimple *stmt,
                            tree rhs1, tree rhs2)
{
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Transforming ");
      print_gimple_stmt (dump_file, stmt, 0, 0);
    }

  gimple_assign_set_rhs_with_ops (gsi, MULT_EXPR, rhs1, rhs2);
  update_stmt (gsi_stmt (*gsi));
  remove_visited_stmt_chain (rhs1);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, " into ");
      print_gimple_stmt (dump_file, stmt, 0, 0);
    }
}

/* Reassociate expressions in basic block BB and its post-dominator as
   children.  */

static void
reassociate_bb (basic_block bb)
{
  gimple_stmt_iterator gsi;
  basic_block son;
  gimple *stmt = last_stmt (bb);

  if (stmt && !gimple_visited_p (stmt))
    maybe_optimize_range_tests (stmt);

  for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      stmt = gsi_stmt (gsi);

      if (is_gimple_assign (stmt)
          && !stmt_could_throw_p (stmt))
        {
          tree lhs, rhs1, rhs2;
          enum tree_code rhs_code = gimple_assign_rhs_code (stmt);

          /* If this is not a gimple binary expression, there is
             nothing for us to do with it.  */
          if (get_gimple_rhs_class (rhs_code) != GIMPLE_BINARY_RHS)
            continue;

          /* If this was part of an already processed statement,
             we don't need to touch it again.  */
          if (gimple_visited_p (stmt))
            {
              /* This statement might have become dead because of previous
                 reassociations.  */
              if (has_zero_uses (gimple_get_lhs (stmt)))
                {
                  reassoc_remove_stmt (&gsi);
                  release_defs (stmt);
                  /* We might end up removing the last stmt above which
                     places the iterator to the end of the sequence.
                     Reset it to the last stmt in this case which might
                     be the end of the sequence as well if we removed
                     the last statement of the sequence.  In which case
                     we need to bail out.  */
                  if (gsi_end_p (gsi))
                    {
                      gsi = gsi_last_bb (bb);
                      if (gsi_end_p (gsi))
                        break;
                    }
                }
              continue;
            }

          lhs = gimple_assign_lhs (stmt);
          rhs1 = gimple_assign_rhs1 (stmt);
          rhs2 = gimple_assign_rhs2 (stmt);

          /* For non-bit or min/max operations we can't associate
             all types.  Verify that here.  */
          if (rhs_code != BIT_IOR_EXPR
              && rhs_code != BIT_AND_EXPR
              && rhs_code != BIT_XOR_EXPR
              && rhs_code != MIN_EXPR
              && rhs_code != MAX_EXPR
              && (!can_reassociate_p (lhs)
                  || !can_reassociate_p (rhs1)
                  || !can_reassociate_p (rhs2)))
            continue;

          if (associative_tree_code (rhs_code))
            {
              auto_vec<operand_entry *> ops;
              tree powi_result = NULL_TREE;

              /* There may be no immediate uses left by the time we
                 get here because we may have eliminated them all.  */
              if (TREE_CODE (lhs) == SSA_NAME && has_zero_uses (lhs))
                continue;

              gimple_set_visited (stmt, true);
              linearize_expr_tree (&ops, stmt, true, true);
              ops.qsort (sort_by_operand_rank);
              optimize_ops_list (rhs_code, &ops);
              if (undistribute_ops_list (rhs_code, &ops,
                                         loop_containing_stmt (stmt)))
                {
                  ops.qsort (sort_by_operand_rank);
                  optimize_ops_list (rhs_code, &ops);
                }

              if (rhs_code == BIT_IOR_EXPR || rhs_code == BIT_AND_EXPR)
                optimize_range_tests (rhs_code, &ops);

              if (rhs_code == MULT_EXPR)
                attempt_builtin_copysign (&ops);

              if (reassoc_insert_powi_p
                  && rhs_code == MULT_EXPR
                  && flag_unsafe_math_optimizations)
                powi_result = attempt_builtin_powi (stmt, &ops);

              /* If the operand vector is now empty, all operands were
                 consumed by the __builtin_powi optimization.  */
              if (ops.length () == 0)
                transform_stmt_to_copy (&gsi, stmt, powi_result);
              else if (ops.length () == 1)
                {
                  tree last_op = ops.last ()->op;

                  if (powi_result)
                    transform_stmt_to_multiply (&gsi, stmt, last_op,
                                                powi_result);
                  else
                    transform_stmt_to_copy (&gsi, stmt, last_op);
                }
              else
                {
                  machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
                  int ops_num = ops.length ();
                  int width = get_reassociation_width (ops_num, rhs_code,
                                                       mode);
                  tree new_lhs = lhs;

                  if (dump_file && (dump_flags & TDF_DETAILS))
                    fprintf (dump_file,
                             "Width = %d was chosen for reassociation\n",
                             width);
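
                  /* For example (hypothetical operands), a serial chain
                     a + b + c + d rewritten with width 2 becomes
                     (a + b) + (c + d), exposing two independent adds to
                     the pipeline.  */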

                  if (width > 1
                      && ops.length () > 3)
                    rewrite_expr_tree_parallel (as_a <gassign *> (stmt),
                                                width, ops);
                  else
                    {
                      /* When there are three operands left, we want
                         to make sure the ones that get the double
                         binary op are chosen wisely.  */
                      int len = ops.length ();
                      if (len >= 3)
                        swap_ops_for_binary_stmt (ops, len - 3, stmt);

                      new_lhs = rewrite_expr_tree (stmt, 0, ops,
                                                   powi_result != NULL);
                    }

                  /* If we combined some repeated factors into a
                     __builtin_powi call, multiply that result by the
                     reassociated operands.  */
                  if (powi_result)
                    {
                      gimple *mul_stmt, *lhs_stmt = SSA_NAME_DEF_STMT (lhs);
                      tree type = TREE_TYPE (lhs);
                      tree target_ssa = make_temp_ssa_name (type, NULL,
                                                            "reassocpow");
                      gimple_set_lhs (lhs_stmt, target_ssa);
                      update_stmt (lhs_stmt);
                      if (lhs != new_lhs)
                        target_ssa = new_lhs;
                      mul_stmt = gimple_build_assign (lhs, MULT_EXPR,
                                                      powi_result, target_ssa);
                      gimple_set_location (mul_stmt, gimple_location (stmt));
                      gimple_set_uid (mul_stmt, gimple_uid (stmt));
                      gsi_insert_after (&gsi, mul_stmt, GSI_NEW_STMT);
                    }
                }
            }
        }
    }
  for (son = first_dom_son (CDI_POST_DOMINATORS, bb);
       son;
       son = next_dom_son (CDI_POST_DOMINATORS, son))
    reassociate_bb (son);
}

/* Add jumps around shifts for range tests turned into bit tests.
   For each SSA_NAME VAR we have code like:
   VAR = ...; // final stmt of range comparison
   // bit test here...;
   OTHERVAR = ...; // final stmt of the bit test sequence
   RES = VAR | OTHERVAR;
   Turn the above into:
   VAR = ...;
   if (VAR != 0)
     goto <l3>;
   else
     goto <l2>;
   <l2>:
   // bit test here...;
   OTHERVAR = ...;
   <l3>:
   # RES = PHI<1(l1), OTHERVAR(l2)>;  */

static void
branch_fixup (void)
{
  tree var;
  unsigned int i;

  FOR_EACH_VEC_ELT (reassoc_branch_fixups, i, var)
    {
      gimple *def_stmt = SSA_NAME_DEF_STMT (var);
      gimple *use_stmt;
      use_operand_p use;
      bool ok = single_imm_use (var, &use, &use_stmt);
      gcc_assert (ok
                  && is_gimple_assign (use_stmt)
                  && gimple_assign_rhs_code (use_stmt) == BIT_IOR_EXPR
                  && gimple_bb (def_stmt) == gimple_bb (use_stmt));

      basic_block cond_bb = gimple_bb (def_stmt);
      basic_block then_bb = split_block (cond_bb, def_stmt)->dest;
      basic_block merge_bb = split_block (then_bb, use_stmt)->dest;

      gimple_stmt_iterator gsi = gsi_for_stmt (def_stmt);
      gimple *g = gimple_build_cond (NE_EXPR, var,
                                     build_zero_cst (TREE_TYPE (var)),
                                     NULL_TREE, NULL_TREE);
      location_t loc = gimple_location (use_stmt);
      gimple_set_location (g, loc);
      gsi_insert_after (&gsi, g, GSI_NEW_STMT);

      edge etrue = make_edge (cond_bb, merge_bb, EDGE_TRUE_VALUE);
      etrue->probability = REG_BR_PROB_BASE / 2;
      etrue->count = cond_bb->count / 2;
      edge efalse = find_edge (cond_bb, then_bb);
      efalse->flags = EDGE_FALSE_VALUE;
      efalse->probability -= etrue->probability;
      efalse->count -= etrue->count;
      then_bb->count -= etrue->count;

      tree othervar = NULL_TREE;
      if (gimple_assign_rhs1 (use_stmt) == var)
        othervar = gimple_assign_rhs2 (use_stmt);
      else if (gimple_assign_rhs2 (use_stmt) == var)
        othervar = gimple_assign_rhs1 (use_stmt);
      else
        gcc_unreachable ();

      tree lhs = gimple_assign_lhs (use_stmt);
      gphi *phi = create_phi_node (lhs, merge_bb);
      add_phi_arg (phi, build_one_cst (TREE_TYPE (lhs)), etrue, loc);
      add_phi_arg (phi, othervar, single_succ_edge (then_bb), loc);
      gsi = gsi_for_stmt (use_stmt);
      gsi_remove (&gsi, true);

      set_immediate_dominator (CDI_DOMINATORS, merge_bb, cond_bb);
      set_immediate_dominator (CDI_POST_DOMINATORS, cond_bb, merge_bb);
    }
  reassoc_branch_fixups.release ();
}

void dump_ops_vector (FILE *file, vec<operand_entry *> ops);
void debug_ops_vector (vec<operand_entry *> ops);

/* Dump the operand entry vector OPS to FILE.  */

void
dump_ops_vector (FILE *file, vec<operand_entry *> ops)
{
  operand_entry *oe;
  unsigned int i;

  FOR_EACH_VEC_ELT (ops, i, oe)
    {
      fprintf (file, "Op %d -> rank: %d, tree: ", i, oe->rank);
      print_generic_expr (file, oe->op, 0);
      fprintf (file, "\n");
    }
}

/* Dump the operand entry vector OPS to STDERR.  */

DEBUG_FUNCTION void
debug_ops_vector (vec<operand_entry *> ops)
{
  dump_ops_vector (stderr, ops);
}

/* Break up subtracts in dominator order from the entry block, then
   reassociate in post-dominator order from the exit block.  */

static void
do_reassoc (void)
{
  break_up_subtract_bb (ENTRY_BLOCK_PTR_FOR_FN (cfun));
  reassociate_bb (EXIT_BLOCK_PTR_FOR_FN (cfun));
}

/* Initialize the reassociation pass.  */

static void
init_reassoc (void)
{
  int i;
  long rank = 2;
  int *bbs = XNEWVEC (int, n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS);

  /* Find the loops, so that we can prevent moving calculations in
     them.  */
  loop_optimizer_init (AVOID_CFG_MODIFICATIONS);

  memset (&reassociate_stats, 0, sizeof (reassociate_stats));

  next_operand_entry_id = 0;

  /* Reverse RPO (Reverse Post Order) will give us something where
     deeper loops come later.  */
  pre_and_rev_post_order_compute (NULL, bbs, false);
  bb_rank = XCNEWVEC (long, last_basic_block_for_fn (cfun));
  operand_rank = new hash_map<tree, long>;

  /* Give each default definition a distinct rank.  This includes
     parameters and the static chain.  Walk backwards over all
     SSA names so that we get proper rank ordering according
     to tree_swap_operands_p.  */
  for (i = num_ssa_names - 1; i > 0; --i)
    {
      tree name = ssa_name (i);
      if (name && SSA_NAME_IS_DEFAULT_DEF (name))
        insert_operand_rank (name, ++rank);
    }

  /* Set up rank for each BB  */
  for (i = 0; i < n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS; i++)
    bb_rank[bbs[i]] = ++rank << 16;
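
  /* The block rank occupies the high bits; the low 16 bits are left
     free so that expressions within a block can be ranked relative to
     their block's rank.  */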

  free (bbs);
  calculate_dominance_info (CDI_POST_DOMINATORS);
  plus_negates = vNULL;
}

/* Cleanup after the reassociation pass, and print stats if
   requested.  */

static void
fini_reassoc (void)
{
  statistics_counter_event (cfun, "Linearized",
                            reassociate_stats.linearized);
  statistics_counter_event (cfun, "Constants eliminated",
                            reassociate_stats.constants_eliminated);
  statistics_counter_event (cfun, "Ops eliminated",
                            reassociate_stats.ops_eliminated);
  statistics_counter_event (cfun, "Statements rewritten",
                            reassociate_stats.rewritten);
  statistics_counter_event (cfun, "Built-in pow[i] calls encountered",
                            reassociate_stats.pows_encountered);
  statistics_counter_event (cfun, "Built-in powi calls created",
                            reassociate_stats.pows_created);

  delete operand_rank;
  operand_entry_pool.release ();
  free (bb_rank);
  plus_negates.release ();
  free_dominance_info (CDI_POST_DOMINATORS);
  loop_optimizer_finalize ();
}

/* Gate and execute functions for Reassociation.  If INSERT_POWI_P, enable
   insertion of __builtin_powi calls.  */

static unsigned int
execute_reassoc (bool insert_powi_p)
{
  reassoc_insert_powi_p = insert_powi_p;

  init_reassoc ();

  do_reassoc ();
  repropagate_negates ();
  branch_fixup ();

  fini_reassoc ();
  return 0;
}

namespace {

const pass_data pass_data_reassoc =
{
  GIMPLE_PASS, /* type */
  "reassoc", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_REASSOC, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa_only_virtuals, /* todo_flags_finish */
};

class pass_reassoc : public gimple_opt_pass
{
public:
  pass_reassoc (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_reassoc, ctxt), insert_powi_p (false)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_reassoc (m_ctxt); }
  void set_pass_param (unsigned int n, bool param)
    {
      gcc_assert (n == 0);
      insert_powi_p = param;
    }
  virtual bool gate (function *) { return flag_tree_reassoc != 0; }
  virtual unsigned int execute (function *)
    { return execute_reassoc (insert_powi_p); }

 private:
  /* Enable insertion of __builtin_powi calls during execute_reassoc.  See
     point 3a in the pass header comment.  */
  bool insert_powi_p;
}; // class pass_reassoc

} // anon namespace

gimple_opt_pass *
make_pass_reassoc (gcc::context *ctxt)
{
  return new pass_reassoc (ctxt);
}