/* Reassociation for trees.
   Copyright (C) 2005-2017 Free Software Foundation, Inc.
   Contributed by Daniel Berlin <dan@dberlin.org>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "alloc-pool.h"
#include "tree-pass.h"
#include "optabs-tree.h"
#include "gimple-pretty-print.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "gimple-fold.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "tree-ssa-loop.h"
#include "langhooks.h"
#include "case-cfn-macros.h"
/* This is a simple global reassociation pass.  It is, in part, based
   on the LLVM pass of the same name (They do some things more/less
   than we do, in different orders, etc).

   It consists of five steps:

   1. Breaking up subtract operations into addition + negate, where
   it would promote the reassociation of adds.

   2. Left linearization of the expression trees, so that (A+B)+(C+D)
   becomes (((A+B)+C)+D), which is easier for us to rewrite later.
   During linearization, we place the operands of the binary
   expressions into a vector of operand_entry_*

   3. Optimization of the operand lists, eliminating things like a +
   -a, a & a, etc.

   3a. Combine repeated factors with the same occurrence counts
   into a __builtin_powi call that will later be optimized into
   an optimal number of multiplies.

   4. Rewrite the expression trees we linearized and optimized so
   they are in proper rank order.

   5. Repropagate negates, as nothing else will clean it up ATM.

   A bit of theory on #4, since nobody seems to write anything down
   about why it makes sense to do it the way they do it:

   We could do this much nicer theoretically, but don't (for reasons
   explained after how to do it theoretically nice :P).

   In order to promote the most redundancy elimination, you want
   binary expressions whose operands are the same rank (or
   preferably, the same value) exposed to the redundancy eliminator,
   for possible elimination.

   So the way to do this if we really cared, is to build the new op
   tree from the leaves to the roots, merging as you go, and putting the
   new op on the end of the worklist, until you are left with one
   thing on the worklist.

   IE if you have to rewrite the following set of operands (listed with
   rank in parentheses), with opcode PLUS_EXPR:

   a (1),  b (1),  c (1),  d (2), e (2)

   We start with our merge worklist empty, and the ops list with all of
   those on it.

   You want to first merge all leaves of the same rank, as much as
   possible.

   So first build a binary op of

   mergetmp = a + b, and put "mergetmp" on the merge worklist.

   Because there is no three operand form of PLUS_EXPR, c is not going to
   be exposed to redundancy elimination as a rank 1 operand.

   So you might as well throw it on the merge worklist (you could also
   consider it to now be a rank two operand, and merge it with d and e,
   but in this case, you then have evicted e from a binary op.  So at
   least in this situation, you can't win.)

   Then build a binary op of d + e
   mergetmp2 = d + e

   and put mergetmp2 on the merge worklist.

   so merge worklist = {mergetmp, c, mergetmp2}

   Continue building binary ops of these operations until you have only
   one operation left on the worklist.

   mergetmp3 = mergetmp + c

   worklist = {mergetmp2, mergetmp3}

   mergetmp4 = mergetmp2 + mergetmp3

   worklist = {mergetmp4}

   because we have one operation left, we can now just set the original
   statement equal to the result of that operation.

   This will at least expose a + b and d + e to redundancy elimination
   as binary operations.

   For extra points, you can reuse the old statements to build the
   mergetmps, since you shouldn't run out.

   So why don't we do this?

   Because it's expensive, and rarely will help.  Most trees we are
   reassociating have 3 or less ops.  If they have 2 ops, they already
   will be written into a nice single binary op.  If you have 3 ops, a
   single simple check suffices to tell you whether the first two are of the
   same rank.  If so, you know to order it

   mergetmp = op1 + op2
   newstmt = mergetmp + op3

   instead of
   mergetmp = op2 + op3
   newstmt = mergetmp + op1

   If all three are of the same rank, you can't expose them all in a
   single binary operator anyway, so the above is *still* the best you
   can do.

   Thus, this is what we do.  When we have three ops left, we check to see
   what order to put them in, and call it a day.  As a nod to vector sum
   reduction, we check if any of the ops are really a phi node that is a
   destructive update for the associating op, and keep the destructive
   update together for vector sum reduction recognition.  */
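
/* As an illustration of the steps above (the SSA names and values
   here are invented for exposition, not taken from a real dump),
   given

     t1 = a - b;
     t2 = t1 + c;
     w  = t2 - d;

   step 1 rewrites the subtracts as adds of negates, step 2 linearizes
   the whole computation into the operand list {a, -b, c, -d} for
   PLUS_EXPR, step 3 optimizes that list, step 4 emits the list back
   as binary statements in rank order, and step 5 re-propagates the
   negates that survive.  */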
/* Enable insertion of __builtin_powi calls during execute_reassoc.  See
   point 3a in the pass header comment.  */
static bool reassoc_insert_powi_p;

/* Statistics */
static struct
{
  int linearized;
  int constants_eliminated;
  int ops_eliminated;
  int rewritten;
  int pows_encountered;
  int pows_created;
} reassociate_stats;
/* Operator, rank pair.  */
struct operand_entry
{
  unsigned int rank;
  unsigned int id;
  tree op;
  unsigned int count;
  gimple *stmt_to_insert;
};

static object_allocator<operand_entry> operand_entry_pool
  ("operand entry pool");

/* This is used to assign a unique ID to each struct operand_entry
   so that qsort results are identical on different hosts.  */
static unsigned int next_operand_entry_id;
/* Starting rank number for a given basic block, so that we can rank
   operations using unmovable instructions in that BB based on the bb
   depth.  */
static long *bb_rank;

/* Operand->rank hashtable.  */
static hash_map<tree, long> *operand_rank;

/* Vector of SSA_NAMEs on which after reassociate_bb is done with
   all basic blocks the CFG should be adjusted - basic blocks
   split right after that SSA_NAME's definition statement and before
   the only use, which must be a bit ior.  */
static vec<tree> reassoc_branch_fixups;

/* Forward decls.  */
static long get_rank (tree);
static bool reassoc_stmt_dominates_stmt_p (gimple *, gimple *);
/* Wrapper around gsi_remove, which adjusts gimple_uid of debug stmts
   possibly added by gsi_remove.  */

static bool
reassoc_remove_stmt (gimple_stmt_iterator *gsi)
{
  gimple *stmt = gsi_stmt (*gsi);

  if (!MAY_HAVE_DEBUG_STMTS || gimple_code (stmt) == GIMPLE_PHI)
    return gsi_remove (gsi, true);

  gimple_stmt_iterator prev = *gsi;
  gsi_prev (&prev);
  unsigned uid = gimple_uid (stmt);
  basic_block bb = gimple_bb (stmt);
  bool ret = gsi_remove (gsi, true);
  if (!gsi_end_p (prev))
    gsi_next (&prev);
  else
    prev = gsi_start_bb (bb);
  gimple *end_stmt = gsi_stmt (*gsi);
  while ((stmt = gsi_stmt (prev)) != end_stmt)
    {
      gcc_assert (stmt && is_gimple_debug (stmt) && gimple_uid (stmt) == 0);
      gimple_set_uid (stmt, uid);
      gsi_next (&prev);
    }
  return ret;
}
/* Bias amount for loop-carried phis.  We want this to be larger than
   the depth of any reassociation tree we can see, but not larger than
   the rank difference between two blocks.  */
#define PHI_LOOP_BIAS (1 << 15)

/* Rank assigned to a phi statement.  If STMT is a loop-carried phi of
   an innermost loop, and the phi has only a single use which is inside
   the loop, then the rank is the block rank of the loop latch plus an
   extra bias for the loop-carried dependence.  This causes expressions
   calculated into an accumulator variable to be independent for each
   iteration of the loop.  If STMT is some other phi, the rank is the
   block rank of its containing block.  */
static long
phi_rank (gimple *stmt)
{
  basic_block bb = gimple_bb (stmt);
  struct loop *father = bb->loop_father;
  tree res;
  unsigned i;
  use_operand_p use;
  gimple *use_stmt;

  /* We only care about real loops (those with a latch).  */
  if (!father->latch)
    return bb_rank[bb->index];

  /* Interesting phis must be in headers of innermost loops.  */
  if (bb != father->header
      || father->inner)
    return bb_rank[bb->index];

  /* Ignore virtual SSA_NAMEs.  */
  res = gimple_phi_result (stmt);
  if (virtual_operand_p (res))
    return bb_rank[bb->index];

  /* The phi definition must have a single use, and that use must be
     within the loop.  Otherwise this isn't an accumulator pattern.  */
  if (!single_imm_use (res, &use, &use_stmt)
      || gimple_bb (use_stmt)->loop_father != father)
    return bb_rank[bb->index];

  /* Look for phi arguments from within the loop.  If found, bias this phi.  */
  for (i = 0; i < gimple_phi_num_args (stmt); i++)
    {
      tree arg = gimple_phi_arg_def (stmt, i);
      if (TREE_CODE (arg) == SSA_NAME
	  && !SSA_NAME_IS_DEFAULT_DEF (arg))
	{
	  gimple *def_stmt = SSA_NAME_DEF_STMT (arg);
	  if (gimple_bb (def_stmt)->loop_father == father)
	    return bb_rank[father->latch->index] + PHI_LOOP_BIAS;
	}
    }

  /* Must be an uninteresting phi.  */
  return bb_rank[bb->index];
}
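
/* For example (numbers invented for exposition): if bb_rank of the
   latch block is R, a qualifying accumulator phi gets rank
   R + PHI_LOOP_BIAS, which is higher than the rank of anything
   computed inside the loop body, so the accumulator is added last
   when the expression chain is rebuilt.  */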
/* If EXP is an SSA_NAME defined by a PHI statement that represents a
   loop-carried dependence of an innermost loop, return TRUE; else
   return FALSE.  */

static bool
loop_carried_phi (tree exp)
{
  gimple *phi_stmt;
  long block_rank;

  if (TREE_CODE (exp) != SSA_NAME
      || SSA_NAME_IS_DEFAULT_DEF (exp))
    return false;

  phi_stmt = SSA_NAME_DEF_STMT (exp);

  if (gimple_code (SSA_NAME_DEF_STMT (exp)) != GIMPLE_PHI)
    return false;

  /* Non-loop-carried phis have block rank.  Loop-carried phis have
     an additional bias added in.  If this phi doesn't have block rank,
     it's biased and should not be propagated.  */
  block_rank = bb_rank[gimple_bb (phi_stmt)->index];

  if (phi_rank (phi_stmt) != block_rank)
    return true;

  return false;
}
/* Return the maximum of RANK and the rank that should be propagated
   from expression OP.  For most operands, this is just the rank of OP.
   For loop-carried phis, the value is zero to avoid undoing the bias
   in favor of the phi.  */

static long
propagate_rank (long rank, tree op)
{
  long op_rank;

  if (loop_carried_phi (op))
    return rank;

  op_rank = get_rank (op);

  return MAX (rank, op_rank);
}
/* Look up the operand rank structure for expression E.  */

static long
find_operand_rank (tree e)
{
  long *slot = operand_rank->get (e);
  return slot ? *slot : -1;
}

/* Insert {E,RANK} into the operand rank hashtable.  */

static void
insert_operand_rank (tree e, long rank)
{
  gcc_assert (rank > 0);
  gcc_assert (!operand_rank->put (e, rank));
}
/* Given an expression E, return the rank of the expression.  */

static long
get_rank (tree e)
{
  /* SSA_NAME's have the rank of the expression they are the result
     of.
     For globals and uninitialized values, the rank is 0.
     For function arguments, use the pre-setup rank.
     For PHI nodes, stores, asm statements, etc, we use the rank of
     the BB.
     For simple operations, the rank is the maximum rank of any of
     its operands, or the bb_rank, whichever is less.
     I make no claims that this is optimal, however, it gives good
     results.  */

  /* We make an exception to the normal ranking system to break
     dependences of accumulator variables in loops.  Suppose we
     have a simple one-block loop containing:

       x_1 = phi(x_0, x_2)
       b = a + x_1
       c = b + d
       x_2 = c + e

     As shown, each iteration of the calculation into x is fully
     dependent upon the iteration before it.  We would prefer to
     see this in the form:

       x_1 = phi(x_0, x_2)
       b = a + d
       c = b + e
       x_2 = c + x_1

     If the loop is unrolled, the calculations of b and c from
     different iterations can be interleaved.

     To obtain this result during reassociation, we bias the rank
     of the phi definition x_1 upward, when it is recognized as an
     accumulator pattern.  The artificial rank causes it to be
     added last, providing the desired independence.  */

  if (TREE_CODE (e) == SSA_NAME)
    {
      ssa_op_iter iter;
      gimple *stmt;
      long rank;
      tree op;

      if (SSA_NAME_IS_DEFAULT_DEF (e))
	return find_operand_rank (e);

      stmt = SSA_NAME_DEF_STMT (e);
      if (gimple_code (stmt) == GIMPLE_PHI)
	return phi_rank (stmt);

      if (!is_gimple_assign (stmt))
	return bb_rank[gimple_bb (stmt)->index];

      /* If we already have a rank for this expression, use that.  */
      rank = find_operand_rank (e);
      if (rank != -1)
	return rank;

      /* Otherwise, find the maximum rank for the operands.  As an
	 exception, remove the bias from loop-carried phis when propagating
	 the rank so that dependent operations are not also biased.  */
      /* Simply walk over all SSA uses - this takes advantage of the
	 fact that non-SSA operands are is_gimple_min_invariant and
	 thus have rank 0.  */
      rank = 0;
      FOR_EACH_SSA_TREE_OPERAND (op, stmt, iter, SSA_OP_USE)
	rank = propagate_rank (rank, op);

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Rank for ");
	  print_generic_expr (dump_file, e, 0);
	  fprintf (dump_file, " is %ld\n", (rank + 1));
	}

      /* Note the rank in the hashtable so we don't recompute it.  */
      insert_operand_rank (e, (rank + 1));
      return (rank + 1);
    }

  /* Constants, globals, etc., are rank 0 */
  return 0;
}
/* We want integer ones to end up last no matter what, since they are
   the ones we can do the most with.  */
#define INTEGER_CONST_TYPE (1 << 3)
#define FLOAT_CONST_TYPE (1 << 2)
#define OTHER_CONST_TYPE (1 << 1)

/* Classify an invariant tree into integer, float, or other, so that
   we can sort them to be near other constants of the same type.  */

static inline int
constant_type (tree t)
{
  if (INTEGRAL_TYPE_P (TREE_TYPE (t)))
    return INTEGER_CONST_TYPE;
  else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (t)))
    return FLOAT_CONST_TYPE;
  else
    return OTHER_CONST_TYPE;
}
/* qsort comparison function to sort operand entries PA and PB by rank
   so that the sorted array is ordered by rank in decreasing order.  */

static int
sort_by_operand_rank (const void *pa, const void *pb)
{
  const operand_entry *oea = *(const operand_entry *const *)pa;
  const operand_entry *oeb = *(const operand_entry *const *)pb;

  /* It's nicer for optimize_expression if constants that are likely
     to fold when added/multiplied/whatever are put next to each
     other.  Since all constants have rank 0, order them by type.  */
  if (oeb->rank == 0 && oea->rank == 0)
    {
      if (constant_type (oeb->op) != constant_type (oea->op))
	return constant_type (oeb->op) - constant_type (oea->op);
      else
	/* To make sorting result stable, we use unique IDs to determine
	   order.  */
	return oeb->id > oea->id ? 1 : -1;
    }

  /* Lastly, make sure the versions that are the same go next to each
     other.  */
  if (oeb->rank == oea->rank
      && TREE_CODE (oea->op) == SSA_NAME
      && TREE_CODE (oeb->op) == SSA_NAME)
    {
      /* SSA_NAME_VERSION is assigned pretty randomly, because we reuse
	 versions of removed SSA_NAMEs, so if possible, prefer to sort
	 based on basic block and gimple_uid of the SSA_NAME_DEF_STMT.  */
      if (!SSA_NAME_IS_DEFAULT_DEF (oea->op)
	  && !SSA_NAME_IS_DEFAULT_DEF (oeb->op)
	  && !oea->stmt_to_insert
	  && !oeb->stmt_to_insert
	  && SSA_NAME_VERSION (oeb->op) != SSA_NAME_VERSION (oea->op))
	{
	  gimple *stmta = SSA_NAME_DEF_STMT (oea->op);
	  gimple *stmtb = SSA_NAME_DEF_STMT (oeb->op);
	  basic_block bba = gimple_bb (stmta);
	  basic_block bbb = gimple_bb (stmtb);
	  if (bbb != bba)
	    {
	      if (bb_rank[bbb->index] != bb_rank[bba->index])
		return bb_rank[bbb->index] - bb_rank[bba->index];
	    }
	  else
	    {
	      bool da = reassoc_stmt_dominates_stmt_p (stmta, stmtb);
	      bool db = reassoc_stmt_dominates_stmt_p (stmtb, stmta);
	      if (da != db)
		return da ? 1 : -1;
	    }
	}

      if (SSA_NAME_VERSION (oeb->op) != SSA_NAME_VERSION (oea->op))
	return SSA_NAME_VERSION (oeb->op) > SSA_NAME_VERSION (oea->op) ? 1 : -1;
      else
	return oeb->id > oea->id ? 1 : -1;
    }

  if (oeb->rank != oea->rank)
    return oeb->rank > oea->rank ? 1 : -1;
  else
    return oeb->id > oea->id ? 1 : -1;
}
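
/* E.g. (illustrative): operand entries with ranks {7, 3, 0, 0} come
   out of the qsort grouped by rank, with the two rank-0 constants
   adjacent and ordered by constant_type so that foldable constants
   sit next to each other, and with ->id used as the final
   host-independent tie-break.  */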
/* Add an operand entry to *OPS for the tree operand OP.  */

static void
add_to_ops_vec (vec<operand_entry *> *ops, tree op, gimple *stmt_to_insert = NULL)
{
  operand_entry *oe = operand_entry_pool.allocate ();

  oe->op = op;
  oe->rank = get_rank (op);
  oe->id = next_operand_entry_id++;
  oe->count = 1;
  oe->stmt_to_insert = stmt_to_insert;
  ops->safe_push (oe);
}
/* Add an operand entry to *OPS for the tree operand OP with repeat
   count REPEAT.  */

static void
add_repeat_to_ops_vec (vec<operand_entry *> *ops, tree op,
		       HOST_WIDE_INT repeat)
{
  operand_entry *oe = operand_entry_pool.allocate ();

  oe->op = op;
  oe->rank = get_rank (op);
  oe->id = next_operand_entry_id++;
  oe->count = repeat;
  oe->stmt_to_insert = NULL;
  ops->safe_push (oe);

  reassociate_stats.pows_encountered++;
}
/* Return true if STMT is a reassociable operation containing a binary
   operation with tree code CODE, and is inside LOOP.  */

static bool
is_reassociable_op (gimple *stmt, enum tree_code code, struct loop *loop)
{
  basic_block bb = gimple_bb (stmt);

  if (gimple_bb (stmt) == NULL)
    return false;

  if (!flow_bb_inside_loop_p (loop, bb))
    return false;

  if (is_gimple_assign (stmt)
      && gimple_assign_rhs_code (stmt) == code
      && has_single_use (gimple_assign_lhs (stmt)))
    {
      tree rhs1 = gimple_assign_rhs1 (stmt);
      tree rhs2 = gimple_assign_rhs2 (stmt);
      if (TREE_CODE (rhs1) == SSA_NAME
	  && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs1))
	return false;
      if (rhs2
	  && TREE_CODE (rhs2) == SSA_NAME
	  && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs2))
	return false;
      return true;
    }

  return false;
}
/* Return true if STMT is a nop-conversion.  */

static bool
gimple_nop_conversion_p (gimple *stmt)
{
  if (gassign *ass = dyn_cast <gassign *> (stmt))
    {
      if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (ass))
	  && tree_nop_conversion_p (TREE_TYPE (gimple_assign_lhs (ass)),
				    TREE_TYPE (gimple_assign_rhs1 (ass))))
	return true;
    }
  return false;
}
/* Given NAME, if NAME is defined by a unary operation OPCODE, return the
   operand of that operation.  Otherwise, return NULL.  */

static tree
get_unary_op (tree name, enum tree_code opcode)
{
  gimple *stmt = SSA_NAME_DEF_STMT (name);

  /* Look through nop conversions (sign changes).  */
  if (gimple_nop_conversion_p (stmt)
      && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME)
    stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));

  if (!is_gimple_assign (stmt))
    return NULL_TREE;

  if (gimple_assign_rhs_code (stmt) == opcode)
    return gimple_assign_rhs1 (stmt);
  return NULL_TREE;
}
/* Return true if OP1 and OP2 have the same value if cast to either type.  */

static bool
ops_equal_values_p (tree op1, tree op2)
{
  if (op1 == op2)
    return true;

  tree orig_op1 = op1;
  if (TREE_CODE (op1) == SSA_NAME)
    {
      gimple *stmt = SSA_NAME_DEF_STMT (op1);
      if (gimple_nop_conversion_p (stmt))
	{
	  op1 = gimple_assign_rhs1 (stmt);
	  if (op1 == op2)
	    return true;
	}
    }

  if (TREE_CODE (op2) == SSA_NAME)
    {
      gimple *stmt = SSA_NAME_DEF_STMT (op2);
      if (gimple_nop_conversion_p (stmt))
	{
	  op2 = gimple_assign_rhs1 (stmt);
	  if (op2 == op1
	      || orig_op1 == op2)
	    return true;
	}
    }

  return false;
}
/* If CURR and LAST are a pair of ops that OPCODE allows us to
   eliminate through equivalences, do so, remove them from OPS, and
   return true.  Otherwise, return false.  */

static bool
eliminate_duplicate_pair (enum tree_code opcode,
			  vec<operand_entry *> *ops,
			  bool *all_done,
			  unsigned int i,
			  operand_entry *curr,
			  operand_entry *last)
{
  /* If we have two of the same op, and the opcode is &, |, min, or max,
     we can eliminate one of them.
     If we have two of the same op, and the opcode is ^, we can
     eliminate both of them.  */

  if (last && last->op == curr->op)
    {
      switch (opcode)
	{
	case MAX_EXPR:
	case MIN_EXPR:
	case BIT_IOR_EXPR:
	case BIT_AND_EXPR:
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Equivalence: ");
	      print_generic_expr (dump_file, curr->op, 0);
	      fprintf (dump_file, " [&|minmax] ");
	      print_generic_expr (dump_file, last->op, 0);
	      fprintf (dump_file, " -> ");
	      print_generic_stmt (dump_file, last->op, 0);
	    }

	  ops->ordered_remove (i);
	  reassociate_stats.ops_eliminated ++;

	  return true;

	case BIT_XOR_EXPR:
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Equivalence: ");
	      print_generic_expr (dump_file, curr->op, 0);
	      fprintf (dump_file, " ^ ");
	      print_generic_expr (dump_file, last->op, 0);
	      fprintf (dump_file, " -> nothing\n");
	    }

	  reassociate_stats.ops_eliminated += 2;

	  if (ops->length () == 2)
	    {
	      ops->truncate (0);
	      add_to_ops_vec (ops, build_zero_cst (TREE_TYPE (last->op)));
	      *all_done = true;
	    }
	  else
	    {
	      ops->ordered_remove (i-1);
	      ops->ordered_remove (i-1);
	    }

	  return true;

	default:
	  break;
	}
    }
  return false;
}
static vec<tree> plus_negates;
/* If OPCODE is PLUS_EXPR, CURR->OP is a negate expression or a bitwise not
   expression, look in OPS for a corresponding positive operation to cancel
   it out.  If we find one, remove the other from OPS, replace
   OPS[CURRINDEX] with 0 or -1, respectively, and return true.  Otherwise,
   return false.  */

static bool
eliminate_plus_minus_pair (enum tree_code opcode,
			   vec<operand_entry *> *ops,
			   unsigned int currindex,
			   operand_entry *curr)
{
  tree negateop;
  tree notop;
  unsigned int i;
  operand_entry *oe;

  if (opcode != PLUS_EXPR || TREE_CODE (curr->op) != SSA_NAME)
    return false;

  negateop = get_unary_op (curr->op, NEGATE_EXPR);
  notop = get_unary_op (curr->op, BIT_NOT_EXPR);
  if (negateop == NULL_TREE && notop == NULL_TREE)
    return false;

  /* Any non-negated version will have a rank that is one less than
     the current rank.  So once we hit those ranks, if we don't find
     one, we can stop.  */

  for (i = currindex + 1;
       ops->iterate (i, &oe)
       && oe->rank >= curr->rank - 1 ;
       i++)
    {
      if (negateop
	  && ops_equal_values_p (oe->op, negateop))
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Equivalence: ");
	      print_generic_expr (dump_file, negateop, 0);
	      fprintf (dump_file, " + -");
	      print_generic_expr (dump_file, oe->op, 0);
	      fprintf (dump_file, " -> 0\n");
	    }

	  ops->ordered_remove (i);
	  add_to_ops_vec (ops, build_zero_cst (TREE_TYPE (oe->op)));
	  ops->ordered_remove (currindex);
	  reassociate_stats.ops_eliminated ++;

	  return true;
	}
      else if (notop
	       && ops_equal_values_p (oe->op, notop))
	{
	  tree op_type = TREE_TYPE (oe->op);

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Equivalence: ");
	      print_generic_expr (dump_file, notop, 0);
	      fprintf (dump_file, " + ~");
	      print_generic_expr (dump_file, oe->op, 0);
	      fprintf (dump_file, " -> -1\n");
	    }

	  ops->ordered_remove (i);
	  add_to_ops_vec (ops, build_all_ones_cst (op_type));
	  ops->ordered_remove (currindex);
	  reassociate_stats.ops_eliminated ++;

	  return true;
	}
    }

  /* If CURR->OP is a negate expr without nop conversion in a plus expr:
     save it for later inspection in repropagate_negates().  */
  if (negateop != NULL_TREE
      && gimple_assign_rhs_code (SSA_NAME_DEF_STMT (curr->op)) == NEGATE_EXPR)
    plus_negates.safe_push (curr->op);

  return false;
}
/* If OPCODE is BIT_IOR_EXPR or BIT_AND_EXPR, and CURR->OP is really a
   bitwise not expression, look in OPS for a corresponding operand to
   cancel it out.  If we find one, remove the other from OPS, replace
   OPS[CURRINDEX] with 0, and return true.  Otherwise, return
   false. */

static bool
eliminate_not_pairs (enum tree_code opcode,
		     vec<operand_entry *> *ops,
		     unsigned int currindex,
		     operand_entry *curr)
{
  tree notop;
  unsigned int i;
  operand_entry *oe;

  if ((opcode != BIT_IOR_EXPR && opcode != BIT_AND_EXPR)
      || TREE_CODE (curr->op) != SSA_NAME)
    return false;

  notop = get_unary_op (curr->op, BIT_NOT_EXPR);
  if (notop == NULL_TREE)
    return false;

  /* Any non-not version will have a rank that is one less than
     the current rank.  So once we hit those ranks, if we don't find
     one, we can stop.  */

  for (i = currindex + 1;
       ops->iterate (i, &oe)
       && oe->rank >= curr->rank - 1;
       i++)
    {
      if (oe->op == notop)
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Equivalence: ");
	      print_generic_expr (dump_file, notop, 0);
	      if (opcode == BIT_AND_EXPR)
		fprintf (dump_file, " & ~");
	      else if (opcode == BIT_IOR_EXPR)
		fprintf (dump_file, " | ~");
	      print_generic_expr (dump_file, oe->op, 0);
	      if (opcode == BIT_AND_EXPR)
		fprintf (dump_file, " -> 0\n");
	      else if (opcode == BIT_IOR_EXPR)
		fprintf (dump_file, " -> -1\n");
	    }

	  if (opcode == BIT_AND_EXPR)
	    oe->op = build_zero_cst (TREE_TYPE (oe->op));
	  else if (opcode == BIT_IOR_EXPR)
	    oe->op = build_all_ones_cst (TREE_TYPE (oe->op));

	  reassociate_stats.ops_eliminated += ops->length () - 1;
	  ops->truncate (0);
	  ops->quick_push (oe);
	  return true;
	}
    }

  return false;
}
/* Use constant value that may be present in OPS to try to eliminate
   operands.  Note that this function is only really used when we've
   eliminated ops for other reasons, or merged constants.  Across
   single statements, fold already does all of this, plus more.  There
   is little point in duplicating logic, so I've only included the
   identities that I could ever construct testcases to trigger.  */

static void
eliminate_using_constants (enum tree_code opcode,
			   vec<operand_entry *> *ops)
{
  operand_entry *oelast = ops->last ();
  tree type = TREE_TYPE (oelast->op);

  if (oelast->rank == 0
      && (ANY_INTEGRAL_TYPE_P (type) || FLOAT_TYPE_P (type)))
    {
      switch (opcode)
	{
	case BIT_AND_EXPR:
	  if (integer_zerop (oelast->op))
	    {
	      if (ops->length () != 1)
		{
		  if (dump_file && (dump_flags & TDF_DETAILS))
		    fprintf (dump_file, "Found & 0, removing all other ops\n");

		  reassociate_stats.ops_eliminated += ops->length () - 1;

		  ops->truncate (0);
		  ops->quick_push (oelast);
		  return;
		}
	    }
	  else if (integer_all_onesp (oelast->op))
	    {
	      if (ops->length () != 1)
		{
		  if (dump_file && (dump_flags & TDF_DETAILS))
		    fprintf (dump_file, "Found & -1, removing\n");
		  ops->pop ();
		  reassociate_stats.ops_eliminated++;
		}
	    }
	  break;
	case BIT_IOR_EXPR:
	  if (integer_all_onesp (oelast->op))
	    {
	      if (ops->length () != 1)
		{
		  if (dump_file && (dump_flags & TDF_DETAILS))
		    fprintf (dump_file, "Found | -1, removing all other ops\n");

		  reassociate_stats.ops_eliminated += ops->length () - 1;

		  ops->truncate (0);
		  ops->quick_push (oelast);
		  return;
		}
	    }
	  else if (integer_zerop (oelast->op))
	    {
	      if (ops->length () != 1)
		{
		  if (dump_file && (dump_flags & TDF_DETAILS))
		    fprintf (dump_file, "Found | 0, removing\n");
		  ops->pop ();
		  reassociate_stats.ops_eliminated++;
		}
	    }
	  break;
	case MULT_EXPR:
	  if (integer_zerop (oelast->op)
	      || (FLOAT_TYPE_P (type)
		  && !HONOR_NANS (type)
		  && !HONOR_SIGNED_ZEROS (type)
		  && real_zerop (oelast->op)))
	    {
	      if (ops->length () != 1)
		{
		  if (dump_file && (dump_flags & TDF_DETAILS))
		    fprintf (dump_file, "Found * 0, removing all other ops\n");

		  reassociate_stats.ops_eliminated += ops->length () - 1;
		  ops->truncate (0);
		  ops->quick_push (oelast);
		  return;
		}
	    }
	  else if (integer_onep (oelast->op)
		   || (FLOAT_TYPE_P (type)
		       && !HONOR_SNANS (type)
		       && real_onep (oelast->op)))
	    {
	      if (ops->length () != 1)
		{
		  if (dump_file && (dump_flags & TDF_DETAILS))
		    fprintf (dump_file, "Found * 1, removing\n");
		  ops->pop ();
		  reassociate_stats.ops_eliminated++;
		  return;
		}
	    }
	  break;
	case BIT_XOR_EXPR:
	case PLUS_EXPR:
	case MINUS_EXPR:
	  if (integer_zerop (oelast->op)
	      || (FLOAT_TYPE_P (type)
		  && (opcode == PLUS_EXPR || opcode == MINUS_EXPR)
		  && fold_real_zero_addition_p (type, oelast->op,
						opcode == MINUS_EXPR)))
	    {
	      if (ops->length () != 1)
		{
		  if (dump_file && (dump_flags & TDF_DETAILS))
		    fprintf (dump_file, "Found [|^+] 0, removing\n");
		  ops->pop ();
		  reassociate_stats.ops_eliminated++;
		  return;
		}
	    }
	  break;
	default:
	  break;
	}
    }
}
<operand_entry
*> *, gimple
*,
/* Structure for tracking and counting operands.  */
struct oecount {
  unsigned int cnt;
  unsigned int id;
  enum tree_code oecode;
  tree op;
};


/* The heap for the oecount hashtable and the sorted list of operands.  */
static vec<oecount> cvec;
/* Oecount hashtable helpers.  */

struct oecount_hasher : int_hash <int, 0, 1>
{
  static inline hashval_t hash (int);
  static inline bool equal (int, int);
};
/* Hash function for oecount.  */

inline hashval_t
oecount_hasher::hash (int p)
{
  const oecount *c = &cvec[p - 42];
  return htab_hash_pointer (c->op) ^ (hashval_t)c->oecode;
}
/* Comparison function for oecount.  */

inline bool
oecount_hasher::equal (int p1, int p2)
{
  const oecount *c1 = &cvec[p1 - 42];
  const oecount *c2 = &cvec[p2 - 42];
  return c1->oecode == c2->oecode && c1->op == c2->op;
}
/* Comparison function for qsort sorting oecount elements by count.  */

static int
oecount_cmp (const void *p1, const void *p2)
{
  const oecount *c1 = (const oecount *)p1;
  const oecount *c2 = (const oecount *)p2;
  if (c1->cnt != c2->cnt)
    return c1->cnt > c2->cnt ? 1 : -1;
  else
    /* If counts are identical, use unique IDs to stabilize qsort.  */
    return c1->id > c2->id ? 1 : -1;
}
/* Return TRUE iff STMT represents a builtin call that raises OP
   to some exponent.  */

static bool
stmt_is_power_of_op (gimple *stmt, tree op)
{
  if (!is_gimple_call (stmt))
    return false;

  switch (gimple_call_combined_fn (stmt))
    {
    CASE_CFN_POW:
    CASE_CFN_POWI:
      return (operand_equal_p (gimple_call_arg (stmt, 0), op, 0));

    default:
      return false;
    }
}
/* Given STMT which is a __builtin_pow* call, decrement its exponent
   in place and return the result.  Assumes that stmt_is_power_of_op
   was previously called for STMT and returned TRUE.  */

static HOST_WIDE_INT
decrement_power (gimple *stmt)
{
  REAL_VALUE_TYPE c, cint;
  HOST_WIDE_INT power;
  tree arg1;

  switch (gimple_call_combined_fn (stmt))
    {
    CASE_CFN_POW:
      arg1 = gimple_call_arg (stmt, 1);
      c = TREE_REAL_CST (arg1);
      power = real_to_integer (&c) - 1;
      real_from_integer (&cint, VOIDmode, power, SIGNED);
      gimple_call_set_arg (stmt, 1, build_real (TREE_TYPE (arg1), cint));
      return power;

    CASE_CFN_POWI:
      arg1 = gimple_call_arg (stmt, 1);
      power = TREE_INT_CST_LOW (arg1) - 1;
      gimple_call_set_arg (stmt, 1, build_int_cst (TREE_TYPE (arg1), power));
      return power;

    default:
      gcc_unreachable ();
    }
}
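
/* E.g. a call __builtin_powi (x, 3) has its exponent argument
   rewritten in place to 2 and 2 is returned; a return value of 1
   tells the caller that the remaining call computes plain X and can
   be propagated away.  */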
/* Replace SSA defined by STMT and replace all its uses with new
   SSA.  Also return the new SSA.  */

static tree
make_new_ssa_for_def (gimple *stmt, enum tree_code opcode, tree op)
{
  gimple *use_stmt;
  use_operand_p use;
  imm_use_iterator iter;
  tree new_lhs, new_debug_lhs = NULL_TREE;
  tree lhs = gimple_get_lhs (stmt);

  new_lhs = make_ssa_name (TREE_TYPE (lhs));
  gimple_set_lhs (stmt, new_lhs);

  /* Also need to update GIMPLE_DEBUGs.  */
  FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
    {
      tree repl = new_lhs;
      if (is_gimple_debug (use_stmt))
	{
	  if (new_debug_lhs == NULL_TREE)
	    {
	      new_debug_lhs = make_node (DEBUG_EXPR_DECL);
	      gdebug *def_temp
		= gimple_build_debug_bind (new_debug_lhs,
					   build2 (opcode, TREE_TYPE (lhs),
						   new_lhs, op),
					   stmt);
	      DECL_ARTIFICIAL (new_debug_lhs) = 1;
	      TREE_TYPE (new_debug_lhs) = TREE_TYPE (lhs);
	      SET_DECL_MODE (new_debug_lhs, TYPE_MODE (TREE_TYPE (lhs)));
	      gimple_set_uid (def_temp, gimple_uid (stmt));
	      gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
	      gsi_insert_after (&gsi, def_temp, GSI_SAME_STMT);
	    }
	  repl = new_debug_lhs;
	}
      FOR_EACH_IMM_USE_ON_STMT (use, iter)
	SET_USE (use, repl);
      update_stmt (use_stmt);
    }
  return new_lhs;
}
/* Replace all SSAs defined in STMTS_TO_FIX and replace their
   uses with new SSAs.  Also do this for the stmt that defines DEF
   if *DEF is not OP.  */

static void
make_new_ssa_for_all_defs (tree *def, enum tree_code opcode, tree op,
			   vec<gimple *> &stmts_to_fix)
{
  unsigned i;
  gimple *stmt;

  if (*def != op
      && TREE_CODE (*def) == SSA_NAME
      && (stmt = SSA_NAME_DEF_STMT (*def))
      && gimple_code (stmt) != GIMPLE_NOP)
    *def = make_new_ssa_for_def (stmt, opcode, op);

  FOR_EACH_VEC_ELT (stmts_to_fix, i, stmt)
    make_new_ssa_for_def (stmt, opcode, op);
}
/* Find the single immediate use of STMT's LHS, and replace it
   with OP.  Remove STMT.  If STMT's LHS is the same as *DEF,
   replace *DEF with OP as well.  */

static void
propagate_op_to_single_use (tree op, gimple *stmt, tree *def)
{
  tree lhs;
  gimple *use_stmt;
  use_operand_p use;
  gimple_stmt_iterator gsi;

  if (is_gimple_call (stmt))
    lhs = gimple_call_lhs (stmt);
  else
    lhs = gimple_assign_lhs (stmt);

  gcc_assert (has_single_use (lhs));
  single_imm_use (lhs, &use, &use_stmt);
  if (lhs == *def)
    *def = op;
  SET_USE (use, op);
  if (TREE_CODE (op) != SSA_NAME)
    update_stmt (use_stmt);
  gsi = gsi_for_stmt (stmt);
  unlink_stmt_vdef (stmt);
  reassoc_remove_stmt (&gsi);
  release_defs (stmt);
}
/* Walks the linear chain with result *DEF searching for an operation
   with operand OP and code OPCODE removing that from the chain.  *DEF
   is updated if there is only one operand but no operation left.  */

static void
zero_one_operation (tree *def, enum tree_code opcode, tree op)
{
  tree orig_def = *def;
  gimple *stmt = SSA_NAME_DEF_STMT (*def);
  /* PR72835 - Record the stmt chain that has to be updated such that
     we don't use the same LHS when the values computed are different.  */
  auto_vec<gimple *, 64> stmts_to_fix;

  do
    {
      tree name;

      if (opcode == MULT_EXPR)
	{
	  if (stmt_is_power_of_op (stmt, op))
	    {
	      if (decrement_power (stmt) == 1)
		{
		  if (stmts_to_fix.length () > 0)
		    stmts_to_fix.pop ();
		  propagate_op_to_single_use (op, stmt, def);
		}
	      break;
	    }
	  else if (gimple_assign_rhs_code (stmt) == NEGATE_EXPR)
	    {
	      if (gimple_assign_rhs1 (stmt) == op)
		{
		  tree cst = build_minus_one_cst (TREE_TYPE (op));
		  if (stmts_to_fix.length () > 0)
		    stmts_to_fix.pop ();
		  propagate_op_to_single_use (cst, stmt, def);
		  break;
		}
	      else if (integer_minus_onep (op)
		       || real_minus_onep (op))
		{
		  gimple_assign_set_rhs_code
		    (stmt, TREE_CODE (gimple_assign_rhs1 (stmt)));
		  break;
		}
	    }
	}

      name = gimple_assign_rhs1 (stmt);

      /* If this is the operation we look for and one of the operands
         is ours simply propagate the other operand into the stmts
	 single use.  */
      if (gimple_assign_rhs_code (stmt) == opcode
	  && (name == op
	      || gimple_assign_rhs2 (stmt) == op))
	{
	  if (name == op)
	    name = gimple_assign_rhs2 (stmt);
	  if (stmts_to_fix.length () > 0)
	    stmts_to_fix.pop ();
	  propagate_op_to_single_use (name, stmt, def);
	  break;
	}

      /* We might have a multiply of two __builtin_pow* calls, and
	 the operand might be hiding in the rightmost one.  Likewise
	 this can happen for a negate.  */
      if (opcode == MULT_EXPR
	  && gimple_assign_rhs_code (stmt) == opcode
	  && TREE_CODE (gimple_assign_rhs2 (stmt)) == SSA_NAME
	  && has_single_use (gimple_assign_rhs2 (stmt)))
	{
	  gimple *stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs2 (stmt));
	  if (stmt_is_power_of_op (stmt2, op))
	    {
	      if (decrement_power (stmt2) == 1)
		propagate_op_to_single_use (op, stmt2, def);
	      else
		stmts_to_fix.safe_push (stmt2);
	      break;
	    }
	  else if (is_gimple_assign (stmt2)
		   && gimple_assign_rhs_code (stmt2) == NEGATE_EXPR)
	    {
	      if (gimple_assign_rhs1 (stmt2) == op)
		{
		  tree cst = build_minus_one_cst (TREE_TYPE (op));
		  propagate_op_to_single_use (cst, stmt2, def);
		  break;
		}
	      else if (integer_minus_onep (op)
		       || real_minus_onep (op))
		{
		  stmts_to_fix.safe_push (stmt2);
		  gimple_assign_set_rhs_code
		    (stmt2, TREE_CODE (gimple_assign_rhs1 (stmt2)));
		  break;
		}
	    }
	}

      /* Continue walking the chain.  */
      gcc_assert (name != op
		  && TREE_CODE (name) == SSA_NAME);
      stmt = SSA_NAME_DEF_STMT (name);
      stmts_to_fix.safe_push (stmt);
    }
  while (1);

  if (stmts_to_fix.length () > 0 || *def == orig_def)
    make_new_ssa_for_all_defs (def, opcode, op, stmts_to_fix);
}
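
/* For instance (invented chain): with x_5 = a_1 * b_2;
   x_6 = x_5 * c_3; and *DEF == x_6, removing OP == b_2 leaves a chain
   computing a_1 * c_3; the surviving statements get fresh SSA names
   via make_new_ssa_for_all_defs because their values changed
   (PR72835).  */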
/* Returns true if statement S1 dominates statement S2.  Like
   stmt_dominates_stmt_p, but uses stmt UIDs to optimize.  */

static bool
reassoc_stmt_dominates_stmt_p (gimple *s1, gimple *s2)
{
  basic_block bb1 = gimple_bb (s1), bb2 = gimple_bb (s2);

  /* If bb1 is NULL, it should be a GIMPLE_NOP def stmt of an (D)
     SSA_NAME.  Assume it lives at the beginning of function and
     thus dominates everything.  */
  if (!bb1 || s1 == s2)
    return true;

  /* If bb2 is NULL, it doesn't dominate any stmt with a bb.  */
  if (!bb2)
    return false;

  if (bb1 == bb2)
    {
      /* PHIs in the same basic block are assumed to be
	 executed all in parallel, if only one stmt is a PHI,
	 it dominates the other stmt in the same basic block.  */
      if (gimple_code (s1) == GIMPLE_PHI)
	return true;

      if (gimple_code (s2) == GIMPLE_PHI)
	return false;

      gcc_assert (gimple_uid (s1) && gimple_uid (s2));

      if (gimple_uid (s1) < gimple_uid (s2))
	return true;

      if (gimple_uid (s1) > gimple_uid (s2))
	return false;

      gimple_stmt_iterator gsi = gsi_for_stmt (s1);
      unsigned int uid = gimple_uid (s1);
      for (gsi_next (&gsi); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple *s = gsi_stmt (gsi);
	  if (gimple_uid (s) != uid)
	    break;
	  if (s == s2)
	    return true;
	}

      return false;
    }

  return dominated_by_p (CDI_DOMINATORS, bb2, bb1);
}
/* Insert STMT after INSERT_POINT.  */

static void
insert_stmt_after (gimple *stmt, gimple *insert_point)
{
  gimple_stmt_iterator gsi;
  basic_block bb;

  if (gimple_code (insert_point) == GIMPLE_PHI)
    bb = gimple_bb (insert_point);
  else if (!stmt_ends_bb_p (insert_point))
    {
      gsi = gsi_for_stmt (insert_point);
      gimple_set_uid (stmt, gimple_uid (insert_point));
      gsi_insert_after (&gsi, stmt, GSI_NEW_STMT);
      return;
    }
  else
    /* We assume INSERT_POINT is a SSA_NAME_DEF_STMT of some SSA_NAME,
       thus if it must end a basic block, it should be a call that can
       throw, or some assignment that can throw.  If it throws, the LHS
       of it will not be initialized though, so only valid places using
       the SSA_NAME should be dominated by the fallthru edge.  */
    bb = find_fallthru_edge (gimple_bb (insert_point)->succs)->dest;
  gsi = gsi_after_labels (bb);
  if (gsi_end_p (gsi))
    {
      gimple_stmt_iterator gsi2 = gsi_last_bb (bb);
      gimple_set_uid (stmt,
		      gsi_end_p (gsi2) ? 1 : gimple_uid (gsi_stmt (gsi2)));
    }
  else
    gimple_set_uid (stmt, gimple_uid (gsi_stmt (gsi)));
  gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
}
/* Builds one statement performing OP1 OPCODE OP2 using TMPVAR for
   the result.  Places the statement after the definition of either
   OP1 or OP2.  Returns the new statement.  */

static gimple *
build_and_add_sum (tree type, tree op1, tree op2, enum tree_code opcode)
{
  gimple *op1def = NULL, *op2def = NULL;
  gimple_stmt_iterator gsi;
  tree op;
  gassign *sum;

  /* Create the addition statement.  */
  op = make_ssa_name (type);
  sum = gimple_build_assign (op, opcode, op1, op2);

  /* Find an insertion place and insert.  */
  if (TREE_CODE (op1) == SSA_NAME)
    op1def = SSA_NAME_DEF_STMT (op1);
  if (TREE_CODE (op2) == SSA_NAME)
    op2def = SSA_NAME_DEF_STMT (op2);
  if ((!op1def || gimple_nop_p (op1def))
      && (!op2def || gimple_nop_p (op2def)))
    {
      gsi = gsi_after_labels (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
      if (gsi_end_p (gsi))
	{
	  gimple_stmt_iterator gsi2
	    = gsi_last_bb (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
	  gimple_set_uid (sum,
			  gsi_end_p (gsi2) ? 1 : gimple_uid (gsi_stmt (gsi2)));
	}
      else
	gimple_set_uid (sum, gimple_uid (gsi_stmt (gsi)));
      gsi_insert_before (&gsi, sum, GSI_NEW_STMT);
    }
  else
    {
      gimple *insert_point;
      if ((!op1def || gimple_nop_p (op1def))
	  || (op2def && !gimple_nop_p (op2def)
	      && reassoc_stmt_dominates_stmt_p (op1def, op2def)))
	insert_point = op2def;
      else
	insert_point = op1def;
      insert_stmt_after (sum, insert_point);
    }
  update_stmt (sum);

  return sum;
}
/* Perform un-distribution of divisions and multiplications.
   A * X + B * X is transformed into (A + B) * X and A / X + B / X
   to (A + B) / X for real X.

   The algorithm is organized as follows.

    - First we walk the addition chain *OPS looking for summands that
      are defined by a multiplication or a real division.  This results
      in the candidates bitmap with relevant indices into *OPS.

    - Second we build the chains of multiplications or divisions for
      these candidates, counting the number of occurrences of (operand, code)
      pairs in all of the candidates chains.

    - Third we sort the (operand, code) pairs by number of occurrence and
      process them starting with the pair with the most uses.

      * For each such pair we walk the candidates again to build a
	second candidate bitmap noting all multiplication/division chains
	that have at least one occurrence of (operand, code).

      * We build an alternate addition chain only covering these
	candidates with one (operand, code) operation removed from their
	multiplication/division chain.

      * The first candidate gets replaced by the alternate addition chain
	multiplied/divided by the operand.

      * All candidate chains get disabled for further processing and
	processing of (operand, code) pairs continues.

  The alternate addition chains built are re-processed by the main
  reassociation algorithm which allows optimizing a * x * y + b * y * x
  to (a + b ) * x * y in one invocation of the reassociation pass.  */
static bool
undistribute_ops_list (enum tree_code opcode,
		       vec<operand_entry *> *ops, struct loop *loop)
{
  unsigned int length = ops->length ();
  operand_entry *oe1;
  unsigned i, j;
  unsigned nr_candidates, nr_candidates2;
  sbitmap_iterator sbi0;
  vec<operand_entry *> *subops;
  bool changed = false;
  unsigned int next_oecount_id = 0;

  if (length <= 1
      || opcode != PLUS_EXPR)
    return false;

  /* Build a list of candidates to process.  */
  auto_sbitmap candidates (length);
  bitmap_clear (candidates);
  nr_candidates = 0;
  FOR_EACH_VEC_ELT (*ops, i, oe1)
    {
      enum tree_code dcode;
      gimple *oe1def;

      if (TREE_CODE (oe1->op) != SSA_NAME)
	continue;
      oe1def = SSA_NAME_DEF_STMT (oe1->op);
      if (!is_gimple_assign (oe1def))
	continue;
      dcode = gimple_assign_rhs_code (oe1def);
      if ((dcode != MULT_EXPR
	   && dcode != RDIV_EXPR)
	  || !is_reassociable_op (oe1def, dcode, loop))
	continue;

      bitmap_set_bit (candidates, i);
      nr_candidates++;
    }

  if (nr_candidates < 2)
    return false;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "searching for un-distribute opportunities ");
      print_generic_expr (dump_file,
	(*ops)[bitmap_first_set_bit (candidates)]->op, 0);
      fprintf (dump_file, " %d\n", nr_candidates);
    }

  /* Build linearized sub-operand lists and the counting table.  */
  cvec.create (0);

  hash_table<oecount_hasher> ctable (15);

  /* ??? Macro arguments cannot have multi-argument template types in
     them.  This typedef is needed to workaround that limitation.  */
  typedef vec<operand_entry *> vec_operand_entry_t_heap;
  subops = XCNEWVEC (vec_operand_entry_t_heap, ops->length ());
  EXECUTE_IF_SET_IN_BITMAP (candidates, 0, i, sbi0)
    {
      gimple *oedef;
      enum tree_code oecode;
      unsigned j;

      oedef = SSA_NAME_DEF_STMT ((*ops)[i]->op);
      oecode = gimple_assign_rhs_code (oedef);
      linearize_expr_tree (&subops[i], oedef,
			   associative_tree_code (oecode), false);

      FOR_EACH_VEC_ELT (subops[i], j, oe1)
	{
	  oecount c;
	  int *slot;
	  int idx;
	  c.oecode = oecode;
	  c.cnt = 1;
	  c.id = next_oecount_id++;
	  c.op = oe1->op;
	  cvec.safe_push (c);
	  idx = cvec.length () + 41;
	  slot = ctable.find_slot (idx, INSERT);
	  if (!*slot)
	    *slot = idx;
	  else
	    {
	      cvec.pop ();
	      cvec[*slot - 42].cnt++;
	    }
	}
    }

  /* Sort the counting table.  */
  cvec.qsort (oecount_cmp);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      oecount *c;
      fprintf (dump_file, "Candidates:\n");
      FOR_EACH_VEC_ELT (cvec, j, c)
	{
	  fprintf (dump_file, "  %u %s: ", c->cnt,
		   c->oecode == MULT_EXPR
		   ? "*" : c->oecode == RDIV_EXPR ? "/" : "?");
	  print_generic_expr (dump_file, c->op, 0);
	  fprintf (dump_file, "\n");
	}
    }

  /* Process the (operand, code) pairs in order of most occurrence.  */
  auto_sbitmap candidates2 (length);
  while (!cvec.is_empty ())
    {
      oecount *c = &cvec.last ();
      if (c->cnt < 2)
	break;

      /* Now collect the operands in the outer chain that contain
         the common operand in their inner chain.  */
      bitmap_clear (candidates2);
      nr_candidates2 = 0;
      EXECUTE_IF_SET_IN_BITMAP (candidates, 0, i, sbi0)
	{
	  gimple *oedef;
	  enum tree_code oecode;
	  unsigned j;
	  tree op = (*ops)[i]->op;

	  /* If we undistributed in this chain already this may be
	     a constant.  */
	  if (TREE_CODE (op) != SSA_NAME)
	    continue;

	  oedef = SSA_NAME_DEF_STMT (op);
	  oecode = gimple_assign_rhs_code (oedef);
	  if (oecode != c->oecode)
	    continue;

	  FOR_EACH_VEC_ELT (subops[i], j, oe1)
	    {
	      if (oe1->op == c->op)
		{
		  bitmap_set_bit (candidates2, i);
		  ++nr_candidates2;
		  break;
		}
	    }
	}

      if (nr_candidates2 >= 2)
	{
	  operand_entry *oe1, *oe2;
	  gimple *prod;
	  int first = bitmap_first_set_bit (candidates2);

	  /* Build the new addition chain.  */
	  oe1 = (*ops)[first];
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Building (");
	      print_generic_expr (dump_file, oe1->op, 0);
	    }
	  zero_one_operation (&oe1->op, c->oecode, c->op);
	  EXECUTE_IF_SET_IN_BITMAP (candidates2, first+1, i, sbi0)
	    {
	      gimple *sum;
	      oe2 = (*ops)[i];
	      if (dump_file && (dump_flags & TDF_DETAILS))
		{
		  fprintf (dump_file, " + ");
		  print_generic_expr (dump_file, oe2->op, 0);
		}
	      zero_one_operation (&oe2->op, c->oecode, c->op);
	      sum = build_and_add_sum (TREE_TYPE (oe1->op),
				       oe1->op, oe2->op, opcode);
	      oe2->op = build_zero_cst (TREE_TYPE (oe2->op));
	      oe2->rank = 0;
	      oe1->op = gimple_get_lhs (sum);
	    }

	  /* Apply the multiplication/division.  */
	  prod = build_and_add_sum (TREE_TYPE (oe1->op),
				    oe1->op, c->op, c->oecode);
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, ") %s ", c->oecode == MULT_EXPR ? "*" : "/");
	      print_generic_expr (dump_file, c->op, 0);
	      fprintf (dump_file, "\n");
	    }

	  /* Record it in the addition chain and disable further
	     undistribution with this op.  */
	  oe1->op = gimple_assign_lhs (prod);
	  oe1->rank = get_rank (oe1->op);
	  subops[first].release ();

	  changed = true;
	}

      cvec.pop ();
    }

  for (i = 0; i < ops->length (); ++i)
    subops[i].release ();
  free (subops);
  cvec.release ();

  return changed;
}
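
/* A worked instance (names invented): for the chain a*x + b*x + c the
   pair (x, MULT_EXPR) has count two, so both candidates get x removed
   by zero_one_operation, the stripped candidates are summed to a + b,
   the sum is multiplied by x, and the operand list becomes
   {(a + b) * x, 0, c} for the caller to optimize further.  */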
/* If OPCODE is BIT_IOR_EXPR or BIT_AND_EXPR and CURR is a comparison
   expression, examine the other OPS to see if any of them are comparisons
   of the same values, which we may be able to combine or eliminate.
   For example, we can rewrite (a < b) | (a == b) as (a <= b).  */

static bool
eliminate_redundant_comparison (enum tree_code opcode,
				vec<operand_entry *> *ops,
				unsigned int currindex,
				operand_entry *curr)
{
  tree op1, op2;
  enum tree_code lcode, rcode;
  gimple *def1, *def2;
  int i;
  operand_entry *oe;

  if (opcode != BIT_IOR_EXPR && opcode != BIT_AND_EXPR)
    return false;

  /* Check that CURR is a comparison.  */
  if (TREE_CODE (curr->op) != SSA_NAME)
    return false;
  def1 = SSA_NAME_DEF_STMT (curr->op);
  if (!is_gimple_assign (def1))
    return false;
  lcode = gimple_assign_rhs_code (def1);
  if (TREE_CODE_CLASS (lcode) != tcc_comparison)
    return false;
  op1 = gimple_assign_rhs1 (def1);
  op2 = gimple_assign_rhs2 (def1);

  /* Now look for a similar comparison in the remaining OPS.  */
  for (i = currindex + 1; ops->iterate (i, &oe); i++)
    {
      tree t;

      if (TREE_CODE (oe->op) != SSA_NAME)
	continue;
      def2 = SSA_NAME_DEF_STMT (oe->op);
      if (!is_gimple_assign (def2))
	continue;
      rcode = gimple_assign_rhs_code (def2);
      if (TREE_CODE_CLASS (rcode) != tcc_comparison)
	continue;

      /* If we got here, we have a match.  See if we can combine the
	 two comparisons.  */
      if (opcode == BIT_IOR_EXPR)
	t = maybe_fold_or_comparisons (lcode, op1, op2,
				       rcode, gimple_assign_rhs1 (def2),
				       gimple_assign_rhs2 (def2));
      else
	t = maybe_fold_and_comparisons (lcode, op1, op2,
					rcode, gimple_assign_rhs1 (def2),
					gimple_assign_rhs2 (def2));
      if (!t)
	continue;

      /* maybe_fold_and_comparisons and maybe_fold_or_comparisons
	 always give us a boolean_type_node value back.  If the original
	 BIT_AND_EXPR or BIT_IOR_EXPR was of a wider integer type,
	 we need to convert.  */
      if (!useless_type_conversion_p (TREE_TYPE (curr->op), TREE_TYPE (t)))
	t = fold_convert (TREE_TYPE (curr->op), t);

      if (TREE_CODE (t) != INTEGER_CST
	  && !operand_equal_p (t, curr->op, 0))
	{
	  enum tree_code subcode;
	  tree newop1, newop2;
	  if (!COMPARISON_CLASS_P (t))
	    continue;
	  extract_ops_from_tree (t, &subcode, &newop1, &newop2);
	  STRIP_USELESS_TYPE_CONVERSION (newop1);
	  STRIP_USELESS_TYPE_CONVERSION (newop2);
	  if (!is_gimple_val (newop1) || !is_gimple_val (newop2))
	    continue;
	}

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Equivalence: ");
	  print_generic_expr (dump_file, curr->op, 0);
	  fprintf (dump_file, " %s ", op_symbol_code (opcode));
	  print_generic_expr (dump_file, oe->op, 0);
	  fprintf (dump_file, " -> ");
	  print_generic_expr (dump_file, t, 0);
	  fprintf (dump_file, "\n");
	}

      /* Now we can delete oe, as it has been subsumed by the new combined
         expression t.  */
      ops->ordered_remove (i);
      reassociate_stats.ops_eliminated ++;

      /* If t is the same as curr->op, we're done.  Otherwise we must
	 replace curr->op with t.  Special case is if we got a constant
	 back, in which case we add it to the end instead of in place of
	 the current entry.  */
      if (TREE_CODE (t) == INTEGER_CST)
	{
	  ops->ordered_remove (currindex);
	  add_to_ops_vec (ops, t);
	}
      else if (!operand_equal_p (t, curr->op, 0))
	{
	  gimple *sum;
	  enum tree_code subcode;
	  tree newop1;
	  tree newop2;
	  gcc_assert (COMPARISON_CLASS_P (t));
	  extract_ops_from_tree (t, &subcode, &newop1, &newop2);
	  STRIP_USELESS_TYPE_CONVERSION (newop1);
	  STRIP_USELESS_TYPE_CONVERSION (newop2);
	  gcc_checking_assert (is_gimple_val (newop1)
			       && is_gimple_val (newop2));
	  sum = build_and_add_sum (TREE_TYPE (t), newop1, newop2, subcode);
	  curr->op = gimple_get_lhs (sum);
	}
      return true;
    }

  return false;
}
/* Transform repeated addition of same values into multiply with
   constant.  */

static bool
transform_add_to_multiply (vec<operand_entry *> *ops)
{
  operand_entry *oe;
  tree op = NULL_TREE;
  int j;
  int i, start = -1, end = 0, count = 0;
  auto_vec<std::pair <int, int> > indxs;
  bool changed = false;

  if (!INTEGRAL_TYPE_P (TREE_TYPE ((*ops)[0]->op))
      && (!SCALAR_FLOAT_TYPE_P (TREE_TYPE ((*ops)[0]->op))
	  || !flag_unsafe_math_optimizations))
    return false;

  /* Look for repeated operands.  */
  FOR_EACH_VEC_ELT (*ops, i, oe)
    {
      if (start == -1)
	{
	  count = 1;
	  op = oe->op;
	  start = i;
	}
      else if (operand_equal_p (oe->op, op, 0))
	{
	  count++;
	  end = i;
	}
      else
	{
	  if (count > 1)
	    indxs.safe_push (std::make_pair (start, end));
	  count = 1;
	  op = oe->op;
	  start = i;
	}
    }

  if (count > 1)
    indxs.safe_push (std::make_pair (start, end));

  for (j = indxs.length () - 1; j >= 0; --j)
    {
      /* Convert repeated operand addition to multiplication.  */
      start = indxs[j].first;
      end = indxs[j].second;
      op = (*ops)[start]->op;
      count = end - start + 1;
      for (i = end; i >= start; --i)
	ops->unordered_remove (i);
      tree tmp = make_ssa_name (TREE_TYPE (op));
      tree cst = build_int_cst (integer_type_node, count);
      gassign *mul_stmt
	= gimple_build_assign (tmp, MULT_EXPR,
			       op, fold_convert (TREE_TYPE (op), cst));
      gimple_set_visited (mul_stmt, true);
      add_to_ops_vec (ops, tmp, mul_stmt);
      changed = true;
    }

  return changed;
}
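
/* E.g. (illustrative): the PLUS_EXPR operand list {x, x, x, y}
   becomes {y, t} with a queued statement t = x * 3; for floating
   point types this is only done under -funsafe-math-optimizations.  */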
/* Perform various identities and other optimizations on the list of
   operand entries, stored in OPS.  The tree code for the binary
   operation between all the operands is OPCODE.  */

static void
optimize_ops_list (enum tree_code opcode,
		   vec<operand_entry *> *ops)
{
  unsigned int length = ops->length ();
  unsigned int i;
  operand_entry *oe;
  operand_entry *oelast = NULL;
  bool iterate = false;

  if (length == 1)
    return;

  oelast = ops->last ();

  /* If the last two are constants, pop the constants off, merge them
     and try the next two.  */
  if (oelast->rank == 0 && is_gimple_min_invariant (oelast->op))
    {
      operand_entry *oelm1 = (*ops)[length - 2];

      if (oelm1->rank == 0
	  && is_gimple_min_invariant (oelm1->op)
	  && useless_type_conversion_p (TREE_TYPE (oelm1->op),
					TREE_TYPE (oelast->op)))
	{
	  tree folded = fold_binary (opcode, TREE_TYPE (oelm1->op),
				     oelm1->op, oelast->op);

	  if (folded && is_gimple_min_invariant (folded))
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, "Merging constants\n");

	      ops->pop ();
	      ops->pop ();

	      add_to_ops_vec (ops, folded);
	      reassociate_stats.constants_eliminated++;

	      optimize_ops_list (opcode, ops);
	      return;
	    }
	}
    }

  eliminate_using_constants (opcode, ops);
  oelast = NULL;

  for (i = 0; ops->iterate (i, &oe);)
    {
      bool done = false;

      if (eliminate_not_pairs (opcode, ops, i, oe))
	return;
      if (eliminate_duplicate_pair (opcode, ops, &done, i, oe, oelast)
	  || (!done && eliminate_plus_minus_pair (opcode, ops, i, oe))
	  || (!done && eliminate_redundant_comparison (opcode, ops, i, oe)))
	{
	  if (done)
	    return;
	  iterate = true;
	  oelast = NULL;
	  continue;
	}
      oelast = oe;
      i++;
    }

  length = ops->length ();
  oelast = ops->last ();

  if (iterate)
    optimize_ops_list (opcode, ops);
}
/* The following functions are subroutines to optimize_range_tests and allow
   it to try to change a logical combination of comparisons into a range
   test.

   For example, both
	X == 2 || X == 5 || X == 3 || X == 4
   and
	X >= 2 && X <= 5
   are converted to
	(unsigned) (X - 2) <= 3

   For more information see comments above fold_test_range in fold-const.c,
   this implementation is for GIMPLE.  */

struct range_entry
{
  tree exp;
  tree low;
  tree high;
  bool in_p;
  bool strict_overflow_p;
  unsigned int idx, next;
};
/* This is similar to make_range in fold-const.c, but on top of
   GIMPLE instead of trees.  If EXP is non-NULL, it should be
   an SSA_NAME and STMT argument is ignored, otherwise STMT
   argument should be a GIMPLE_COND.  */

static void
init_range_entry (struct range_entry *r, tree exp, gimple *stmt)
{
  int in_p;
  tree low, high;
  bool is_bool, strict_overflow_p;

  r->exp = NULL_TREE;
  r->in_p = false;
  r->strict_overflow_p = false;
  r->low = NULL_TREE;
  r->high = NULL_TREE;
  if (exp != NULL_TREE
      && (TREE_CODE (exp) != SSA_NAME || !INTEGRAL_TYPE_P (TREE_TYPE (exp))))
    return;

  /* Start with simply saying "EXP != 0" and then look at the code of EXP
     and see if we can refine the range.  Some of the cases below may not
     happen, but it doesn't seem worth worrying about this.  We "continue"
     the outer loop when we've changed something; otherwise we "break"
     the switch, which will "break" the while.  */
  low = exp ? build_int_cst (TREE_TYPE (exp), 0) : boolean_false_node;
  high = low;
  in_p = 0;
  strict_overflow_p = false;
  is_bool = false;
  if (exp == NULL_TREE)
    is_bool = true;
  else if (TYPE_PRECISION (TREE_TYPE (exp)) == 1)
    {
      if (TYPE_UNSIGNED (TREE_TYPE (exp)))
	is_bool = true;
      else
	return;
    }
  else if (TREE_CODE (TREE_TYPE (exp)) == BOOLEAN_TYPE)
    is_bool = true;

  while (1)
    {
      enum tree_code code;
      tree arg0, arg1, exp_type;
      tree nexp;
      location_t loc;

      if (exp != NULL_TREE)
	{
	  if (TREE_CODE (exp) != SSA_NAME
	      || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (exp))
	    break;

	  stmt = SSA_NAME_DEF_STMT (exp);
	  if (!is_gimple_assign (stmt))
	    break;

	  code = gimple_assign_rhs_code (stmt);
	  arg0 = gimple_assign_rhs1 (stmt);
	  arg1 = gimple_assign_rhs2 (stmt);
	  exp_type = TREE_TYPE (exp);
	}
      else
	{
	  code = gimple_cond_code (stmt);
	  arg0 = gimple_cond_lhs (stmt);
	  arg1 = gimple_cond_rhs (stmt);
	  exp_type = boolean_type_node;
	}

      if (TREE_CODE (arg0) != SSA_NAME)
	break;
      loc = gimple_location (stmt);
      switch (code)
	{
	case BIT_NOT_EXPR:
	  if (TREE_CODE (TREE_TYPE (exp)) == BOOLEAN_TYPE
	      /* Ensure the range is either +[-,0], +[0,0],
		 -[-,0], -[0,0] or +[1,-], +[1,1], -[1,-] or
		 -[1,1].  If it is e.g. +[-,-] or -[-,-]
		 or similar expression of unconditional true or
		 false, it should not be negated.  */
	      && ((high && integer_zerop (high))
		  || (low && integer_onep (low))))
	    {
	      in_p = !in_p;
	      exp = arg0;
	      continue;
	    }
	  break;
	case SSA_NAME:
	  exp = arg0;
	  continue;
	CASE_CONVERT:
	  if (is_bool)
	    goto do_default;
	  if (TYPE_PRECISION (TREE_TYPE (arg0)) == 1)
	    {
	      if (TYPE_UNSIGNED (TREE_TYPE (arg0)))
		is_bool = true;
	      else
		return;
	    }
	  else if (TREE_CODE (TREE_TYPE (arg0)) == BOOLEAN_TYPE)
	    is_bool = true;
	  goto do_default;
	case EQ_EXPR:
	case NE_EXPR:
	case LT_EXPR:
	case LE_EXPR:
	case GE_EXPR:
	case GT_EXPR:
	  is_bool = true;
	  /* FALLTHRU */
	default:
	  if (!is_bool)
	    return;
	do_default:
	  nexp = make_range_step (loc, code, arg0, arg1, exp_type,
				  &low, &high, &in_p,
				  &strict_overflow_p);
	  if (nexp != NULL_TREE)
	    {
	      exp = nexp;
	      gcc_assert (TREE_CODE (exp) == SSA_NAME);
	      continue;
	    }
	  break;
	}
      break;
    }
  r->exp = exp;
  r->in_p = in_p;
  r->low = low;
  r->high = high;
  r->strict_overflow_p = strict_overflow_p;
}
/* Comparison function for qsort.  Sort entries
   without SSA_NAME exp first, then with SSA_NAMEs sorted
   by increasing SSA_NAME_VERSION, and for the same SSA_NAMEs
   by increasing ->low and if ->low is the same, by increasing
   ->high.  ->low == NULL_TREE means minimum, ->high == NULL_TREE
   maximum.  */

static int
range_entry_cmp (const void *a, const void *b)
{
  const struct range_entry *p = (const struct range_entry *) a;
  const struct range_entry *q = (const struct range_entry *) b;

  if (p->exp != NULL_TREE && TREE_CODE (p->exp) == SSA_NAME)
    {
      if (q->exp != NULL_TREE && TREE_CODE (q->exp) == SSA_NAME)
	{
	  /* Group range_entries for the same SSA_NAME together.  */
	  if (SSA_NAME_VERSION (p->exp) < SSA_NAME_VERSION (q->exp))
	    return -1;
	  else if (SSA_NAME_VERSION (p->exp) > SSA_NAME_VERSION (q->exp))
	    return 1;
	  /* If ->low is different, NULL low goes first, then by
	     ascending low.  */
	  if (p->low != NULL_TREE)
	    {
	      if (q->low != NULL_TREE)
		{
		  tree tem = fold_binary (LT_EXPR, boolean_type_node,
					  p->low, q->low);
		  if (tem && integer_onep (tem))
		    return -1;
		  tem = fold_binary (GT_EXPR, boolean_type_node,
				     p->low, q->low);
		  if (tem && integer_onep (tem))
		    return 1;
		}
	      else
		return 1;
	    }
	  else if (q->low != NULL_TREE)
	    return -1;
	  /* If ->high is different, NULL high goes last, before that by
	     ascending high.  */
	  if (p->high != NULL_TREE)
	    {
	      if (q->high != NULL_TREE)
		{
		  tree tem = fold_binary (LT_EXPR, boolean_type_node,
					  p->high, q->high);
		  if (tem && integer_onep (tem))
		    return -1;
		  tem = fold_binary (GT_EXPR, boolean_type_node,
				     p->high, q->high);
		  if (tem && integer_onep (tem))
		    return 1;
		}
	      else
		return -1;
	    }
	  else if (q->high != NULL_TREE)
	    return 1;
	  /* If both ranges are the same, sort below by ascending idx.  */
	}
      else
	return 1;
    }
  else if (q->exp != NULL_TREE && TREE_CODE (q->exp) == SSA_NAME)
    return -1;

  if (p->idx < q->idx)
    return -1;
  else
    {
      gcc_checking_assert (p->idx > q->idx);
      return 1;
    }
}
2285 /* Helper routine of optimize_range_test.
2286 [EXP, IN_P, LOW, HIGH, STRICT_OVERFLOW_P] is a merged range for
2287 RANGE and OTHERRANGE through OTHERRANGE + COUNT - 1 ranges,
2288 OPCODE and OPS are arguments of optimize_range_tests. If OTHERRANGE
2289 is NULL, OTHERRANGEP should not be and then OTHERRANGEP points to
2290 an array of COUNT pointers to other ranges. Return
2291 true if the range merge has been successful.
2292 If OPCODE is ERROR_MARK, this is called from within
2293 maybe_optimize_range_tests and is performing inter-bb range optimization.
2294 In that case, whether an op is BIT_AND_EXPR or BIT_IOR_EXPR is found in
update_range_test (struct range_entry *range, struct range_entry *otherrange,
		   struct range_entry **otherrangep,
		   unsigned int count, enum tree_code opcode,
		   vec<operand_entry *> *ops, tree exp, gimple_seq seq,
		   bool in_p, tree low, tree high, bool strict_overflow_p)
  operand_entry *oe = (*ops)[range->idx];
  tree op = oe->op;
  gimple *stmt = op ? SSA_NAME_DEF_STMT (op)
		    : last_stmt (BASIC_BLOCK_FOR_FN (cfun, oe->id));
  location_t loc = gimple_location (stmt);
  tree optype = op ? TREE_TYPE (op) : boolean_type_node;
  tree tem = build_range_check (loc, optype, unshare_expr (exp),
				in_p, low, high);
  enum warn_strict_overflow_code wc = WARN_STRICT_OVERFLOW_COMPARISON;
  gimple_stmt_iterator gsi;
  unsigned int i, uid;

  if (tem == NULL_TREE)

  /* If op is default def SSA_NAME, there is no place to insert the
     new comparison.  Give up, unless we can use OP itself as the
     range test.  */
  if (op && SSA_NAME_IS_DEFAULT_DEF (op))
      if (op == range->exp
	  && ((TYPE_PRECISION (optype) == 1 && TYPE_UNSIGNED (optype))
	      || TREE_CODE (optype) == BOOLEAN_TYPE)
	  && (op == tem
	      || (TREE_CODE (tem) == EQ_EXPR
		  && TREE_OPERAND (tem, 0) == op
		  && integer_onep (TREE_OPERAND (tem, 1))))
	  && opcode != BIT_IOR_EXPR
	  && (opcode != ERROR_MARK || oe->rank != BIT_IOR_EXPR))

  if (strict_overflow_p && issue_strict_overflow_warning (wc))
    warning_at (loc, OPT_Wstrict_overflow,
		"assuming signed overflow does not occur "
		"when simplifying range test");

  if (dump_file && (dump_flags & TDF_DETAILS))
      struct range_entry *r;
      fprintf (dump_file, "Optimizing range tests ");
      print_generic_expr (dump_file, range->exp, 0);
      fprintf (dump_file, " %c[", range->in_p ? '+' : '-');
      print_generic_expr (dump_file, range->low, 0);
      fprintf (dump_file, ", ");
      print_generic_expr (dump_file, range->high, 0);
      fprintf (dump_file, "]");
      for (i = 0; i < count; i++)
	  fprintf (dump_file, " and %c[", r->in_p ? '+' : '-');
	  print_generic_expr (dump_file, r->low, 0);
	  fprintf (dump_file, ", ");
	  print_generic_expr (dump_file, r->high, 0);
	  fprintf (dump_file, "]");
      fprintf (dump_file, "\n into ");
      print_generic_expr (dump_file, tem, 0);
      fprintf (dump_file, "\n");

  if (opcode == BIT_IOR_EXPR
      || (opcode == ERROR_MARK && oe->rank == BIT_IOR_EXPR))
    tem = invert_truthvalue_loc (loc, tem);

  tem = fold_convert_loc (loc, optype, tem);
  gsi = gsi_for_stmt (stmt);
  uid = gimple_uid (stmt);
  gcc_checking_assert (tem == op);
  /* In rare cases range->exp can be equal to lhs of stmt.
     In that case we have to insert after the stmt rather than before
     it.  If stmt is a PHI, insert it at the start of the basic block.  */
  else if (op != range->exp)
      gsi_insert_seq_before (&gsi, seq, GSI_SAME_STMT);
      tem = force_gimple_operand_gsi (&gsi, tem, true, NULL_TREE, true,
				      GSI_SAME_STMT);
  else if (gimple_code (stmt) != GIMPLE_PHI)
      gsi_insert_seq_after (&gsi, seq, GSI_CONTINUE_LINKING);
      tem = force_gimple_operand_gsi (&gsi, tem, true, NULL_TREE, false,
				      GSI_CONTINUE_LINKING);
      gsi = gsi_after_labels (gimple_bb (stmt));
      if (!gsi_end_p (gsi))
	uid = gimple_uid (gsi_stmt (gsi));
	  gsi = gsi_start_bb (gimple_bb (stmt));
	  while (!gsi_end_p (gsi))
	      uid = gimple_uid (gsi_stmt (gsi));
      gsi_insert_seq_before (&gsi, seq, GSI_SAME_STMT);
      tem = force_gimple_operand_gsi (&gsi, tem, true, NULL_TREE, true,
				      GSI_SAME_STMT);
      if (gsi_end_p (gsi))
	gsi = gsi_last_bb (gimple_bb (stmt));
	for (; !gsi_end_p (gsi); gsi_prev (&gsi))
	  if (gimple_uid (gsi_stmt (gsi)))
	gimple_set_uid (gsi_stmt (gsi), uid);

  range->strict_overflow_p = false;

  for (i = 0; i < count; i++)
      range = otherrange + i;
	range = otherrangep[i];
      oe = (*ops)[range->idx];
      /* Now change all the other range test immediate uses, so that
	 those tests will be optimized away.  */
      if (opcode == ERROR_MARK)
	  oe->op = build_int_cst (TREE_TYPE (oe->op),
				  oe->rank == BIT_IOR_EXPR ? 0 : 1);
	  oe->op = (oe->rank == BIT_IOR_EXPR
		    ? boolean_false_node : boolean_true_node);
	oe->op = error_mark_node;
      range->exp = NULL_TREE;
      range->low = NULL_TREE;
      range->high = NULL_TREE;
/* Optimize X == CST1 || X == CST2
   if popcount (CST1 ^ CST2) == 1 into
   (X & ~(CST1 ^ CST2)) == (CST1 & ~(CST1 ^ CST2)).
   Similarly for ranges.  E.g.
   X != 2 && X != 3 && X != 10 && X != 11
   will be transformed by the previous optimization into
   !((X - 2U) <= 1U || (X - 10U) <= 1U)
   and this loop can transform that into
   !(((X & ~8) - 2U) <= 1U).  */
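/* A concrete instance, purely as an illustration: for X == 4 || X == 6
   we have CST1 ^ CST2 == 2 and popcount (2) == 1, so the pair of
   tests collapses to (X & ~2) == 4 -- the two constants differ in a
   single bit, and masking that bit away makes both compare equal.  */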
optimize_range_tests_xor (enum tree_code opcode, tree type,
			  tree lowi, tree lowj, tree highi, tree highj,
			  vec<operand_entry *> *ops,
			  struct range_entry *rangei,
			  struct range_entry *rangej)
  tree lowxor, highxor, tem, exp;
  /* Check lowi ^ lowj == highi ^ highj and
     popcount (lowi ^ lowj) == 1.  */
  lowxor = fold_binary (BIT_XOR_EXPR, type, lowi, lowj);
  if (lowxor == NULL_TREE || TREE_CODE (lowxor) != INTEGER_CST)
  if (!integer_pow2p (lowxor))
  highxor = fold_binary (BIT_XOR_EXPR, type, highi, highj);
  if (!tree_int_cst_equal (lowxor, highxor))
  tem = fold_build1 (BIT_NOT_EXPR, type, lowxor);
  exp = fold_build2 (BIT_AND_EXPR, type, rangei->exp, tem);
  lowj = fold_build2 (BIT_AND_EXPR, type, lowi, tem);
  highj = fold_build2 (BIT_AND_EXPR, type, highi, tem);
  if (update_range_test (rangei, rangej, NULL, 1, opcode, ops, exp,
			 NULL, rangei->in_p, lowj, highj,
			 rangei->strict_overflow_p
			 || rangej->strict_overflow_p))
/* Optimize X == CST1 || X == CST2
   if popcount (CST2 - CST1) == 1 into
   ((X - CST1) & ~(CST2 - CST1)) == 0.
   Similarly for ranges.  E.g.
   X == 43 || X == 76 || X == 44 || X == 78 || X == 77 || X == 46
   || X == 75 || X == 45
   will be transformed by the previous optimization into
   (X - 43U) <= 3U || (X - 75U) <= 3U
   and this loop can transform that into
   ((X - 43U) & ~(75U - 43U)) <= 3U.  */
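/* Again as an illustration only: for X == 5 || X == 13 the difference
   CST2 - CST1 == 8 has popcount 1, so the pair becomes
   ((X - 5) & ~8) == 0 -- subtracting 5 maps the two values to 0 and
   8, and masking away bit 3 maps both of those to 0.  */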
optimize_range_tests_diff (enum tree_code opcode, tree type,
			   tree lowi, tree lowj, tree highi, tree highj,
			   vec<operand_entry *> *ops,
			   struct range_entry *rangei,
			   struct range_entry *rangej)
  tree tem1, tem2, mask;
  /* Check highi - lowi == highj - lowj.  */
  tem1 = fold_binary (MINUS_EXPR, type, highi, lowi);
  if (tem1 == NULL_TREE || TREE_CODE (tem1) != INTEGER_CST)
  tem2 = fold_binary (MINUS_EXPR, type, highj, lowj);
  if (!tree_int_cst_equal (tem1, tem2))
  /* Check popcount (lowj - lowi) == 1.  */
  tem1 = fold_binary (MINUS_EXPR, type, lowj, lowi);
  if (tem1 == NULL_TREE || TREE_CODE (tem1) != INTEGER_CST)
  if (!integer_pow2p (tem1))
  type = unsigned_type_for (type);
  tem1 = fold_convert (type, tem1);
  tem2 = fold_convert (type, tem2);
  lowi = fold_convert (type, lowi);
  mask = fold_build1 (BIT_NOT_EXPR, type, tem1);
  tem1 = fold_binary (MINUS_EXPR, type,
		      fold_convert (type, rangei->exp), lowi);
  tem1 = fold_build2 (BIT_AND_EXPR, type, tem1, mask);
  lowj = build_int_cst (type, 0);
  if (update_range_test (rangei, rangej, NULL, 1, opcode, ops, tem1,
			 NULL, rangei->in_p, lowj, tem2,
			 rangei->strict_overflow_p
			 || rangej->strict_overflow_p))
/* Perform the common checks shared by optimize_range_tests_xor and
   optimize_range_tests_diff.
   If OPTIMIZE_XOR is TRUE, call optimize_range_tests_xor,
   else call optimize_range_tests_diff.  */
optimize_range_tests_1 (enum tree_code opcode, int first, int length,
			bool optimize_xor, vec<operand_entry *> *ops,
			struct range_entry *ranges)
  bool any_changes = false;
  for (i = first; i < length; i++)
      tree lowi, highi, lowj, highj, type, tem;

      if (ranges[i].exp == NULL_TREE || ranges[i].in_p)
      type = TREE_TYPE (ranges[i].exp);
      if (!INTEGRAL_TYPE_P (type))
      lowi = ranges[i].low;
      if (lowi == NULL_TREE)
	lowi = TYPE_MIN_VALUE (type);
      highi = ranges[i].high;
      if (highi == NULL_TREE)
      for (j = i + 1; j < length && j < i + 64; j++)
	  if (ranges[i].exp != ranges[j].exp || ranges[j].in_p)
	  lowj = ranges[j].low;
	  if (lowj == NULL_TREE)
	  highj = ranges[j].high;
	  if (highj == NULL_TREE)
	    highj = TYPE_MAX_VALUE (type);
	  /* Check lowj > highi.  */
	  tem = fold_binary (GT_EXPR, boolean_type_node,
			     lowj, highi);
	  if (tem == NULL_TREE || !integer_onep (tem))
	    changes = optimize_range_tests_xor (opcode, type, lowi, lowj,
						highi, highj, ops,
						ranges + i, ranges + j);
	    changes = optimize_range_tests_diff (opcode, type, lowi, lowj,
						 highi, highj, ops,
						 ranges + i, ranges + j);
/* Helper function of optimize_range_tests_to_bit_test.  Handle a single
   range, EXP, LOW, HIGH, compute bit mask of bits to test and return
   EXP on success, NULL otherwise.  */
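/* For instance (illustration only): with PREC == 64 and the range
   [LOW, HIGH] == [43, 46], HIGH - LOW == 3 and the initial mask is
   wi::shifted_mask (0, 4, false, 64) == 0xf, i.e. one bit per value
   in the range, relative to LOW.  */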
extract_bit_test_mask (tree exp, int prec, tree totallow, tree low, tree high,
		       wide_int *mask, tree *totallowp)
  tree tem = int_const_binop (MINUS_EXPR, high, low);
  if (tem == NULL_TREE
      || TREE_CODE (tem) != INTEGER_CST
      || TREE_OVERFLOW (tem)
      || tree_int_cst_sgn (tem) == -1
      || compare_tree_int (tem, prec) != -1)

  unsigned HOST_WIDE_INT max = tree_to_uhwi (tem) + 1;
  *mask = wi::shifted_mask (0, max, false, prec);
  if (TREE_CODE (exp) == BIT_AND_EXPR
      && TREE_CODE (TREE_OPERAND (exp, 1)) == INTEGER_CST)
      widest_int msk = wi::to_widest (TREE_OPERAND (exp, 1));
      msk = wi::zext (~msk, TYPE_PRECISION (TREE_TYPE (exp)));
      if (wi::popcount (msk) == 1
	  && wi::ltu_p (msk, prec - max))
	  *mask |= wi::shifted_mask (msk.to_uhwi (), max, false, prec);
	  max += msk.to_uhwi ();
	  exp = TREE_OPERAND (exp, 0);
  if (integer_zerop (low)
      && TREE_CODE (exp) == PLUS_EXPR
      && TREE_CODE (TREE_OPERAND (exp, 1)) == INTEGER_CST)
      tree ret = TREE_OPERAND (exp, 0);
      widest_int bias
	= wi::neg (wi::sext (wi::to_widest (TREE_OPERAND (exp, 1)),
			     TYPE_PRECISION (TREE_TYPE (low))));
      tree tbias = wide_int_to_tree (TREE_TYPE (ret), bias);
      else if (!tree_int_cst_lt (totallow, tbias))
	  bias = wi::to_widest (tbias);
	  bias -= wi::to_widest (totallow);
	  if (bias >= 0 && bias < prec - max)
	      *mask = wi::lshift (*mask, bias);
  if (!tree_int_cst_lt (totallow, low))
  tem = int_const_binop (MINUS_EXPR, low, totallow);
  if (tem == NULL_TREE
      || TREE_CODE (tem) != INTEGER_CST
      || TREE_OVERFLOW (tem)
      || compare_tree_int (tem, prec - max) == 1)

  *mask = wi::lshift (*mask, wi::to_widest (tem));
/* Attempt to optimize small range tests using bit test.
   E.g.
   X != 43 && X != 76 && X != 44 && X != 78 && X != 49
   && X != 77 && X != 46 && X != 75 && X != 45 && X != 82
   has been optimized by the earlier optimizations into:
   ((X - 43U) & ~32U) > 3U && X != 49 && X != 82
   As all the 43 through 82 range is less than 64 numbers,
   for 64-bit word targets optimize that into:
   (X - 43U) > 40U && ((1 << (X - 43U)) & 0x8F0000004FULL) == 0  */
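/* Decoding the example mask above (illustration): relative to the
   base value 43, bits 0-3 stand for 43..46, bit 6 for 49, bits 32-35
   for 75..78 and bit 39 for 82; those bits together are exactly
   0x8F0000004FULL.  */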
optimize_range_tests_to_bit_test (enum tree_code opcode, int first, int length,
				  vec<operand_entry *> *ops,
				  struct range_entry *ranges)
  bool any_changes = false;
  int prec = GET_MODE_BITSIZE (word_mode);
  auto_vec<struct range_entry *, 64> candidates;

  for (i = first; i < length - 2; i++)
      tree lowi, highi, lowj, highj, type;

      if (ranges[i].exp == NULL_TREE || ranges[i].in_p)
      type = TREE_TYPE (ranges[i].exp);
      if (!INTEGRAL_TYPE_P (type))
      lowi = ranges[i].low;
      if (lowi == NULL_TREE)
	lowi = TYPE_MIN_VALUE (type);
      highi = ranges[i].high;
      if (highi == NULL_TREE)
      tree exp = extract_bit_test_mask (ranges[i].exp, prec, lowi, lowi,
					highi, &mask, &lowi);
      if (exp == NULL_TREE)
      bool strict_overflow_p = ranges[i].strict_overflow_p;
      candidates.truncate (0);
      int end = MIN (i + 64, length);
      for (j = i + 1; j < end; j++)
	  if (ranges[j].exp == NULL_TREE || ranges[j].in_p)
	  if (ranges[j].exp == exp)
	  else if (TREE_CODE (ranges[j].exp) == BIT_AND_EXPR)
	      exp2 = TREE_OPERAND (ranges[j].exp, 0);
	      else if (TREE_CODE (exp2) == PLUS_EXPR)
		  exp2 = TREE_OPERAND (exp2, 0);
	  lowj = ranges[j].low;
	  if (lowj == NULL_TREE)
	  highj = ranges[j].high;
	  if (highj == NULL_TREE)
	    highj = TYPE_MAX_VALUE (type);
	  exp2 = extract_bit_test_mask (ranges[j].exp, prec, lowi, lowj,
					highj, &mask2, NULL);
	  strict_overflow_p |= ranges[j].strict_overflow_p;
	  candidates.safe_push (&ranges[j]);

      /* If we need otherwise 3 or more comparisons, use a bit test.  */
      if (candidates.length () >= 2)
	  tree high = wide_int_to_tree (TREE_TYPE (lowi),
					wi::to_widest (lowi)
					+ prec - 1 - wi::clz (mask));
	  operand_entry *oe = (*ops)[ranges[i].idx];
	  tree op = oe->op;
	  gimple *stmt = op ? SSA_NAME_DEF_STMT (op)
			    : last_stmt (BASIC_BLOCK_FOR_FN (cfun, oe->id));
	  location_t loc = gimple_location (stmt);
	  tree optype = op ? TREE_TYPE (op) : boolean_type_node;

	  /* See if it isn't cheaper to pretend the minimum value of the
	     range is 0, if maximum value is small enough.
	     We can avoid then subtraction of the minimum value, but the
	     mask constant could be perhaps more expensive.  */
	  if (compare_tree_int (lowi, 0) > 0
	      && compare_tree_int (high, prec) < 0)
	      HOST_WIDE_INT m = tree_to_uhwi (lowi);
	      rtx reg = gen_raw_REG (word_mode, 10000);
	      bool speed_p = optimize_bb_for_speed_p (gimple_bb (stmt));
	      cost_diff = set_rtx_cost (gen_rtx_PLUS (word_mode, reg,
						      GEN_INT (-m)), speed_p);
	      rtx r = immed_wide_int_const (mask, word_mode);
	      cost_diff += set_src_cost (gen_rtx_AND (word_mode, reg, r),
					 word_mode, speed_p);
	      r = immed_wide_int_const (wi::lshift (mask, m), word_mode);
	      cost_diff -= set_src_cost (gen_rtx_AND (word_mode, reg, r),
					 word_mode, speed_p);
	      mask = wi::lshift (mask, m);
	      lowi = build_zero_cst (TREE_TYPE (lowi));

	  tree tem = build_range_check (loc, optype, unshare_expr (exp),
					false, lowi, high);
	  if (tem == NULL_TREE || is_gimple_val (tem))
	  tree etype = unsigned_type_for (TREE_TYPE (exp));
	  exp = fold_build2_loc (loc, MINUS_EXPR, etype,
				 fold_convert_loc (loc, etype, exp),
				 fold_convert_loc (loc, etype, lowi));
	  exp = fold_convert_loc (loc, integer_type_node, exp);
	  tree word_type = lang_hooks.types.type_for_mode (word_mode, 1);
	  exp = fold_build2_loc (loc, LSHIFT_EXPR, word_type,
				 build_int_cst (word_type, 1), exp);
	  exp = fold_build2_loc (loc, BIT_AND_EXPR, word_type, exp,
				 wide_int_to_tree (word_type, mask));
	  exp = fold_build2_loc (loc, EQ_EXPR, optype, exp,
				 build_zero_cst (word_type));
	  if (is_gimple_val (exp))

	  /* The shift might have undefined behavior if TEM is true,
	     but reassociate_bb isn't prepared to have basic blocks
	     split when it is running.  So, temporarily emit a code
	     with BIT_IOR_EXPR instead of &&, and fix it up in
	     branch_fixup.  */
	  tem = force_gimple_operand (tem, &seq, true, NULL_TREE);
	  gcc_assert (TREE_CODE (tem) == SSA_NAME);
	  gimple_set_visited (SSA_NAME_DEF_STMT (tem), true);
	  exp = force_gimple_operand (exp, &seq2, true, NULL_TREE);
	  gimple_seq_add_seq_without_update (&seq, seq2);
	  gcc_assert (TREE_CODE (exp) == SSA_NAME);
	  gimple_set_visited (SSA_NAME_DEF_STMT (exp), true);
	  gimple *g = gimple_build_assign (make_ssa_name (optype),
					   BIT_IOR_EXPR, tem, exp);
	  gimple_set_location (g, loc);
	  gimple_seq_add_stmt_without_update (&seq, g);
	  exp = gimple_assign_lhs (g);
	  tree val = build_zero_cst (optype);
	  if (update_range_test (&ranges[i], NULL, candidates.address (),
				 candidates.length (), opcode, ops, exp,
				 seq, false, val, val, strict_overflow_p))
	      reassoc_branch_fixups.safe_push (tem);
	    gimple_seq_discard (seq);
/* Attempt to optimize for signed a and b where b is known to be >= 0:
   a >= 0 && a < b into (unsigned) a < (unsigned) b
   a >= 0 && a <= b into (unsigned) a <= (unsigned) b  */
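/* Why the combined unsigned compare is safe (illustration): when
   a >= 0, (unsigned) a == a and, with b >= 0, the unsigned compare
   orders them the same way as the signed one; when a < 0,
   (unsigned) a wraps to a value with the sign bit set, which is
   larger than any non-negative b, so the single unsigned compare
   fails exactly as a >= 0 && a < b would.  */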
optimize_range_tests_var_bound (enum tree_code opcode, int first, int length,
				vec<operand_entry *> *ops,
				struct range_entry *ranges)
  bool any_changes = false;
  hash_map<tree, int> *map = NULL;

  for (i = first; i < length; i++)
      if (ranges[i].exp == NULL_TREE
	  || TREE_CODE (ranges[i].exp) != SSA_NAME
      tree type = TREE_TYPE (ranges[i].exp);
      if (!INTEGRAL_TYPE_P (type)
	  || TYPE_UNSIGNED (type)
	  || ranges[i].low == NULL_TREE
	  || !integer_zerop (ranges[i].low)
	  || ranges[i].high != NULL_TREE)
      /* EXP >= 0 here.  */
	map = new hash_map<tree, int>;
      map->put (ranges[i].exp, i);

  for (i = 0; i < length; i++)
      if (ranges[i].low == NULL_TREE
	  || ranges[i].high == NULL_TREE
	  || !integer_zerop (ranges[i].low)
	  || !integer_zerop (ranges[i].high))
      if (TREE_CODE (ranges[i].exp) != SSA_NAME)
      stmt = SSA_NAME_DEF_STMT (ranges[i].exp);
      if (!is_gimple_assign (stmt))
      ccode = gimple_assign_rhs_code (stmt);
      rhs1 = gimple_assign_rhs1 (stmt);
      rhs2 = gimple_assign_rhs2 (stmt);
	  operand_entry *oe = (*ops)[ranges[i].idx];
	  stmt = last_stmt (BASIC_BLOCK_FOR_FN (cfun, oe->id));
	  if (gimple_code (stmt) != GIMPLE_COND)
	  ccode = gimple_cond_code (stmt);
	  rhs1 = gimple_cond_lhs (stmt);
	  rhs2 = gimple_cond_rhs (stmt);
      if (TREE_CODE (rhs1) != SSA_NAME
	  || rhs2 == NULL_TREE
	  || TREE_CODE (rhs2) != SSA_NAME)
	  ccode = invert_tree_comparison (ccode, false);
	  std::swap (rhs1, rhs2);
	  ccode = swap_tree_comparison (ccode);
      int *idx = map->get (rhs1);
      wide_int nz = get_nonzero_bits (rhs2);
      /* We have EXP < RHS2 or EXP <= RHS2 where EXP >= 0
	 and RHS2 is known to be RHS2 >= 0.  */
      tree utype = unsigned_type_for (TREE_TYPE (rhs1));
      enum warn_strict_overflow_code wc = WARN_STRICT_OVERFLOW_COMPARISON;
      if ((ranges[*idx].strict_overflow_p
	   || ranges[i].strict_overflow_p)
	  && issue_strict_overflow_warning (wc))
	warning_at (gimple_location (stmt), OPT_Wstrict_overflow,
		    "assuming signed overflow does not occur "
		    "when simplifying range test");

      if (dump_file && (dump_flags & TDF_DETAILS))
	  struct range_entry *r = &ranges[*idx];
	  fprintf (dump_file, "Optimizing range test ");
	  print_generic_expr (dump_file, r->exp, 0);
	  fprintf (dump_file, " +[");
	  print_generic_expr (dump_file, r->low, 0);
	  fprintf (dump_file, ", ");
	  print_generic_expr (dump_file, r->high, 0);
	  fprintf (dump_file, "] and comparison ");
	  print_generic_expr (dump_file, rhs1, 0);
	  fprintf (dump_file, " %s ", op_symbol_code (ccode));
	  print_generic_expr (dump_file, rhs2, 0);
	  fprintf (dump_file, "\n into (");
	  print_generic_expr (dump_file, utype, 0);
	  fprintf (dump_file, ") ");
	  print_generic_expr (dump_file, rhs1, 0);
	  fprintf (dump_file, " %s (", op_symbol_code (ccode));
	  print_generic_expr (dump_file, utype, 0);
	  fprintf (dump_file, ") ");
	  print_generic_expr (dump_file, rhs2, 0);
	  fprintf (dump_file, "\n");

      operand_entry *oe = (*ops)[ranges[i].idx];
      if (opcode == BIT_IOR_EXPR
	  || (opcode == ERROR_MARK && oe->rank == BIT_IOR_EXPR))
	  ccode = invert_tree_comparison (ccode, false);

      unsigned int uid = gimple_uid (stmt);
      gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
      gimple *g = gimple_build_assign (make_ssa_name (utype), NOP_EXPR, rhs1);
      gimple_set_uid (g, uid);
      rhs1 = gimple_assign_lhs (g);
      gsi_insert_before (&gsi, g, GSI_SAME_STMT);
      g = gimple_build_assign (make_ssa_name (utype), NOP_EXPR, rhs2);
      gimple_set_uid (g, uid);
      rhs2 = gimple_assign_lhs (g);
      gsi_insert_before (&gsi, g, GSI_SAME_STMT);
      if (tree_swap_operands_p (rhs1, rhs2))
	  std::swap (rhs1, rhs2);
	  ccode = swap_tree_comparison (ccode);
      if (gimple_code (stmt) == GIMPLE_COND)
	  gcond *c = as_a <gcond *> (stmt);
	  gimple_cond_set_code (c, ccode);
	  gimple_cond_set_lhs (c, rhs1);
	  gimple_cond_set_rhs (c, rhs2);
	  tree ctype = oe->op ? TREE_TYPE (oe->op) : boolean_type_node;
	  if (!INTEGRAL_TYPE_P (ctype)
	      || (TREE_CODE (ctype) != BOOLEAN_TYPE
		  && TYPE_PRECISION (ctype) != 1))
	    ctype = boolean_type_node;
	  g = gimple_build_assign (make_ssa_name (ctype), ccode, rhs1, rhs2);
	  gimple_set_uid (g, uid);
	  gsi_insert_before (&gsi, g, GSI_SAME_STMT);
	  if (oe->op && ctype != TREE_TYPE (oe->op))
	      g = gimple_build_assign (make_ssa_name (TREE_TYPE (oe->op)),
				       NOP_EXPR, gimple_assign_lhs (g));
	      gimple_set_uid (g, uid);
	      gsi_insert_before (&gsi, g, GSI_SAME_STMT);
	  ranges[i].exp = gimple_assign_lhs (g);
	  oe->op = ranges[i].exp;
	  ranges[i].low = build_zero_cst (TREE_TYPE (ranges[i].exp));
	  ranges[i].high = ranges[i].low;
      ranges[i].strict_overflow_p = false;
      oe = (*ops)[ranges[*idx].idx];
      /* Now change all the other range test immediate uses, so that
	 those tests will be optimized away.  */
      if (opcode == ERROR_MARK)
	  oe->op = build_int_cst (TREE_TYPE (oe->op),
				  oe->rank == BIT_IOR_EXPR ? 0 : 1);
	  oe->op = (oe->rank == BIT_IOR_EXPR
		    ? boolean_false_node : boolean_true_node);
	oe->op = error_mark_node;
      ranges[*idx].exp = NULL_TREE;
      ranges[*idx].low = NULL_TREE;
      ranges[*idx].high = NULL_TREE;
/* Optimize range tests, similarly to how fold_range_test optimizes
   them on trees.  The tree code for the binary
   operation between all the operands is OPCODE.
   If OPCODE is ERROR_MARK, optimize_range_tests is called from within
   maybe_optimize_range_tests for inter-bb range optimization.
   In that case if oe->op is NULL, oe->id is bb->index whose
   GIMPLE_COND is && or ||ed into the test, and oe->rank says
   the actual opcode.  */
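/* The classic single-name case, purely as an illustration:
   a_1 >= 0 && a_1 < 10 produces two range entries for a_1 which
   merge_ranges combines into the single range +[0, 9], and
   update_range_test then emits one check equivalent to
   (unsigned) a_1 <= 9 in place of the two comparisons.  */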
optimize_range_tests (enum tree_code opcode,
		      vec<operand_entry *> *ops)
  unsigned int length = ops->length (), i, j, first;
  struct range_entry *ranges;
  bool any_changes = false;

  ranges = XNEWVEC (struct range_entry, length);
  for (i = 0; i < length; i++)
      init_range_entry (ranges + i, oe->op,
			oe->op
			? NULL
			: last_stmt (BASIC_BLOCK_FOR_FN (cfun, oe->id)));
      /* For | invert it now, we will invert it again before emitting
	 the optimized expression.  */
      if (opcode == BIT_IOR_EXPR
	  || (opcode == ERROR_MARK && oe->rank == BIT_IOR_EXPR))
	ranges[i].in_p = !ranges[i].in_p;

  qsort (ranges, length, sizeof (*ranges), range_entry_cmp);
  for (i = 0; i < length; i++)
    if (ranges[i].exp != NULL_TREE && TREE_CODE (ranges[i].exp) == SSA_NAME)
  /* Try to merge ranges.  */
  for (first = i; i < length; i++)
      tree low = ranges[i].low;
      tree high = ranges[i].high;
      int in_p = ranges[i].in_p;
      bool strict_overflow_p = ranges[i].strict_overflow_p;
      int update_fail_count = 0;

      for (j = i + 1; j < length; j++)
	  if (ranges[i].exp != ranges[j].exp)
	  if (!merge_ranges (&in_p, &low, &high, in_p, low, high,
			     ranges[j].in_p, ranges[j].low, ranges[j].high))
	  strict_overflow_p |= ranges[j].strict_overflow_p;

      if (update_range_test (ranges + i, ranges + i + 1, NULL, j - i - 1,
			     opcode, ops, ranges[i].exp, NULL, in_p,
			     low, high, strict_overflow_p))
      /* Avoid quadratic complexity if all merge_ranges calls would succeed,
	 while update_range_test would fail.  */
      else if (update_fail_count == 64)
	++update_fail_count;

  any_changes |= optimize_range_tests_1 (opcode, first, length, true,
					 ops, ranges);
  if (BRANCH_COST (optimize_function_for_speed_p (cfun), false) >= 2)
    any_changes |= optimize_range_tests_1 (opcode, first, length, false,
					   ops, ranges);
  if (lshift_cheap_p (optimize_function_for_speed_p (cfun)))
    any_changes |= optimize_range_tests_to_bit_test (opcode, first, length,
						     ops, ranges);
  any_changes |= optimize_range_tests_var_bound (opcode, first, length, ops,
						 ranges);

  if (any_changes && opcode != ERROR_MARK)
      FOR_EACH_VEC_ELT (*ops, i, oe)
	  if (oe->op == error_mark_node)

  XDELETEVEC (ranges);
/* A subroutine of optimize_vec_cond_expr to extract and canonicalize
   the operands of the VEC_COND_EXPR.  Returns ERROR_MARK on failure,
   otherwise the comparison code.  */
ovce_extract_ops (tree var, gassign **rets, bool *reti)
  if (TREE_CODE (var) != SSA_NAME)

  gassign *stmt = dyn_cast <gassign *> (SSA_NAME_DEF_STMT (var));

  /* ??? If we start creating more COND_EXPR, we could perform
     this same optimization with them.  For now, simplify.  */
  if (gimple_assign_rhs_code (stmt) != VEC_COND_EXPR)

  tree cond = gimple_assign_rhs1 (stmt);
  tree_code cmp = TREE_CODE (cond);
  if (TREE_CODE_CLASS (cmp) != tcc_comparison)

  /* ??? For now, allow only canonical true and false result vectors.
     We could expand this to other constants should the need arise,
     but at the moment we don't create them.  */
  tree t = gimple_assign_rhs2 (stmt);
  tree f = gimple_assign_rhs3 (stmt);
  if (integer_all_onesp (t))
  else if (integer_all_onesp (f))
      cmp = invert_tree_comparison (cmp, false);
  if (!integer_zerop (f))
/* Optimize the condition of VEC_COND_EXPRs which have been combined
   with OPCODE (either BIT_AND_EXPR or BIT_IOR_EXPR).  */
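/* For instance (illustration only): if two entries are masks like
   _1 = VEC_COND_EXPR <v_2 < v_3, { -1, ... }, { 0, ... }>
   _4 = VEC_COND_EXPR <v_2 <= v_3, { -1, ... }, { 0, ... }>
   combined by BIT_AND_EXPR, maybe_fold_and_comparisons can fold the
   two comparisons into the single v_2 < v_3, and the second entry in
   OPS is marked with error_mark_node and dropped.  */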
optimize_vec_cond_expr (tree_code opcode, vec<operand_entry *> *ops)
  unsigned int length = ops->length (), i, j;
  bool any_changes = false;

  for (i = 0; i < length; ++i)
      tree elt0 = (*ops)[i]->op;

      tree_code cmp0 = ovce_extract_ops (elt0, &stmt0, &invert);
      if (cmp0 == ERROR_MARK)

      for (j = i + 1; j < length; ++j)
	  tree &elt1 = (*ops)[j]->op;

	  tree_code cmp1 = ovce_extract_ops (elt1, &stmt1, NULL);
	  if (cmp1 == ERROR_MARK)

	  tree cond0 = gimple_assign_rhs1 (stmt0);
	  tree x0 = TREE_OPERAND (cond0, 0);
	  tree y0 = TREE_OPERAND (cond0, 1);

	  tree cond1 = gimple_assign_rhs1 (stmt1);
	  tree x1 = TREE_OPERAND (cond1, 0);
	  tree y1 = TREE_OPERAND (cond1, 1);

	  if (opcode == BIT_AND_EXPR)
	    comb = maybe_fold_and_comparisons (cmp0, x0, y0, cmp1, x1, y1);
	  else if (opcode == BIT_IOR_EXPR)
	    comb = maybe_fold_or_comparisons (cmp0, x0, y0, cmp1, x1, y1);

	  if (dump_file && (dump_flags & TDF_DETAILS))
	      fprintf (dump_file, "Transforming ");
	      print_generic_expr (dump_file, cond0, 0);
	      fprintf (dump_file, " %c ", opcode == BIT_AND_EXPR ? '&' : '|');
	      print_generic_expr (dump_file, cond1, 0);
	      fprintf (dump_file, " into ");
	      print_generic_expr (dump_file, comb, 0);
	      fputc ('\n', dump_file);

	  gimple_assign_set_rhs1 (stmt0, comb);
	    std::swap (*gimple_assign_rhs2_ptr (stmt0),
		       *gimple_assign_rhs3_ptr (stmt0));
	  update_stmt (stmt0);

	  elt1 = error_mark_node;

      FOR_EACH_VEC_ELT (*ops, i, oe)
	  if (oe->op == error_mark_node)
/* Return true if STMT is a cast like:
   # _345 = PHI <_123(N), 1(...), 1(...)>
   where _234 has bool type, _123 has single use and
   bb N has a single successor M.  This is commonly used in
   the last block of a range test.

   Also return true if STMT is tcc_compare like:
   # _345 = PHI <_234(N), 1(...), 1(...)>
   where _234 has bool type, single use and
   bb N has a single successor M.  This is commonly used in
   the last block of a range test.  */
final_range_test_p (gimple *stmt)
  basic_block bb, rhs_bb, lhs_bb;
  use_operand_p use_p;

  if (!gimple_assign_cast_p (stmt)
      && (!is_gimple_assign (stmt)
	  || (TREE_CODE_CLASS (gimple_assign_rhs_code (stmt))
	      != tcc_comparison)))
  bb = gimple_bb (stmt);
  if (!single_succ_p (bb))
  e = single_succ_edge (bb);
  if (e->flags & EDGE_COMPLEX)

  lhs = gimple_assign_lhs (stmt);
  rhs = gimple_assign_rhs1 (stmt);
  if (gimple_assign_cast_p (stmt)
      && (!INTEGRAL_TYPE_P (TREE_TYPE (lhs))
	  || TREE_CODE (rhs) != SSA_NAME
	  || TREE_CODE (TREE_TYPE (rhs)) != BOOLEAN_TYPE))

  if (!gimple_assign_cast_p (stmt)
      && (TREE_CODE (TREE_TYPE (lhs)) != BOOLEAN_TYPE))

  /* Test whether lhs is consumed only by a PHI in the only successor bb.  */
  if (!single_imm_use (lhs, &use_p, &use_stmt))

  if (gimple_code (use_stmt) != GIMPLE_PHI
      || gimple_bb (use_stmt) != e->dest)

  /* And that the rhs is defined in the same loop.  */
  if (gimple_assign_cast_p (stmt))
      if (TREE_CODE (rhs) != SSA_NAME
	  || !(rhs_bb = gimple_bb (SSA_NAME_DEF_STMT (rhs)))
	  || !flow_bb_inside_loop_p (loop_containing_stmt (stmt), rhs_bb))
      if (TREE_CODE (lhs) != SSA_NAME
	  || !(lhs_bb = gimple_bb (SSA_NAME_DEF_STMT (lhs)))
	  || !flow_bb_inside_loop_p (loop_containing_stmt (stmt), lhs_bb))
/* Return true if BB is a suitable basic block for inter-bb range test
   optimization.  If BACKWARD is true, BB should be the only predecessor
   of TEST_BB, and *OTHER_BB is either NULL and filled by the routine,
   or compared with to find a common basic block to which all conditions
   branch if true resp. false.  If BACKWARD is false, TEST_BB should
   be the only predecessor of BB.  */
suitable_cond_bb (basic_block bb, basic_block test_bb, basic_block *other_bb,
		  bool backward)
  edge_iterator ei, ei2;
  bool other_edge_seen = false;

  /* Check last stmt first.  */
  stmt = last_stmt (bb);
  if (stmt == NULL
      || (gimple_code (stmt) != GIMPLE_COND
	  && (backward || !final_range_test_p (stmt)))
      || gimple_visited_p (stmt)
      || stmt_could_throw_p (stmt)
  is_cond = gimple_code (stmt) == GIMPLE_COND;

  /* If last stmt is GIMPLE_COND, verify that one of the succ edges
     goes to the next bb (if BACKWARD, it is TEST_BB), and the other
     to *OTHER_BB (if not set yet, try to find it out).  */
  if (EDGE_COUNT (bb->succs) != 2)
  FOR_EACH_EDGE (e, ei, bb->succs)
      if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
      if (e->dest == test_bb)
      if (*other_bb == NULL)
	  FOR_EACH_EDGE (e2, ei2, test_bb->succs)
	    if (!(e2->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
	    else if (e->dest == e2->dest)
	      *other_bb = e->dest;
	  if (*other_bb == NULL)
      if (e->dest == *other_bb)
	other_edge_seen = true;
  if (*other_bb == NULL || !other_edge_seen)
  else if (single_succ (bb) != *other_bb)

  /* Now check all PHIs of *OTHER_BB.  */
  e = find_edge (bb, *other_bb);
  e2 = find_edge (test_bb, *other_bb);
  for (gsi = gsi_start_phis (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
      gphi *phi = gsi.phi ();
      /* If both BB and TEST_BB end with GIMPLE_COND, all PHI arguments
	 corresponding to BB and TEST_BB predecessor must be the same.  */
      if (!operand_equal_p (gimple_phi_arg_def (phi, e->dest_idx),
			    gimple_phi_arg_def (phi, e2->dest_idx), 0))
	  /* Otherwise, if one of the blocks doesn't end with GIMPLE_COND,
	     one of the PHIs should have the lhs of the last stmt in
	     that block as PHI arg and that PHI should have 0 or 1
	     corresponding to it in all other range test basic blocks
	     considered.  */
	      if (gimple_phi_arg_def (phi, e->dest_idx)
		  == gimple_assign_lhs (stmt)
		  && (integer_zerop (gimple_phi_arg_def (phi, e2->dest_idx))
		      || integer_onep (gimple_phi_arg_def (phi,
							   e2->dest_idx))))
	      gimple *test_last = last_stmt (test_bb);
	      if (gimple_code (test_last) != GIMPLE_COND
		  && gimple_phi_arg_def (phi, e2->dest_idx)
		     == gimple_assign_lhs (test_last)
		  && (integer_zerop (gimple_phi_arg_def (phi, e->dest_idx))
		      || integer_onep (gimple_phi_arg_def (phi, e->dest_idx))))
/* Return true if BB doesn't have side-effects that would disallow
   range test optimization, all SSA_NAMEs set in the bb are consumed
   in the bb and there are no PHIs.  */
no_side_effect_bb (basic_block bb)
  gimple_stmt_iterator gsi;

  if (!gimple_seq_empty_p (phi_nodes (bb)))
  last = last_stmt (bb);
  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
      gimple *stmt = gsi_stmt (gsi);
      imm_use_iterator imm_iter;
      use_operand_p use_p;

      if (is_gimple_debug (stmt))
      if (gimple_has_side_effects (stmt))
      if (!is_gimple_assign (stmt))
      lhs = gimple_assign_lhs (stmt);
      if (TREE_CODE (lhs) != SSA_NAME)
      if (gimple_assign_rhs_could_trap_p (stmt))
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
	  gimple *use_stmt = USE_STMT (use_p);
	  if (is_gimple_debug (use_stmt))
	  if (gimple_bb (use_stmt) != bb)
/* If VAR is set by CODE (BIT_{AND,IOR}_EXPR) which is reassociable,
   return true and fill in *OPS recursively.  */
get_ops (tree var, enum tree_code code, vec<operand_entry *> *ops,
	 struct loop *loop)
  gimple *stmt = SSA_NAME_DEF_STMT (var);

  if (!is_reassociable_op (stmt, code, loop))

  rhs[0] = gimple_assign_rhs1 (stmt);
  rhs[1] = gimple_assign_rhs2 (stmt);
  gimple_set_visited (stmt, true);
  for (i = 0; i < 2; i++)
    if (TREE_CODE (rhs[i]) == SSA_NAME
	&& !get_ops (rhs[i], code, ops, loop)
	&& has_single_use (rhs[i]))
	operand_entry *oe = operand_entry_pool.allocate ();
	oe->stmt_to_insert = NULL;
	ops->safe_push (oe);
/* Find the ops that were added by get_ops starting from VAR, see if
   they were changed during update_range_test and if yes, create new
   stmts.  */
update_ops (tree var, enum tree_code code, vec<operand_entry *> ops,
	    unsigned int *pidx, struct loop *loop)
  gimple *stmt = SSA_NAME_DEF_STMT (var);

  if (!is_reassociable_op (stmt, code, loop))

  rhs[0] = gimple_assign_rhs1 (stmt);
  rhs[1] = gimple_assign_rhs2 (stmt);
  for (i = 0; i < 2; i++)
    if (TREE_CODE (rhs[i]) == SSA_NAME)
	rhs[2 + i] = update_ops (rhs[i], code, ops, pidx, loop);
	if (rhs[2 + i] == NULL_TREE)
	    if (has_single_use (rhs[i]))
	      rhs[2 + i] = ops[(*pidx)++]->op;
	      rhs[2 + i] = rhs[i];
  if ((rhs[2] != rhs[0] || rhs[3] != rhs[1])
      && (rhs[2] != rhs[1] || rhs[3] != rhs[0]))
      gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
      var = make_ssa_name (TREE_TYPE (var));
      gassign *g = gimple_build_assign (var, gimple_assign_rhs_code (stmt),
					rhs[2], rhs[3]);
      gimple_set_uid (g, gimple_uid (stmt));
      gimple_set_visited (g, true);
      gsi_insert_before (&gsi, g, GSI_SAME_STMT);
/* Structure to track the initial value passed to get_ops and
   the range in the ops vector for each basic block.  */

struct inter_bb_range_test_entry
{
  tree op;
  unsigned int first_idx, last_idx;
};
/* Inter-bb range test optimization.

   Returns true if a gimple conditional is optimized to a constant
   true/false, otherwise returns false.

   This indicates to the caller that it should run a CFG cleanup pass
   once reassociation is completed.  */
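/* The shape this routine looks for, sketched purely as an illustration:

     <bb 2>: if (a_1 > 10) goto <bb 3>; else goto <bb 5>;
     <bb 3>: if (a_1 < 20) goto <bb 4>; else goto <bb 5>;
     <bb 4>: ...
     <bb 5>: # t_6 = PHI <0(2), 0(3), 1(4)>

   Both conditions branch to the common other_bb (bb 5), so the two
   tests act as a BIT_AND_EXPR and can be merged into a single range
   test such as (unsigned) (a_1 - 11) < 9 in bb 2.  */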
maybe_optimize_range_tests (gimple *stmt)
  basic_block first_bb = gimple_bb (stmt);
  basic_block last_bb = first_bb;
  basic_block other_bb = NULL;
  auto_vec<operand_entry *> ops;
  auto_vec<inter_bb_range_test_entry> bbinfo;
  bool any_changes = false;
  bool cfg_cleanup_needed = false;

  /* Consider only basic blocks that end with GIMPLE_COND or
     a cast statement satisfying final_range_test_p.  All
     but the last bb in the first_bb .. last_bb range
     should end with GIMPLE_COND.  */
  if (gimple_code (stmt) == GIMPLE_COND)
      if (EDGE_COUNT (first_bb->succs) != 2)
	return cfg_cleanup_needed;
  else if (final_range_test_p (stmt))
    other_bb = single_succ (first_bb);
    return cfg_cleanup_needed;

  if (stmt_could_throw_p (stmt))
    return cfg_cleanup_needed;

  /* As relative ordering of post-dominator sons isn't fixed,
     maybe_optimize_range_tests can be called first on any
     bb in the range we want to optimize.  So, start searching
     backwards, if first_bb can be set to a predecessor.  */
  while (single_pred_p (first_bb))
      basic_block pred_bb = single_pred (first_bb);
      if (!suitable_cond_bb (pred_bb, first_bb, &other_bb, true))
      if (!no_side_effect_bb (first_bb))
  /* If first_bb is last_bb, other_bb hasn't been computed yet.
     Before starting forward search in last_bb successors, find
     out the other_bb.  */
  if (first_bb == last_bb)
      /* As non-GIMPLE_COND last stmt always terminates the range,
	 if forward search didn't discover anything, just give up.  */
      if (gimple_code (stmt) != GIMPLE_COND)
	return cfg_cleanup_needed;
      /* Look at both successors.  Either it ends with a GIMPLE_COND
	 and satisfies suitable_cond_bb, or ends with a cast and
	 other_bb is that cast's successor.  */
      FOR_EACH_EDGE (e, ei, first_bb->succs)
	if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE))
	    || e->dest == first_bb)
	  return cfg_cleanup_needed;
	else if (single_pred_p (e->dest))
	    stmt = last_stmt (e->dest);
	    if (stmt
		&& gimple_code (stmt) == GIMPLE_COND
		&& EDGE_COUNT (e->dest->succs) == 2)
		if (suitable_cond_bb (first_bb, e->dest, &other_bb, true))
	    else if (stmt
		     && final_range_test_p (stmt)
		     && find_edge (first_bb, single_succ (e->dest)))
		other_bb = single_succ (e->dest);
		if (other_bb == first_bb)
      if (other_bb == NULL)
	return cfg_cleanup_needed;

  /* Now do the forward search, moving last_bb to successor bbs
     that aren't other_bb.  */
  while (EDGE_COUNT (last_bb->succs) == 2)
      FOR_EACH_EDGE (e, ei, last_bb->succs)
	if (e->dest != other_bb)
      if (!single_pred_p (e->dest))
      if (!suitable_cond_bb (e->dest, last_bb, &other_bb, false))
      if (!no_side_effect_bb (e->dest))
  if (first_bb == last_bb)
    return cfg_cleanup_needed;
  /* Here basic blocks first_bb through last_bb's predecessor
     end with GIMPLE_COND, all of them have one of the edges to
     other_bb and another to another block in the range,
     all blocks except first_bb don't have side-effects and
     last_bb ends with either GIMPLE_COND, or cast satisfying
     final_range_test_p.  */
  for (bb = last_bb; ; bb = single_pred (bb))
      enum tree_code code;
      inter_bb_range_test_entry bb_ent;

      bb_ent.op = NULL_TREE;
      bb_ent.first_idx = ops.length ();
      bb_ent.last_idx = bb_ent.first_idx;
      e = find_edge (bb, other_bb);
      stmt = last_stmt (bb);
      gimple_set_visited (stmt, true);
      if (gimple_code (stmt) != GIMPLE_COND)
	  use_operand_p use_p;

	  lhs = gimple_assign_lhs (stmt);
	  rhs = gimple_assign_rhs1 (stmt);
	  gcc_assert (bb == last_bb);

	  /* # _345 = PHI <_123(N), 1(...), 1(...)>
	     or 0 instead of 1.  If it is 0, the _234
	     range test is anded together with all the
	     other range tests, if it is 1, it is ored with
	     them.  */
	  single_imm_use (lhs, &use_p, &phi);
	  gcc_assert (gimple_code (phi) == GIMPLE_PHI);
	  e2 = find_edge (first_bb, other_bb);
	  gcc_assert (gimple_phi_arg_def (phi, e->dest_idx) == lhs);
	  if (integer_zerop (gimple_phi_arg_def (phi, d)))
	    code = BIT_AND_EXPR;
	      gcc_checking_assert (integer_onep (gimple_phi_arg_def (phi, d)));
	      code = BIT_IOR_EXPR;

	  /* If _234 SSA_NAME_DEF_STMT is
	     (or &, corresponding to 1/0 in the phi arguments,
	     push into ops the individual range test arguments
	     of the bitwise or resp. and, recursively.  */
	  if (TREE_CODE (rhs) == SSA_NAME
	      && (TREE_CODE_CLASS (gimple_assign_rhs_code (stmt))
	      && !get_ops (rhs, code, &ops,
			   loop_containing_stmt (stmt))
	      && has_single_use (rhs))
	      /* Otherwise, push the _234 range test itself.  */
	      operand_entry *oe = operand_entry_pool.allocate ();
	      oe->stmt_to_insert = NULL;
	  else if (is_gimple_assign (stmt)
		   && (TREE_CODE_CLASS (gimple_assign_rhs_code (stmt))
		   && !get_ops (lhs, code, &ops,
				loop_containing_stmt (stmt))
		   && has_single_use (lhs))
	      operand_entry *oe = operand_entry_pool.allocate ();
	  bb_ent.last_idx = ops.length ();
	  bbinfo.safe_push (bb_ent);
      /* Otherwise stmt is GIMPLE_COND.  */
      code = gimple_cond_code (stmt);
      lhs = gimple_cond_lhs (stmt);
      rhs = gimple_cond_rhs (stmt);
      if (TREE_CODE (lhs) == SSA_NAME
	  && INTEGRAL_TYPE_P (TREE_TYPE (lhs))
	  && ((code != EQ_EXPR && code != NE_EXPR)
	      || rhs != boolean_false_node
	      /* Either push into ops the individual bitwise
		 or resp. and operands, depending on which
		 edge is other_bb.  */
	      || !get_ops (lhs, (((e->flags & EDGE_TRUE_VALUE) == 0)
				 ^ (code == EQ_EXPR))
			   ? BIT_AND_EXPR : BIT_IOR_EXPR, &ops,
			   loop_containing_stmt (stmt))))
	  /* Or push the GIMPLE_COND stmt itself.  */
	  operand_entry *oe = operand_entry_pool.allocate ();

	  oe->rank = (e->flags & EDGE_TRUE_VALUE)
		     ? BIT_IOR_EXPR : BIT_AND_EXPR;
	  /* oe->op = NULL signals that there is no SSA_NAME
	     for the range test, and oe->id instead is the
	     basic block number, at which's end the GIMPLE_COND
	     is.  */
	  oe->stmt_to_insert = NULL;
      else if (ops.length () > bb_ent.first_idx)
	  bb_ent.last_idx = ops.length ();
      bbinfo.safe_push (bb_ent);
  if (ops.length () > 1)
    any_changes = optimize_range_tests (ERROR_MARK, &ops);
      unsigned int idx, max_idx = 0;
      /* update_ops relies on has_single_use predicates returning the
	 same values as it did during get_ops earlier.  Additionally it
	 never removes statements, only adds new ones and it should walk
	 from the single imm use and check the predicate already before
	 making those changes.
	 On the other side, the handling of GIMPLE_COND directly can turn
	 previously multiply used SSA_NAMEs into single use SSA_NAMEs, so
	 it needs to be done in a separate loop afterwards.  */
      for (bb = last_bb, idx = 0; ; bb = single_pred (bb), idx++)
	  if (bbinfo[idx].first_idx < bbinfo[idx].last_idx
	      && bbinfo[idx].op != NULL_TREE)
	      stmt = last_stmt (bb);
	      new_op = update_ops (bbinfo[idx].op,
				   (enum tree_code)
				   ops[bbinfo[idx].first_idx]->rank,
				   ops, &bbinfo[idx].first_idx,
				   loop_containing_stmt (stmt));
	      if (new_op == NULL_TREE)
		  gcc_assert (bb == last_bb);
		  new_op = ops[bbinfo[idx].first_idx++]->op;
	      if (bbinfo[idx].op != new_op)
		  imm_use_iterator iter;
		  use_operand_p use_p;
		  gimple *use_stmt, *cast_or_tcc_cmp_stmt = NULL;

		  FOR_EACH_IMM_USE_STMT (use_stmt, iter, bbinfo[idx].op)
		    if (is_gimple_debug (use_stmt))
		    else if (gimple_code (use_stmt) == GIMPLE_COND
			     || gimple_code (use_stmt) == GIMPLE_PHI)
		      FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
			SET_USE (use_p, new_op);
		    else if ((is_gimple_assign (use_stmt)
			      && (TREE_CODE_CLASS
				  (gimple_assign_rhs_code (use_stmt))
				  == tcc_comparison)))
		      cast_or_tcc_cmp_stmt = use_stmt;
		    else if (gimple_assign_cast_p (use_stmt))
		      cast_or_tcc_cmp_stmt = use_stmt;

		  if (cast_or_tcc_cmp_stmt)
		      gcc_assert (bb == last_bb);
		      tree lhs = gimple_assign_lhs (cast_or_tcc_cmp_stmt);
		      tree new_lhs = make_ssa_name (TREE_TYPE (lhs));
		      enum tree_code rhs_code
			= gimple_assign_cast_p (cast_or_tcc_cmp_stmt)
			? gimple_assign_rhs_code (cast_or_tcc_cmp_stmt)
		      if (is_gimple_min_invariant (new_op))
			  new_op = fold_convert (TREE_TYPE (lhs), new_op);
			  g = gimple_build_assign (new_lhs, new_op);
			g = gimple_build_assign (new_lhs, rhs_code, new_op);
		      gimple_stmt_iterator gsi
			= gsi_for_stmt (cast_or_tcc_cmp_stmt);
		      gimple_set_uid (g, gimple_uid (cast_or_tcc_cmp_stmt));
		      gimple_set_visited (g, true);
		      gsi_insert_before (&gsi, g, GSI_SAME_STMT);
		      FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
			if (is_gimple_debug (use_stmt))
			else if (gimple_code (use_stmt) == GIMPLE_COND
				 || gimple_code (use_stmt) == GIMPLE_PHI)
			  FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
			    SET_USE (use_p, new_lhs);
      for (bb = last_bb, idx = 0; ; bb = single_pred (bb), idx++)
	  if (bbinfo[idx].first_idx < bbinfo[idx].last_idx
	      && bbinfo[idx].op == NULL_TREE
	      && ops[bbinfo[idx].first_idx]->op != NULL_TREE)
	      gcond *cond_stmt = as_a <gcond *> (last_stmt (bb));

	      /* If we collapse the conditional to a true/false
		 condition, then bubble that knowledge up to our caller.  */
	      if (integer_zerop (ops[bbinfo[idx].first_idx]->op))
		  gimple_cond_make_false (cond_stmt);
		  cfg_cleanup_needed = true;
	      else if (integer_onep (ops[bbinfo[idx].first_idx]->op))
		  gimple_cond_make_true (cond_stmt);
		  cfg_cleanup_needed = true;
		  gimple_cond_set_code (cond_stmt, NE_EXPR);
		  gimple_cond_set_lhs (cond_stmt,
				       ops[bbinfo[idx].first_idx]->op);
		  gimple_cond_set_rhs (cond_stmt, boolean_false_node);
	      update_stmt (cond_stmt);
      /* The above changes could result in basic blocks after the first
	 modified one, up to and including last_bb, to be executed even if
	 they would not be in the original program.  If the value ranges of
	 assignment lhs' in those bbs were dependent on the conditions
	 guarding those basic blocks which now can change, the VRs might
	 be incorrect.  As no_side_effect_bb should ensure those SSA_NAMEs
	 are only used within the same bb, it should be not a big deal if
	 we just reset all the VRs in those bbs.  See PR68671.  */
      for (bb = last_bb, idx = 0; idx < max_idx; bb = single_pred (bb), idx++)
	reset_flow_sensitive_info_in_bb (bb);

  return cfg_cleanup_needed;
/* Return true if OPERAND is defined by a PHI node which uses the LHS
   of STMT in its operands.  This is also known as a "destructive
   update" operation.  */
is_phi_for_stmt (gimple *stmt, tree operand)
  use_operand_p arg_p;

  if (TREE_CODE (operand) != SSA_NAME)

  lhs = gimple_assign_lhs (stmt);
  def_stmt = SSA_NAME_DEF_STMT (operand);
  def_phi = dyn_cast <gphi *> (def_stmt);

  FOR_EACH_PHI_ARG (arg_p, def_phi, i, SSA_OP_USE)
    if (lhs == USE_FROM_PTR (arg_p))
/* Remove def stmt of VAR if VAR has zero uses and recurse
   on rhs1 operand if so.  */
remove_visited_stmt_chain (tree var)
  gimple_stmt_iterator gsi;

  if (TREE_CODE (var) != SSA_NAME || !has_zero_uses (var))
  stmt = SSA_NAME_DEF_STMT (var);
  if (is_gimple_assign (stmt) && gimple_visited_p (stmt))
      var = gimple_assign_rhs1 (stmt);
      gsi = gsi_for_stmt (stmt);
      reassoc_remove_stmt (&gsi);
      release_defs (stmt);
/* This function checks three consecutive operands in
   passed operands vector OPS starting from OPINDEX and
   swaps two operands if it is profitable for binary operation
   consuming OPINDEX + 1 and OPINDEX + 2 operands.

   We pair ops with the same rank if possible.

   The alternative we try is to see if STMT is a destructive
   update style statement, which is like:
   In that case, we want to use the destructive update form to
   expose the possible vectorizer sum reduction opportunity.
   In that case, the third operand will be the phi node.  This
   check is not performed if STMT is null.

   We could, of course, try to be better as noted above, and do a
   lot of work to try to find these opportunities in >3 operand
   cases, but it is unlikely to be worth it.  */
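/* E.g. (illustration only): with ranks (1, 1, 2) the first test fires
   and the entries are swapped to (2, 1, 1), so the two rank-1 operands
   end up in positions OPINDEX + 1 and OPINDEX + 2 and are consumed
   by the same binary operation.  */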
swap_ops_for_binary_stmt (vec<operand_entry *> ops,
			  unsigned int opindex, gimple *stmt)
  operand_entry *oe1, *oe2, *oe3;

  oe1 = ops[opindex];
  oe2 = ops[opindex + 1];
  oe3 = ops[opindex + 2];

  if ((oe1->rank == oe2->rank
       && oe2->rank != oe3->rank)
      || (stmt && is_phi_for_stmt (stmt, oe3->op)
	  && !is_phi_for_stmt (stmt, oe1->op)
	  && !is_phi_for_stmt (stmt, oe2->op)))
    std::swap (*oe1, *oe3);
  else if ((oe1->rank == oe3->rank
	    && oe2->rank != oe3->rank)
	   || (stmt && is_phi_for_stmt (stmt, oe2->op)
	       && !is_phi_for_stmt (stmt, oe1->op)
	       && !is_phi_for_stmt (stmt, oe3->op)))
    std::swap (*oe1, *oe2);
/* If definition of RHS1 or RHS2 dominates STMT, return the later of those
   two definitions, otherwise return STMT.  */
static inline gimple *
find_insert_point (gimple *stmt, tree rhs1, tree rhs2)
  if (TREE_CODE (rhs1) == SSA_NAME
      && reassoc_stmt_dominates_stmt_p (stmt, SSA_NAME_DEF_STMT (rhs1)))
    stmt = SSA_NAME_DEF_STMT (rhs1);
  if (TREE_CODE (rhs2) == SSA_NAME
      && reassoc_stmt_dominates_stmt_p (stmt, SSA_NAME_DEF_STMT (rhs2)))
    stmt = SSA_NAME_DEF_STMT (rhs2);
/* If the stmt that defines operand has to be inserted, insert it
   before the use.  */
insert_stmt_before_use (gimple *stmt, gimple *stmt_to_insert)
  gcc_assert (is_gimple_assign (stmt_to_insert));
  tree rhs1 = gimple_assign_rhs1 (stmt_to_insert);
  tree rhs2 = gimple_assign_rhs2 (stmt_to_insert);
  gimple *insert_point = find_insert_point (stmt, rhs1, rhs2);
  gimple_stmt_iterator gsi = gsi_for_stmt (insert_point);
  gimple_set_uid (stmt_to_insert, gimple_uid (insert_point));

  /* If the insert point is not stmt, then insert_point would be
     the point where operand rhs1 or rhs2 is defined.  In this case,
     stmt_to_insert has to be inserted afterwards.  This would
     only happen when the stmt insertion point is flexible.  */
  if (stmt == insert_point)
    gsi_insert_before (&gsi, stmt_to_insert, GSI_NEW_STMT);
    insert_stmt_after (stmt_to_insert, insert_point);
/* Recursively rewrite our linearized statements so that the operators
   match those in OPS[OPINDEX], putting the computation in rank
   order.  Return new lhs.
   CHANGED is true if we shouldn't reuse the lhs SSA_NAME both in
   the current stmt and during recursive invocations.
   NEXT_CHANGED is true if we shouldn't reuse the lhs SSA_NAME in
   recursive invocations.  */
rewrite_expr_tree (gimple *stmt, unsigned int opindex,
		   vec<operand_entry *> ops, bool changed, bool next_changed)
  tree rhs1 = gimple_assign_rhs1 (stmt);
  tree rhs2 = gimple_assign_rhs2 (stmt);
  tree lhs = gimple_assign_lhs (stmt);

  /* The final recursion case for this function is that you have
     exactly two operations left.
     If we had exactly one op in the entire list to start with, we
     would have never called this function, and the tail recursion
     rewrites them one at a time.  */
  if (opindex + 2 == ops.length ())
      operand_entry *oe1, *oe2;

      oe1 = ops[opindex];
      oe2 = ops[opindex + 1];

      if (rhs1 != oe1->op || rhs2 != oe2->op)
	  gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
	  unsigned int uid = gimple_uid (stmt);

	  if (dump_file && (dump_flags & TDF_DETAILS))
	      fprintf (dump_file, "Transforming ");
	      print_gimple_stmt (dump_file, stmt, 0, 0);

	  /* If the stmt that defines operand has to be inserted, insert it
	     before the use.  */
	  if (oe1->stmt_to_insert)
	    insert_stmt_before_use (stmt, oe1->stmt_to_insert);
	  if (oe2->stmt_to_insert)
	    insert_stmt_before_use (stmt, oe2->stmt_to_insert);
	  /* Even when changed is false, reassociation could have e.g. removed
	     some redundant operations, so unless we are just swapping the
	     arguments or unless there is no change at all (then we just
	     return lhs), force creation of a new SSA_NAME.  */
	  if (changed || ((rhs1 != oe2->op || rhs2 != oe1->op) && opindex))
	      gimple *insert_point
		= find_insert_point (stmt, oe1->op, oe2->op);
	      lhs = make_ssa_name (TREE_TYPE (lhs));
	      stmt
		= gimple_build_assign (lhs, gimple_assign_rhs_code (stmt),
				       oe1->op, oe2->op);
	      gimple_set_uid (stmt, uid);
	      gimple_set_visited (stmt, true);
	      if (insert_point == gsi_stmt (gsi))
		gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
		insert_stmt_after (stmt, insert_point);
	      gcc_checking_assert (find_insert_point (stmt, oe1->op, oe2->op)
				   == stmt);
	      gimple_assign_set_rhs1 (stmt, oe1->op);
	      gimple_assign_set_rhs2 (stmt, oe2->op);
	  if (rhs1 != oe1->op && rhs1 != oe2->op)
	    remove_visited_stmt_chain (rhs1);

	  if (dump_file && (dump_flags & TDF_DETAILS))
	      fprintf (dump_file, " into ");
	      print_gimple_stmt (dump_file, stmt, 0, 0);

  /* If we hit here, we should have 3 or more ops left.  */
  gcc_assert (opindex + 2 < ops.length ());

  /* Rewrite the next operator.  */
  operand_entry *oe = ops[opindex];

  /* If the stmt that defines operand has to be inserted, insert it
     before the use.  */
  if (oe->stmt_to_insert)
    insert_stmt_before_use (stmt, oe->stmt_to_insert);

  /* Recurse on the LHS of the binary operator, which is guaranteed to
     be the non-leaf side.  */
  tree new_rhs1
    = rewrite_expr_tree (SSA_NAME_DEF_STMT (rhs1), opindex + 1, ops,
			 changed || oe->op != rhs2 || next_changed,
			 false);

  if (oe->op != rhs2 || new_rhs1 != rhs1)
      if (dump_file && (dump_flags & TDF_DETAILS))
	  fprintf (dump_file, "Transforming ");
	  print_gimple_stmt (dump_file, stmt, 0, 0);

      /* If changed is false, this is either opindex == 0
	 or all outer rhs2's were equal to corresponding oe->op,
	 and powi_result is NULL.
	 That means lhs is equivalent before and after reassociation.
	 Otherwise ensure the old lhs SSA_NAME is not reused and
	 create a new stmt as well, so that any debug stmts will be
	 properly adjusted.  */
	  gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
	  unsigned int uid = gimple_uid (stmt);
	  gimple *insert_point = find_insert_point (stmt, new_rhs1, oe->op);

	  lhs = make_ssa_name (TREE_TYPE (lhs));
	  stmt = gimple_build_assign (lhs, gimple_assign_rhs_code (stmt),
				      new_rhs1, oe->op);
	  gimple_set_uid (stmt, uid);
	  gimple_set_visited (stmt, true);
	  if (insert_point == gsi_stmt (gsi))
	    gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
	    insert_stmt_after (stmt, insert_point);
	  gcc_checking_assert (find_insert_point (stmt, new_rhs1, oe->op)
			       == stmt);
	  gimple_assign_set_rhs1 (stmt, new_rhs1);
	  gimple_assign_set_rhs2 (stmt, oe->op);

      if (dump_file && (dump_flags & TDF_DETAILS))
	  fprintf (dump_file, " into ");
	  print_gimple_stmt (dump_file, stmt, 0, 0);
/* Find out how many cycles we need to compute statements chain.
   OPS_NUM holds number of statements in a chain.  CPU_WIDTH is a
   maximum number of independent statements we may execute per cycle.  */
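/* A worked example (illustration only): OPS_NUM == 10, CPU_WIDTH == 2
   gives 10 / (2 * 2) == 2 cycles for the first phase, leaving
   rest == 10 - 2 * 2 == 6 operands; 6 is not a power of two, so the
   halving phase needs floor_log2 (6) + 1 == 3 more cycles, 5 total.  */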
get_required_cycles (int ops_num, int cpu_width)
  /* While we have more than 2 * cpu_width operands
     we may reduce number of operands by cpu_width
     per cycle.  */
  res = ops_num / (2 * cpu_width);

  /* Remained operands count may be reduced twice per cycle
     until we have only one operand.  */
  rest = (unsigned)(ops_num - res * cpu_width);
  elog = exact_log2 (rest);
    res += floor_log2 (rest) + 1;
/* Returns an optimal number of registers to use for computation of
   given statements.  */
get_reassociation_width (int ops_num, enum tree_code opc,
			 machine_mode mode)
  int param_width = PARAM_VALUE (PARAM_TREE_REASSOC_WIDTH);

  if (param_width > 0)
    width = param_width;
    width = targetm.sched.reassociation_width (opc, mode);

  /* Get the minimal time required for sequence computation.  */
  cycles_best = get_required_cycles (ops_num, width);

  /* Check if we may use less width and still compute sequence for
     the same time.  It will allow us to reduce registers usage.
     get_required_cycles is monotonically increasing with lower width
     so we can perform a binary search for the minimal width that still
     results in the optimal cycle count.  */
  while (width > width_min)
      int width_mid = (width + width_min) / 2;

      if (get_required_cycles (ops_num, width_mid) == cycles_best)
      else if (width_min < width_mid)
	width_min = width_mid;
/* Recursively rewrite our linearized statements so that the operators
   match those in OPS[OPINDEX], putting the computation in rank
   order and trying to allow operations to be executed in
   parallel.  */
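/* As an illustration: with width 2 the chain a + b + c + d can be
   emitted as the two independent statements
     _1 = a + b;
     _2 = c + d;
   which may issue in the same cycle, followed by _1 + _2, instead of
   the serial (((a + b) + c) + d) that rewrite_expr_tree produces.  */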
4434 rewrite_expr_tree_parallel (gassign
*stmt
, int width
,
4435 vec
<operand_entry
*> ops
)
4437 enum tree_code opcode
= gimple_assign_rhs_code (stmt
);
4438 int op_num
= ops
.length ();
4439 gcc_assert (op_num
> 0);
4440 int stmt_num
= op_num
- 1;
4441 gimple
**stmts
= XALLOCAVEC (gimple
*, stmt_num
);
4442 int op_index
= op_num
- 1;
4444 int ready_stmts_end
= 0;
4446 gimple
*stmt1
= NULL
, *stmt2
= NULL
;
4447 tree last_rhs1
= gimple_assign_rhs1 (stmt
);
4449 /* We start expression rewriting from the top statements.
4450 So, in this loop we create a full list of statements
4451 we will work with. */
4452 stmts
[stmt_num
- 1] = stmt
;
4453 for (i
= stmt_num
- 2; i
>= 0; i
--)
4454 stmts
[i
] = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmts
[i
+1]));
4456 for (i
= 0; i
< stmt_num
; i
++)
4460 /* Determine whether we should use results of
4461 already handled statements or not. */
4462 if (ready_stmts_end
== 0
4463 && (i
- stmt_index
>= width
|| op_index
< 1))
4464 ready_stmts_end
= i
;
4466 /* Now we choose operands for the next statement. Non zero
4467 value in ready_stmts_end means here that we should use
4468 the result of already generated statements as new operand. */
4469 if (ready_stmts_end
> 0)
4471 op1
= gimple_assign_lhs (stmts
[stmt_index
++]);
4472 if (ready_stmts_end
> stmt_index
)
4473 op2
= gimple_assign_lhs (stmts
[stmt_index
++]);
4474 else if (op_index
>= 0)
4476 operand_entry
*oe
= ops
[op_index
--];
4477 stmt2
= oe
->stmt_to_insert
;
4482 gcc_assert (stmt_index
< i
);
4483 op2
= gimple_assign_lhs (stmts
[stmt_index
++]);
4486 if (stmt_index
>= ready_stmts_end
)
4487 ready_stmts_end
= 0;
4492 swap_ops_for_binary_stmt (ops
, op_index
- 2, NULL
);
4493 operand_entry
*oe2
= ops
[op_index
--];
4494 operand_entry
*oe1
= ops
[op_index
--];
4496 stmt2
= oe2
->stmt_to_insert
;
4498 stmt1
= oe1
->stmt_to_insert
;
4501 /* If we emit the last statement then we should put
4502 operands into the last statement. It will also
4504 if (op_index
< 0 && stmt_index
== i
)
4507 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
4509 fprintf (dump_file
, "Transforming ");
4510 print_gimple_stmt (dump_file
, stmts
[i
], 0, 0);
4513 /* If the stmt that defines operand has to be inserted, insert it
4516 insert_stmt_before_use (stmts
[i
], stmt1
);
4518 insert_stmt_before_use (stmts
[i
], stmt2
);
4519 stmt1
= stmt2
= NULL
;
4521 /* We keep original statement only for the last one. All
4522 others are recreated. */
4523 if (i
== stmt_num
- 1)
4525 gimple_assign_set_rhs1 (stmts
[i
], op1
);
4526 gimple_assign_set_rhs2 (stmts
[i
], op2
);
4527 update_stmt (stmts
[i
]);
4531 stmts
[i
] = build_and_add_sum (TREE_TYPE (last_rhs1
), op1
, op2
, opcode
);
4533 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
4535 fprintf (dump_file
, " into ");
4536 print_gimple_stmt (dump_file
, stmts
[i
], 0, 0);
4540 remove_visited_stmt_chain (last_rhs1
);
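/* A sketch of the effect of the function above (illustrative SSA names,
   not from the original sources): for OPS = {a, b, c, d, e} and
   width = 2 the rewrite emits roughly

     _1 = a + b;
     _2 = c + d;
     _3 = _1 + _2;
     x  = _3 + e;

   so the first two additions are independent and may issue in the same
   cycle, unlike the serial chain (((a + b) + c) + d) + e.  */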
/* Transform STMT, which is really (A + B) + (C + D) into the left
   linear form, ((A+B)+C)+D.
   Recurse on D if necessary.  */

static void
linearize_expr (gimple *stmt)
{
  gimple_stmt_iterator gsi;
  gimple *binlhs = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
  gimple *binrhs = SSA_NAME_DEF_STMT (gimple_assign_rhs2 (stmt));
  gimple *oldbinrhs = binrhs;
  enum tree_code rhscode = gimple_assign_rhs_code (stmt);
  gimple *newbinrhs = NULL;
  struct loop *loop = loop_containing_stmt (stmt);
  tree lhs = gimple_assign_lhs (stmt);

  gcc_assert (is_reassociable_op (binlhs, rhscode, loop)
	      && is_reassociable_op (binrhs, rhscode, loop));

  gsi = gsi_for_stmt (stmt);

  gimple_assign_set_rhs2 (stmt, gimple_assign_rhs1 (binrhs));
  binrhs = gimple_build_assign (make_ssa_name (TREE_TYPE (lhs)),
				gimple_assign_rhs_code (binrhs),
				gimple_assign_lhs (binlhs),
				gimple_assign_rhs2 (binrhs));
  gimple_assign_set_rhs1 (stmt, gimple_assign_lhs (binrhs));
  gsi_insert_before (&gsi, binrhs, GSI_SAME_STMT);
  gimple_set_uid (binrhs, gimple_uid (stmt));

  if (TREE_CODE (gimple_assign_rhs2 (stmt)) == SSA_NAME)
    newbinrhs = SSA_NAME_DEF_STMT (gimple_assign_rhs2 (stmt));

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Linearized: ");
      print_gimple_stmt (dump_file, stmt, 0, 0);
    }

  reassociate_stats.linearized++;
  update_stmt (stmt);

  gsi = gsi_for_stmt (oldbinrhs);
  reassoc_remove_stmt (&gsi);
  release_defs (oldbinrhs);

  gimple_set_visited (stmt, true);
  gimple_set_visited (binlhs, true);
  gimple_set_visited (binrhs, true);

  /* Tail recurse on the new rhs if it still needs reassociation.  */
  if (newbinrhs && is_reassociable_op (newbinrhs, rhscode, loop))
    /* ??? This should probably be linearize_expr (newbinrhs) but I don't
       want to change the algorithm while converting to tuples.  */
    linearize_expr (stmt);
}
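/* A concrete trace of the rewriting above (illustrative SSA names, not
   from the original sources): given

     _1 = A + B;  _2 = C + D;  x = _1 + _2;

   the statement x is rewritten into the left-linear chain

     _3 = _1 + D;  x = _3 + C;

   after which the function tail-recurses in case the defining statement
   of the new rhs2 (here C) is itself still reassociable.  */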
/* If LHS has a single immediate use that is a GIMPLE_ASSIGN statement, return
   it.  Otherwise, return NULL.  */

static gimple *
get_single_immediate_use (tree lhs)
{
  use_operand_p immuse;
  gimple *immusestmt;

  if (TREE_CODE (lhs) == SSA_NAME
      && single_imm_use (lhs, &immuse, &immusestmt)
      && is_gimple_assign (immusestmt))
    return immusestmt;

  return NULL;
}
/* Recursively negate the value of TONEGATE, and return the SSA_NAME
   representing the negated value.  Insertions of any necessary
   instructions go before GSI.
   This function is recursive in that, if you hand it "a_5" as the
   value to negate, and a_5 is defined by "a_5 = b_3 + b_4", it will
   transform b_3 + b_4 into a_5 = -b_3 + -b_4.  */

static tree
negate_value (tree tonegate, gimple_stmt_iterator *gsip)
{
  gimple *negatedefstmt = NULL;
  tree resultofnegate;
  gimple_stmt_iterator gsi;
  unsigned int uid;

  /* If we are trying to negate a name, defined by an add, negate the
     add operands instead.  */
  if (TREE_CODE (tonegate) == SSA_NAME)
    negatedefstmt = SSA_NAME_DEF_STMT (tonegate);
  if (TREE_CODE (tonegate) == SSA_NAME
      && is_gimple_assign (negatedefstmt)
      && TREE_CODE (gimple_assign_lhs (negatedefstmt)) == SSA_NAME
      && has_single_use (gimple_assign_lhs (negatedefstmt))
      && gimple_assign_rhs_code (negatedefstmt) == PLUS_EXPR)
    {
      tree rhs1 = gimple_assign_rhs1 (negatedefstmt);
      tree rhs2 = gimple_assign_rhs2 (negatedefstmt);
      tree lhs = gimple_assign_lhs (negatedefstmt);
      gimple *g;

      gsi = gsi_for_stmt (negatedefstmt);
      rhs1 = negate_value (rhs1, &gsi);

      gsi = gsi_for_stmt (negatedefstmt);
      rhs2 = negate_value (rhs2, &gsi);

      gsi = gsi_for_stmt (negatedefstmt);
      lhs = make_ssa_name (TREE_TYPE (lhs));
      gimple_set_visited (negatedefstmt, true);
      g = gimple_build_assign (lhs, PLUS_EXPR, rhs1, rhs2);
      gimple_set_uid (g, gimple_uid (negatedefstmt));
      gsi_insert_before (&gsi, g, GSI_SAME_STMT);
      return lhs;
    }

  tonegate = fold_build1 (NEGATE_EXPR, TREE_TYPE (tonegate), tonegate);
  resultofnegate = force_gimple_operand_gsi (gsip, tonegate, true,
					     NULL_TREE, true, GSI_SAME_STMT);
  gsi = *gsip;
  uid = gimple_uid (gsi_stmt (gsi));
  for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      gimple *stmt = gsi_stmt (gsi);
      if (gimple_uid (stmt) != 0)
	break;
      gimple_set_uid (stmt, uid);
    }
  return resultofnegate;
}
/* Return true if we should break up the subtract in STMT into an add
   with negate.  This is true when the subtract operands are really
   adds, or the subtract itself is used in an add expression.  In
   either case, breaking up the subtract into an add with negate
   exposes the adds to reassociation.  */

static bool
should_break_up_subtract (gimple *stmt)
{
  tree lhs = gimple_assign_lhs (stmt);
  tree binlhs = gimple_assign_rhs1 (stmt);
  tree binrhs = gimple_assign_rhs2 (stmt);
  gimple *immusestmt;
  struct loop *loop = loop_containing_stmt (stmt);

  if (TREE_CODE (binlhs) == SSA_NAME
      && is_reassociable_op (SSA_NAME_DEF_STMT (binlhs), PLUS_EXPR, loop))
    return true;

  if (TREE_CODE (binrhs) == SSA_NAME
      && is_reassociable_op (SSA_NAME_DEF_STMT (binrhs), PLUS_EXPR, loop))
    return true;

  if (TREE_CODE (lhs) == SSA_NAME
      && (immusestmt = get_single_immediate_use (lhs))
      && is_gimple_assign (immusestmt)
      && (gimple_assign_rhs_code (immusestmt) == PLUS_EXPR
	  || gimple_assign_rhs_code (immusestmt) == MULT_EXPR))
    return true;

  return false;
}
/* Transform STMT from A - B into A + -B.  */

static void
break_up_subtract (gimple *stmt, gimple_stmt_iterator *gsip)
{
  tree rhs1 = gimple_assign_rhs1 (stmt);
  tree rhs2 = gimple_assign_rhs2 (stmt);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Breaking up subtract ");
      print_gimple_stmt (dump_file, stmt, 0, 0);
    }

  rhs2 = negate_value (rhs2, gsip);
  gimple_assign_set_rhs_with_ops (gsip, PLUS_EXPR, rhs1, rhs2);
  update_stmt (stmt);
}
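/* Illustrative before/after for the transformation above (hypothetical
   names, not from the original sources):

     x = a - b;        // used only by an addition
   becomes
     _1 = -b;  x = a + _1;

   which exposes both a and -b as operands of the surrounding addition
   chain.  */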
/* Determine whether STMT is a builtin call that raises an SSA name
   to an integer power and has only one use.  If so, and this is early
   reassociation and unsafe math optimizations are permitted, place
   the SSA name in *BASE and the exponent in *EXPONENT, and return TRUE.
   If any of these conditions does not hold, return FALSE.  */

static bool
acceptable_pow_call (gcall *stmt, tree *base, HOST_WIDE_INT *exponent)
{
  tree arg1;
  REAL_VALUE_TYPE c, cint;

  switch (gimple_call_combined_fn (stmt))
    {
    CASE_CFN_POW:
      if (flag_errno_math)
	return false;

      *base = gimple_call_arg (stmt, 0);
      arg1 = gimple_call_arg (stmt, 1);

      if (TREE_CODE (arg1) != REAL_CST)
	return false;

      c = TREE_REAL_CST (arg1);

      if (REAL_EXP (&c) > HOST_BITS_PER_WIDE_INT)
	return false;

      *exponent = real_to_integer (&c);
      real_from_integer (&cint, VOIDmode, *exponent, SIGNED);
      if (!real_identical (&c, &cint))
	return false;

      break;

    CASE_CFN_POWI:
      *base = gimple_call_arg (stmt, 0);
      arg1 = gimple_call_arg (stmt, 1);

      if (!tree_fits_shwi_p (arg1))
	return false;

      *exponent = tree_to_shwi (arg1);
      break;

    default:
      return false;
    }

  /* Expanding negative exponents is generally unproductive, so we don't
     complicate matters with those.  Exponents of zero and one should
     have been handled by expression folding.  */
  if (*exponent < 2 || TREE_CODE (*base) != SSA_NAME)
    return false;

  return true;
}
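/* Illustrative accept/reject cases for the predicate above (hypothetical
   calls, not from the original sources):

     t = pow (x, 3.0);   accepted, *base = x, *exponent = 3
     t = powi (x, 5);    accepted, *base = x, *exponent = 5
     t = pow (x, 3.5);   rejected, the exponent is not integral
     t = pow (x, -2.0);  rejected, negative exponents are unproductive
     t = pow (2.0, 4.0); rejected, the base is not an SSA name.  */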
/* Try to derive and add an operand entry for OP to *OPS.  Return false if
   unsuccessful.  */

static bool
try_special_add_to_ops (vec<operand_entry *> *ops,
			enum tree_code code,
			tree op, gimple *def_stmt)
{
  tree base = NULL_TREE;
  HOST_WIDE_INT exponent = 0;

  if (TREE_CODE (op) != SSA_NAME
      || ! has_single_use (op))
    return false;

  if (code == MULT_EXPR
      && reassoc_insert_powi_p
      && flag_unsafe_math_optimizations
      && is_gimple_call (def_stmt)
      && acceptable_pow_call (as_a <gcall *> (def_stmt), &base, &exponent))
    {
      add_repeat_to_ops_vec (ops, base, exponent);
      gimple_set_visited (def_stmt, true);
      return true;
    }
  else if (code == MULT_EXPR
	   && is_gimple_assign (def_stmt)
	   && gimple_assign_rhs_code (def_stmt) == NEGATE_EXPR
	   && !HONOR_SNANS (TREE_TYPE (op))
	   && (!HONOR_SIGNED_ZEROS (TREE_TYPE (op))
	       || !COMPLEX_FLOAT_TYPE_P (TREE_TYPE (op))))
    {
      tree rhs1 = gimple_assign_rhs1 (def_stmt);
      tree cst = build_minus_one_cst (TREE_TYPE (op));
      add_to_ops_vec (ops, rhs1);
      add_to_ops_vec (ops, cst);
      gimple_set_visited (def_stmt, true);
      return true;
    }

  return false;
}
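/* Illustrative example for the NEGATE_EXPR case above (hypothetical
   names, not from the original sources): for

     _1 = -x;  _2 = -y;  z = _1 * _2;

   OPS receives { x, -1.0, y, -1.0 }, and the two -1.0 constants can
   later be folded together during operand list optimization, leaving
   effectively z = x * y.  */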
/* Recursively linearize a binary expression that is the RHS of STMT.
   Place the operands of the expression tree in the vector named OPS.  */

static void
linearize_expr_tree (vec<operand_entry *> *ops, gimple *stmt,
		     bool is_associative, bool set_visited)
{
  tree binlhs = gimple_assign_rhs1 (stmt);
  tree binrhs = gimple_assign_rhs2 (stmt);
  gimple *binlhsdef = NULL, *binrhsdef = NULL;
  bool binlhsisreassoc = false;
  bool binrhsisreassoc = false;
  enum tree_code rhscode = gimple_assign_rhs_code (stmt);
  struct loop *loop = loop_containing_stmt (stmt);

  if (set_visited)
    gimple_set_visited (stmt, true);

  if (TREE_CODE (binlhs) == SSA_NAME)
    {
      binlhsdef = SSA_NAME_DEF_STMT (binlhs);
      binlhsisreassoc = (is_reassociable_op (binlhsdef, rhscode, loop)
			 && !stmt_could_throw_p (binlhsdef));
    }

  if (TREE_CODE (binrhs) == SSA_NAME)
    {
      binrhsdef = SSA_NAME_DEF_STMT (binrhs);
      binrhsisreassoc = (is_reassociable_op (binrhsdef, rhscode, loop)
			 && !stmt_could_throw_p (binrhsdef));
    }

  /* If the LHS is not reassociable, but the RHS is, we need to swap
     them.  If neither is reassociable, there is nothing we can do, so
     just put them in the ops vector.  If the LHS is reassociable,
     linearize it.  If both are reassociable, then linearize the RHS
     and the LHS.  */
  if (!binlhsisreassoc)
    {
      /* If this is not an associative operation like division, give up.  */
      if (!is_associative)
	{
	  add_to_ops_vec (ops, binrhs);
	  return;
	}

      if (!binrhsisreassoc)
	{
	  if (!try_special_add_to_ops (ops, rhscode, binrhs, binrhsdef))
	    add_to_ops_vec (ops, binrhs);

	  if (!try_special_add_to_ops (ops, rhscode, binlhs, binlhsdef))
	    add_to_ops_vec (ops, binlhs);

	  return;
	}

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "swapping operands of ");
	  print_gimple_stmt (dump_file, stmt, 0, 0);
	}

      swap_ssa_operands (stmt,
			 gimple_assign_rhs1_ptr (stmt),
			 gimple_assign_rhs2_ptr (stmt));

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, " is now ");
	  print_gimple_stmt (dump_file, stmt, 0, 0);
	}

      /* We want to make it so the lhs is always the reassociative op,
	 so swap.  */
      std::swap (binlhs, binrhs);
    }
  else if (binrhsisreassoc)
    {
      linearize_expr (stmt);
      binlhs = gimple_assign_rhs1 (stmt);
      binrhs = gimple_assign_rhs2 (stmt);
    }

  gcc_assert (TREE_CODE (binrhs) != SSA_NAME
	      || !is_reassociable_op (SSA_NAME_DEF_STMT (binrhs),
				      rhscode, loop));
  linearize_expr_tree (ops, SSA_NAME_DEF_STMT (binlhs),
		       is_associative, set_visited);

  if (!try_special_add_to_ops (ops, rhscode, binrhs, binrhsdef))
    add_to_ops_vec (ops, binrhs);
}
/* Repropagate the negates back into subtracts, since no other pass
   currently does it.  */

static void
repropagate_negates (void)
{
  unsigned int i = 0;
  tree negate;

  FOR_EACH_VEC_ELT (plus_negates, i, negate)
    {
      gimple *user = get_single_immediate_use (negate);

      if (!user || !is_gimple_assign (user))
	continue;

      /* The negate operand can be either operand of a PLUS_EXPR
	 (it can be the LHS if the RHS is a constant for example).

	 Force the negate operand to the RHS of the PLUS_EXPR, then
	 transform the PLUS_EXPR into a MINUS_EXPR.  */
      if (gimple_assign_rhs_code (user) == PLUS_EXPR)
	{
	  /* If the negated operand appears on the LHS of the
	     PLUS_EXPR, exchange the operands of the PLUS_EXPR
	     to force the negated operand to the RHS of the PLUS_EXPR.  */
	  if (gimple_assign_rhs1 (user) == negate)
	    {
	      swap_ssa_operands (user,
				 gimple_assign_rhs1_ptr (user),
				 gimple_assign_rhs2_ptr (user));
	    }

	  /* Now transform the PLUS_EXPR into a MINUS_EXPR and replace
	     the RHS of the PLUS_EXPR with the operand of the NEGATE_EXPR.  */
	  if (gimple_assign_rhs2 (user) == negate)
	    {
	      tree rhs1 = gimple_assign_rhs1 (user);
	      tree rhs2 = gimple_assign_rhs1 (SSA_NAME_DEF_STMT (negate));
	      gimple_stmt_iterator gsi = gsi_for_stmt (user);
	      gimple_assign_set_rhs_with_ops (&gsi, MINUS_EXPR, rhs1, rhs2);
	      update_stmt (user);
	    }
	}
      else if (gimple_assign_rhs_code (user) == MINUS_EXPR)
	{
	  if (gimple_assign_rhs1 (user) == negate)
	    {
	      /* We have
		   x = -a
		   y = x - b
		 which we transform into
		   x = a + b
		   y = -x .
		 This pushes down the negate which we possibly can merge
		 into some other operation, hence insert it into the
		 plus_negates vector.  */
	      gimple *feed = SSA_NAME_DEF_STMT (negate);
	      tree a = gimple_assign_rhs1 (feed);
	      tree b = gimple_assign_rhs2 (user);
	      gimple_stmt_iterator gsi = gsi_for_stmt (feed);
	      gimple_stmt_iterator gsi2 = gsi_for_stmt (user);
	      tree x = make_ssa_name (TREE_TYPE (gimple_assign_lhs (feed)));
	      gimple *g = gimple_build_assign (x, PLUS_EXPR, a, b);
	      gsi_insert_before (&gsi2, g, GSI_SAME_STMT);
	      gimple_assign_set_rhs_with_ops (&gsi2, NEGATE_EXPR, x);
	      user = gsi_stmt (gsi2);
	      update_stmt (user);
	      reassoc_remove_stmt (&gsi);
	      release_defs (feed);
	      plus_negates.safe_push (gimple_assign_lhs (user));
	    }
	  else
	    {
	      /* Transform "x = -a; y = b - x" into "y = b + a", getting
		 rid of one operation.  */
	      gimple *feed = SSA_NAME_DEF_STMT (negate);
	      tree a = gimple_assign_rhs1 (feed);
	      tree rhs1 = gimple_assign_rhs1 (user);
	      gimple_stmt_iterator gsi = gsi_for_stmt (user);
	      gimple_assign_set_rhs_with_ops (&gsi, PLUS_EXPR, rhs1, a);
	      update_stmt (gsi_stmt (gsi));
	    }
	}
    }
}
/* Returns true if OP is of a type for which we can do reassociation.
   That is for integral or non-saturating fixed-point types, and for
   floating point types when associative-math is enabled.  */

static bool
can_reassociate_p (tree op)
{
  tree type = TREE_TYPE (op);
  if (TREE_CODE (op) == SSA_NAME && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op))
    return false;
  if ((ANY_INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_WRAPS (type))
      || NON_SAT_FIXED_POINT_TYPE_P (type)
      || (flag_associative_math && FLOAT_TYPE_P (type)))
    return true;
  return false;
}
/* Break up subtract operations in block BB.

   We do this top down because we don't know whether the subtract is
   part of a possible chain of reassociation except at the top.

   IE given
   d = f + g
   c = a + e
   b = c - d
   q = b - r
   k = t - q

   we want to break up k = t - q, but we won't until we've transformed q
   = b - r, which won't be broken up until we transform b = c - d.

   En passant, clear the GIMPLE visited flag on every statement
   and set UIDs within each basic block.  */

static void
break_up_subtract_bb (basic_block bb)
{
  gimple_stmt_iterator gsi;
  basic_block son;
  unsigned int uid = 1;

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple *stmt = gsi_stmt (gsi);
      gimple_set_visited (stmt, false);
      gimple_set_uid (stmt, uid++);

      if (!is_gimple_assign (stmt)
	  || !can_reassociate_p (gimple_assign_lhs (stmt)))
	continue;

      /* Look for simple gimple subtract operations.  */
      if (gimple_assign_rhs_code (stmt) == MINUS_EXPR)
	{
	  if (!can_reassociate_p (gimple_assign_rhs1 (stmt))
	      || !can_reassociate_p (gimple_assign_rhs2 (stmt)))
	    continue;

	  /* Check for a subtract used only in an addition.  If this
	     is the case, transform it into add of a negate for better
	     reassociation.  IE transform C = A-B into C = A + -B if C
	     is only used in an addition.  */
	  if (should_break_up_subtract (stmt))
	    break_up_subtract (stmt, &gsi);
	}
      else if (gimple_assign_rhs_code (stmt) == NEGATE_EXPR
	       && can_reassociate_p (gimple_assign_rhs1 (stmt)))
	plus_negates.safe_push (gimple_assign_lhs (stmt));
    }
  for (son = first_dom_son (CDI_DOMINATORS, bb);
       son;
       son = next_dom_son (CDI_DOMINATORS, son))
    break_up_subtract_bb (son);
}
/* Used for repeated factor analysis.  */
struct repeat_factor
{
  /* An SSA name that occurs in a multiply chain.  */
  tree factor;

  /* Cached rank of the factor.  */
  unsigned rank;

  /* Number of occurrences of the factor in the chain.  */
  HOST_WIDE_INT count;

  /* An SSA name representing the product of this factor and
     all factors appearing later in the repeated factor vector.  */
  tree repr;
};


static vec<repeat_factor> repeat_factor_vec;
/* Used for sorting the repeat factor vector.  Sort primarily by
   ascending occurrence count, secondarily by descending rank.  */

static int
compare_repeat_factors (const void *x1, const void *x2)
{
  const repeat_factor *rf1 = (const repeat_factor *) x1;
  const repeat_factor *rf2 = (const repeat_factor *) x2;

  if (rf1->count != rf2->count)
    return rf1->count - rf2->count;

  return rf2->rank - rf1->rank;
}
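/* Illustrative sort (hypothetical factors, not from the original
   sources): entries { x: count 2, rank 9 }, { y: count 3, rank 5 },
   { z: count 3, rank 7 } order as x, z, y -- x comes first on the
   lower count, and z precedes y because at equal counts the higher
   rank wins.  */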
/* Look for repeated operands in OPS in the multiply tree rooted at
   STMT.  Replace them with an optimal sequence of multiplies and powi
   builtin calls, and remove the used operands from OPS.  Return an
   SSA name representing the value of the replacement sequence.  */

static tree
attempt_builtin_powi (gimple *stmt, vec<operand_entry *> *ops)
{
  unsigned i, j, vec_len;
  int ii;
  operand_entry *oe;
  repeat_factor *rf1, *rf2;
  repeat_factor rfnew;
  tree result = NULL_TREE;
  tree target_ssa, iter_result;
  tree type = TREE_TYPE (gimple_get_lhs (stmt));
  tree powi_fndecl = mathfn_built_in (type, BUILT_IN_POWI);
  gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
  gimple *mul_stmt, *pow_stmt;

  /* Nothing to do if BUILT_IN_POWI doesn't exist for this type and
     target.  */
  if (!powi_fndecl)
    return NULL_TREE;

  /* Allocate the repeated factor vector.  */
  repeat_factor_vec.create (10);

  /* Scan the OPS vector for all SSA names in the product and build
     up a vector of occurrence counts for each factor.  */
  FOR_EACH_VEC_ELT (*ops, i, oe)
    {
      if (TREE_CODE (oe->op) == SSA_NAME)
	{
	  FOR_EACH_VEC_ELT (repeat_factor_vec, j, rf1)
	    {
	      if (rf1->factor == oe->op)
		{
		  rf1->count += oe->count;
		  break;
		}
	    }

	  if (j >= repeat_factor_vec.length ())
	    {
	      rfnew.factor = oe->op;
	      rfnew.rank = oe->rank;
	      rfnew.count = oe->count;
	      rfnew.repr = NULL_TREE;
	      repeat_factor_vec.safe_push (rfnew);
	    }
	}
    }

  /* Sort the repeated factor vector by (a) increasing occurrence count,
     and (b) decreasing rank.  */
  repeat_factor_vec.qsort (compare_repeat_factors);

  /* It is generally best to combine as many base factors as possible
     into a product before applying __builtin_powi to the result.
     However, the sort order chosen for the repeated factor vector
     allows us to cache partial results for the product of the base
     factors for subsequent use.  When we already have a cached partial
     result from a previous iteration, it is best to make use of it
     before looking for another __builtin_pow opportunity.

     As an example, consider x * x * y * y * y * z * z * z * z.
     We want to first compose the product x * y * z, raise it to the
     second power, then multiply this by y * z, and finally multiply
     by z.  This can be done in 5 multiplies provided we cache y * z
     for use in both expressions:

	t1 = y * z
	t2 = t1 * x
	t3 = t2 * t2
	t4 = t1 * t3
	result = t4 * z

     If we instead ignored the cached y * z and first multiplied by
     the __builtin_pow opportunity z * z, we would get the inferior:

	t1 = y * z
	t2 = t1 * x
	t3 = t2 * t2
	t4 = z * z
	t5 = t3 * t4
	result = t5 * y  */

  vec_len = repeat_factor_vec.length ();

  /* Repeatedly look for opportunities to create a builtin_powi call.  */
  while (true)
    {
      HOST_WIDE_INT power;

      /* First look for the largest cached product of factors from
	 preceding iterations.  If found, create a builtin_powi for
	 it if the minimum occurrence count for its factors is at
	 least 2, or just use this cached product as our next
	 multiplicand if the minimum occurrence count is 1.  */
      FOR_EACH_VEC_ELT (repeat_factor_vec, j, rf1)
	{
	  if (rf1->repr && rf1->count > 0)
	    break;
	}

      if (j < vec_len)
	{
	  power = rf1->count;

	  if (power == 1)
	    {
	      iter_result = rf1->repr;

	      if (dump_file && (dump_flags & TDF_DETAILS))
		{
		  unsigned elt;
		  repeat_factor *rf;
		  fputs ("Multiplying by cached product ", dump_file);
		  for (elt = j; elt < vec_len; elt++)
		    {
		      rf = &repeat_factor_vec[elt];
		      print_generic_expr (dump_file, rf->factor, 0);
		      if (elt < vec_len - 1)
			fputs (" * ", dump_file);
		    }
		  fputs ("\n", dump_file);
		}
	    }
	  else
	    {
	      iter_result = make_temp_ssa_name (type, NULL, "reassocpow");
	      pow_stmt = gimple_build_call (powi_fndecl, 2, rf1->repr,
					    build_int_cst (integer_type_node,
							   power));
	      gimple_call_set_lhs (pow_stmt, iter_result);
	      gimple_set_location (pow_stmt, gimple_location (stmt));
	      gimple_set_uid (pow_stmt, gimple_uid (stmt));
	      gsi_insert_before (&gsi, pow_stmt, GSI_SAME_STMT);

	      if (dump_file && (dump_flags & TDF_DETAILS))
		{
		  unsigned elt;
		  repeat_factor *rf;
		  fputs ("Building __builtin_pow call for cached product (",
			 dump_file);
		  for (elt = j; elt < vec_len; elt++)
		    {
		      rf = &repeat_factor_vec[elt];
		      print_generic_expr (dump_file, rf->factor, 0);
		      if (elt < vec_len - 1)
			fputs (" * ", dump_file);
		    }
		  fprintf (dump_file, ")^" HOST_WIDE_INT_PRINT_DEC "\n",
			   power);
		}
	    }
	}
      else
	{
	  /* Otherwise, find the first factor in the repeated factor
	     vector whose occurrence count is at least 2.  If no such
	     factor exists, there are no builtin_powi opportunities
	     remaining.  */
	  FOR_EACH_VEC_ELT (repeat_factor_vec, j, rf1)
	    {
	      if (rf1->count >= 2)
		break;
	    }

	  if (j >= vec_len)
	    break;

	  power = rf1->count;

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      unsigned elt;
	      repeat_factor *rf;
	      fputs ("Building __builtin_pow call for (", dump_file);
	      for (elt = j; elt < vec_len; elt++)
		{
		  rf = &repeat_factor_vec[elt];
		  print_generic_expr (dump_file, rf->factor, 0);
		  if (elt < vec_len - 1)
		    fputs (" * ", dump_file);
		}
	      fprintf (dump_file, ")^" HOST_WIDE_INT_PRINT_DEC "\n", power);
	    }

	  reassociate_stats.pows_created++;

	  /* Visit each element of the vector in reverse order (so that
	     high-occurrence elements are visited first, and within the
	     same occurrence count, lower-ranked elements are visited
	     first).  Form a linear product of all elements in this order
	     whose occurrence count is at least that of element J.
	     Record the SSA name representing the product of each element
	     with all subsequent elements in the vector.  */
	  if (j == vec_len - 1)
	    rf1->repr = rf1->factor;
	  else
	    {
	      for (ii = vec_len - 2; ii >= (int)j; ii--)
		{
		  tree op1, op2;

		  rf1 = &repeat_factor_vec[ii];
		  rf2 = &repeat_factor_vec[ii + 1];

		  /* Init the last factor's representative to be itself.  */
		  if (!rf2->repr)
		    rf2->repr = rf2->factor;

		  op1 = rf1->factor;
		  op2 = rf2->repr;

		  target_ssa = make_temp_ssa_name (type, NULL, "reassocpow");
		  mul_stmt = gimple_build_assign (target_ssa, MULT_EXPR,
						  op1, op2);
		  gimple_set_location (mul_stmt, gimple_location (stmt));
		  gimple_set_uid (mul_stmt, gimple_uid (stmt));
		  gsi_insert_before (&gsi, mul_stmt, GSI_SAME_STMT);
		  rf1->repr = target_ssa;

		  /* Don't reprocess the multiply we just introduced.  */
		  gimple_set_visited (mul_stmt, true);
		}
	    }

	  /* Form a call to __builtin_powi for the maximum product
	     just formed, raised to the power obtained earlier.  */
	  rf1 = &repeat_factor_vec[j];
	  iter_result = make_temp_ssa_name (type, NULL, "reassocpow");
	  pow_stmt = gimple_build_call (powi_fndecl, 2, rf1->repr,
					build_int_cst (integer_type_node,
						       power));
	  gimple_call_set_lhs (pow_stmt, iter_result);
	  gimple_set_location (pow_stmt, gimple_location (stmt));
	  gimple_set_uid (pow_stmt, gimple_uid (stmt));
	  gsi_insert_before (&gsi, pow_stmt, GSI_SAME_STMT);
	}

      /* If we previously formed at least one other builtin_powi call,
	 form the product of this one and those others.  */
      if (result)
	{
	  tree new_result = make_temp_ssa_name (type, NULL, "reassocpow");
	  mul_stmt = gimple_build_assign (new_result, MULT_EXPR,
					  result, iter_result);
	  gimple_set_location (mul_stmt, gimple_location (stmt));
	  gimple_set_uid (mul_stmt, gimple_uid (stmt));
	  gsi_insert_before (&gsi, mul_stmt, GSI_SAME_STMT);
	  gimple_set_visited (mul_stmt, true);
	  result = new_result;
	}
      else
	result = iter_result;

      /* Decrement the occurrence count of each element in the product
	 by the count found above, and remove this many copies of each
	 factor from OPS.  */
      for (i = j; i < vec_len; i++)
	{
	  unsigned k = power;
	  unsigned n;

	  rf1 = &repeat_factor_vec[i];
	  rf1->count -= power;

	  FOR_EACH_VEC_ELT_REVERSE (*ops, n, oe)
	    {
	      if (oe->op == rf1->factor)
		{
		  if (oe->count <= k)
		    {
		      ops->ordered_remove (n);
		      k -= oe->count;

		      if (k == 0)
			break;
		    }
		  else
		    {
		      oe->count -= k;
		      break;
		    }
		}
	    }
	}
    }

  /* At this point all elements in the repeated factor vector have a
     remaining occurrence count of 0 or 1, and those with a count of 1
     don't have cached representatives.  Re-sort the ops vector and
     clean up.  */
  ops->qsort (sort_by_operand_rank);
  repeat_factor_vec.release ();

  /* Return the final product computed herein.  Note that there may
     still be some elements with single occurrence count left in OPS;
     those will be handled by the normal reassociation logic.  */
  return result;
}
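/* A worked trace of the function above (hypothetical input, not from
   the original sources): for x * x * y * y * y the occurrence counts
   are { x: 2, y: 3 }.  The first iteration forms t1 = x * y, emits
   t2 = __builtin_powi (t1, 2) and leaves counts { x: 0, y: 1 }.  The
   second iteration picks up the cached representative of y and emits
   result = t2 * y, computing x*x*y*y*y as (x*y)^2 * y in three
   statements instead of four multiplies.  */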
/* Attempt to optimize
   CST1 * copysign (CST2, y) -> copysign (CST1 * CST2, y) if CST1 > 0, or
   CST1 * copysign (CST2, y) -> -copysign (CST1 * CST2, y) if CST1 < 0.  */

static void
attempt_builtin_copysign (vec<operand_entry *> *ops)
{
  operand_entry *oe;
  unsigned int i;
  unsigned int length = ops->length ();
  tree cst = ops->last ()->op;

  if (length == 1 || TREE_CODE (cst) != REAL_CST)
    return;

  FOR_EACH_VEC_ELT (*ops, i, oe)
    {
      if (TREE_CODE (oe->op) == SSA_NAME
	  && has_single_use (oe->op))
	{
	  gimple *def_stmt = SSA_NAME_DEF_STMT (oe->op);
	  if (gcall *old_call = dyn_cast <gcall *> (def_stmt))
	    {
	      tree arg0, arg1;
	      switch (gimple_call_combined_fn (old_call))
		{
		CASE_CFN_COPYSIGN:
		  arg0 = gimple_call_arg (old_call, 0);
		  arg1 = gimple_call_arg (old_call, 1);
		  /* The first argument of copysign must be a constant,
		     otherwise there's nothing to do.  */
		  if (TREE_CODE (arg0) == REAL_CST)
		    {
		      tree type = TREE_TYPE (arg0);
		      tree mul = const_binop (MULT_EXPR, type, cst, arg0);
		      /* If we couldn't fold to a single constant, skip it.
			 That happens e.g. for inexact multiplication when
			 -frounding-math.  */
		      if (mul == NULL_TREE)
			break;
		      /* Instead of adjusting OLD_CALL, let's build a new
			 call to not leak the LHS and prevent keeping bogus
			 debug statements.  DCE will clean up the old call.  */
		      gcall *new_call;
		      if (gimple_call_internal_p (old_call))
			new_call = gimple_build_call_internal
			  (IFN_COPYSIGN, 2, mul, arg1);
		      else
			new_call = gimple_build_call
			  (gimple_call_fndecl (old_call), 2, mul, arg1);
		      tree lhs = make_ssa_name (type);
		      gimple_call_set_lhs (new_call, lhs);
		      gimple_set_location (new_call,
					   gimple_location (old_call));
		      insert_stmt_after (new_call, old_call);
		      /* We've used the constant, get rid of it.  */
		      ops->pop ();
		      bool cst1_neg = real_isneg (TREE_REAL_CST_PTR (cst));
		      /* Handle the CST1 < 0 case by negating the result.  */
		      if (cst1_neg)
			{
			  tree negrhs = make_ssa_name (TREE_TYPE (lhs));
			  gimple *negate_stmt
			    = gimple_build_assign (negrhs, NEGATE_EXPR, lhs);
			  insert_stmt_after (negate_stmt, new_call);
			  lhs = negrhs;
			}
		      oe->op = lhs;

		      if (dump_file && (dump_flags & TDF_DETAILS))
			{
			  fprintf (dump_file, "Optimizing copysign: ");
			  print_generic_expr (dump_file, cst, 0);
			  fprintf (dump_file, " * COPYSIGN (");
			  print_generic_expr (dump_file, arg0, 0);
			  fprintf (dump_file, ", ");
			  print_generic_expr (dump_file, arg1, 0);
			  fprintf (dump_file, ") into %sCOPYSIGN (",
				   cst1_neg ? "-" : "");
			  print_generic_expr (dump_file, mul, 0);
			  fprintf (dump_file, ", ");
			  print_generic_expr (dump_file, arg1, 0);
			  fprintf (dump_file, ")\n");
			}
		      return;
		    }
		  break;
		default:
		  break;
		}
	    }
	}
    }
}
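/* Illustrative before/after for the optimization above (hypothetical
   values, not from the original sources): for

     _1 = copysign (4.0, y);  z = _1 * -2.0;

   CST1 * CST2 folds to -8.0, and because CST1 is negative the result
   of the new call is negated:

     _2 = copysign (-8.0, y);  _3 = -_2;

   with _3 replacing the copysign operand in OPS.  */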
/* Transform STMT at *GSI into a copy by replacing its rhs with NEW_RHS.  */

static void
transform_stmt_to_copy (gimple_stmt_iterator *gsi, gimple *stmt, tree new_rhs)
{
  tree rhs1;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Transforming ");
      print_gimple_stmt (dump_file, stmt, 0, 0);
    }

  rhs1 = gimple_assign_rhs1 (stmt);
  gimple_assign_set_rhs_from_tree (gsi, new_rhs);
  update_stmt (gsi_stmt (*gsi));
  remove_visited_stmt_chain (rhs1);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, " into ");
      print_gimple_stmt (dump_file, stmt, 0, 0);
    }
}
/* Transform STMT at *GSI into a multiply of RHS1 and RHS2.  */

static void
transform_stmt_to_multiply (gimple_stmt_iterator *gsi, gimple *stmt,
			    tree rhs1, tree rhs2)
{
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Transforming ");
      print_gimple_stmt (dump_file, stmt, 0, 0);
    }

  gimple_assign_set_rhs_with_ops (gsi, MULT_EXPR, rhs1, rhs2);
  update_stmt (gsi_stmt (*gsi));
  remove_visited_stmt_chain (rhs1);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, " into ");
      print_gimple_stmt (dump_file, stmt, 0, 0);
    }
}
/* Reassociate expressions in basic block BB and its post-dominator as
   children.

   Bubble up return status from maybe_optimize_range_tests.  */

static bool
reassociate_bb (basic_block bb)
{
  gimple_stmt_iterator gsi;
  basic_block son;
  gimple *stmt = last_stmt (bb);
  bool cfg_cleanup_needed = false;

  if (stmt && !gimple_visited_p (stmt))
    cfg_cleanup_needed |= maybe_optimize_range_tests (stmt);

  for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      stmt = gsi_stmt (gsi);

      if (is_gimple_assign (stmt)
	  && !stmt_could_throw_p (stmt))
	{
	  tree lhs, rhs1, rhs2;
	  enum tree_code rhs_code = gimple_assign_rhs_code (stmt);

	  /* If this is not a gimple binary expression, there is
	     nothing for us to do with it.  */
	  if (get_gimple_rhs_class (rhs_code) != GIMPLE_BINARY_RHS)
	    continue;

	  /* If this was part of an already processed statement,
	     we don't need to touch it again.  */
	  if (gimple_visited_p (stmt))
	    {
	      /* This statement might have become dead because of previous
		 reassociations.  */
	      if (has_zero_uses (gimple_get_lhs (stmt)))
		{
		  reassoc_remove_stmt (&gsi);
		  release_defs (stmt);
		  /* We might end up removing the last stmt above which
		     places the iterator to the end of the sequence.
		     Reset it to the last stmt in this case which might
		     be the end of the sequence as well if we removed
		     the last statement of the sequence.  In which case
		     we need to bail out.  */
		  if (gsi_end_p (gsi))
		    {
		      gsi = gsi_last_bb (bb);
		      if (gsi_end_p (gsi))
			break;
		    }
		}
	      continue;
	    }

	  lhs = gimple_assign_lhs (stmt);
	  rhs1 = gimple_assign_rhs1 (stmt);
	  rhs2 = gimple_assign_rhs2 (stmt);

	  /* For non-bit or min/max operations we can't associate
	     all types.  Verify that here.  */
	  if (rhs_code != BIT_IOR_EXPR
	      && rhs_code != BIT_AND_EXPR
	      && rhs_code != BIT_XOR_EXPR
	      && rhs_code != MIN_EXPR
	      && rhs_code != MAX_EXPR
	      && (!can_reassociate_p (lhs)
		  || !can_reassociate_p (rhs1)
		  || !can_reassociate_p (rhs2)))
	    continue;

	  if (associative_tree_code (rhs_code))
	    {
	      auto_vec<operand_entry *> ops;
	      tree powi_result = NULL_TREE;
	      bool is_vector = VECTOR_TYPE_P (TREE_TYPE (lhs));

	      /* There may be no immediate uses left by the time we
		 get here because we may have eliminated them all.  */
	      if (TREE_CODE (lhs) == SSA_NAME && has_zero_uses (lhs))
		continue;

	      gimple_set_visited (stmt, true);
	      linearize_expr_tree (&ops, stmt, true, true);
	      ops.qsort (sort_by_operand_rank);
	      int orig_len = ops.length ();
	      optimize_ops_list (rhs_code, &ops);
	      if (undistribute_ops_list (rhs_code, &ops,
					 loop_containing_stmt (stmt)))
		{
		  ops.qsort (sort_by_operand_rank);
		  optimize_ops_list (rhs_code, &ops);
		}

	      if (rhs_code == PLUS_EXPR
		  && transform_add_to_multiply (&ops))
		ops.qsort (sort_by_operand_rank);

	      if (rhs_code == BIT_IOR_EXPR || rhs_code == BIT_AND_EXPR)
		{
		  if (is_vector)
		    optimize_vec_cond_expr (rhs_code, &ops);
		  else
		    optimize_range_tests (rhs_code, &ops);
		}

	      if (rhs_code == MULT_EXPR && !is_vector)
		{
		  attempt_builtin_copysign (&ops);

		  if (reassoc_insert_powi_p
		      && flag_unsafe_math_optimizations)
		    powi_result = attempt_builtin_powi (stmt, &ops);
		}

	      operand_entry *last;
	      bool negate_result = false;
	      if (ops.length () > 1
		  && rhs_code == MULT_EXPR)
		{
		  last = ops.last ();
		  if ((integer_minus_onep (last->op)
		       || real_minus_onep (last->op))
		      && !HONOR_SNANS (TREE_TYPE (lhs))
		      && (!HONOR_SIGNED_ZEROS (TREE_TYPE (lhs))
			  || !COMPLEX_FLOAT_TYPE_P (TREE_TYPE (lhs))))
		    {
		      ops.pop ();
		      negate_result = true;
		    }
		}

	      tree new_lhs = lhs;
	      /* If the operand vector is now empty, all operands were
		 consumed by the __builtin_powi optimization.  */
	      if (ops.length () == 0)
		transform_stmt_to_copy (&gsi, stmt, powi_result);
	      else if (ops.length () == 1)
		{
		  tree last_op = ops.last ()->op;

		  /* If the stmt that defines the operand has to be inserted,
		     insert it before the use.  */
		  if (ops.last ()->stmt_to_insert)
		    insert_stmt_before_use (stmt, ops.last ()->stmt_to_insert);
		  if (powi_result)
		    transform_stmt_to_multiply (&gsi, stmt, last_op,
						powi_result);
		  else
		    transform_stmt_to_copy (&gsi, stmt, last_op);
		}
	      else
		{
		  machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
		  int ops_num = ops.length ();
		  int width = get_reassociation_width (ops_num, rhs_code, mode);

		  if (dump_file && (dump_flags & TDF_DETAILS))
		    fprintf (dump_file,
			     "Width = %d was chosen for reassociation\n", width);

		  if (width > 1
		      && ops.length () > 3)
		    rewrite_expr_tree_parallel (as_a <gassign *> (stmt),
						width, ops);
		  else
		    {
		      /* When there are three operands left, we want
			 to make sure the ones that get the double
			 binary op are chosen wisely.  */
		      int len = ops.length ();
		      if (len >= 3)
			swap_ops_for_binary_stmt (ops, len - 3, stmt);

		      new_lhs = rewrite_expr_tree (stmt, 0, ops,
						   powi_result != NULL
						   || negate_result,
						   len != orig_len);
		    }

		  /* If we combined some repeated factors into a
		     __builtin_powi call, multiply that result by the
		     reassociated operands.  */
		  if (powi_result)
		    {
		      gimple *mul_stmt, *lhs_stmt = SSA_NAME_DEF_STMT (lhs);
		      tree type = TREE_TYPE (lhs);
		      tree target_ssa = make_temp_ssa_name (type, NULL,
							    "reassocpow");
		      gimple_set_lhs (lhs_stmt, target_ssa);
		      update_stmt (lhs_stmt);
		      if (lhs != new_lhs)
			{
			  target_ssa = new_lhs;
			  new_lhs = lhs;
			}
		      mul_stmt = gimple_build_assign (lhs, MULT_EXPR,
						      powi_result, target_ssa);
		      gimple_set_location (mul_stmt, gimple_location (stmt));
		      gimple_set_uid (mul_stmt, gimple_uid (stmt));
		      gsi_insert_after (&gsi, mul_stmt, GSI_NEW_STMT);
		    }
		}

	      if (negate_result)
		{
		  stmt = SSA_NAME_DEF_STMT (lhs);
		  tree tmp = make_ssa_name (TREE_TYPE (lhs));
		  gimple_set_lhs (stmt, tmp);
		  if (lhs != new_lhs)
		    tmp = new_lhs;
		  gassign *neg_stmt = gimple_build_assign (lhs, NEGATE_EXPR,
							   tmp);
		  gimple_set_uid (neg_stmt, gimple_uid (stmt));
		  gsi_insert_after (&gsi, neg_stmt, GSI_NEW_STMT);
		  update_stmt (stmt);
		}
	    }
	}
    }
  for (son = first_dom_son (CDI_POST_DOMINATORS, bb);
       son;
       son = next_dom_son (CDI_POST_DOMINATORS, son))
    cfg_cleanup_needed |= reassociate_bb (son);

  return cfg_cleanup_needed;
}
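/* An end-to-end sketch of what the driver above does to one chain
   (hypothetical statements, not from the original sources): for

     _1 = a + 3;  _2 = _1 + b;  x = _2 + 5;

   the chain is linearized into OPS = { a, 3, b, 5 }, sorted by rank so
   the constants become adjacent, the constants are folded by
   optimize_ops_list, and the tree is rewritten as roughly

     _1 = a + b;  x = _1 + 8;  */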
/* Add jumps around shifts for range tests turned into bit tests.
   For each SSA_NAME VAR we have code like:
   VAR = ...; // final stmt of range comparison
   // bit test here...;
   OTHERVAR = ...; // final stmt of the bit test sequence
   RES = VAR | OTHERVAR;
   Turn the above into:
   VAR = ...;
   if (VAR != 0)
     goto <l3>;
   else
     goto <l2>;
   <l2>:
   // bit test here...;
   OTHERVAR = ...;
   <l3>:
   # RES = PHI<1(l1), OTHERVAR(l2)>;  */

static void
branch_fixup (void)
{
  tree var;
  unsigned int i;

  FOR_EACH_VEC_ELT (reassoc_branch_fixups, i, var)
    {
      gimple *def_stmt = SSA_NAME_DEF_STMT (var);
      gimple *use_stmt;
      use_operand_p use;
      bool ok = single_imm_use (var, &use, &use_stmt);
      gcc_assert (ok
		  && is_gimple_assign (use_stmt)
		  && gimple_assign_rhs_code (use_stmt) == BIT_IOR_EXPR
		  && gimple_bb (def_stmt) == gimple_bb (use_stmt));

      basic_block cond_bb = gimple_bb (def_stmt);
      basic_block then_bb = split_block (cond_bb, def_stmt)->dest;
      basic_block merge_bb = split_block (then_bb, use_stmt)->dest;

      gimple_stmt_iterator gsi = gsi_for_stmt (def_stmt);
      gimple *g = gimple_build_cond (NE_EXPR, var,
				     build_zero_cst (TREE_TYPE (var)),
				     NULL_TREE, NULL_TREE);
      location_t loc = gimple_location (use_stmt);
      gimple_set_location (g, loc);
      gsi_insert_after (&gsi, g, GSI_NEW_STMT);

      edge etrue = make_edge (cond_bb, merge_bb, EDGE_TRUE_VALUE);
      etrue->probability = REG_BR_PROB_BASE / 2;
      etrue->count = cond_bb->count / 2;
      edge efalse = find_edge (cond_bb, then_bb);
      efalse->flags = EDGE_FALSE_VALUE;
      efalse->probability -= etrue->probability;
      efalse->count -= etrue->count;
      then_bb->count -= etrue->count;

      tree othervar = NULL_TREE;
      if (gimple_assign_rhs1 (use_stmt) == var)
	othervar = gimple_assign_rhs2 (use_stmt);
      else if (gimple_assign_rhs2 (use_stmt) == var)
	othervar = gimple_assign_rhs1 (use_stmt);
      else
	gcc_unreachable ();
      tree lhs = gimple_assign_lhs (use_stmt);
      gphi *phi = create_phi_node (lhs, merge_bb);
      add_phi_arg (phi, build_one_cst (TREE_TYPE (lhs)), etrue, loc);
      add_phi_arg (phi, othervar, single_succ_edge (then_bb), loc);
      gsi = gsi_for_stmt (use_stmt);
      gsi_remove (&gsi, true);

      set_immediate_dominator (CDI_DOMINATORS, merge_bb, cond_bb);
      set_immediate_dominator (CDI_POST_DOMINATORS, cond_bb, merge_bb);
    }
  reassoc_branch_fixups.release ();
}
void dump_ops_vector (FILE *file, vec<operand_entry *> ops);
void debug_ops_vector (vec<operand_entry *> ops);

/* Dump the operand entry vector OPS to FILE.  */

void
dump_ops_vector (FILE *file, vec<operand_entry *> ops)
{
  operand_entry *oe;
  unsigned int i;

  FOR_EACH_VEC_ELT (ops, i, oe)
    {
      fprintf (file, "Op %d -> rank: %d, tree: ", i, oe->rank);
      print_generic_expr (file, oe->op, 0);
      fprintf (file, "\n");
    }
}

/* Dump the operand entry vector OPS to STDERR.  */

void
debug_ops_vector (vec<operand_entry *> ops)
{
  dump_ops_vector (stderr, ops);
}
/* Bubble up return status from reassociate_bb.  */

static bool
do_reassoc (void)
{
  break_up_subtract_bb (ENTRY_BLOCK_PTR_FOR_FN (cfun));
  return reassociate_bb (EXIT_BLOCK_PTR_FOR_FN (cfun));
}
/* Initialize the reassociation pass.  */

static void
init_reassoc (void)
{
  int i;
  long rank = 2;
  int *bbs = XNEWVEC (int, n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS);

  /* Find the loops, so that we can prevent moving calculations in
     them.  */
  loop_optimizer_init (AVOID_CFG_MODIFICATIONS);

  memset (&reassociate_stats, 0, sizeof (reassociate_stats));

  next_operand_entry_id = 0;

  /* Reverse RPO (Reverse Post Order) will give us something where
     deeper loops come later.  */
  pre_and_rev_post_order_compute (NULL, bbs, false);
  bb_rank = XCNEWVEC (long, last_basic_block_for_fn (cfun));
  operand_rank = new hash_map<tree, long>;

  /* Give each default definition a distinct rank.  This includes
     parameters and the static chain.  Walk backwards over all
     SSA names so that we get proper rank ordering according
     to tree_swap_operands_p.  */
  for (i = num_ssa_names - 1; i > 0; --i)
    {
      tree name = ssa_name (i);
      if (name && SSA_NAME_IS_DEFAULT_DEF (name))
	insert_operand_rank (name, ++rank);
    }

  /* Set up rank for each BB.  */
  for (i = 0; i < n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS; i++)
    bb_rank[bbs[i]] = ++rank << 16;

  free (bbs);
  calculate_dominance_info (CDI_POST_DOMINATORS);
  plus_negates = vNULL;
}
/* Cleanup after the reassociation pass, and print stats if
   requested.  */

static void
fini_reassoc (void)
{
  statistics_counter_event (cfun, "Linearized",
			    reassociate_stats.linearized);
  statistics_counter_event (cfun, "Constants eliminated",
			    reassociate_stats.constants_eliminated);
  statistics_counter_event (cfun, "Ops eliminated",
			    reassociate_stats.ops_eliminated);
  statistics_counter_event (cfun, "Statements rewritten",
			    reassociate_stats.rewritten);
  statistics_counter_event (cfun, "Built-in pow[i] calls encountered",
			    reassociate_stats.pows_encountered);
  statistics_counter_event (cfun, "Built-in powi calls created",
			    reassociate_stats.pows_created);

  delete operand_rank;
  operand_entry_pool.release ();
  free (bb_rank);
  plus_negates.release ();
  free_dominance_info (CDI_POST_DOMINATORS);
  loop_optimizer_finalize ();
}
/* Gate and execute functions for Reassociation.  If INSERT_POWI_P, enable
   insertion of __builtin_powi calls.

   Returns TODO_cfg_cleanup if a CFG cleanup pass is desired due to
   optimization of a gimple conditional.  Otherwise returns zero.  */

static unsigned int
execute_reassoc (bool insert_powi_p)
{
  reassoc_insert_powi_p = insert_powi_p;

  init_reassoc ();

  bool cfg_cleanup_needed;
  cfg_cleanup_needed = do_reassoc ();
  repropagate_negates ();
  branch_fixup ();

  fini_reassoc ();
  return cfg_cleanup_needed ? TODO_cleanup_cfg : 0;
}
namespace {

const pass_data pass_data_reassoc =
{
  GIMPLE_PASS, /* type */
  "reassoc", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_REASSOC, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa_only_virtuals, /* todo_flags_finish */
};

class pass_reassoc : public gimple_opt_pass
{
public:
  pass_reassoc (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_reassoc, ctxt), insert_powi_p (false)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_reassoc (m_ctxt); }
  void set_pass_param (unsigned int n, bool param)
    {
      gcc_assert (n == 0);
      insert_powi_p = param;
    }
  virtual bool gate (function *) { return flag_tree_reassoc != 0; }
  virtual unsigned int execute (function *)
    { return execute_reassoc (insert_powi_p); }

 private:
  /* Enable insertion of __builtin_powi calls during execute_reassoc.  See
     point 3a in the pass header comment.  */
  bool insert_powi_p;
}; // class pass_reassoc

} // anon namespace

gimple_opt_pass *
make_pass_reassoc (gcc::context *ctxt)
{
  return new pass_reassoc (ctxt);
}