1 /* Reassociation for trees.
2 Copyright (C) 2005-2017 Free Software Foundation, Inc.
3 Contributed by Daniel Berlin <dan@dberlin.org>
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option) any later version.
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
23 #include "coretypes.h"
30 #include "alloc-pool.h"
31 #include "tree-pass.h"
35 #include "optabs-tree.h"
36 #include "gimple-pretty-print.h"
37 #include "diagnostic-core.h"
38 #include "fold-const.h"
39 #include "stor-layout.h"
41 #include "gimple-fold.h"
43 #include "gimple-iterator.h"
44 #include "gimplify-me.h"
46 #include "tree-ssa-loop.h"
49 #include "langhooks.h"
54 #include "case-cfn-macros.h"
56 /* This is a simple global reassociation pass. It is, in part, based
57 on the LLVM pass of the same name (They do some things more/less
58 than we do, in different orders, etc).
60 It consists of five steps:
62 1. Breaking up subtract operations into addition + negate, where
63 it would promote the reassociation of adds.
65 2. Left linearization of the expression trees, so that (A+B)+(C+D)
66 becomes (((A+B)+C)+D), which is easier for us to rewrite later.
67 During linearization, we place the operands of the binary
68 expressions into a vector of operand_entry_*
3. Optimization of the operand lists, eliminating things like a + -a, a & a, etc.
73 3a. Combine repeated factors with the same occurrence counts
74 into a __builtin_powi call that will later be optimized into
75 an optimal number of multiplies.
77 4. Rewrite the expression trees we linearized and optimized so
78 they are in proper rank order.
80 5. Repropagate negates, as nothing else will clean it up ATM.
82 A bit of theory on #4, since nobody seems to write anything down
83 about why it makes sense to do it the way they do it:
85 We could do this much nicer theoretically, but don't (for reasons
86 explained after how to do it theoretically nice :P).
88 In order to promote the most redundancy elimination, you want
89 binary expressions whose operands are the same rank (or
90 preferably, the same value) exposed to the redundancy eliminator,
91 for possible elimination.
93 So the way to do this if we really cared, is to build the new op
94 tree from the leaves to the roots, merging as you go, and putting the
95 new op on the end of the worklist, until you are left with one
96 thing on the worklist.
98 IE if you have to rewrite the following set of operands (listed with
99 rank in parentheses), with opcode PLUS_EXPR:
101 a (1), b (1), c (1), d (2), e (2)
We start with our merge worklist empty, and the ops list with all of
those on it.

You want to first merge all leaves of the same rank, as much as
possible.
110 So first build a binary op of
112 mergetmp = a + b, and put "mergetmp" on the merge worklist.
114 Because there is no three operand form of PLUS_EXPR, c is not going to
115 be exposed to redundancy elimination as a rank 1 operand.
117 So you might as well throw it on the merge worklist (you could also
118 consider it to now be a rank two operand, and merge it with d and e,
119 but in this case, you then have evicted e from a binary op. So at
120 least in this situation, you can't win.)
Then build a binary op of d + e

mergetmp2 = d + e

and put mergetmp2 on the merge worklist.
127 so merge worklist = {mergetmp, c, mergetmp2}
129 Continue building binary ops of these operations until you have only
130 one operation left on the worklist.
135 mergetmp3 = mergetmp + c
137 worklist = {mergetmp2, mergetmp3}
139 mergetmp4 = mergetmp2 + mergetmp3
141 worklist = {mergetmp4}
143 because we have one operation left, we can now just set the original
144 statement equal to the result of that operation.
146 This will at least expose a + b and d + e to redundancy elimination
147 as binary operations.
149 For extra points, you can reuse the old statements to build the
150 mergetmps, since you shouldn't run out.
152 So why don't we do this?
154 Because it's expensive, and rarely will help. Most trees we are
reassociating have 3 or fewer ops.  If they have 2 ops, they already
156 will be written into a nice single binary op. If you have 3 ops, a
157 single simple check suffices to tell you whether the first two are of the
same rank.  If so, you know to order it

mergetmp = op1 + op2
newstmt = mergetmp + op3

instead of

mergetmp = op2 + op3
newstmt = mergetmp + op1
167 If all three are of the same rank, you can't expose them all in a
single binary operator anyway, so the above is *still* the best you can do.
171 Thus, this is what we do. When we have three ops left, we check to see
172 what order to put them in, and call it a day. As a nod to vector sum
173 reduction, we check if any of the ops are really a phi node that is a
174 destructive update for the associating op, and keep the destructive
175 update together for vector sum reduction recognition. */
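/* An illustrative sketch (not taken from any particular testcase) of how
   steps 1, 2 and 4 interact on a simple source expression:

     e = (a - b) + (c + a)

   Step 1 breaks up the subtract:                e = (a + (-b)) + (c + a)
   Step 2 linearizes it into an operand list:    {a, -b, c, a}
   Step 4 rewrites the list in rank order, e.g.:

     t1 = a + a
     t2 = t1 + (-b)
     e  = t2 + c

   which exposes a + a (and any other equal-ranked pair) to the later
   redundancy eliminator.  */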
177 /* Enable insertion of __builtin_powi calls during execute_reassoc. See
178 point 3a in the pass header comment. */
static bool reassoc_insert_powi_p;
/* Statistics */
static struct
{
  int constants_eliminated;
  int ops_eliminated;
  int pows_encountered;
} reassociate_stats;
/* Operator, rank pair.  */
struct operand_entry
{
  unsigned int rank;
  unsigned int id;
  tree op;
  unsigned int count;
  gimple *stmt_to_insert;
};
static object_allocator<operand_entry> operand_entry_pool
  ("operand entry pool");
205 /* This is used to assign a unique ID to each struct operand_entry
206 so that qsort results are identical on different hosts. */
static unsigned int next_operand_entry_id;
209 /* Starting rank number for a given basic block, so that we can rank
operations using unmovable instructions in that BB based on the bb
depth.  */
static long *bb_rank;
214 /* Operand->rank hashtable. */
static hash_map<tree, long> *operand_rank;
217 /* Vector of SSA_NAMEs on which after reassociate_bb is done with
218 all basic blocks the CFG should be adjusted - basic blocks
219 split right after that SSA_NAME's definition statement and before
220 the only use, which must be a bit ior. */
static vec<tree> reassoc_branch_fixups;
static long get_rank (tree);
static bool reassoc_stmt_dominates_stmt_p (gimple *, gimple *);
227 /* Wrapper around gsi_remove, which adjusts gimple_uid of debug stmts
228 possibly added by gsi_remove. */
231 reassoc_remove_stmt (gimple_stmt_iterator
*gsi
)
233 gimple
*stmt
= gsi_stmt (*gsi
);
235 if (!MAY_HAVE_DEBUG_STMTS
|| gimple_code (stmt
) == GIMPLE_PHI
)
236 return gsi_remove (gsi
, true);
238 gimple_stmt_iterator prev
= *gsi
;
240 unsigned uid
= gimple_uid (stmt
);
241 basic_block bb
= gimple_bb (stmt
);
242 bool ret
= gsi_remove (gsi
, true);
243 if (!gsi_end_p (prev
))
246 prev
= gsi_start_bb (bb
);
247 gimple
*end_stmt
= gsi_stmt (*gsi
);
248 while ((stmt
= gsi_stmt (prev
)) != end_stmt
)
250 gcc_assert (stmt
&& is_gimple_debug (stmt
) && gimple_uid (stmt
) == 0);
251 gimple_set_uid (stmt
, uid
);
257 /* Bias amount for loop-carried phis. We want this to be larger than
258 the depth of any reassociation tree we can see, but not larger than
259 the rank difference between two blocks. */
260 #define PHI_LOOP_BIAS (1 << 15)
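/* A worked illustration of the constraint above (a sketch, assuming block
   ranks are spaced apart by a power of two larger than this bias): if the
   loop latch block has rank R, a biased loop-carried phi gets rank
   R + (1 << 15), which is larger than any in-block expression rank yet
   still smaller than the rank of the following block, so the bias only
   reorders operands within one block's reassociation.  */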
262 /* Rank assigned to a phi statement. If STMT is a loop-carried phi of
263 an innermost loop, and the phi has only a single use which is inside
264 the loop, then the rank is the block rank of the loop latch plus an
265 extra bias for the loop-carried dependence. This causes expressions
266 calculated into an accumulator variable to be independent for each
267 iteration of the loop. If STMT is some other phi, the rank is the
268 block rank of its containing block. */
270 phi_rank (gimple
*stmt
)
272 basic_block bb
= gimple_bb (stmt
);
273 struct loop
*father
= bb
->loop_father
;
279 /* We only care about real loops (those with a latch). */
281 return bb_rank
[bb
->index
];
283 /* Interesting phis must be in headers of innermost loops. */
284 if (bb
!= father
->header
286 return bb_rank
[bb
->index
];
288 /* Ignore virtual SSA_NAMEs. */
289 res
= gimple_phi_result (stmt
);
290 if (virtual_operand_p (res
))
291 return bb_rank
[bb
->index
];
293 /* The phi definition must have a single use, and that use must be
294 within the loop. Otherwise this isn't an accumulator pattern. */
295 if (!single_imm_use (res
, &use
, &use_stmt
)
296 || gimple_bb (use_stmt
)->loop_father
!= father
)
297 return bb_rank
[bb
->index
];
299 /* Look for phi arguments from within the loop. If found, bias this phi. */
300 for (i
= 0; i
< gimple_phi_num_args (stmt
); i
++)
302 tree arg
= gimple_phi_arg_def (stmt
, i
);
303 if (TREE_CODE (arg
) == SSA_NAME
304 && !SSA_NAME_IS_DEFAULT_DEF (arg
))
306 gimple
*def_stmt
= SSA_NAME_DEF_STMT (arg
);
307 if (gimple_bb (def_stmt
)->loop_father
== father
)
308 return bb_rank
[father
->latch
->index
] + PHI_LOOP_BIAS
;
312 /* Must be an uninteresting phi. */
313 return bb_rank
[bb
->index
];
316 /* If EXP is an SSA_NAME defined by a PHI statement that represents a
317 loop-carried dependence of an innermost loop, return TRUE; else
320 loop_carried_phi (tree exp
)
325 if (TREE_CODE (exp
) != SSA_NAME
326 || SSA_NAME_IS_DEFAULT_DEF (exp
))
329 phi_stmt
= SSA_NAME_DEF_STMT (exp
);
331 if (gimple_code (SSA_NAME_DEF_STMT (exp
)) != GIMPLE_PHI
)
334 /* Non-loop-carried phis have block rank. Loop-carried phis have
335 an additional bias added in. If this phi doesn't have block rank,
336 it's biased and should not be propagated. */
337 block_rank
= bb_rank
[gimple_bb (phi_stmt
)->index
];
339 if (phi_rank (phi_stmt
) != block_rank
)
345 /* Return the maximum of RANK and the rank that should be propagated
346 from expression OP. For most operands, this is just the rank of OP.
347 For loop-carried phis, the value is zero to avoid undoing the bias
348 in favor of the phi. */
350 propagate_rank (long rank
, tree op
)
354 if (loop_carried_phi (op
))
357 op_rank
= get_rank (op
);
359 return MAX (rank
, op_rank
);
362 /* Look up the operand rank structure for expression E. */
365 find_operand_rank (tree e
)
367 long *slot
= operand_rank
->get (e
);
368 return slot
? *slot
: -1;
371 /* Insert {E,RANK} into the operand rank hashtable. */
374 insert_operand_rank (tree e
, long rank
)
376 gcc_assert (rank
> 0);
377 gcc_assert (!operand_rank
->put (e
, rank
));
380 /* Given an expression E, return the rank of the expression. */
/* SSA_NAME's have the rank of the expression they are the result of.
   For globals and uninitialized values, the rank is 0.
   For function arguments, use the pre-setup rank.
   For PHI nodes, stores, asm statements, etc, we use the rank of the BB.
   For simple operations, the rank is the maximum rank of any of
   its operands, or the bb_rank, whichever is less.
   I make no claims that this is optimal; however, it gives good results.  */
396 /* We make an exception to the normal ranking system to break
397 dependences of accumulator variables in loops. Suppose we
have a simple one-block loop containing:

x_1 = phi(x_0, x_2)
b = a + x_1
c = b + d
x_2 = c + e

As shown, each iteration of the calculation into x is fully
406 dependent upon the iteration before it. We would prefer to
see this in the form:

x_1 = phi(x_0, x_2)
b = a + d
c = b + e
x_2 = c + x_1

If the loop is unrolled, the calculations of b and c from
415 different iterations can be interleaved.
417 To obtain this result during reassociation, we bias the rank
418 of the phi definition x_1 upward, when it is recognized as an
419 accumulator pattern. The artificial rank causes it to be
420 added last, providing the desired independence. */
422 if (TREE_CODE (e
) == SSA_NAME
)
429 if (SSA_NAME_IS_DEFAULT_DEF (e
))
430 return find_operand_rank (e
);
432 stmt
= SSA_NAME_DEF_STMT (e
);
433 if (gimple_code (stmt
) == GIMPLE_PHI
)
434 return phi_rank (stmt
);
436 if (!is_gimple_assign (stmt
))
437 return bb_rank
[gimple_bb (stmt
)->index
];
439 /* If we already have a rank for this expression, use that. */
440 rank
= find_operand_rank (e
);
444 /* Otherwise, find the maximum rank for the operands. As an
445 exception, remove the bias from loop-carried phis when propagating
446 the rank so that dependent operations are not also biased. */
/* Simply walk over all SSA uses - this takes advantage of the
   fact that non-SSA operands are is_gimple_min_invariant and
   thus have rank 0.  */
451 FOR_EACH_SSA_TREE_OPERAND (op
, stmt
, iter
, SSA_OP_USE
)
452 rank
= propagate_rank (rank
, op
);
454 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
456 fprintf (dump_file
, "Rank for ");
457 print_generic_expr (dump_file
, e
);
458 fprintf (dump_file
, " is %ld\n", (rank
+ 1));
461 /* Note the rank in the hashtable so we don't recompute it. */
462 insert_operand_rank (e
, (rank
+ 1));
466 /* Constants, globals, etc., are rank 0 */
471 /* We want integer ones to end up last no matter what, since they are
472 the ones we can do the most with. */
473 #define INTEGER_CONST_TYPE 1 << 3
474 #define FLOAT_CONST_TYPE 1 << 2
475 #define OTHER_CONST_TYPE 1 << 1
477 /* Classify an invariant tree into integer, float, or other, so that
478 we can sort them to be near other constants of the same type. */
480 constant_type (tree t
)
482 if (INTEGRAL_TYPE_P (TREE_TYPE (t
)))
483 return INTEGER_CONST_TYPE
;
484 else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (t
)))
485 return FLOAT_CONST_TYPE
;
487 return OTHER_CONST_TYPE
;
490 /* qsort comparison function to sort operand entries PA and PB by rank
491 so that the sorted array is ordered by rank in decreasing order. */
493 sort_by_operand_rank (const void *pa
, const void *pb
)
495 const operand_entry
*oea
= *(const operand_entry
*const *)pa
;
496 const operand_entry
*oeb
= *(const operand_entry
*const *)pb
;
498 /* It's nicer for optimize_expression if constants that are likely
to fold when added/multiplied/whatever are put next to each
500 other. Since all constants have rank 0, order them by type. */
501 if (oeb
->rank
== 0 && oea
->rank
== 0)
503 if (constant_type (oeb
->op
) != constant_type (oea
->op
))
504 return constant_type (oeb
->op
) - constant_type (oea
->op
);
/* To make sorting result stable, we use unique IDs to determine order.  */
508 return oeb
->id
> oea
->id
? 1 : -1;
/* Lastly, make sure the versions that are the same go next to each other.  */
513 if (oeb
->rank
== oea
->rank
514 && TREE_CODE (oea
->op
) == SSA_NAME
515 && TREE_CODE (oeb
->op
) == SSA_NAME
)
/* SSA_NAME_VERSION is assigned pretty randomly, because we reuse
   versions of removed SSA_NAMEs, so if possible, prefer to sort
   based on basic block and gimple_uid of the SSA_NAME_DEF_STMT.  */
521 if (!SSA_NAME_IS_DEFAULT_DEF (oea
->op
)
522 && !SSA_NAME_IS_DEFAULT_DEF (oeb
->op
)
523 && !oea
->stmt_to_insert
524 && !oeb
->stmt_to_insert
525 && SSA_NAME_VERSION (oeb
->op
) != SSA_NAME_VERSION (oea
->op
))
527 gimple
*stmta
= SSA_NAME_DEF_STMT (oea
->op
);
528 gimple
*stmtb
= SSA_NAME_DEF_STMT (oeb
->op
);
529 basic_block bba
= gimple_bb (stmta
);
530 basic_block bbb
= gimple_bb (stmtb
);
533 if (bb_rank
[bbb
->index
] != bb_rank
[bba
->index
])
534 return bb_rank
[bbb
->index
] - bb_rank
[bba
->index
];
538 bool da
= reassoc_stmt_dominates_stmt_p (stmta
, stmtb
);
539 bool db
= reassoc_stmt_dominates_stmt_p (stmtb
, stmta
);
545 if (SSA_NAME_VERSION (oeb
->op
) != SSA_NAME_VERSION (oea
->op
))
546 return SSA_NAME_VERSION (oeb
->op
) > SSA_NAME_VERSION (oea
->op
) ? 1 : -1;
548 return oeb
->id
> oea
->id
? 1 : -1;
551 if (oeb
->rank
!= oea
->rank
)
552 return oeb
->rank
> oea
->rank
? 1 : -1;
554 return oeb
->id
> oea
->id
? 1 : -1;
557 /* Add an operand entry to *OPS for the tree operand OP. */
560 add_to_ops_vec (vec
<operand_entry
*> *ops
, tree op
, gimple
*stmt_to_insert
= NULL
)
562 operand_entry
*oe
= operand_entry_pool
.allocate ();
565 oe
->rank
= get_rank (op
);
566 oe
->id
= next_operand_entry_id
++;
568 oe
->stmt_to_insert
= stmt_to_insert
;
/* Add an operand entry to *OPS for the tree operand OP with repeat count REPEAT.  */
576 add_repeat_to_ops_vec (vec
<operand_entry
*> *ops
, tree op
,
577 HOST_WIDE_INT repeat
)
579 operand_entry
*oe
= operand_entry_pool
.allocate ();
582 oe
->rank
= get_rank (op
);
583 oe
->id
= next_operand_entry_id
++;
585 oe
->stmt_to_insert
= NULL
;
588 reassociate_stats
.pows_encountered
++;
/* Return true if STMT is a reassociable operation containing a binary
   operation with tree code CODE, and is inside LOOP.  */
595 is_reassociable_op (gimple
*stmt
, enum tree_code code
, struct loop
*loop
)
597 basic_block bb
= gimple_bb (stmt
);
599 if (gimple_bb (stmt
) == NULL
)
602 if (!flow_bb_inside_loop_p (loop
, bb
))
605 if (is_gimple_assign (stmt
)
606 && gimple_assign_rhs_code (stmt
) == code
607 && has_single_use (gimple_assign_lhs (stmt
)))
tree rhs1 = gimple_assign_rhs1 (stmt);
tree rhs2 = gimple_assign_rhs2 (stmt);
611 if (TREE_CODE (rhs1
) == SSA_NAME
612 && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs1
))
615 && TREE_CODE (rhs2
) == SSA_NAME
616 && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs2
))
625 /* Return true if STMT is a nop-conversion. */
628 gimple_nop_conversion_p (gimple
*stmt
)
630 if (gassign
*ass
= dyn_cast
<gassign
*> (stmt
))
632 if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (ass
))
633 && tree_nop_conversion_p (TREE_TYPE (gimple_assign_lhs (ass
)),
634 TREE_TYPE (gimple_assign_rhs1 (ass
))))
/* Given NAME, if NAME is defined by a unary operation OPCODE, return the
   operand of that operation.  Otherwise, return NULL.  */
644 get_unary_op (tree name
, enum tree_code opcode
)
646 gimple
*stmt
= SSA_NAME_DEF_STMT (name
);
648 /* Look through nop conversions (sign changes). */
649 if (gimple_nop_conversion_p (stmt
)
650 && TREE_CODE (gimple_assign_rhs1 (stmt
)) == SSA_NAME
)
651 stmt
= SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt
));
653 if (!is_gimple_assign (stmt
))
656 if (gimple_assign_rhs_code (stmt
) == opcode
)
657 return gimple_assign_rhs1 (stmt
);
661 /* Return true if OP1 and OP2 have the same value if casted to either type. */
664 ops_equal_values_p (tree op1
, tree op2
)
670 if (TREE_CODE (op1
) == SSA_NAME
)
672 gimple
*stmt
= SSA_NAME_DEF_STMT (op1
);
673 if (gimple_nop_conversion_p (stmt
))
675 op1
= gimple_assign_rhs1 (stmt
);
681 if (TREE_CODE (op2
) == SSA_NAME
)
683 gimple
*stmt
= SSA_NAME_DEF_STMT (op2
);
684 if (gimple_nop_conversion_p (stmt
))
686 op2
= gimple_assign_rhs1 (stmt
);
697 /* If CURR and LAST are a pair of ops that OPCODE allows us to
698 eliminate through equivalences, do so, remove them from OPS, and
699 return true. Otherwise, return false. */
702 eliminate_duplicate_pair (enum tree_code opcode
,
703 vec
<operand_entry
*> *ops
,
710 /* If we have two of the same op, and the opcode is & |, min, or max,
711 we can eliminate one of them.
712 If we have two of the same op, and the opcode is ^, we can
713 eliminate both of them. */
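/* For example (an illustrative sketch):  in  x & x & y  one of the two x
   operands is simply dropped, while in  x ^ x  both operands are dropped
   and a 0 of the same type is pushed in their place; in  x ^ x ^ y  both
   x operands are dropped, leaving just y.  */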
715 if (last
&& last
->op
== curr
->op
)
723 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
725 fprintf (dump_file
, "Equivalence: ");
726 print_generic_expr (dump_file
, curr
->op
);
727 fprintf (dump_file
, " [&|minmax] ");
728 print_generic_expr (dump_file
, last
->op
);
729 fprintf (dump_file
, " -> ");
730 print_generic_stmt (dump_file
, last
->op
);
733 ops
->ordered_remove (i
);
734 reassociate_stats
.ops_eliminated
++;
739 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
741 fprintf (dump_file
, "Equivalence: ");
742 print_generic_expr (dump_file
, curr
->op
);
743 fprintf (dump_file
, " ^ ");
744 print_generic_expr (dump_file
, last
->op
);
745 fprintf (dump_file
, " -> nothing\n");
748 reassociate_stats
.ops_eliminated
+= 2;
750 if (ops
->length () == 2)
753 add_to_ops_vec (ops
, build_zero_cst (TREE_TYPE (last
->op
)));
758 ops
->ordered_remove (i
-1);
759 ops
->ordered_remove (i
-1);
771 static vec
<tree
> plus_negates
;
773 /* If OPCODE is PLUS_EXPR, CURR->OP is a negate expression or a bitwise not
774 expression, look in OPS for a corresponding positive operation to cancel
775 it out. If we find one, remove the other from OPS, replace
OPS[CURRINDEX] with 0 or -1, respectively, and return true.  Otherwise, return false.
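/* For example (an illustrative sketch):  in  a + (-b) + b  the negate and
   its positive counterpart cancel and are replaced by 0, giving  a + 0;
   in  a + (~b) + b  the pair folds to -1 (all ones), giving  a + (-1).  */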
780 eliminate_plus_minus_pair (enum tree_code opcode
,
781 vec
<operand_entry
*> *ops
,
782 unsigned int currindex
,
790 if (opcode
!= PLUS_EXPR
|| TREE_CODE (curr
->op
) != SSA_NAME
)
793 negateop
= get_unary_op (curr
->op
, NEGATE_EXPR
);
794 notop
= get_unary_op (curr
->op
, BIT_NOT_EXPR
);
795 if (negateop
== NULL_TREE
&& notop
== NULL_TREE
)
798 /* Any non-negated version will have a rank that is one less than
the current rank.  So once we hit those ranks, if we don't find one, we can stop.  */
802 for (i
= currindex
+ 1;
803 ops
->iterate (i
, &oe
)
804 && oe
->rank
>= curr
->rank
- 1 ;
808 && ops_equal_values_p (oe
->op
, negateop
))
810 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
812 fprintf (dump_file
, "Equivalence: ");
813 print_generic_expr (dump_file
, negateop
);
814 fprintf (dump_file
, " + -");
815 print_generic_expr (dump_file
, oe
->op
);
816 fprintf (dump_file
, " -> 0\n");
819 ops
->ordered_remove (i
);
820 add_to_ops_vec (ops
, build_zero_cst (TREE_TYPE (oe
->op
)));
821 ops
->ordered_remove (currindex
);
822 reassociate_stats
.ops_eliminated
++;
827 && ops_equal_values_p (oe
->op
, notop
))
829 tree op_type
= TREE_TYPE (oe
->op
);
831 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
833 fprintf (dump_file
, "Equivalence: ");
834 print_generic_expr (dump_file
, notop
);
835 fprintf (dump_file
, " + ~");
836 print_generic_expr (dump_file
, oe
->op
);
837 fprintf (dump_file
, " -> -1\n");
840 ops
->ordered_remove (i
);
841 add_to_ops_vec (ops
, build_all_ones_cst (op_type
));
842 ops
->ordered_remove (currindex
);
843 reassociate_stats
.ops_eliminated
++;
849 /* If CURR->OP is a negate expr without nop conversion in a plus expr:
850 save it for later inspection in repropagate_negates(). */
851 if (negateop
!= NULL_TREE
852 && gimple_assign_rhs_code (SSA_NAME_DEF_STMT (curr
->op
)) == NEGATE_EXPR
)
853 plus_negates
.safe_push (curr
->op
);
/* If OPCODE is BIT_IOR_EXPR or BIT_AND_EXPR, and CURR->OP is really a
   bitwise not expression, look in OPS for a corresponding operand to
   cancel it out.  If we find one, remove the other from OPS, replace
   OPS[CURRINDEX] with 0, and return true.  Otherwise, return false.
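/* For example (an illustrative sketch):  x & ~x & y  collapses to 0 (so the
   whole AND chain is 0), and  x | ~x | y  collapses to -1 (all ones),
   making every other operand in the list irrelevant.  */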
865 eliminate_not_pairs (enum tree_code opcode
,
866 vec
<operand_entry
*> *ops
,
867 unsigned int currindex
,
874 if ((opcode
!= BIT_IOR_EXPR
&& opcode
!= BIT_AND_EXPR
)
875 || TREE_CODE (curr
->op
) != SSA_NAME
)
878 notop
= get_unary_op (curr
->op
, BIT_NOT_EXPR
);
879 if (notop
== NULL_TREE
)
882 /* Any non-not version will have a rank that is one less than
the current rank.  So once we hit those ranks, if we don't find one, we can stop.  */
886 for (i
= currindex
+ 1;
887 ops
->iterate (i
, &oe
)
888 && oe
->rank
>= curr
->rank
- 1;
893 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
895 fprintf (dump_file
, "Equivalence: ");
896 print_generic_expr (dump_file
, notop
);
897 if (opcode
== BIT_AND_EXPR
)
898 fprintf (dump_file
, " & ~");
899 else if (opcode
== BIT_IOR_EXPR
)
900 fprintf (dump_file
, " | ~");
901 print_generic_expr (dump_file
, oe
->op
);
902 if (opcode
== BIT_AND_EXPR
)
903 fprintf (dump_file
, " -> 0\n");
904 else if (opcode
== BIT_IOR_EXPR
)
905 fprintf (dump_file
, " -> -1\n");
908 if (opcode
== BIT_AND_EXPR
)
909 oe
->op
= build_zero_cst (TREE_TYPE (oe
->op
));
910 else if (opcode
== BIT_IOR_EXPR
)
911 oe
->op
= build_all_ones_cst (TREE_TYPE (oe
->op
));
913 reassociate_stats
.ops_eliminated
+= ops
->length () - 1;
915 ops
->quick_push (oe
);
923 /* Use constant value that may be present in OPS to try to eliminate
924 operands. Note that this function is only really used when we've
925 eliminated ops for other reasons, or merged constants. Across
926 single statements, fold already does all of this, plus more. There
927 is little point in duplicating logic, so I've only included the
928 identities that I could ever construct testcases to trigger. */
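/* A few of the identities handled below, as an illustrative sketch:
     x & 0   -> 0    (all other ops dropped)
     x & -1  -> x    (the -1 dropped)
     x | -1  -> -1   (all other ops dropped)
     x | 0   -> x
     x * 0   -> 0    (for floats only when NaNs and signed zeros can be ignored)
     x * 1   -> x    (for floats only without signaling NaNs)
     x + 0   -> x    (for floats only when the zero addition can be folded)  */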
931 eliminate_using_constants (enum tree_code opcode
,
932 vec
<operand_entry
*> *ops
)
934 operand_entry
*oelast
= ops
->last ();
935 tree type
= TREE_TYPE (oelast
->op
);
937 if (oelast
->rank
== 0
938 && (ANY_INTEGRAL_TYPE_P (type
) || FLOAT_TYPE_P (type
)))
943 if (integer_zerop (oelast
->op
))
945 if (ops
->length () != 1)
947 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
948 fprintf (dump_file
, "Found & 0, removing all other ops\n");
950 reassociate_stats
.ops_eliminated
+= ops
->length () - 1;
953 ops
->quick_push (oelast
);
957 else if (integer_all_onesp (oelast
->op
))
959 if (ops
->length () != 1)
961 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
962 fprintf (dump_file
, "Found & -1, removing\n");
964 reassociate_stats
.ops_eliminated
++;
969 if (integer_all_onesp (oelast
->op
))
971 if (ops
->length () != 1)
973 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
974 fprintf (dump_file
, "Found | -1, removing all other ops\n");
976 reassociate_stats
.ops_eliminated
+= ops
->length () - 1;
979 ops
->quick_push (oelast
);
983 else if (integer_zerop (oelast
->op
))
985 if (ops
->length () != 1)
987 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
988 fprintf (dump_file
, "Found | 0, removing\n");
990 reassociate_stats
.ops_eliminated
++;
995 if (integer_zerop (oelast
->op
)
996 || (FLOAT_TYPE_P (type
)
997 && !HONOR_NANS (type
)
998 && !HONOR_SIGNED_ZEROS (type
)
999 && real_zerop (oelast
->op
)))
1001 if (ops
->length () != 1)
1003 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1004 fprintf (dump_file
, "Found * 0, removing all other ops\n");
1006 reassociate_stats
.ops_eliminated
+= ops
->length () - 1;
1008 ops
->quick_push (oelast
);
1012 else if (integer_onep (oelast
->op
)
1013 || (FLOAT_TYPE_P (type
)
1014 && !HONOR_SNANS (type
)
1015 && real_onep (oelast
->op
)))
1017 if (ops
->length () != 1)
1019 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1020 fprintf (dump_file
, "Found * 1, removing\n");
1022 reassociate_stats
.ops_eliminated
++;
1030 if (integer_zerop (oelast
->op
)
1031 || (FLOAT_TYPE_P (type
)
1032 && (opcode
== PLUS_EXPR
|| opcode
== MINUS_EXPR
)
1033 && fold_real_zero_addition_p (type
, oelast
->op
,
1034 opcode
== MINUS_EXPR
)))
1036 if (ops
->length () != 1)
1038 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1039 fprintf (dump_file
, "Found [|^+] 0, removing\n");
1041 reassociate_stats
.ops_eliminated
++;
1053 static void linearize_expr_tree (vec
<operand_entry
*> *, gimple
*,
/* Structure for tracking and counting operands.  */
struct oecount
{
  unsigned int cnt;
  unsigned int id;
  enum tree_code oecode;
  tree op;
};
1065 /* The heap for the oecount hashtable and the sorted list of operands. */
static vec<oecount> cvec;
1069 /* Oecount hashtable helpers. */
struct oecount_hasher : int_hash <int, 0, 1>
{
  static inline hashval_t hash (int);
  static inline bool equal (int, int);
};
1077 /* Hash function for oecount. */
1080 oecount_hasher::hash (int p
)
1082 const oecount
*c
= &cvec
[p
- 42];
1083 return htab_hash_pointer (c
->op
) ^ (hashval_t
)c
->oecode
;
1086 /* Comparison function for oecount. */
1089 oecount_hasher::equal (int p1
, int p2
)
1091 const oecount
*c1
= &cvec
[p1
- 42];
1092 const oecount
*c2
= &cvec
[p2
- 42];
1093 return c1
->oecode
== c2
->oecode
&& c1
->op
== c2
->op
;
1096 /* Comparison function for qsort sorting oecount elements by count. */
1099 oecount_cmp (const void *p1
, const void *p2
)
1101 const oecount
*c1
= (const oecount
*)p1
;
1102 const oecount
*c2
= (const oecount
*)p2
;
1103 if (c1
->cnt
!= c2
->cnt
)
1104 return c1
->cnt
> c2
->cnt
? 1 : -1;
1106 /* If counts are identical, use unique IDs to stabilize qsort. */
1107 return c1
->id
> c2
->id
? 1 : -1;
1110 /* Return TRUE iff STMT represents a builtin call that raises OP
1111 to some exponent. */
1114 stmt_is_power_of_op (gimple
*stmt
, tree op
)
1116 if (!is_gimple_call (stmt
))
1119 switch (gimple_call_combined_fn (stmt
))
1123 return (operand_equal_p (gimple_call_arg (stmt
, 0), op
, 0));
1130 /* Given STMT which is a __builtin_pow* call, decrement its exponent
1131 in place and return the result. Assumes that stmt_is_power_of_op
1132 was previously called for STMT and returned TRUE. */
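/* For example (an illustrative sketch):  for a call  __builtin_powi (x, 5)
   the exponent operand is rewritten in place to 4 and the value 4 is
   returned; for  __builtin_pow (x, 3.0)  the exponent becomes 2.0 and 2 is
   returned.  */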
1134 static HOST_WIDE_INT
1135 decrement_power (gimple
*stmt
)
1137 REAL_VALUE_TYPE c
, cint
;
1138 HOST_WIDE_INT power
;
1141 switch (gimple_call_combined_fn (stmt
))
1144 arg1
= gimple_call_arg (stmt
, 1);
1145 c
= TREE_REAL_CST (arg1
);
1146 power
= real_to_integer (&c
) - 1;
1147 real_from_integer (&cint
, VOIDmode
, power
, SIGNED
);
1148 gimple_call_set_arg (stmt
, 1, build_real (TREE_TYPE (arg1
), cint
));
1152 arg1
= gimple_call_arg (stmt
, 1);
1153 power
= TREE_INT_CST_LOW (arg1
) - 1;
1154 gimple_call_set_arg (stmt
, 1, build_int_cst (TREE_TYPE (arg1
), power
));
/* Give STMT a new LHS SSA name and rewrite all uses of the old LHS
   (including debug uses) to the new one.  Return the new SSA name.  */
1166 make_new_ssa_for_def (gimple
*stmt
, enum tree_code opcode
, tree op
)
1170 imm_use_iterator iter
;
1171 tree new_lhs
, new_debug_lhs
= NULL_TREE
;
1172 tree lhs
= gimple_get_lhs (stmt
);
1174 new_lhs
= make_ssa_name (TREE_TYPE (lhs
));
1175 gimple_set_lhs (stmt
, new_lhs
);
1177 /* Also need to update GIMPLE_DEBUGs. */
1178 FOR_EACH_IMM_USE_STMT (use_stmt
, iter
, lhs
)
1180 tree repl
= new_lhs
;
1181 if (is_gimple_debug (use_stmt
))
1183 if (new_debug_lhs
== NULL_TREE
)
1185 new_debug_lhs
= make_node (DEBUG_EXPR_DECL
);
1187 = gimple_build_debug_bind (new_debug_lhs
,
1188 build2 (opcode
, TREE_TYPE (lhs
),
1191 DECL_ARTIFICIAL (new_debug_lhs
) = 1;
1192 TREE_TYPE (new_debug_lhs
) = TREE_TYPE (lhs
);
1193 SET_DECL_MODE (new_debug_lhs
, TYPE_MODE (TREE_TYPE (lhs
)));
1194 gimple_set_uid (def_temp
, gimple_uid (stmt
));
1195 gimple_stmt_iterator gsi
= gsi_for_stmt (stmt
);
1196 gsi_insert_after (&gsi
, def_temp
, GSI_SAME_STMT
);
1198 repl
= new_debug_lhs
;
1200 FOR_EACH_IMM_USE_ON_STMT (use
, iter
)
1201 SET_USE (use
, repl
);
1202 update_stmt (use_stmt
);
/* Give new SSA names to all SSA names defined in STMTS_TO_FIX and rewrite
   their uses accordingly.  Also do this for the stmt that defines DEF
1209 if *DEF is not OP. */
1212 make_new_ssa_for_all_defs (tree
*def
, enum tree_code opcode
, tree op
,
1213 vec
<gimple
*> &stmts_to_fix
)
1219 && TREE_CODE (*def
) == SSA_NAME
1220 && (stmt
= SSA_NAME_DEF_STMT (*def
))
1221 && gimple_code (stmt
) != GIMPLE_NOP
)
1222 *def
= make_new_ssa_for_def (stmt
, opcode
, op
);
1224 FOR_EACH_VEC_ELT (stmts_to_fix
, i
, stmt
)
1225 make_new_ssa_for_def (stmt
, opcode
, op
);
1228 /* Find the single immediate use of STMT's LHS, and replace it
1229 with OP. Remove STMT. If STMT's LHS is the same as *DEF,
1230 replace *DEF with OP as well. */
1233 propagate_op_to_single_use (tree op
, gimple
*stmt
, tree
*def
)
1238 gimple_stmt_iterator gsi
;
1240 if (is_gimple_call (stmt
))
1241 lhs
= gimple_call_lhs (stmt
);
1243 lhs
= gimple_assign_lhs (stmt
);
1245 gcc_assert (has_single_use (lhs
));
1246 single_imm_use (lhs
, &use
, &use_stmt
);
1250 if (TREE_CODE (op
) != SSA_NAME
)
1251 update_stmt (use_stmt
);
1252 gsi
= gsi_for_stmt (stmt
);
1253 unlink_stmt_vdef (stmt
);
1254 reassoc_remove_stmt (&gsi
);
1255 release_defs (stmt
);
/* Walks the linear chain with result *DEF searching for an operation
   with operand OP and code OPCODE, removing that from the chain.  *DEF
1260 is updated if there is only one operand but no operation left. */
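/* For example (an illustrative sketch):  given the chain

     t1 = a * b
     t2 = t1 * c          (*def refers to t2)

   calling zero_one_operation with OPCODE == MULT_EXPR and OP == b strips
   the multiply by b, leaving a chain that computes  a * c  (rewritten onto
   fresh SSA names, see the PR72835 note below).  */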
1263 zero_one_operation (tree
*def
, enum tree_code opcode
, tree op
)
1265 tree orig_def
= *def
;
1266 gimple
*stmt
= SSA_NAME_DEF_STMT (*def
);
1267 /* PR72835 - Record the stmt chain that has to be updated such that
we don't use the same LHS when the values computed are different.  */
1269 auto_vec
<gimple
*, 64> stmts_to_fix
;
1275 if (opcode
== MULT_EXPR
)
1277 if (stmt_is_power_of_op (stmt
, op
))
1279 if (decrement_power (stmt
) == 1)
1281 if (stmts_to_fix
.length () > 0)
1282 stmts_to_fix
.pop ();
1283 propagate_op_to_single_use (op
, stmt
, def
);
1287 else if (gimple_assign_rhs_code (stmt
) == NEGATE_EXPR
)
1289 if (gimple_assign_rhs1 (stmt
) == op
)
1291 tree cst
= build_minus_one_cst (TREE_TYPE (op
));
1292 if (stmts_to_fix
.length () > 0)
1293 stmts_to_fix
.pop ();
1294 propagate_op_to_single_use (cst
, stmt
, def
);
1297 else if (integer_minus_onep (op
)
1298 || real_minus_onep (op
))
1300 gimple_assign_set_rhs_code
1301 (stmt
, TREE_CODE (gimple_assign_rhs1 (stmt
)));
1307 name
= gimple_assign_rhs1 (stmt
);
1309 /* If this is the operation we look for and one of the operands
is ours, simply propagate the other operand into the stmt's single use.  */
1312 if (gimple_assign_rhs_code (stmt
) == opcode
1314 || gimple_assign_rhs2 (stmt
) == op
))
1317 name
= gimple_assign_rhs2 (stmt
);
1318 if (stmts_to_fix
.length () > 0)
1319 stmts_to_fix
.pop ();
1320 propagate_op_to_single_use (name
, stmt
, def
);
1324 /* We might have a multiply of two __builtin_pow* calls, and
1325 the operand might be hiding in the rightmost one. Likewise
1326 this can happen for a negate. */
1327 if (opcode
== MULT_EXPR
1328 && gimple_assign_rhs_code (stmt
) == opcode
1329 && TREE_CODE (gimple_assign_rhs2 (stmt
)) == SSA_NAME
1330 && has_single_use (gimple_assign_rhs2 (stmt
)))
1332 gimple
*stmt2
= SSA_NAME_DEF_STMT (gimple_assign_rhs2 (stmt
));
1333 if (stmt_is_power_of_op (stmt2
, op
))
1335 if (decrement_power (stmt2
) == 1)
1336 propagate_op_to_single_use (op
, stmt2
, def
);
1338 stmts_to_fix
.safe_push (stmt2
);
1341 else if (is_gimple_assign (stmt2
)
1342 && gimple_assign_rhs_code (stmt2
) == NEGATE_EXPR
)
1344 if (gimple_assign_rhs1 (stmt2
) == op
)
1346 tree cst
= build_minus_one_cst (TREE_TYPE (op
));
1347 propagate_op_to_single_use (cst
, stmt2
, def
);
1350 else if (integer_minus_onep (op
)
1351 || real_minus_onep (op
))
1353 stmts_to_fix
.safe_push (stmt2
);
1354 gimple_assign_set_rhs_code
1355 (stmt2
, TREE_CODE (gimple_assign_rhs1 (stmt2
)));
1361 /* Continue walking the chain. */
1362 gcc_assert (name
!= op
1363 && TREE_CODE (name
) == SSA_NAME
);
1364 stmt
= SSA_NAME_DEF_STMT (name
);
1365 stmts_to_fix
.safe_push (stmt
);
1369 if (stmts_to_fix
.length () > 0 || *def
== orig_def
)
1370 make_new_ssa_for_all_defs (def
, opcode
, op
, stmts_to_fix
);
1373 /* Returns true if statement S1 dominates statement S2. Like
1374 stmt_dominates_stmt_p, but uses stmt UIDs to optimize. */
1377 reassoc_stmt_dominates_stmt_p (gimple
*s1
, gimple
*s2
)
1379 basic_block bb1
= gimple_bb (s1
), bb2
= gimple_bb (s2
);
1381 /* If bb1 is NULL, it should be a GIMPLE_NOP def stmt of an (D)
1382 SSA_NAME. Assume it lives at the beginning of function and
1383 thus dominates everything. */
1384 if (!bb1
|| s1
== s2
)
1387 /* If bb2 is NULL, it doesn't dominate any stmt with a bb. */
1393 /* PHIs in the same basic block are assumed to be
executed all in parallel; if only one stmt is a PHI,
1395 it dominates the other stmt in the same basic block. */
1396 if (gimple_code (s1
) == GIMPLE_PHI
)
1399 if (gimple_code (s2
) == GIMPLE_PHI
)
1402 gcc_assert (gimple_uid (s1
) && gimple_uid (s2
));
1404 if (gimple_uid (s1
) < gimple_uid (s2
))
1407 if (gimple_uid (s1
) > gimple_uid (s2
))
1410 gimple_stmt_iterator gsi
= gsi_for_stmt (s1
);
1411 unsigned int uid
= gimple_uid (s1
);
1412 for (gsi_next (&gsi
); !gsi_end_p (gsi
); gsi_next (&gsi
))
1414 gimple
*s
= gsi_stmt (gsi
);
1415 if (gimple_uid (s
) != uid
)
1424 return dominated_by_p (CDI_DOMINATORS
, bb2
, bb1
);
1427 /* Insert STMT after INSERT_POINT. */
1430 insert_stmt_after (gimple
*stmt
, gimple
*insert_point
)
1432 gimple_stmt_iterator gsi
;
1435 if (gimple_code (insert_point
) == GIMPLE_PHI
)
1436 bb
= gimple_bb (insert_point
);
1437 else if (!stmt_ends_bb_p (insert_point
))
1439 gsi
= gsi_for_stmt (insert_point
);
1440 gimple_set_uid (stmt
, gimple_uid (insert_point
));
1441 gsi_insert_after (&gsi
, stmt
, GSI_NEW_STMT
);
1445 /* We assume INSERT_POINT is a SSA_NAME_DEF_STMT of some SSA_NAME,
1446 thus if it must end a basic block, it should be a call that can
1447 throw, or some assignment that can throw. If it throws, the LHS
1448 of it will not be initialized though, so only valid places using
1449 the SSA_NAME should be dominated by the fallthru edge. */
1450 bb
= find_fallthru_edge (gimple_bb (insert_point
)->succs
)->dest
;
1451 gsi
= gsi_after_labels (bb
);
1452 if (gsi_end_p (gsi
))
1454 gimple_stmt_iterator gsi2
= gsi_last_bb (bb
);
1455 gimple_set_uid (stmt
,
1456 gsi_end_p (gsi2
) ? 1 : gimple_uid (gsi_stmt (gsi2
)));
1459 gimple_set_uid (stmt
, gimple_uid (gsi_stmt (gsi
)));
1460 gsi_insert_before (&gsi
, stmt
, GSI_SAME_STMT
);
1463 /* Builds one statement performing OP1 OPCODE OP2 using TMPVAR for
1464 the result. Places the statement after the definition of either
1465 OP1 or OP2. Returns the new statement. */
1468 build_and_add_sum (tree type
, tree op1
, tree op2
, enum tree_code opcode
)
1470 gimple
*op1def
= NULL
, *op2def
= NULL
;
1471 gimple_stmt_iterator gsi
;
1475 /* Create the addition statement. */
1476 op
= make_ssa_name (type
);
1477 sum
= gimple_build_assign (op
, opcode
, op1
, op2
);
1479 /* Find an insertion place and insert. */
1480 if (TREE_CODE (op1
) == SSA_NAME
)
1481 op1def
= SSA_NAME_DEF_STMT (op1
);
1482 if (TREE_CODE (op2
) == SSA_NAME
)
1483 op2def
= SSA_NAME_DEF_STMT (op2
);
1484 if ((!op1def
|| gimple_nop_p (op1def
))
1485 && (!op2def
|| gimple_nop_p (op2def
)))
1487 gsi
= gsi_after_labels (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun
)));
1488 if (gsi_end_p (gsi
))
1490 gimple_stmt_iterator gsi2
1491 = gsi_last_bb (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun
)));
1492 gimple_set_uid (sum
,
1493 gsi_end_p (gsi2
) ? 1 : gimple_uid (gsi_stmt (gsi2
)));
1496 gimple_set_uid (sum
, gimple_uid (gsi_stmt (gsi
)));
1497 gsi_insert_before (&gsi
, sum
, GSI_NEW_STMT
);
1501 gimple
*insert_point
;
1502 if ((!op1def
|| gimple_nop_p (op1def
))
1503 || (op2def
&& !gimple_nop_p (op2def
)
1504 && reassoc_stmt_dominates_stmt_p (op1def
, op2def
)))
1505 insert_point
= op2def
;
1507 insert_point
= op1def
;
1508 insert_stmt_after (sum
, insert_point
);
1515 /* Perform un-distribution of divisions and multiplications.
1516 A * X + B * X is transformed into (A + B) * X and A / X + B / X
1517 to (A + B) / X for real X.
1519 The algorithm is organized as follows.
1521 - First we walk the addition chain *OPS looking for summands that
1522 are defined by a multiplication or a real division. This results
1523 in the candidates bitmap with relevant indices into *OPS.
1525 - Second we build the chains of multiplications or divisions for
1526 these candidates, counting the number of occurrences of (operand, code)
1527 pairs in all of the candidates chains.
1529 - Third we sort the (operand, code) pairs by number of occurrence and
1530 process them starting with the pair with the most uses.
1532 * For each such pair we walk the candidates again to build a
1533 second candidate bitmap noting all multiplication/division chains
1534 that have at least one occurrence of (operand, code).
1536 * We build an alternate addition chain only covering these
1537 candidates with one (operand, code) operation removed from their
1538 multiplication/division chain.
1540 * The first candidate gets replaced by the alternate addition chain
1541 multiplied/divided by the operand.
1543 * All candidate chains get disabled for further processing and
1544 processing of (operand, code) pairs continues.
1546 The alternate addition chains built are re-processed by the main
1547 reassociation algorithm which allows optimizing a * x * y + b * y * x
to (a + b) * x * y in one invocation of the reassociation pass.  */
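/* As an illustrative sketch of the bookkeeping:  for the addition chain
   {a * x, b * x, c * y}  the counting table records (x, MULT_EXPR) with
   count 2 and (y, MULT_EXPR) with count 1.  The pair with the most uses,
   (x, MULT_EXPR), is processed first: the multiply by x is stripped from
   the first two summands, and the chain becomes  {(a + b) * x, 0, c * y},
   which the main reassociation algorithm then optimizes further.  */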
1551 undistribute_ops_list (enum tree_code opcode
,
1552 vec
<operand_entry
*> *ops
, struct loop
*loop
)
1554 unsigned int length
= ops
->length ();
1557 unsigned nr_candidates
, nr_candidates2
;
1558 sbitmap_iterator sbi0
;
1559 vec
<operand_entry
*> *subops
;
1560 bool changed
= false;
1561 unsigned int next_oecount_id
= 0;
1564 || opcode
!= PLUS_EXPR
)
1567 /* Build a list of candidates to process. */
1568 auto_sbitmap
candidates (length
);
1569 bitmap_clear (candidates
);
1571 FOR_EACH_VEC_ELT (*ops
, i
, oe1
)
1573 enum tree_code dcode
;
1576 if (TREE_CODE (oe1
->op
) != SSA_NAME
)
1578 oe1def
= SSA_NAME_DEF_STMT (oe1
->op
);
1579 if (!is_gimple_assign (oe1def
))
1581 dcode
= gimple_assign_rhs_code (oe1def
);
1582 if ((dcode
!= MULT_EXPR
1583 && dcode
!= RDIV_EXPR
)
1584 || !is_reassociable_op (oe1def
, dcode
, loop
))
1587 bitmap_set_bit (candidates
, i
);
1591 if (nr_candidates
< 2)
1594 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1596 fprintf (dump_file
, "searching for un-distribute opportunities ");
1597 print_generic_expr (dump_file
,
1598 (*ops
)[bitmap_first_set_bit (candidates
)]->op
, 0);
1599 fprintf (dump_file
, " %d\n", nr_candidates
);
1602 /* Build linearized sub-operand lists and the counting table. */
1605 hash_table
<oecount_hasher
> ctable (15);
1607 /* ??? Macro arguments cannot have multi-argument template types in
them.  This typedef is needed to work around that limitation.  */
1609 typedef vec
<operand_entry
*> vec_operand_entry_t_heap
;
1610 subops
= XCNEWVEC (vec_operand_entry_t_heap
, ops
->length ());
1611 EXECUTE_IF_SET_IN_BITMAP (candidates
, 0, i
, sbi0
)
1614 enum tree_code oecode
;
1617 oedef
= SSA_NAME_DEF_STMT ((*ops
)[i
]->op
);
1618 oecode
= gimple_assign_rhs_code (oedef
);
1619 linearize_expr_tree (&subops
[i
], oedef
,
1620 associative_tree_code (oecode
), false);
1622 FOR_EACH_VEC_ELT (subops
[i
], j
, oe1
)
1629 c
.id
= next_oecount_id
++;
1632 idx
= cvec
.length () + 41;
1633 slot
= ctable
.find_slot (idx
, INSERT
);
1641 cvec
[*slot
- 42].cnt
++;
1646 /* Sort the counting table. */
1647 cvec
.qsort (oecount_cmp
);
1649 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1652 fprintf (dump_file
, "Candidates:\n");
1653 FOR_EACH_VEC_ELT (cvec
, j
, c
)
1655 fprintf (dump_file
, " %u %s: ", c
->cnt
,
1656 c
->oecode
== MULT_EXPR
1657 ? "*" : c
->oecode
== RDIV_EXPR
? "/" : "?");
1658 print_generic_expr (dump_file
, c
->op
);
1659 fprintf (dump_file
, "\n");
1663 /* Process the (operand, code) pairs in order of most occurrence. */
1664 auto_sbitmap
candidates2 (length
);
1665 while (!cvec
.is_empty ())
1667 oecount
*c
= &cvec
.last ();
1671 /* Now collect the operands in the outer chain that contain
1672 the common operand in their inner chain. */
1673 bitmap_clear (candidates2
);
1675 EXECUTE_IF_SET_IN_BITMAP (candidates
, 0, i
, sbi0
)
1678 enum tree_code oecode
;
1680 tree op
= (*ops
)[i
]->op
;
/* If we undistributed in this chain already this may be a constant.  */
1684 if (TREE_CODE (op
) != SSA_NAME
)
1687 oedef
= SSA_NAME_DEF_STMT (op
);
1688 oecode
= gimple_assign_rhs_code (oedef
);
1689 if (oecode
!= c
->oecode
)
1692 FOR_EACH_VEC_ELT (subops
[i
], j
, oe1
)
1694 if (oe1
->op
== c
->op
)
1696 bitmap_set_bit (candidates2
, i
);
1703 if (nr_candidates2
>= 2)
1705 operand_entry
*oe1
, *oe2
;
1707 int first
= bitmap_first_set_bit (candidates2
);
1709 /* Build the new addition chain. */
1710 oe1
= (*ops
)[first
];
1711 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1713 fprintf (dump_file
, "Building (");
1714 print_generic_expr (dump_file
, oe1
->op
);
1716 zero_one_operation (&oe1
->op
, c
->oecode
, c
->op
);
1717 EXECUTE_IF_SET_IN_BITMAP (candidates2
, first
+1, i
, sbi0
)
1721 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1723 fprintf (dump_file
, " + ");
1724 print_generic_expr (dump_file
, oe2
->op
);
1726 zero_one_operation (&oe2
->op
, c
->oecode
, c
->op
);
1727 sum
= build_and_add_sum (TREE_TYPE (oe1
->op
),
1728 oe1
->op
, oe2
->op
, opcode
);
1729 oe2
->op
= build_zero_cst (TREE_TYPE (oe2
->op
));
1731 oe1
->op
= gimple_get_lhs (sum
);
1734 /* Apply the multiplication/division. */
1735 prod
= build_and_add_sum (TREE_TYPE (oe1
->op
),
1736 oe1
->op
, c
->op
, c
->oecode
);
1737 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1739 fprintf (dump_file
, ") %s ", c
->oecode
== MULT_EXPR
? "*" : "/");
1740 print_generic_expr (dump_file
, c
->op
);
1741 fprintf (dump_file
, "\n");
1744 /* Record it in the addition chain and disable further
1745 undistribution with this op. */
1746 oe1
->op
= gimple_assign_lhs (prod
);
1747 oe1
->rank
= get_rank (oe1
->op
);
1748 subops
[first
].release ();
1756 for (i
= 0; i
< ops
->length (); ++i
)
1757 subops
[i
].release ();
1764 /* If OPCODE is BIT_IOR_EXPR or BIT_AND_EXPR and CURR is a comparison
1765 expression, examine the other OPS to see if any of them are comparisons
1766 of the same values, which we may be able to combine or eliminate.
1767 For example, we can rewrite (a < b) | (a == b) as (a <= b). */
1770 eliminate_redundant_comparison (enum tree_code opcode
,
1771 vec
<operand_entry
*> *ops
,
1772 unsigned int currindex
,
1773 operand_entry
*curr
)
1776 enum tree_code lcode
, rcode
;
1777 gimple
*def1
, *def2
;
1781 if (opcode
!= BIT_IOR_EXPR
&& opcode
!= BIT_AND_EXPR
)
1784 /* Check that CURR is a comparison. */
1785 if (TREE_CODE (curr
->op
) != SSA_NAME
)
1787 def1
= SSA_NAME_DEF_STMT (curr
->op
);
1788 if (!is_gimple_assign (def1
))
1790 lcode
= gimple_assign_rhs_code (def1
);
1791 if (TREE_CODE_CLASS (lcode
) != tcc_comparison
)
1793 op1
= gimple_assign_rhs1 (def1
);
1794 op2
= gimple_assign_rhs2 (def1
);
1796 /* Now look for a similar comparison in the remaining OPS. */
1797 for (i
= currindex
+ 1; ops
->iterate (i
, &oe
); i
++)
1801 if (TREE_CODE (oe
->op
) != SSA_NAME
)
1803 def2
= SSA_NAME_DEF_STMT (oe
->op
);
1804 if (!is_gimple_assign (def2
))
1806 rcode
= gimple_assign_rhs_code (def2
);
1807 if (TREE_CODE_CLASS (rcode
) != tcc_comparison
)
/* If we got here, we have a match.  See if we can combine the two comparisons.  */
1812 if (opcode
== BIT_IOR_EXPR
)
1813 t
= maybe_fold_or_comparisons (lcode
, op1
, op2
,
1814 rcode
, gimple_assign_rhs1 (def2
),
1815 gimple_assign_rhs2 (def2
));
1817 t
= maybe_fold_and_comparisons (lcode
, op1
, op2
,
1818 rcode
, gimple_assign_rhs1 (def2
),
1819 gimple_assign_rhs2 (def2
));
1823 /* maybe_fold_and_comparisons and maybe_fold_or_comparisons
1824 always give us a boolean_type_node value back. If the original
1825 BIT_AND_EXPR or BIT_IOR_EXPR was of a wider integer type,
1826 we need to convert. */
1827 if (!useless_type_conversion_p (TREE_TYPE (curr
->op
), TREE_TYPE (t
)))
1828 t
= fold_convert (TREE_TYPE (curr
->op
), t
);
1830 if (TREE_CODE (t
) != INTEGER_CST
1831 && !operand_equal_p (t
, curr
->op
, 0))
1833 enum tree_code subcode
;
1834 tree newop1
, newop2
;
1835 if (!COMPARISON_CLASS_P (t
))
1837 extract_ops_from_tree (t
, &subcode
, &newop1
, &newop2
);
1838 STRIP_USELESS_TYPE_CONVERSION (newop1
);
1839 STRIP_USELESS_TYPE_CONVERSION (newop2
);
1840 if (!is_gimple_val (newop1
) || !is_gimple_val (newop2
))
1844 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1846 fprintf (dump_file
, "Equivalence: ");
1847 print_generic_expr (dump_file
, curr
->op
);
1848 fprintf (dump_file
, " %s ", op_symbol_code (opcode
));
1849 print_generic_expr (dump_file
, oe
->op
);
1850 fprintf (dump_file
, " -> ");
1851 print_generic_expr (dump_file
, t
);
1852 fprintf (dump_file
, "\n");
/* Now we can delete oe, as it has been subsumed by the new combined expression t.  */
1857 ops
->ordered_remove (i
);
1858 reassociate_stats
.ops_eliminated
++;
1860 /* If t is the same as curr->op, we're done. Otherwise we must
1861 replace curr->op with t. Special case is if we got a constant
1862 back, in which case we add it to the end instead of in place of
1863 the current entry. */
1864 if (TREE_CODE (t
) == INTEGER_CST
)
1866 ops
->ordered_remove (currindex
);
1867 add_to_ops_vec (ops
, t
);
1869 else if (!operand_equal_p (t
, curr
->op
, 0))
1872 enum tree_code subcode
;
1875 gcc_assert (COMPARISON_CLASS_P (t
));
1876 extract_ops_from_tree (t
, &subcode
, &newop1
, &newop2
);
1877 STRIP_USELESS_TYPE_CONVERSION (newop1
);
1878 STRIP_USELESS_TYPE_CONVERSION (newop2
);
1879 gcc_checking_assert (is_gimple_val (newop1
)
1880 && is_gimple_val (newop2
));
1881 sum
= build_and_add_sum (TREE_TYPE (t
), newop1
, newop2
, subcode
);
1882 curr
->op
= gimple_get_lhs (sum
);
/* Transform repeated addition of same values into multiply with constant.  */
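/* For example (an illustrative sketch):  the PLUS_EXPR operand list
   {x, x, x, y}  is rewritten to  {x * 3, y},  with the new multiply
   statement recorded as stmt_to_insert so it is emitted when the chain is
   rebuilt.  For floating-point types this is only done under
   -funsafe-math-optimizations.  */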
1894 transform_add_to_multiply (vec
<operand_entry
*> *ops
)
1897 tree op
= NULL_TREE
;
1899 int i
, start
= -1, end
= 0, count
= 0;
1900 auto_vec
<std::pair
<int, int> > indxs
;
1901 bool changed
= false;
1903 if (!INTEGRAL_TYPE_P (TREE_TYPE ((*ops
)[0]->op
))
1904 && (!SCALAR_FLOAT_TYPE_P (TREE_TYPE ((*ops
)[0]->op
))
1905 || !flag_unsafe_math_optimizations
))
1908 /* Look for repeated operands. */
1909 FOR_EACH_VEC_ELT (*ops
, i
, oe
)
1917 else if (operand_equal_p (oe
->op
, op
, 0))
1925 indxs
.safe_push (std::make_pair (start
, end
));
1933 indxs
.safe_push (std::make_pair (start
, end
));
1935 for (j
= indxs
.length () - 1; j
>= 0; --j
)
1937 /* Convert repeated operand addition to multiplication. */
1938 start
= indxs
[j
].first
;
1939 end
= indxs
[j
].second
;
1940 op
= (*ops
)[start
]->op
;
1941 count
= end
- start
+ 1;
1942 for (i
= end
; i
>= start
; --i
)
1943 ops
->unordered_remove (i
);
1944 tree tmp
= make_ssa_name (TREE_TYPE (op
));
1945 tree cst
= build_int_cst (integer_type_node
, count
);
1947 = gimple_build_assign (tmp
, MULT_EXPR
,
1948 op
, fold_convert (TREE_TYPE (op
), cst
));
1949 gimple_set_visited (mul_stmt
, true);
1950 add_to_ops_vec (ops
, tmp
, mul_stmt
);
1958 /* Perform various identities and other optimizations on the list of
1959 operand entries, stored in OPS. The tree code for the binary
1960 operation between all the operands is OPCODE. */
1963 optimize_ops_list (enum tree_code opcode
,
1964 vec
<operand_entry
*> *ops
)
1966 unsigned int length
= ops
->length ();
1969 operand_entry
*oelast
= NULL
;
1970 bool iterate
= false;
1975 oelast
= ops
->last ();
1977 /* If the last two are constants, pop the constants off, merge them
1978 and try the next two. */
1979 if (oelast
->rank
== 0 && is_gimple_min_invariant (oelast
->op
))
1981 operand_entry
*oelm1
= (*ops
)[length
- 2];
1983 if (oelm1
->rank
== 0
1984 && is_gimple_min_invariant (oelm1
->op
)
1985 && useless_type_conversion_p (TREE_TYPE (oelm1
->op
),
1986 TREE_TYPE (oelast
->op
)))
1988 tree folded
= fold_binary (opcode
, TREE_TYPE (oelm1
->op
),
1989 oelm1
->op
, oelast
->op
);
1991 if (folded
&& is_gimple_min_invariant (folded
))
1993 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1994 fprintf (dump_file
, "Merging constants\n");
1999 add_to_ops_vec (ops
, folded
);
2000 reassociate_stats
.constants_eliminated
++;
2002 optimize_ops_list (opcode
, ops
);
2008 eliminate_using_constants (opcode
, ops
);
2011 for (i
= 0; ops
->iterate (i
, &oe
);)
2015 if (eliminate_not_pairs (opcode
, ops
, i
, oe
))
2017 if (eliminate_duplicate_pair (opcode
, ops
, &done
, i
, oe
, oelast
)
2018 || (!done
&& eliminate_plus_minus_pair (opcode
, ops
, i
, oe
))
2019 || (!done
&& eliminate_redundant_comparison (opcode
, ops
, i
, oe
)))
2031 length
= ops
->length ();
2032 oelast
= ops
->last ();
2035 optimize_ops_list (opcode
, ops
);
2038 /* The following functions are subroutines to optimize_range_tests and allow
it to try to change a logical combination of comparisons into a range
test.  For example,

X == 2 || X == 5 || X == 3 || X == 4

is converted to

(unsigned) (X - 2) <= 3
2049 For more information see comments above fold_test_range in fold-const.c,
2050 this implementation is for GIMPLE. */
struct range_entry
{
  tree exp;
  tree low;
  tree high;
  bool in_p;
  bool strict_overflow_p;
  unsigned int idx, next;
};
2062 /* This is similar to make_range in fold-const.c, but on top of
2063 GIMPLE instead of trees. If EXP is non-NULL, it should be
2064 an SSA_NAME and STMT argument is ignored, otherwise STMT
2065 argument should be a GIMPLE_COND. */
2068 init_range_entry (struct range_entry
*r
, tree exp
, gimple
*stmt
)
2072 bool is_bool
, strict_overflow_p
;
2076 r
->strict_overflow_p
= false;
2078 r
->high
= NULL_TREE
;
2079 if (exp
!= NULL_TREE
2080 && (TREE_CODE (exp
) != SSA_NAME
|| !INTEGRAL_TYPE_P (TREE_TYPE (exp
))))
2083 /* Start with simply saying "EXP != 0" and then look at the code of EXP
2084 and see if we can refine the range. Some of the cases below may not
2085 happen, but it doesn't seem worth worrying about this. We "continue"
2086 the outer loop when we've changed something; otherwise we "break"
2087 the switch, which will "break" the while. */
2088 low
= exp
? build_int_cst (TREE_TYPE (exp
), 0) : boolean_false_node
;
2091 strict_overflow_p
= false;
2093 if (exp
== NULL_TREE
)
2095 else if (TYPE_PRECISION (TREE_TYPE (exp
)) == 1)
2097 if (TYPE_UNSIGNED (TREE_TYPE (exp
)))
2102 else if (TREE_CODE (TREE_TYPE (exp
)) == BOOLEAN_TYPE
)
2107 enum tree_code code
;
2108 tree arg0
, arg1
, exp_type
;
2112 if (exp
!= NULL_TREE
)
2114 if (TREE_CODE (exp
) != SSA_NAME
2115 || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (exp
))
2118 stmt
= SSA_NAME_DEF_STMT (exp
);
2119 if (!is_gimple_assign (stmt
))
2122 code
= gimple_assign_rhs_code (stmt
);
2123 arg0
= gimple_assign_rhs1 (stmt
);
2124 arg1
= gimple_assign_rhs2 (stmt
);
2125 exp_type
= TREE_TYPE (exp
);
2129 code
= gimple_cond_code (stmt
);
2130 arg0
= gimple_cond_lhs (stmt
);
2131 arg1
= gimple_cond_rhs (stmt
);
2132 exp_type
= boolean_type_node
;
2135 if (TREE_CODE (arg0
) != SSA_NAME
)
2137 loc
= gimple_location (stmt
);
2141 if (TREE_CODE (TREE_TYPE (exp
)) == BOOLEAN_TYPE
2142 /* Ensure the range is either +[-,0], +[0,0],
2143 -[-,0], -[0,0] or +[1,-], +[1,1], -[1,-] or
2144 -[1,1]. If it is e.g. +[-,-] or -[-,-]
2145 or similar expression of unconditional true or
2146 false, it should not be negated. */
2147 && ((high
&& integer_zerop (high
))
2148 || (low
&& integer_onep (low
))))
2161 if (TYPE_PRECISION (TREE_TYPE (arg0
)) == 1)
2163 if (TYPE_UNSIGNED (TREE_TYPE (arg0
)))
2168 else if (TREE_CODE (TREE_TYPE (arg0
)) == BOOLEAN_TYPE
)
2183 nexp
= make_range_step (loc
, code
, arg0
, arg1
, exp_type
,
2185 &strict_overflow_p
);
2186 if (nexp
!= NULL_TREE
)
2189 gcc_assert (TREE_CODE (exp
) == SSA_NAME
);
2202 r
->strict_overflow_p
= strict_overflow_p
;
2206 /* Comparison function for qsort. Sort entries
2207 without SSA_NAME exp first, then with SSA_NAMEs sorted
2208 by increasing SSA_NAME_VERSION, and for the same SSA_NAMEs
2209 by increasing ->low and if ->low is the same, by increasing
->high.  ->low == NULL_TREE means minimum, ->high == NULL_TREE means maximum.  */
2214 range_entry_cmp (const void *a
, const void *b
)
2216 const struct range_entry
*p
= (const struct range_entry
*) a
;
2217 const struct range_entry
*q
= (const struct range_entry
*) b
;
2219 if (p
->exp
!= NULL_TREE
&& TREE_CODE (p
->exp
) == SSA_NAME
)
2221 if (q
->exp
!= NULL_TREE
&& TREE_CODE (q
->exp
) == SSA_NAME
)
2223 /* Group range_entries for the same SSA_NAME together. */
2224 if (SSA_NAME_VERSION (p
->exp
) < SSA_NAME_VERSION (q
->exp
))
2226 else if (SSA_NAME_VERSION (p
->exp
) > SSA_NAME_VERSION (q
->exp
))
/* If ->low is different, NULL low goes first, then by ascending low.  */
2230 if (p
->low
!= NULL_TREE
)
2232 if (q
->low
!= NULL_TREE
)
2234 tree tem
= fold_binary (LT_EXPR
, boolean_type_node
,
2236 if (tem
&& integer_onep (tem
))
2238 tem
= fold_binary (GT_EXPR
, boolean_type_node
,
2240 if (tem
&& integer_onep (tem
))
2246 else if (q
->low
!= NULL_TREE
)
/* If ->high is different, NULL high goes last, before that by ascending high.  */
2250 if (p
->high
!= NULL_TREE
)
2252 if (q
->high
!= NULL_TREE
)
2254 tree tem
= fold_binary (LT_EXPR
, boolean_type_node
,
2256 if (tem
&& integer_onep (tem
))
2258 tem
= fold_binary (GT_EXPR
, boolean_type_node
,
2260 if (tem
&& integer_onep (tem
))
2266 else if (q
->high
!= NULL_TREE
)
2268 /* If both ranges are the same, sort below by ascending idx. */
2273 else if (q
->exp
!= NULL_TREE
&& TREE_CODE (q
->exp
) == SSA_NAME
)
2276 if (p
->idx
< q
->idx
)
2280 gcc_checking_assert (p
->idx
> q
->idx
);
2285 /* Helper function for update_range_test. Force EXPR into an SSA_NAME,
2286 insert needed statements BEFORE or after GSI. */
2289 force_into_ssa_name (gimple_stmt_iterator
*gsi
, tree expr
, bool before
)
2291 enum gsi_iterator_update m
= before
? GSI_SAME_STMT
: GSI_CONTINUE_LINKING
;
2292 tree ret
= force_gimple_operand_gsi (gsi
, expr
, true, NULL_TREE
, before
, m
);
2293 if (TREE_CODE (ret
) != SSA_NAME
)
2295 gimple
*g
= gimple_build_assign (make_ssa_name (TREE_TYPE (ret
)), ret
);
2297 gsi_insert_before (gsi
, g
, GSI_SAME_STMT
);
2299 gsi_insert_after (gsi
, g
, GSI_CONTINUE_LINKING
);
2300 ret
= gimple_assign_lhs (g
);
2305 /* Helper routine of optimize_range_test.
2306 [EXP, IN_P, LOW, HIGH, STRICT_OVERFLOW_P] is a merged range for
2307 RANGE and OTHERRANGE through OTHERRANGE + COUNT - 1 ranges,
2308 OPCODE and OPS are arguments of optimize_range_tests. If OTHERRANGE
2309 is NULL, OTHERRANGEP should not be and then OTHERRANGEP points to
2310 an array of COUNT pointers to other ranges. Return
2311 true if the range merge has been successful.
2312 If OPCODE is ERROR_MARK, this is called from within
2313 maybe_optimize_range_tests and is performing inter-bb range optimization.
2314 In that case, whether an op is BIT_AND_EXPR or BIT_IOR_EXPR is found in
2318 update_range_test (struct range_entry
*range
, struct range_entry
*otherrange
,
2319 struct range_entry
**otherrangep
,
2320 unsigned int count
, enum tree_code opcode
,
2321 vec
<operand_entry
*> *ops
, tree exp
, gimple_seq seq
,
2322 bool in_p
, tree low
, tree high
, bool strict_overflow_p
)
2324 operand_entry
*oe
= (*ops
)[range
->idx
];
2326 gimple
*stmt
= op
? SSA_NAME_DEF_STMT (op
)
2327 : last_stmt (BASIC_BLOCK_FOR_FN (cfun
, oe
->id
));
2328 location_t loc
= gimple_location (stmt
);
2329 tree optype
= op
? TREE_TYPE (op
) : boolean_type_node
;
2330 tree tem
= build_range_check (loc
, optype
, unshare_expr (exp
),
2332 enum warn_strict_overflow_code wc
= WARN_STRICT_OVERFLOW_COMPARISON
;
2333 gimple_stmt_iterator gsi
;
2334 unsigned int i
, uid
;
2336 if (tem
== NULL_TREE
)
  /* If op is a default def SSA_NAME, there is no place to insert the
     new comparison.  Give up, unless we can use OP itself as the
     range test.  */
2342 if (op
&& SSA_NAME_IS_DEFAULT_DEF (op
))
2344 if (op
== range
->exp
2345 && ((TYPE_PRECISION (optype
) == 1 && TYPE_UNSIGNED (optype
))
2346 || TREE_CODE (optype
) == BOOLEAN_TYPE
)
2348 || (TREE_CODE (tem
) == EQ_EXPR
2349 && TREE_OPERAND (tem
, 0) == op
2350 && integer_onep (TREE_OPERAND (tem
, 1))))
2351 && opcode
!= BIT_IOR_EXPR
2352 && (opcode
!= ERROR_MARK
|| oe
->rank
!= BIT_IOR_EXPR
))
2361 if (strict_overflow_p
&& issue_strict_overflow_warning (wc
))
2362 warning_at (loc
, OPT_Wstrict_overflow
,
2363 "assuming signed overflow does not occur "
2364 "when simplifying range test");
2366 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
2368 struct range_entry
*r
;
2369 fprintf (dump_file
, "Optimizing range tests ");
2370 print_generic_expr (dump_file
, range
->exp
);
2371 fprintf (dump_file
, " %c[", range
->in_p
? '+' : '-');
2372 print_generic_expr (dump_file
, range
->low
);
2373 fprintf (dump_file
, ", ");
2374 print_generic_expr (dump_file
, range
->high
);
2375 fprintf (dump_file
, "]");
2376 for (i
= 0; i
< count
; i
++)
2382 fprintf (dump_file
, " and %c[", r
->in_p
? '+' : '-');
2383 print_generic_expr (dump_file
, r
->low
);
2384 fprintf (dump_file
, ", ");
2385 print_generic_expr (dump_file
, r
->high
);
2386 fprintf (dump_file
, "]");
2388 fprintf (dump_file
, "\n into ");
2389 print_generic_expr (dump_file
, tem
);
2390 fprintf (dump_file
, "\n");
2393 if (opcode
== BIT_IOR_EXPR
2394 || (opcode
== ERROR_MARK
&& oe
->rank
== BIT_IOR_EXPR
))
2395 tem
= invert_truthvalue_loc (loc
, tem
);
2397 tem
= fold_convert_loc (loc
, optype
, tem
);
2400 gsi
= gsi_for_stmt (stmt
);
2401 uid
= gimple_uid (stmt
);
2409 gcc_checking_assert (tem
== op
);
  /* In rare cases range->exp can be equal to lhs of stmt.
     In that case we have to insert after the stmt rather than before
     it.  If stmt is a PHI, insert it at the start of the basic block.  */
2413 else if (op
!= range
->exp
)
2415 gsi_insert_seq_before (&gsi
, seq
, GSI_SAME_STMT
);
2416 tem
= force_into_ssa_name (&gsi
, tem
, true);
2419 else if (gimple_code (stmt
) != GIMPLE_PHI
)
2421 gsi_insert_seq_after (&gsi
, seq
, GSI_CONTINUE_LINKING
);
2422 tem
= force_into_ssa_name (&gsi
, tem
, false);
2426 gsi
= gsi_after_labels (gimple_bb (stmt
));
2427 if (!gsi_end_p (gsi
))
2428 uid
= gimple_uid (gsi_stmt (gsi
));
2431 gsi
= gsi_start_bb (gimple_bb (stmt
));
2433 while (!gsi_end_p (gsi
))
2435 uid
= gimple_uid (gsi_stmt (gsi
));
2439 gsi_insert_seq_before (&gsi
, seq
, GSI_SAME_STMT
);
2440 tem
= force_into_ssa_name (&gsi
, tem
, true);
2441 if (gsi_end_p (gsi
))
2442 gsi
= gsi_last_bb (gimple_bb (stmt
));
2446 for (; !gsi_end_p (gsi
); gsi_prev (&gsi
))
2447 if (gimple_uid (gsi_stmt (gsi
)))
2450 gimple_set_uid (gsi_stmt (gsi
), uid
);
2457 range
->strict_overflow_p
= false;
2459 for (i
= 0; i
< count
; i
++)
2462 range
= otherrange
+ i
;
2464 range
= otherrangep
[i
];
2465 oe
= (*ops
)[range
->idx
];
2466 /* Now change all the other range test immediate uses, so that
2467 those tests will be optimized away. */
2468 if (opcode
== ERROR_MARK
)
2471 oe
->op
= build_int_cst (TREE_TYPE (oe
->op
),
2472 oe
->rank
== BIT_IOR_EXPR
? 0 : 1);
2474 oe
->op
= (oe
->rank
== BIT_IOR_EXPR
2475 ? boolean_false_node
: boolean_true_node
);
2478 oe
->op
= error_mark_node
;
2479 range
->exp
= NULL_TREE
;
2480 range
->low
= NULL_TREE
;
2481 range
->high
= NULL_TREE
;
/* Optimize X == CST1 || X == CST2
   if popcount (CST1 ^ CST2) == 1 into
   (X & ~(CST1 ^ CST2)) == (CST1 & ~(CST1 ^ CST2)).
   Similarly for ranges.  E.g.
   X != 2 && X != 3 && X != 10 && X != 11
   will be transformed by the previous optimization into
   !((X - 2U) <= 1U || (X - 10U) <= 1U)
   and this loop can transform that into
   !(((X & ~8) - 2U) <= 1U).  */
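/* As a further illustration (constants chosen arbitrarily, not taken from
   the code below): for X == 43 || X == 47 we have 43 ^ 47 == 4, which has a
   single bit set, so the test can be rewritten as (X & ~4U) == 43, because
   both 43 & ~4U and 47 & ~4U equal 43.  */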
optimize_range_tests_xor (enum tree_code opcode, tree type,
                          tree lowi, tree lowj, tree highi, tree highj,
                          vec<operand_entry *> *ops,
                          struct range_entry *rangei,
                          struct range_entry *rangej)
2503 tree lowxor
, highxor
, tem
, exp
;
2504 /* Check lowi ^ lowj == highi ^ highj and
2505 popcount (lowi ^ lowj) == 1. */
2506 lowxor
= fold_binary (BIT_XOR_EXPR
, type
, lowi
, lowj
);
2507 if (lowxor
== NULL_TREE
|| TREE_CODE (lowxor
) != INTEGER_CST
)
2509 if (!integer_pow2p (lowxor
))
2511 highxor
= fold_binary (BIT_XOR_EXPR
, type
, highi
, highj
);
2512 if (!tree_int_cst_equal (lowxor
, highxor
))
2515 tem
= fold_build1 (BIT_NOT_EXPR
, type
, lowxor
);
2516 exp
= fold_build2 (BIT_AND_EXPR
, type
, rangei
->exp
, tem
);
2517 lowj
= fold_build2 (BIT_AND_EXPR
, type
, lowi
, tem
);
2518 highj
= fold_build2 (BIT_AND_EXPR
, type
, highi
, tem
);
2519 if (update_range_test (rangei
, rangej
, NULL
, 1, opcode
, ops
, exp
,
2520 NULL
, rangei
->in_p
, lowj
, highj
,
2521 rangei
->strict_overflow_p
2522 || rangej
->strict_overflow_p
))
/* Optimize X == CST1 || X == CST2
   if popcount (CST2 - CST1) == 1 into
   ((X - CST1) & ~(CST2 - CST1)) == 0.
   Similarly for ranges.  E.g.
   X == 43 || X == 76 || X == 44 || X == 78 || X == 77 || X == 46
   || X == 75 || X == 45
   will be transformed by the previous optimization into
   (X - 43U) <= 3U || (X - 75U) <= 3U
   and this loop can transform that into
   ((X - 43U) & ~(75U - 43U)) <= 3U.  */
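/* A smaller standalone illustration (constants chosen arbitrarily): for
   X == 5 || X == 13 we have 13 - 5 == 8, a power of two, so the test
   becomes ((X - 5U) & ~8U) == 0; X == 5 yields 0, X == 13 yields
   8 & ~8U == 0, and every other value leaves a nonzero result.  */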
optimize_range_tests_diff (enum tree_code opcode, tree type,
                           tree lowi, tree lowj, tree highi, tree highj,
                           vec<operand_entry *> *ops,
                           struct range_entry *rangei,
                           struct range_entry *rangej)
2544 tree tem1
, tem2
, mask
;
2545 /* Check highi - lowi == highj - lowj. */
2546 tem1
= fold_binary (MINUS_EXPR
, type
, highi
, lowi
);
2547 if (tem1
== NULL_TREE
|| TREE_CODE (tem1
) != INTEGER_CST
)
2549 tem2
= fold_binary (MINUS_EXPR
, type
, highj
, lowj
);
2550 if (!tree_int_cst_equal (tem1
, tem2
))
2552 /* Check popcount (lowj - lowi) == 1. */
2553 tem1
= fold_binary (MINUS_EXPR
, type
, lowj
, lowi
);
2554 if (tem1
== NULL_TREE
|| TREE_CODE (tem1
) != INTEGER_CST
)
2556 if (!integer_pow2p (tem1
))
2559 type
= unsigned_type_for (type
);
2560 tem1
= fold_convert (type
, tem1
);
2561 tem2
= fold_convert (type
, tem2
);
2562 lowi
= fold_convert (type
, lowi
);
2563 mask
= fold_build1 (BIT_NOT_EXPR
, type
, tem1
);
2564 tem1
= fold_build2 (MINUS_EXPR
, type
,
2565 fold_convert (type
, rangei
->exp
), lowi
);
2566 tem1
= fold_build2 (BIT_AND_EXPR
, type
, tem1
, mask
);
2567 lowj
= build_int_cst (type
, 0);
2568 if (update_range_test (rangei
, rangej
, NULL
, 1, opcode
, ops
, tem1
,
2569 NULL
, rangei
->in_p
, lowj
, tem2
,
2570 rangei
->strict_overflow_p
2571 || rangej
->strict_overflow_p
))
/* Perform the checks that are common to optimize_range_tests_xor and
   optimize_range_tests_diff.
   If OPTIMIZE_XOR is true, call optimize_range_tests_xor,
   otherwise call optimize_range_tests_diff.  */
optimize_range_tests_1 (enum tree_code opcode, int first, int length,
                        bool optimize_xor, vec<operand_entry *> *ops,
                        struct range_entry *ranges)
2587 bool any_changes
= false;
2588 for (i
= first
; i
< length
; i
++)
2590 tree lowi
, highi
, lowj
, highj
, type
, tem
;
2592 if (ranges
[i
].exp
== NULL_TREE
|| ranges
[i
].in_p
)
2594 type
= TREE_TYPE (ranges
[i
].exp
);
2595 if (!INTEGRAL_TYPE_P (type
))
2597 lowi
= ranges
[i
].low
;
2598 if (lowi
== NULL_TREE
)
2599 lowi
= TYPE_MIN_VALUE (type
);
2600 highi
= ranges
[i
].high
;
2601 if (highi
== NULL_TREE
)
2603 for (j
= i
+ 1; j
< length
&& j
< i
+ 64; j
++)
2606 if (ranges
[i
].exp
!= ranges
[j
].exp
|| ranges
[j
].in_p
)
2608 lowj
= ranges
[j
].low
;
2609 if (lowj
== NULL_TREE
)
2611 highj
= ranges
[j
].high
;
2612 if (highj
== NULL_TREE
)
2613 highj
= TYPE_MAX_VALUE (type
);
2614 /* Check lowj > highi. */
2615 tem
= fold_binary (GT_EXPR
, boolean_type_node
,
2617 if (tem
== NULL_TREE
|| !integer_onep (tem
))
2620 changes
= optimize_range_tests_xor (opcode
, type
, lowi
, lowj
,
2622 ranges
+ i
, ranges
+ j
);
2624 changes
= optimize_range_tests_diff (opcode
, type
, lowi
, lowj
,
2626 ranges
+ i
, ranges
+ j
);
/* Helper function of optimize_range_tests_to_bit_test.  Handle a single
   range [LOW, HIGH] of EXP: compute the bit mask of bits to test and return
   EXP on success, NULL otherwise.  */
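/* For instance (an illustrative sketch only, not lifted from the callers):
   with LOW == 3, HIGH == 6 and TOTALLOW == 0 on a 64-bit word,
   HIGH - LOW + 1 == 4 consecutive values are tested, so the initial mask is
   0xf; it is then shifted left by LOW - TOTALLOW == 3 positions, yielding
   0x78, i.e. bits 3 through 6 of the word.  */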
extract_bit_test_mask (tree exp, int prec, tree totallow, tree low, tree high,
                       wide_int *mask, tree *totallowp)
2645 tree tem
= int_const_binop (MINUS_EXPR
, high
, low
);
2646 if (tem
== NULL_TREE
2647 || TREE_CODE (tem
) != INTEGER_CST
2648 || TREE_OVERFLOW (tem
)
2649 || tree_int_cst_sgn (tem
) == -1
2650 || compare_tree_int (tem
, prec
) != -1)
2653 unsigned HOST_WIDE_INT max
= tree_to_uhwi (tem
) + 1;
2654 *mask
= wi::shifted_mask (0, max
, false, prec
);
2655 if (TREE_CODE (exp
) == BIT_AND_EXPR
2656 && TREE_CODE (TREE_OPERAND (exp
, 1)) == INTEGER_CST
)
2658 widest_int msk
= wi::to_widest (TREE_OPERAND (exp
, 1));
2659 msk
= wi::zext (~msk
, TYPE_PRECISION (TREE_TYPE (exp
)));
2660 if (wi::popcount (msk
) == 1
2661 && wi::ltu_p (msk
, prec
- max
))
2663 *mask
|= wi::shifted_mask (msk
.to_uhwi (), max
, false, prec
);
2664 max
+= msk
.to_uhwi ();
2665 exp
= TREE_OPERAND (exp
, 0);
2666 if (integer_zerop (low
)
2667 && TREE_CODE (exp
) == PLUS_EXPR
2668 && TREE_CODE (TREE_OPERAND (exp
, 1)) == INTEGER_CST
)
2670 tree ret
= TREE_OPERAND (exp
, 0);
2673 = wi::neg (wi::sext (wi::to_widest (TREE_OPERAND (exp
, 1)),
2674 TYPE_PRECISION (TREE_TYPE (low
))));
2675 tree tbias
= wide_int_to_tree (TREE_TYPE (ret
), bias
);
2681 else if (!tree_int_cst_lt (totallow
, tbias
))
2683 bias
= wi::to_widest (tbias
);
2684 bias
-= wi::to_widest (totallow
);
2685 if (bias
>= 0 && bias
< prec
- max
)
2687 *mask
= wi::lshift (*mask
, bias
);
2695 if (!tree_int_cst_lt (totallow
, low
))
2697 tem
= int_const_binop (MINUS_EXPR
, low
, totallow
);
2698 if (tem
== NULL_TREE
2699 || TREE_CODE (tem
) != INTEGER_CST
2700 || TREE_OVERFLOW (tem
)
2701 || compare_tree_int (tem
, prec
- max
) == 1)
2704 *mask
= wi::lshift (*mask
, wi::to_widest (tem
));
/* Attempt to optimize small range tests using bit test.
   E.g.
   X != 43 && X != 76 && X != 44 && X != 78 && X != 49
   && X != 77 && X != 46 && X != 75 && X != 45 && X != 82
   has been optimized by the earlier optimizations into:
   ((X - 43U) & ~32U) > 3U && X != 49 && X != 82
   As the whole 43 through 82 range spans fewer than 64 values,
   for 64-bit word targets optimize that into:
   (X - 43U) > 40U && ((1 << (X - 43U)) & 0x8F0000004FULL) == 0  */
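/* To see where the mask above comes from (a reading aid only): each tested
   value V contributes bit V - 43, so 43, 44, 45, 46 and 49 set bits 0-3 and
   6 (0x4f), while 75, 76, 77, 78 and 82 set bits 32-35 and 39
   (0x8f00000000), giving 0x8F0000004FULL in total.  */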
optimize_range_tests_to_bit_test (enum tree_code opcode, int first, int length,
                                  vec<operand_entry *> *ops,
                                  struct range_entry *ranges)
2724 bool any_changes
= false;
2725 int prec
= GET_MODE_BITSIZE (word_mode
);
2726 auto_vec
<struct range_entry
*, 64> candidates
;
2728 for (i
= first
; i
< length
- 2; i
++)
2730 tree lowi
, highi
, lowj
, highj
, type
;
2732 if (ranges
[i
].exp
== NULL_TREE
|| ranges
[i
].in_p
)
2734 type
= TREE_TYPE (ranges
[i
].exp
);
2735 if (!INTEGRAL_TYPE_P (type
))
2737 lowi
= ranges
[i
].low
;
2738 if (lowi
== NULL_TREE
)
2739 lowi
= TYPE_MIN_VALUE (type
);
2740 highi
= ranges
[i
].high
;
2741 if (highi
== NULL_TREE
)
2744 tree exp
= extract_bit_test_mask (ranges
[i
].exp
, prec
, lowi
, lowi
,
2745 highi
, &mask
, &lowi
);
2746 if (exp
== NULL_TREE
)
2748 bool strict_overflow_p
= ranges
[i
].strict_overflow_p
;
2749 candidates
.truncate (0);
2750 int end
= MIN (i
+ 64, length
);
2751 for (j
= i
+ 1; j
< end
; j
++)
2754 if (ranges
[j
].exp
== NULL_TREE
|| ranges
[j
].in_p
)
2756 if (ranges
[j
].exp
== exp
)
2758 else if (TREE_CODE (ranges
[j
].exp
) == BIT_AND_EXPR
)
2760 exp2
= TREE_OPERAND (ranges
[j
].exp
, 0);
2763 else if (TREE_CODE (exp2
) == PLUS_EXPR
)
2765 exp2
= TREE_OPERAND (exp2
, 0);
2775 lowj
= ranges
[j
].low
;
2776 if (lowj
== NULL_TREE
)
2778 highj
= ranges
[j
].high
;
2779 if (highj
== NULL_TREE
)
2780 highj
= TYPE_MAX_VALUE (type
);
2782 exp2
= extract_bit_test_mask (ranges
[j
].exp
, prec
, lowi
, lowj
,
2783 highj
, &mask2
, NULL
);
2787 strict_overflow_p
|= ranges
[j
].strict_overflow_p
;
2788 candidates
.safe_push (&ranges
[j
]);
2791 /* If we need otherwise 3 or more comparisons, use a bit test. */
2792 if (candidates
.length () >= 2)
2794 tree high
= wide_int_to_tree (TREE_TYPE (lowi
),
2795 wi::to_widest (lowi
)
2796 + prec
- 1 - wi::clz (mask
));
2797 operand_entry
*oe
= (*ops
)[ranges
[i
].idx
];
2799 gimple
*stmt
= op
? SSA_NAME_DEF_STMT (op
)
2800 : last_stmt (BASIC_BLOCK_FOR_FN (cfun
, oe
->id
));
2801 location_t loc
= gimple_location (stmt
);
2802 tree optype
= op
? TREE_TYPE (op
) : boolean_type_node
;
	  /* See if it isn't cheaper to pretend the minimum value of the
	     range is 0, if the maximum value is small enough.
	     We can then avoid the subtraction of the minimum value, but the
	     mask constant could perhaps be more expensive.  */
2808 if (compare_tree_int (lowi
, 0) > 0
2809 && compare_tree_int (high
, prec
) < 0)
2812 HOST_WIDE_INT m
= tree_to_uhwi (lowi
);
2813 rtx reg
= gen_raw_REG (word_mode
, 10000);
2814 bool speed_p
= optimize_bb_for_speed_p (gimple_bb (stmt
));
2815 cost_diff
= set_rtx_cost (gen_rtx_PLUS (word_mode
, reg
,
2816 GEN_INT (-m
)), speed_p
);
2817 rtx r
= immed_wide_int_const (mask
, word_mode
);
2818 cost_diff
+= set_src_cost (gen_rtx_AND (word_mode
, reg
, r
),
2819 word_mode
, speed_p
);
2820 r
= immed_wide_int_const (wi::lshift (mask
, m
), word_mode
);
2821 cost_diff
-= set_src_cost (gen_rtx_AND (word_mode
, reg
, r
),
2822 word_mode
, speed_p
);
2825 mask
= wi::lshift (mask
, m
);
2826 lowi
= build_zero_cst (TREE_TYPE (lowi
));
2830 tree tem
= build_range_check (loc
, optype
, unshare_expr (exp
),
2832 if (tem
== NULL_TREE
|| is_gimple_val (tem
))
2834 tree etype
= unsigned_type_for (TREE_TYPE (exp
));
2835 exp
= fold_build2_loc (loc
, MINUS_EXPR
, etype
,
2836 fold_convert_loc (loc
, etype
, exp
),
2837 fold_convert_loc (loc
, etype
, lowi
));
2838 exp
= fold_convert_loc (loc
, integer_type_node
, exp
);
2839 tree word_type
= lang_hooks
.types
.type_for_mode (word_mode
, 1);
2840 exp
= fold_build2_loc (loc
, LSHIFT_EXPR
, word_type
,
2841 build_int_cst (word_type
, 1), exp
);
2842 exp
= fold_build2_loc (loc
, BIT_AND_EXPR
, word_type
, exp
,
2843 wide_int_to_tree (word_type
, mask
));
2844 exp
= fold_build2_loc (loc
, EQ_EXPR
, optype
, exp
,
2845 build_zero_cst (word_type
));
2846 if (is_gimple_val (exp
))
	  /* The shift might have undefined behavior if TEM is true,
	     but reassociate_bb isn't prepared to have basic blocks
	     split when it is running.  So, temporarily emit code
	     with BIT_IOR_EXPR instead of &&, and fix it up in
	     branch_fixup.  */
2855 tem
= force_gimple_operand (tem
, &seq
, true, NULL_TREE
);
2856 gcc_assert (TREE_CODE (tem
) == SSA_NAME
);
2857 gimple_set_visited (SSA_NAME_DEF_STMT (tem
), true);
2859 exp
= force_gimple_operand (exp
, &seq2
, true, NULL_TREE
);
2860 gimple_seq_add_seq_without_update (&seq
, seq2
);
2861 gcc_assert (TREE_CODE (exp
) == SSA_NAME
);
2862 gimple_set_visited (SSA_NAME_DEF_STMT (exp
), true);
2863 gimple
*g
= gimple_build_assign (make_ssa_name (optype
),
2864 BIT_IOR_EXPR
, tem
, exp
);
2865 gimple_set_location (g
, loc
);
2866 gimple_seq_add_stmt_without_update (&seq
, g
);
2867 exp
= gimple_assign_lhs (g
);
2868 tree val
= build_zero_cst (optype
);
2869 if (update_range_test (&ranges
[i
], NULL
, candidates
.address (),
2870 candidates
.length (), opcode
, ops
, exp
,
2871 seq
, false, val
, val
, strict_overflow_p
))
2874 reassoc_branch_fixups
.safe_push (tem
);
2877 gimple_seq_discard (seq
);
/* Attempt to optimize for signed a and b where b is known to be >= 0:
   a >= 0 && a < b into (unsigned) a < (unsigned) b
   a >= 0 && a <= b into (unsigned) a <= (unsigned) b  */
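/* Why this is safe (an illustrative argument, not part of the original
   sources): if a < 0, then (unsigned) a is at least 2^(prec-1), while
   (unsigned) b for a nonnegative b is below 2^(prec-1), so the unsigned
   comparison is false, matching a >= 0 && a < b.  If a >= 0, neither cast
   changes its operand's value, so the comparisons agree.  A hypothetical
   source-level equivalent:

     int range_test (int a, int b)   { return a >= 0 && a < b; }
     int unsigned_cmp (int a, int b) { return (unsigned) a < (unsigned) b; }

   where b is assumed to be known nonnegative.  */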
optimize_range_tests_var_bound (enum tree_code opcode, int first, int length,
                                vec<operand_entry *> *ops,
                                struct range_entry *ranges)
2893 bool any_changes
= false;
2894 hash_map
<tree
, int> *map
= NULL
;
2896 for (i
= first
; i
< length
; i
++)
2898 if (ranges
[i
].exp
== NULL_TREE
2899 || TREE_CODE (ranges
[i
].exp
) != SSA_NAME
2903 tree type
= TREE_TYPE (ranges
[i
].exp
);
2904 if (!INTEGRAL_TYPE_P (type
)
2905 || TYPE_UNSIGNED (type
)
2906 || ranges
[i
].low
== NULL_TREE
2907 || !integer_zerop (ranges
[i
].low
)
2908 || ranges
[i
].high
!= NULL_TREE
)
2910 /* EXP >= 0 here. */
2912 map
= new hash_map
<tree
, int>;
2913 map
->put (ranges
[i
].exp
, i
);
2919 for (i
= 0; i
< length
; i
++)
2921 bool in_p
= ranges
[i
].in_p
;
2922 if (ranges
[i
].low
== NULL_TREE
2923 || ranges
[i
].high
== NULL_TREE
)
2925 if (!integer_zerop (ranges
[i
].low
)
2926 || !integer_zerop (ranges
[i
].high
))
2929 && TYPE_PRECISION (TREE_TYPE (ranges
[i
].exp
)) == 1
2930 && TYPE_UNSIGNED (TREE_TYPE (ranges
[i
].exp
))
2931 && integer_onep (ranges
[i
].low
)
2932 && integer_onep (ranges
[i
].high
))
2943 if (TREE_CODE (ranges
[i
].exp
) != SSA_NAME
)
2945 stmt
= SSA_NAME_DEF_STMT (ranges
[i
].exp
);
2946 if (!is_gimple_assign (stmt
))
2948 ccode
= gimple_assign_rhs_code (stmt
);
2949 rhs1
= gimple_assign_rhs1 (stmt
);
2950 rhs2
= gimple_assign_rhs2 (stmt
);
2954 operand_entry
*oe
= (*ops
)[ranges
[i
].idx
];
2955 stmt
= last_stmt (BASIC_BLOCK_FOR_FN (cfun
, oe
->id
));
2956 if (gimple_code (stmt
) != GIMPLE_COND
)
2958 ccode
= gimple_cond_code (stmt
);
2959 rhs1
= gimple_cond_lhs (stmt
);
2960 rhs2
= gimple_cond_rhs (stmt
);
2963 if (TREE_CODE (rhs1
) != SSA_NAME
2964 || rhs2
== NULL_TREE
2965 || TREE_CODE (rhs2
) != SSA_NAME
)
2979 ccode
= invert_tree_comparison (ccode
, false);
2984 std::swap (rhs1
, rhs2
);
2985 ccode
= swap_tree_comparison (ccode
);
2994 int *idx
= map
->get (rhs1
);
2998 wide_int nz
= get_nonzero_bits (rhs2
);
3002 /* We have EXP < RHS2 or EXP <= RHS2 where EXP >= 0
3003 and RHS2 is known to be RHS2 >= 0. */
3004 tree utype
= unsigned_type_for (TREE_TYPE (rhs1
));
3006 enum warn_strict_overflow_code wc
= WARN_STRICT_OVERFLOW_COMPARISON
;
3007 if ((ranges
[*idx
].strict_overflow_p
3008 || ranges
[i
].strict_overflow_p
)
3009 && issue_strict_overflow_warning (wc
))
3010 warning_at (gimple_location (stmt
), OPT_Wstrict_overflow
,
3011 "assuming signed overflow does not occur "
3012 "when simplifying range test");
3014 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
3016 struct range_entry
*r
= &ranges
[*idx
];
3017 fprintf (dump_file
, "Optimizing range test ");
3018 print_generic_expr (dump_file
, r
->exp
);
3019 fprintf (dump_file
, " +[");
3020 print_generic_expr (dump_file
, r
->low
);
3021 fprintf (dump_file
, ", ");
3022 print_generic_expr (dump_file
, r
->high
);
3023 fprintf (dump_file
, "] and comparison ");
3024 print_generic_expr (dump_file
, rhs1
);
3025 fprintf (dump_file
, " %s ", op_symbol_code (ccode
));
3026 print_generic_expr (dump_file
, rhs2
);
3027 fprintf (dump_file
, "\n into (");
3028 print_generic_expr (dump_file
, utype
);
3029 fprintf (dump_file
, ") ");
3030 print_generic_expr (dump_file
, rhs1
);
3031 fprintf (dump_file
, " %s (", op_symbol_code (ccode
));
3032 print_generic_expr (dump_file
, utype
);
3033 fprintf (dump_file
, ") ");
3034 print_generic_expr (dump_file
, rhs2
);
3035 fprintf (dump_file
, "\n");
3038 operand_entry
*oe
= (*ops
)[ranges
[i
].idx
];
3040 if (opcode
== BIT_IOR_EXPR
3041 || (opcode
== ERROR_MARK
&& oe
->rank
== BIT_IOR_EXPR
))
3044 ccode
= invert_tree_comparison (ccode
, false);
3047 unsigned int uid
= gimple_uid (stmt
);
3048 gimple_stmt_iterator gsi
= gsi_for_stmt (stmt
);
3049 gimple
*g
= gimple_build_assign (make_ssa_name (utype
), NOP_EXPR
, rhs1
);
3050 gimple_set_uid (g
, uid
);
3051 rhs1
= gimple_assign_lhs (g
);
3052 gsi_insert_before (&gsi
, g
, GSI_SAME_STMT
);
3053 g
= gimple_build_assign (make_ssa_name (utype
), NOP_EXPR
, rhs2
);
3054 gimple_set_uid (g
, uid
);
3055 rhs2
= gimple_assign_lhs (g
);
3056 gsi_insert_before (&gsi
, g
, GSI_SAME_STMT
);
3057 if (tree_swap_operands_p (rhs1
, rhs2
))
3059 std::swap (rhs1
, rhs2
);
3060 ccode
= swap_tree_comparison (ccode
);
3062 if (gimple_code (stmt
) == GIMPLE_COND
)
3064 gcond
*c
= as_a
<gcond
*> (stmt
);
3065 gimple_cond_set_code (c
, ccode
);
3066 gimple_cond_set_lhs (c
, rhs1
);
3067 gimple_cond_set_rhs (c
, rhs2
);
3072 tree ctype
= oe
->op
? TREE_TYPE (oe
->op
) : boolean_type_node
;
3073 if (!INTEGRAL_TYPE_P (ctype
)
3074 || (TREE_CODE (ctype
) != BOOLEAN_TYPE
3075 && TYPE_PRECISION (ctype
) != 1))
3076 ctype
= boolean_type_node
;
3077 g
= gimple_build_assign (make_ssa_name (ctype
), ccode
, rhs1
, rhs2
);
3078 gimple_set_uid (g
, uid
);
3079 gsi_insert_before (&gsi
, g
, GSI_SAME_STMT
);
3080 if (oe
->op
&& ctype
!= TREE_TYPE (oe
->op
))
3082 g
= gimple_build_assign (make_ssa_name (TREE_TYPE (oe
->op
)),
3083 NOP_EXPR
, gimple_assign_lhs (g
));
3084 gimple_set_uid (g
, uid
);
3085 gsi_insert_before (&gsi
, g
, GSI_SAME_STMT
);
3087 ranges
[i
].exp
= gimple_assign_lhs (g
);
3088 oe
->op
= ranges
[i
].exp
;
3089 ranges
[i
].low
= build_zero_cst (TREE_TYPE (ranges
[i
].exp
));
3090 ranges
[i
].high
= ranges
[i
].low
;
3092 ranges
[i
].strict_overflow_p
= false;
3093 oe
= (*ops
)[ranges
[*idx
].idx
];
3094 /* Now change all the other range test immediate uses, so that
3095 those tests will be optimized away. */
3096 if (opcode
== ERROR_MARK
)
3099 oe
->op
= build_int_cst (TREE_TYPE (oe
->op
),
3100 oe
->rank
== BIT_IOR_EXPR
? 0 : 1);
3102 oe
->op
= (oe
->rank
== BIT_IOR_EXPR
3103 ? boolean_false_node
: boolean_true_node
);
3106 oe
->op
= error_mark_node
;
3107 ranges
[*idx
].exp
= NULL_TREE
;
3108 ranges
[*idx
].low
= NULL_TREE
;
3109 ranges
[*idx
].high
= NULL_TREE
;
/* Optimize range tests, similarly to how fold_range_test optimizes
   them on trees.  The tree code for the binary
   operation between all the operands is OPCODE.
   If OPCODE is ERROR_MARK, optimize_range_tests is called from within
   maybe_optimize_range_tests for inter-bb range optimization.
   In that case, if oe->op is NULL, oe->id is the bb->index whose
   GIMPLE_COND is && or ||ed into the test, and oe->rank says
   the actual opcode.  */
optimize_range_tests (enum tree_code opcode,
                      vec<operand_entry *> *ops)
3130 unsigned int length
= ops
->length (), i
, j
, first
;
3132 struct range_entry
*ranges
;
3133 bool any_changes
= false;
3138 ranges
= XNEWVEC (struct range_entry
, length
);
3139 for (i
= 0; i
< length
; i
++)
3143 init_range_entry (ranges
+ i
, oe
->op
,
3146 : last_stmt (BASIC_BLOCK_FOR_FN (cfun
, oe
->id
)));
3147 /* For | invert it now, we will invert it again before emitting
3148 the optimized expression. */
3149 if (opcode
== BIT_IOR_EXPR
3150 || (opcode
== ERROR_MARK
&& oe
->rank
== BIT_IOR_EXPR
))
3151 ranges
[i
].in_p
= !ranges
[i
].in_p
;
3154 qsort (ranges
, length
, sizeof (*ranges
), range_entry_cmp
);
3155 for (i
= 0; i
< length
; i
++)
3156 if (ranges
[i
].exp
!= NULL_TREE
&& TREE_CODE (ranges
[i
].exp
) == SSA_NAME
)
3159 /* Try to merge ranges. */
3160 for (first
= i
; i
< length
; i
++)
3162 tree low
= ranges
[i
].low
;
3163 tree high
= ranges
[i
].high
;
3164 int in_p
= ranges
[i
].in_p
;
3165 bool strict_overflow_p
= ranges
[i
].strict_overflow_p
;
3166 int update_fail_count
= 0;
3168 for (j
= i
+ 1; j
< length
; j
++)
3170 if (ranges
[i
].exp
!= ranges
[j
].exp
)
3172 if (!merge_ranges (&in_p
, &low
, &high
, in_p
, low
, high
,
3173 ranges
[j
].in_p
, ranges
[j
].low
, ranges
[j
].high
))
3175 strict_overflow_p
|= ranges
[j
].strict_overflow_p
;
3181 if (update_range_test (ranges
+ i
, ranges
+ i
+ 1, NULL
, j
- i
- 1,
3182 opcode
, ops
, ranges
[i
].exp
, NULL
, in_p
,
3183 low
, high
, strict_overflow_p
))
3188 /* Avoid quadratic complexity if all merge_ranges calls would succeed,
3189 while update_range_test would fail. */
3190 else if (update_fail_count
== 64)
3193 ++update_fail_count
;
3196 any_changes
|= optimize_range_tests_1 (opcode
, first
, length
, true,
3199 if (BRANCH_COST (optimize_function_for_speed_p (cfun
), false) >= 2)
3200 any_changes
|= optimize_range_tests_1 (opcode
, first
, length
, false,
3202 if (lshift_cheap_p (optimize_function_for_speed_p (cfun
)))
3203 any_changes
|= optimize_range_tests_to_bit_test (opcode
, first
, length
,
3205 any_changes
|= optimize_range_tests_var_bound (opcode
, first
, length
, ops
,
3208 if (any_changes
&& opcode
!= ERROR_MARK
)
3211 FOR_EACH_VEC_ELT (*ops
, i
, oe
)
3213 if (oe
->op
== error_mark_node
)
3222 XDELETEVEC (ranges
);
3226 /* A subroutine of optimize_vec_cond_expr to extract and canonicalize
3227 the operands of the VEC_COND_EXPR. Returns ERROR_MARK on failure,
3228 otherwise the comparison code. */
ovce_extract_ops (tree var, gassign **rets, bool *reti)
3233 if (TREE_CODE (var
) != SSA_NAME
)
3236 gassign
*stmt
= dyn_cast
<gassign
*> (SSA_NAME_DEF_STMT (var
));
3240 /* ??? If we start creating more COND_EXPR, we could perform
3241 this same optimization with them. For now, simplify. */
3242 if (gimple_assign_rhs_code (stmt
) != VEC_COND_EXPR
)
3245 tree cond
= gimple_assign_rhs1 (stmt
);
3246 tree_code cmp
= TREE_CODE (cond
);
3247 if (TREE_CODE_CLASS (cmp
) != tcc_comparison
)
3250 /* ??? For now, allow only canonical true and false result vectors.
3251 We could expand this to other constants should the need arise,
3252 but at the moment we don't create them. */
3253 tree t
= gimple_assign_rhs2 (stmt
);
3254 tree f
= gimple_assign_rhs3 (stmt
);
3256 if (integer_all_onesp (t
))
3258 else if (integer_all_onesp (f
))
3260 cmp
= invert_tree_comparison (cmp
, false);
3265 if (!integer_zerop (f
))
/* Optimize the condition of VEC_COND_EXPRs which have been combined
   with OPCODE (either BIT_AND_EXPR or BIT_IOR_EXPR).  */
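/* For example (a scalar-level sketch; the pass itself works on whole
   vectors): combining the conditions of c1 = x < y ? -1 : 0 and
   c2 = x == y ? -1 : 0 under BIT_IOR_EXPR lets maybe_fold_or_comparisons
   fold the pair into a single x <= y test, so one VEC_COND_EXPR can be
   rewritten to use the combined condition and the other operand becomes
   redundant.  */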
optimize_vec_cond_expr (tree_code opcode, vec<operand_entry *> *ops)
3282 unsigned int length
= ops
->length (), i
, j
;
3283 bool any_changes
= false;
3288 for (i
= 0; i
< length
; ++i
)
3290 tree elt0
= (*ops
)[i
]->op
;
3294 tree_code cmp0
= ovce_extract_ops (elt0
, &stmt0
, &invert
);
3295 if (cmp0
== ERROR_MARK
)
3298 for (j
= i
+ 1; j
< length
; ++j
)
3300 tree
&elt1
= (*ops
)[j
]->op
;
3303 tree_code cmp1
= ovce_extract_ops (elt1
, &stmt1
, NULL
);
3304 if (cmp1
== ERROR_MARK
)
3307 tree cond0
= gimple_assign_rhs1 (stmt0
);
3308 tree x0
= TREE_OPERAND (cond0
, 0);
3309 tree y0
= TREE_OPERAND (cond0
, 1);
3311 tree cond1
= gimple_assign_rhs1 (stmt1
);
3312 tree x1
= TREE_OPERAND (cond1
, 0);
3313 tree y1
= TREE_OPERAND (cond1
, 1);
3316 if (opcode
== BIT_AND_EXPR
)
3317 comb
= maybe_fold_and_comparisons (cmp0
, x0
, y0
, cmp1
, x1
, y1
);
3318 else if (opcode
== BIT_IOR_EXPR
)
3319 comb
= maybe_fold_or_comparisons (cmp0
, x0
, y0
, cmp1
, x1
, y1
);
3326 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
3328 fprintf (dump_file
, "Transforming ");
3329 print_generic_expr (dump_file
, cond0
);
3330 fprintf (dump_file
, " %c ", opcode
== BIT_AND_EXPR
? '&' : '|');
3331 print_generic_expr (dump_file
, cond1
);
3332 fprintf (dump_file
, " into ");
3333 print_generic_expr (dump_file
, comb
);
3334 fputc ('\n', dump_file
);
3337 gimple_assign_set_rhs1 (stmt0
, comb
);
3339 std::swap (*gimple_assign_rhs2_ptr (stmt0
),
3340 *gimple_assign_rhs3_ptr (stmt0
));
3341 update_stmt (stmt0
);
3343 elt1
= error_mark_node
;
3352 FOR_EACH_VEC_ELT (*ops
, i
, oe
)
3354 if (oe
->op
== error_mark_node
)
/* Return true if STMT is a cast like:
   # _345 = PHI <_123(N), 1(...), 1(...)>
   where _234 has bool type, _123 has a single use and
   bb N has a single successor M.  This is commonly used in
   the last block of a range test.

   Also return true if STMT is tcc_compare like:
   # _345 = PHI <_234(N), 1(...), 1(...)>
   where _234 has bool type, a single use and
   bb N has a single successor M.  This is commonly used in
   the last block of a range test.  */
final_range_test_p (gimple *stmt)
3392 basic_block bb
, rhs_bb
, lhs_bb
;
3395 use_operand_p use_p
;
3398 if (!gimple_assign_cast_p (stmt
)
3399 && (!is_gimple_assign (stmt
)
3400 || (TREE_CODE_CLASS (gimple_assign_rhs_code (stmt
))
3401 != tcc_comparison
)))
3403 bb
= gimple_bb (stmt
);
3404 if (!single_succ_p (bb
))
3406 e
= single_succ_edge (bb
);
3407 if (e
->flags
& EDGE_COMPLEX
)
3410 lhs
= gimple_assign_lhs (stmt
);
3411 rhs
= gimple_assign_rhs1 (stmt
);
3412 if (gimple_assign_cast_p (stmt
)
3413 && (!INTEGRAL_TYPE_P (TREE_TYPE (lhs
))
3414 || TREE_CODE (rhs
) != SSA_NAME
3415 || TREE_CODE (TREE_TYPE (rhs
)) != BOOLEAN_TYPE
))
3418 if (!gimple_assign_cast_p (stmt
)
3419 && (TREE_CODE (TREE_TYPE (lhs
)) != BOOLEAN_TYPE
))
3422 /* Test whether lhs is consumed only by a PHI in the only successor bb. */
3423 if (!single_imm_use (lhs
, &use_p
, &use_stmt
))
3426 if (gimple_code (use_stmt
) != GIMPLE_PHI
3427 || gimple_bb (use_stmt
) != e
->dest
)
3430 /* And that the rhs is defined in the same loop. */
3431 if (gimple_assign_cast_p (stmt
))
3433 if (TREE_CODE (rhs
) != SSA_NAME
3434 || !(rhs_bb
= gimple_bb (SSA_NAME_DEF_STMT (rhs
)))
3435 || !flow_bb_inside_loop_p (loop_containing_stmt (stmt
), rhs_bb
))
3440 if (TREE_CODE (lhs
) != SSA_NAME
3441 || !(lhs_bb
= gimple_bb (SSA_NAME_DEF_STMT (lhs
)))
3442 || !flow_bb_inside_loop_p (loop_containing_stmt (stmt
), lhs_bb
))
/* Return true if BB is a suitable basic block for inter-bb range test
   optimization.  If BACKWARD is true, BB should be the only predecessor
   of TEST_BB, and *OTHER_BB is either NULL and filled by the routine,
   or compared with to find a common basic block to which all conditions
   branch if true resp. false.  If BACKWARD is false, TEST_BB should
   be the only predecessor of BB.  */
suitable_cond_bb (basic_block bb, basic_block test_bb, basic_block *other_bb,
3460 edge_iterator ei
, ei2
;
3464 bool other_edge_seen
= false;
3469 /* Check last stmt first. */
3470 stmt
= last_stmt (bb
);
3472 || (gimple_code (stmt
) != GIMPLE_COND
3473 && (backward
|| !final_range_test_p (stmt
)))
3474 || gimple_visited_p (stmt
)
3475 || stmt_could_throw_p (stmt
)
3478 is_cond
= gimple_code (stmt
) == GIMPLE_COND
;
3481 /* If last stmt is GIMPLE_COND, verify that one of the succ edges
3482 goes to the next bb (if BACKWARD, it is TEST_BB), and the other
3483 to *OTHER_BB (if not set yet, try to find it out). */
3484 if (EDGE_COUNT (bb
->succs
) != 2)
3486 FOR_EACH_EDGE (e
, ei
, bb
->succs
)
3488 if (!(e
->flags
& (EDGE_TRUE_VALUE
| EDGE_FALSE_VALUE
)))
3490 if (e
->dest
== test_bb
)
3499 if (*other_bb
== NULL
)
3501 FOR_EACH_EDGE (e2
, ei2
, test_bb
->succs
)
3502 if (!(e2
->flags
& (EDGE_TRUE_VALUE
| EDGE_FALSE_VALUE
)))
3504 else if (e
->dest
== e2
->dest
)
3505 *other_bb
= e
->dest
;
3506 if (*other_bb
== NULL
)
3509 if (e
->dest
== *other_bb
)
3510 other_edge_seen
= true;
3514 if (*other_bb
== NULL
|| !other_edge_seen
)
3517 else if (single_succ (bb
) != *other_bb
)
3520 /* Now check all PHIs of *OTHER_BB. */
3521 e
= find_edge (bb
, *other_bb
);
3522 e2
= find_edge (test_bb
, *other_bb
);
3523 for (gsi
= gsi_start_phis (e
->dest
); !gsi_end_p (gsi
); gsi_next (&gsi
))
3525 gphi
*phi
= gsi
.phi ();
3526 /* If both BB and TEST_BB end with GIMPLE_COND, all PHI arguments
3527 corresponding to BB and TEST_BB predecessor must be the same. */
3528 if (!operand_equal_p (gimple_phi_arg_def (phi
, e
->dest_idx
),
3529 gimple_phi_arg_def (phi
, e2
->dest_idx
), 0))
3531 /* Otherwise, if one of the blocks doesn't end with GIMPLE_COND,
3532 one of the PHIs should have the lhs of the last stmt in
3533 that block as PHI arg and that PHI should have 0 or 1
3534 corresponding to it in all other range test basic blocks
3538 if (gimple_phi_arg_def (phi
, e
->dest_idx
)
3539 == gimple_assign_lhs (stmt
)
3540 && (integer_zerop (gimple_phi_arg_def (phi
, e2
->dest_idx
))
3541 || integer_onep (gimple_phi_arg_def (phi
,
3547 gimple
*test_last
= last_stmt (test_bb
);
3548 if (gimple_code (test_last
) != GIMPLE_COND
3549 && gimple_phi_arg_def (phi
, e2
->dest_idx
)
3550 == gimple_assign_lhs (test_last
)
3551 && (integer_zerop (gimple_phi_arg_def (phi
, e
->dest_idx
))
3552 || integer_onep (gimple_phi_arg_def (phi
, e
->dest_idx
))))
3562 /* Return true if BB doesn't have side-effects that would disallow
3563 range test optimization, all SSA_NAMEs set in the bb are consumed
3564 in the bb and there are no PHIs. */
no_side_effect_bb (basic_block bb)
3569 gimple_stmt_iterator gsi
;
3572 if (!gimple_seq_empty_p (phi_nodes (bb
)))
3574 last
= last_stmt (bb
);
3575 for (gsi
= gsi_start_bb (bb
); !gsi_end_p (gsi
); gsi_next (&gsi
))
3577 gimple
*stmt
= gsi_stmt (gsi
);
3579 imm_use_iterator imm_iter
;
3580 use_operand_p use_p
;
3582 if (is_gimple_debug (stmt
))
3584 if (gimple_has_side_effects (stmt
))
3588 if (!is_gimple_assign (stmt
))
3590 lhs
= gimple_assign_lhs (stmt
);
3591 if (TREE_CODE (lhs
) != SSA_NAME
)
3593 if (gimple_assign_rhs_could_trap_p (stmt
))
3595 FOR_EACH_IMM_USE_FAST (use_p
, imm_iter
, lhs
)
3597 gimple
*use_stmt
= USE_STMT (use_p
);
3598 if (is_gimple_debug (use_stmt
))
3600 if (gimple_bb (use_stmt
) != bb
)
/* If VAR is set by CODE (BIT_{AND,IOR}_EXPR) which is reassociable,
   return true and fill in *OPS recursively.  */
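/* For instance (hypothetical SSA names, shown only to illustrate the
   recursion): given

     t_1 = a_2 & b_3;
     t_4 = t_1 & c_5;

   calling get_ops (t_4, BIT_AND_EXPR, &ops, loop) pushes operand entries
   for a_2, b_3 and c_5 and marks the visited defining statements.  */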
get_ops (tree var, enum tree_code code, vec<operand_entry *> *ops,
3614 gimple
*stmt
= SSA_NAME_DEF_STMT (var
);
3618 if (!is_reassociable_op (stmt
, code
, loop
))
3621 rhs
[0] = gimple_assign_rhs1 (stmt
);
3622 rhs
[1] = gimple_assign_rhs2 (stmt
);
3623 gimple_set_visited (stmt
, true);
3624 for (i
= 0; i
< 2; i
++)
3625 if (TREE_CODE (rhs
[i
]) == SSA_NAME
3626 && !get_ops (rhs
[i
], code
, ops
, loop
)
3627 && has_single_use (rhs
[i
]))
3629 operand_entry
*oe
= operand_entry_pool
.allocate ();
3635 oe
->stmt_to_insert
= NULL
;
3636 ops
->safe_push (oe
);
3641 /* Find the ops that were added by get_ops starting from VAR, see if
3642 they were changed during update_range_test and if yes, create new
update_ops (tree var, enum tree_code code, vec<operand_entry *> ops,
            unsigned int *pidx, struct loop *loop)
3649 gimple
*stmt
= SSA_NAME_DEF_STMT (var
);
3653 if (!is_reassociable_op (stmt
, code
, loop
))
3656 rhs
[0] = gimple_assign_rhs1 (stmt
);
3657 rhs
[1] = gimple_assign_rhs2 (stmt
);
3660 for (i
= 0; i
< 2; i
++)
3661 if (TREE_CODE (rhs
[i
]) == SSA_NAME
)
3663 rhs
[2 + i
] = update_ops (rhs
[i
], code
, ops
, pidx
, loop
);
3664 if (rhs
[2 + i
] == NULL_TREE
)
3666 if (has_single_use (rhs
[i
]))
3667 rhs
[2 + i
] = ops
[(*pidx
)++]->op
;
3669 rhs
[2 + i
] = rhs
[i
];
3672 if ((rhs
[2] != rhs
[0] || rhs
[3] != rhs
[1])
3673 && (rhs
[2] != rhs
[1] || rhs
[3] != rhs
[0]))
3675 gimple_stmt_iterator gsi
= gsi_for_stmt (stmt
);
3676 var
= make_ssa_name (TREE_TYPE (var
));
3677 gassign
*g
= gimple_build_assign (var
, gimple_assign_rhs_code (stmt
),
3679 gimple_set_uid (g
, gimple_uid (stmt
));
3680 gimple_set_visited (g
, true);
3681 gsi_insert_before (&gsi
, g
, GSI_SAME_STMT
);
3686 /* Structure to track the initial value passed to get_ops and
3687 the range in the ops vector for each basic block. */
3689 struct inter_bb_range_test_entry
3692 unsigned int first_idx
, last_idx
;
/* Inter-bb range test optimization.

   Returns TRUE if a gimple conditional is optimized to a true/false
   constant, otherwise returns FALSE.

   A TRUE return indicates to the caller that it should run a CFG cleanup
   pass once reassociation is completed.  */
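/* As an illustration (a source-level view only; the pass works on the CFG):
   a condition such as

     if (a == 1 || a == 3 || a == 5)
       foo ();

   is typically expanded into several basic blocks, each ending in a
   GIMPLE_COND branching to the same join block.  This routine walks that
   chain, collects the individual tests into an ops vector and hands them to
   optimize_range_tests, which may merge them into a single test.  */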
maybe_optimize_range_tests (gimple *stmt)
3706 basic_block first_bb
= gimple_bb (stmt
);
3707 basic_block last_bb
= first_bb
;
3708 basic_block other_bb
= NULL
;
3712 auto_vec
<operand_entry
*> ops
;
3713 auto_vec
<inter_bb_range_test_entry
> bbinfo
;
3714 bool any_changes
= false;
3715 bool cfg_cleanup_needed
= false;
3717 /* Consider only basic blocks that end with GIMPLE_COND or
3718 a cast statement satisfying final_range_test_p. All
3719 but the last bb in the first_bb .. last_bb range
3720 should end with GIMPLE_COND. */
3721 if (gimple_code (stmt
) == GIMPLE_COND
)
3723 if (EDGE_COUNT (first_bb
->succs
) != 2)
3724 return cfg_cleanup_needed
;
3726 else if (final_range_test_p (stmt
))
3727 other_bb
= single_succ (first_bb
);
3729 return cfg_cleanup_needed
;
3731 if (stmt_could_throw_p (stmt
))
3732 return cfg_cleanup_needed
;
  /* As the relative ordering of post-dominator sons isn't fixed,
     maybe_optimize_range_tests can be called first on any
     bb in the range we want to optimize.  So, start searching
     backwards, if first_bb can be set to a predecessor.  */
3738 while (single_pred_p (first_bb
))
3740 basic_block pred_bb
= single_pred (first_bb
);
3741 if (!suitable_cond_bb (pred_bb
, first_bb
, &other_bb
, true))
3743 if (!no_side_effect_bb (first_bb
))
3747 /* If first_bb is last_bb, other_bb hasn't been computed yet.
3748 Before starting forward search in last_bb successors, find
3749 out the other_bb. */
3750 if (first_bb
== last_bb
)
3753 /* As non-GIMPLE_COND last stmt always terminates the range,
3754 if forward search didn't discover anything, just give up. */
3755 if (gimple_code (stmt
) != GIMPLE_COND
)
3756 return cfg_cleanup_needed
;
3757 /* Look at both successors. Either it ends with a GIMPLE_COND
3758 and satisfies suitable_cond_bb, or ends with a cast and
3759 other_bb is that cast's successor. */
3760 FOR_EACH_EDGE (e
, ei
, first_bb
->succs
)
3761 if (!(e
->flags
& (EDGE_TRUE_VALUE
| EDGE_FALSE_VALUE
))
3762 || e
->dest
== first_bb
)
3763 return cfg_cleanup_needed
;
3764 else if (single_pred_p (e
->dest
))
3766 stmt
= last_stmt (e
->dest
);
3768 && gimple_code (stmt
) == GIMPLE_COND
3769 && EDGE_COUNT (e
->dest
->succs
) == 2)
3771 if (suitable_cond_bb (first_bb
, e
->dest
, &other_bb
, true))
3777 && final_range_test_p (stmt
)
3778 && find_edge (first_bb
, single_succ (e
->dest
)))
3780 other_bb
= single_succ (e
->dest
);
3781 if (other_bb
== first_bb
)
3785 if (other_bb
== NULL
)
3786 return cfg_cleanup_needed
;
3788 /* Now do the forward search, moving last_bb to successor bbs
3789 that aren't other_bb. */
3790 while (EDGE_COUNT (last_bb
->succs
) == 2)
3792 FOR_EACH_EDGE (e
, ei
, last_bb
->succs
)
3793 if (e
->dest
!= other_bb
)
3797 if (!single_pred_p (e
->dest
))
3799 if (!suitable_cond_bb (e
->dest
, last_bb
, &other_bb
, false))
3801 if (!no_side_effect_bb (e
->dest
))
3805 if (first_bb
== last_bb
)
3806 return cfg_cleanup_needed
;
3807 /* Here basic blocks first_bb through last_bb's predecessor
3808 end with GIMPLE_COND, all of them have one of the edges to
3809 other_bb and another to another block in the range,
3810 all blocks except first_bb don't have side-effects and
3811 last_bb ends with either GIMPLE_COND, or cast satisfying
3812 final_range_test_p. */
3813 for (bb
= last_bb
; ; bb
= single_pred (bb
))
3815 enum tree_code code
;
3817 inter_bb_range_test_entry bb_ent
;
3819 bb_ent
.op
= NULL_TREE
;
3820 bb_ent
.first_idx
= ops
.length ();
3821 bb_ent
.last_idx
= bb_ent
.first_idx
;
3822 e
= find_edge (bb
, other_bb
);
3823 stmt
= last_stmt (bb
);
3824 gimple_set_visited (stmt
, true);
3825 if (gimple_code (stmt
) != GIMPLE_COND
)
3827 use_operand_p use_p
;
3832 lhs
= gimple_assign_lhs (stmt
);
3833 rhs
= gimple_assign_rhs1 (stmt
);
3834 gcc_assert (bb
== last_bb
);
3843 # _345 = PHI <_123(N), 1(...), 1(...)>
3845 or 0 instead of 1. If it is 0, the _234
3846 range test is anded together with all the
3847 other range tests, if it is 1, it is ored with
3849 single_imm_use (lhs
, &use_p
, &phi
);
3850 gcc_assert (gimple_code (phi
) == GIMPLE_PHI
);
3851 e2
= find_edge (first_bb
, other_bb
);
3853 gcc_assert (gimple_phi_arg_def (phi
, e
->dest_idx
) == lhs
);
3854 if (integer_zerop (gimple_phi_arg_def (phi
, d
)))
3855 code
= BIT_AND_EXPR
;
3858 gcc_checking_assert (integer_onep (gimple_phi_arg_def (phi
, d
)));
3859 code
= BIT_IOR_EXPR
;
3862 /* If _234 SSA_NAME_DEF_STMT is
3864 (or &, corresponding to 1/0 in the phi arguments,
3865 push into ops the individual range test arguments
3866 of the bitwise or resp. and, recursively. */
3867 if (TREE_CODE (rhs
) == SSA_NAME
3868 && (TREE_CODE_CLASS (gimple_assign_rhs_code (stmt
))
3870 && !get_ops (rhs
, code
, &ops
,
3871 loop_containing_stmt (stmt
))
3872 && has_single_use (rhs
))
3874 /* Otherwise, push the _234 range test itself. */
3875 operand_entry
*oe
= operand_entry_pool
.allocate ();
3881 oe
->stmt_to_insert
= NULL
;
3886 else if (is_gimple_assign (stmt
)
3887 && (TREE_CODE_CLASS (gimple_assign_rhs_code (stmt
))
3889 && !get_ops (lhs
, code
, &ops
,
3890 loop_containing_stmt (stmt
))
3891 && has_single_use (lhs
))
3893 operand_entry
*oe
= operand_entry_pool
.allocate ();
3904 bb_ent
.last_idx
= ops
.length ();
3907 bbinfo
.safe_push (bb_ent
);
3910 /* Otherwise stmt is GIMPLE_COND. */
3911 code
= gimple_cond_code (stmt
);
3912 lhs
= gimple_cond_lhs (stmt
);
3913 rhs
= gimple_cond_rhs (stmt
);
3914 if (TREE_CODE (lhs
) == SSA_NAME
3915 && INTEGRAL_TYPE_P (TREE_TYPE (lhs
))
3916 && ((code
!= EQ_EXPR
&& code
!= NE_EXPR
)
3917 || rhs
!= boolean_false_node
3918 /* Either push into ops the individual bitwise
3919 or resp. and operands, depending on which
3920 edge is other_bb. */
3921 || !get_ops (lhs
, (((e
->flags
& EDGE_TRUE_VALUE
) == 0)
3922 ^ (code
== EQ_EXPR
))
3923 ? BIT_AND_EXPR
: BIT_IOR_EXPR
, &ops
,
3924 loop_containing_stmt (stmt
))))
3926 /* Or push the GIMPLE_COND stmt itself. */
3927 operand_entry
*oe
= operand_entry_pool
.allocate ();
3930 oe
->rank
= (e
->flags
& EDGE_TRUE_VALUE
)
3931 ? BIT_IOR_EXPR
: BIT_AND_EXPR
;
	  /* oe->op == NULL signals that there is no SSA_NAME
	     for the range test, and oe->id instead is the
	     basic block number at whose end the GIMPLE_COND
3938 oe
->stmt_to_insert
= NULL
;
3943 else if (ops
.length () > bb_ent
.first_idx
)
3946 bb_ent
.last_idx
= ops
.length ();
3948 bbinfo
.safe_push (bb_ent
);
3952 if (ops
.length () > 1)
3953 any_changes
= optimize_range_tests (ERROR_MARK
, &ops
);
3956 unsigned int idx
, max_idx
= 0;
      /* update_ops relies on the has_single_use predicates returning the
	 same values as they did during get_ops earlier.  Additionally it
	 never removes statements, only adds new ones, and it should walk
	 from the single imm use and check the predicate already before
	 making those changes.
	 On the other hand, the handling of GIMPLE_COND directly can turn
	 previously multiply used SSA_NAMEs into single use SSA_NAMEs, so
	 it needs to be done in a separate loop afterwards.  */
3965 for (bb
= last_bb
, idx
= 0; ; bb
= single_pred (bb
), idx
++)
3967 if (bbinfo
[idx
].first_idx
< bbinfo
[idx
].last_idx
3968 && bbinfo
[idx
].op
!= NULL_TREE
)
3973 stmt
= last_stmt (bb
);
3974 new_op
= update_ops (bbinfo
[idx
].op
,
3976 ops
[bbinfo
[idx
].first_idx
]->rank
,
3977 ops
, &bbinfo
[idx
].first_idx
,
3978 loop_containing_stmt (stmt
));
3979 if (new_op
== NULL_TREE
)
3981 gcc_assert (bb
== last_bb
);
3982 new_op
= ops
[bbinfo
[idx
].first_idx
++]->op
;
3984 if (bbinfo
[idx
].op
!= new_op
)
3986 imm_use_iterator iter
;
3987 use_operand_p use_p
;
3988 gimple
*use_stmt
, *cast_or_tcc_cmp_stmt
= NULL
;
3990 FOR_EACH_IMM_USE_STMT (use_stmt
, iter
, bbinfo
[idx
].op
)
3991 if (is_gimple_debug (use_stmt
))
3993 else if (gimple_code (use_stmt
) == GIMPLE_COND
3994 || gimple_code (use_stmt
) == GIMPLE_PHI
)
3995 FOR_EACH_IMM_USE_ON_STMT (use_p
, iter
)
3996 SET_USE (use_p
, new_op
);
3997 else if ((is_gimple_assign (use_stmt
)
3999 (gimple_assign_rhs_code (use_stmt
))
4000 == tcc_comparison
)))
4001 cast_or_tcc_cmp_stmt
= use_stmt
;
4002 else if (gimple_assign_cast_p (use_stmt
))
4003 cast_or_tcc_cmp_stmt
= use_stmt
;
4007 if (cast_or_tcc_cmp_stmt
)
4009 gcc_assert (bb
== last_bb
);
4010 tree lhs
= gimple_assign_lhs (cast_or_tcc_cmp_stmt
);
4011 tree new_lhs
= make_ssa_name (TREE_TYPE (lhs
));
4012 enum tree_code rhs_code
4013 = gimple_assign_cast_p (cast_or_tcc_cmp_stmt
)
4014 ? gimple_assign_rhs_code (cast_or_tcc_cmp_stmt
)
4017 if (is_gimple_min_invariant (new_op
))
4019 new_op
= fold_convert (TREE_TYPE (lhs
), new_op
);
4020 g
= gimple_build_assign (new_lhs
, new_op
);
4023 g
= gimple_build_assign (new_lhs
, rhs_code
, new_op
);
4024 gimple_stmt_iterator gsi
4025 = gsi_for_stmt (cast_or_tcc_cmp_stmt
);
4026 gimple_set_uid (g
, gimple_uid (cast_or_tcc_cmp_stmt
));
4027 gimple_set_visited (g
, true);
4028 gsi_insert_before (&gsi
, g
, GSI_SAME_STMT
);
4029 FOR_EACH_IMM_USE_STMT (use_stmt
, iter
, lhs
)
4030 if (is_gimple_debug (use_stmt
))
4032 else if (gimple_code (use_stmt
) == GIMPLE_COND
4033 || gimple_code (use_stmt
) == GIMPLE_PHI
)
4034 FOR_EACH_IMM_USE_ON_STMT (use_p
, iter
)
4035 SET_USE (use_p
, new_lhs
);
4044 for (bb
= last_bb
, idx
= 0; ; bb
= single_pred (bb
), idx
++)
4046 if (bbinfo
[idx
].first_idx
< bbinfo
[idx
].last_idx
4047 && bbinfo
[idx
].op
== NULL_TREE
4048 && ops
[bbinfo
[idx
].first_idx
]->op
!= NULL_TREE
)
4050 gcond
*cond_stmt
= as_a
<gcond
*> (last_stmt (bb
));
4055 /* If we collapse the conditional to a true/false
4056 condition, then bubble that knowledge up to our caller. */
4057 if (integer_zerop (ops
[bbinfo
[idx
].first_idx
]->op
))
4059 gimple_cond_make_false (cond_stmt
);
4060 cfg_cleanup_needed
= true;
4062 else if (integer_onep (ops
[bbinfo
[idx
].first_idx
]->op
))
4064 gimple_cond_make_true (cond_stmt
);
4065 cfg_cleanup_needed
= true;
4069 gimple_cond_set_code (cond_stmt
, NE_EXPR
);
4070 gimple_cond_set_lhs (cond_stmt
,
4071 ops
[bbinfo
[idx
].first_idx
]->op
);
4072 gimple_cond_set_rhs (cond_stmt
, boolean_false_node
);
4074 update_stmt (cond_stmt
);
      /* The above changes could result in basic blocks after the first
	 modified one, up to and including last_bb, being executed even if
	 they would not be in the original program.  If the value ranges of
	 assignment lhs' in those bbs were dependent on the conditions
	 guarding those basic blocks which now can change, the VRs might
	 be incorrect.  As no_side_effect_bb should ensure those SSA_NAMEs
	 are only used within the same bb, it should not be a big deal if
	 we just reset all the VRs in those bbs.  See PR68671.  */
4088 for (bb
= last_bb
, idx
= 0; idx
< max_idx
; bb
= single_pred (bb
), idx
++)
4089 reset_flow_sensitive_info_in_bb (bb
);
4091 return cfg_cleanup_needed
;
/* Return true if OPERAND is defined by a PHI node which uses the LHS
   of STMT in its operands.  This is also known as a "destructive
   update" operation.  */
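/* A typical instance (hypothetical SSA names, for illustration only) is a
   reduction loop:

     sum_1 = PHI <0 (preheader), sum_2 (latch)>
     sum_2 = x_3 + sum_1;

   Here is_phi_for_stmt called on the definition of sum_2 with operand sum_1
   returns true, because sum_1 is defined by a PHI that uses sum_2, the lhs
   of STMT.  */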
is_phi_for_stmt (gimple *stmt, tree operand)
4104 use_operand_p arg_p
;
4107 if (TREE_CODE (operand
) != SSA_NAME
)
4110 lhs
= gimple_assign_lhs (stmt
);
4112 def_stmt
= SSA_NAME_DEF_STMT (operand
);
4113 def_phi
= dyn_cast
<gphi
*> (def_stmt
);
4117 FOR_EACH_PHI_ARG (arg_p
, def_phi
, i
, SSA_OP_USE
)
4118 if (lhs
== USE_FROM_PTR (arg_p
))
/* Remove the def stmt of VAR if VAR has zero uses, and if so recurse
   on its rhs1 operand.  */
4127 remove_visited_stmt_chain (tree var
)
4130 gimple_stmt_iterator gsi
;
4134 if (TREE_CODE (var
) != SSA_NAME
|| !has_zero_uses (var
))
4136 stmt
= SSA_NAME_DEF_STMT (var
);
4137 if (is_gimple_assign (stmt
) && gimple_visited_p (stmt
))
4139 var
= gimple_assign_rhs1 (stmt
);
4140 gsi
= gsi_for_stmt (stmt
);
4141 reassoc_remove_stmt (&gsi
);
4142 release_defs (stmt
);
/* This function checks three consecutive operands in
   passed operands vector OPS starting from OPINDEX and
   swaps two operands if it is profitable for binary operation
   consuming OPINDEX + 1 and OPINDEX + 2 operands.

   We pair ops with the same rank if possible.

   The alternative we try is to see if STMT is a destructive
   update style statement, which is like:

   In that case, we want to use the destructive update form to
   expose the possible vectorizer sum reduction opportunity.
   In that case, the third operand will be the phi node.  This
   check is not performed if STMT is null.

   We could, of course, try to be better as noted above, and do a
   lot of work to try to find these opportunities in >3 operand
   cases, but it is unlikely to be worth it.  */
swap_ops_for_binary_stmt (vec<operand_entry *> ops,
                          unsigned int opindex, gimple *stmt)
4173 operand_entry
*oe1
, *oe2
, *oe3
;
4176 oe2
= ops
[opindex
+ 1];
4177 oe3
= ops
[opindex
+ 2];
4179 if ((oe1
->rank
== oe2
->rank
4180 && oe2
->rank
!= oe3
->rank
)
4181 || (stmt
&& is_phi_for_stmt (stmt
, oe3
->op
)
4182 && !is_phi_for_stmt (stmt
, oe1
->op
)
4183 && !is_phi_for_stmt (stmt
, oe2
->op
)))
4184 std::swap (*oe1
, *oe3
);
4185 else if ((oe1
->rank
== oe3
->rank
4186 && oe2
->rank
!= oe3
->rank
)
4187 || (stmt
&& is_phi_for_stmt (stmt
, oe2
->op
)
4188 && !is_phi_for_stmt (stmt
, oe1
->op
)
4189 && !is_phi_for_stmt (stmt
, oe3
->op
)))
4190 std::swap (*oe1
, *oe2
);
4193 /* If definition of RHS1 or RHS2 dominates STMT, return the later of those
4194 two definitions, otherwise return STMT. */
static inline gimple *
find_insert_point (gimple *stmt, tree rhs1, tree rhs2)
4199 if (TREE_CODE (rhs1
) == SSA_NAME
4200 && reassoc_stmt_dominates_stmt_p (stmt
, SSA_NAME_DEF_STMT (rhs1
)))
4201 stmt
= SSA_NAME_DEF_STMT (rhs1
);
4202 if (TREE_CODE (rhs2
) == SSA_NAME
4203 && reassoc_stmt_dominates_stmt_p (stmt
, SSA_NAME_DEF_STMT (rhs2
)))
4204 stmt
= SSA_NAME_DEF_STMT (rhs2
);
4208 /* If the stmt that defines operand has to be inserted, insert it
insert_stmt_before_use (gimple *stmt, gimple *stmt_to_insert)
4213 gcc_assert (is_gimple_assign (stmt_to_insert
));
4214 tree rhs1
= gimple_assign_rhs1 (stmt_to_insert
);
4215 tree rhs2
= gimple_assign_rhs2 (stmt_to_insert
);
4216 gimple
*insert_point
= find_insert_point (stmt
, rhs1
, rhs2
);
4217 gimple_stmt_iterator gsi
= gsi_for_stmt (insert_point
);
4218 gimple_set_uid (stmt_to_insert
, gimple_uid (insert_point
));
4220 /* If the insert point is not stmt, then insert_point would be
4221 the point where operand rhs1 or rhs2 is defined. In this case,
4222 stmt_to_insert has to be inserted afterwards. This would
4223 only happen when the stmt insertion point is flexible. */
4224 if (stmt
== insert_point
)
4225 gsi_insert_before (&gsi
, stmt_to_insert
, GSI_NEW_STMT
);
4227 insert_stmt_after (stmt_to_insert
, insert_point
);
4231 /* Recursively rewrite our linearized statements so that the operators
4232 match those in OPS[OPINDEX], putting the computation in rank
4233 order. Return new lhs.
4234 CHANGED is true if we shouldn't reuse the lhs SSA_NAME both in
4235 the current stmt and during recursive invocations.
4236 NEXT_CHANGED is true if we shouldn't reuse the lhs SSA_NAME in
4237 recursive invocations. */
rewrite_expr_tree (gimple *stmt, unsigned int opindex,
                   vec<operand_entry *> ops, bool changed, bool next_changed)
4243 tree rhs1
= gimple_assign_rhs1 (stmt
);
4244 tree rhs2
= gimple_assign_rhs2 (stmt
);
4245 tree lhs
= gimple_assign_lhs (stmt
);
4248 /* The final recursion case for this function is that you have
4249 exactly two operations left.
4250 If we had exactly one op in the entire list to start with, we
4251 would have never called this function, and the tail recursion
4252 rewrites them one at a time. */
4253 if (opindex
+ 2 == ops
.length ())
4255 operand_entry
*oe1
, *oe2
;
4258 oe2
= ops
[opindex
+ 1];
4260 if (rhs1
!= oe1
->op
|| rhs2
!= oe2
->op
)
4262 gimple_stmt_iterator gsi
= gsi_for_stmt (stmt
);
4263 unsigned int uid
= gimple_uid (stmt
);
4265 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
4267 fprintf (dump_file
, "Transforming ");
4268 print_gimple_stmt (dump_file
, stmt
, 0);
4271 /* If the stmt that defines operand has to be inserted, insert it
4273 if (oe1
->stmt_to_insert
)
4274 insert_stmt_before_use (stmt
, oe1
->stmt_to_insert
);
4275 if (oe2
->stmt_to_insert
)
4276 insert_stmt_before_use (stmt
, oe2
->stmt_to_insert
);
4277 /* Even when changed is false, reassociation could have e.g. removed
4278 some redundant operations, so unless we are just swapping the
4279 arguments or unless there is no change at all (then we just
4280 return lhs), force creation of a new SSA_NAME. */
4281 if (changed
|| ((rhs1
!= oe2
->op
|| rhs2
!= oe1
->op
) && opindex
))
4283 gimple
*insert_point
4284 = find_insert_point (stmt
, oe1
->op
, oe2
->op
);
4285 lhs
= make_ssa_name (TREE_TYPE (lhs
));
4287 = gimple_build_assign (lhs
, gimple_assign_rhs_code (stmt
),
4289 gimple_set_uid (stmt
, uid
);
4290 gimple_set_visited (stmt
, true);
4291 if (insert_point
== gsi_stmt (gsi
))
4292 gsi_insert_before (&gsi
, stmt
, GSI_SAME_STMT
);
4294 insert_stmt_after (stmt
, insert_point
);
4298 gcc_checking_assert (find_insert_point (stmt
, oe1
->op
, oe2
->op
)
4300 gimple_assign_set_rhs1 (stmt
, oe1
->op
);
4301 gimple_assign_set_rhs2 (stmt
, oe2
->op
);
4305 if (rhs1
!= oe1
->op
&& rhs1
!= oe2
->op
)
4306 remove_visited_stmt_chain (rhs1
);
4308 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
4310 fprintf (dump_file
, " into ");
4311 print_gimple_stmt (dump_file
, stmt
, 0);
4317 /* If we hit here, we should have 3 or more ops left. */
4318 gcc_assert (opindex
+ 2 < ops
.length ());
4320 /* Rewrite the next operator. */
4323 /* If the stmt that defines operand has to be inserted, insert it
4325 if (oe
->stmt_to_insert
)
4326 insert_stmt_before_use (stmt
, oe
->stmt_to_insert
);
4328 /* Recurse on the LHS of the binary operator, which is guaranteed to
4329 be the non-leaf side. */
4331 = rewrite_expr_tree (SSA_NAME_DEF_STMT (rhs1
), opindex
+ 1, ops
,
4332 changed
|| oe
->op
!= rhs2
|| next_changed
,
4335 if (oe
->op
!= rhs2
|| new_rhs1
!= rhs1
)
4337 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
4339 fprintf (dump_file
, "Transforming ");
4340 print_gimple_stmt (dump_file
, stmt
, 0);
4343 /* If changed is false, this is either opindex == 0
4344 or all outer rhs2's were equal to corresponding oe->op,
4345 and powi_result is NULL.
4346 That means lhs is equivalent before and after reassociation.
4347 Otherwise ensure the old lhs SSA_NAME is not reused and
4348 create a new stmt as well, so that any debug stmts will be
4349 properly adjusted. */
4352 gimple_stmt_iterator gsi
= gsi_for_stmt (stmt
);
4353 unsigned int uid
= gimple_uid (stmt
);
4354 gimple
*insert_point
= find_insert_point (stmt
, new_rhs1
, oe
->op
);
4356 lhs
= make_ssa_name (TREE_TYPE (lhs
));
4357 stmt
= gimple_build_assign (lhs
, gimple_assign_rhs_code (stmt
),
4359 gimple_set_uid (stmt
, uid
);
4360 gimple_set_visited (stmt
, true);
4361 if (insert_point
== gsi_stmt (gsi
))
4362 gsi_insert_before (&gsi
, stmt
, GSI_SAME_STMT
);
4364 insert_stmt_after (stmt
, insert_point
);
4368 gcc_checking_assert (find_insert_point (stmt
, new_rhs1
, oe
->op
)
4370 gimple_assign_set_rhs1 (stmt
, new_rhs1
);
4371 gimple_assign_set_rhs2 (stmt
, oe
->op
);
4375 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
4377 fprintf (dump_file
, " into ");
4378 print_gimple_stmt (dump_file
, stmt
, 0);
/* Find out how many cycles we need to compute a chain of statements.
   OPS_NUM holds the number of statements in the chain.  CPU_WIDTH is the
   maximum number of independent statements we may execute per cycle.  */
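/* A worked example (illustrative only): for ops_num == 10 and
   cpu_width == 2 the first phase accounts for 10 / (2 * 2) == 2 cycles,
   leaving rest == 10 - 2 * 2 == 6 operands; 6 is not a power of two, so
   floor_log2 (6) + 1 == 3 further cycles are added, giving 5 in total.  */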
get_required_cycles (int ops_num, int cpu_width)
4395 /* While we have more than 2 * cpu_width operands
4396 we may reduce number of operands by cpu_width
4398 res
= ops_num
/ (2 * cpu_width
);
  /* The remaining operand count may be reduced by a factor of two
     per cycle until we have only one operand left.  */
4402 rest
= (unsigned)(ops_num
- res
* cpu_width
);
4403 elog
= exact_log2 (rest
);
4407 res
+= floor_log2 (rest
) + 1;
4412 /* Returns an optimal number of registers to use for computation of
4413 given statements. */
4416 get_reassociation_width (int ops_num
, enum tree_code opc
,
4419 int param_width
= PARAM_VALUE (PARAM_TREE_REASSOC_WIDTH
);
4424 if (param_width
> 0)
4425 width
= param_width
;
4427 width
= targetm
.sched
.reassociation_width (opc
, mode
);
4432 /* Get the minimal time required for sequence computation. */
4433 cycles_best
= get_required_cycles (ops_num
, width
);
  /* Check if we may use a smaller width and still compute the sequence in
     the same number of cycles.  It will allow us to reduce register usage.
     get_required_cycles never decreases as the width is lowered, so we can
     perform a binary search for the minimal width that still
     results in the optimal cycle count.  */
4441 while (width
> width_min
)
4443 int width_mid
= (width
+ width_min
) / 2;
4445 if (get_required_cycles (ops_num
, width_mid
) == cycles_best
)
4447 else if (width_min
< width_mid
)
4448 width_min
= width_mid
;
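/* Illustrative sketch, not part of this file: the shape of the binary
   search above, written against the model_required_cycles sketch given
   after get_required_cycles (both names are made up).  It narrows
   [width_min, width] down to the smallest width that still reaches the
   optimal cycle count, trading no speed for fewer registers.  */

static int
model_reassociation_width (int ops_num, int width)
{
  int width_min = 1;
  int cycles_best = model_required_cycles (ops_num, width);

  while (width > width_min)
    {
      int width_mid = (width + width_min) / 2;

      if (model_required_cycles (ops_num, width_mid) == cycles_best)
        width = width_mid;      /* Narrower is just as fast; keep it.  */
      else if (width_min < width_mid)
        width_min = width_mid;  /* Too slow; search the upper half.  */
      else
        break;                  /* The interval cannot shrink further.  */
    }
  return width;
}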
4456 /* Recursively rewrite our linearized statements so that the operators
4457 match those in OPS[OPINDEX], putting the computation in rank
4458 order and trying to allow operations to be executed in parallel. */
4462 rewrite_expr_tree_parallel (gassign *stmt, int width,
4463 vec<operand_entry *> ops)
4465 enum tree_code opcode = gimple_assign_rhs_code (stmt);
4466 int op_num = ops.length ();
4467 gcc_assert (op_num > 0);
4468 int stmt_num = op_num - 1;
4469 gimple **stmts = XALLOCAVEC (gimple *, stmt_num);
4470 int op_index = op_num - 1;
4472 int ready_stmts_end = 0;
4474 gimple *stmt1 = NULL, *stmt2 = NULL;
4475 tree last_rhs1 = gimple_assign_rhs1 (stmt);
4477 /* We start expression rewriting from the top statements.
4478 So, in this loop we create a full list of statements
4479 we will work with. */
4480 stmts[stmt_num - 1] = stmt;
4481 for (i = stmt_num - 2; i >= 0; i--)
4482 stmts[i] = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmts[i+1]));
4484 for (i = 0; i < stmt_num; i++)
4488 /* Determine whether we should use results of
4489 already handled statements or not. */
4490 if (ready_stmts_end == 0
4491 && (i - stmt_index >= width || op_index < 1))
4492 ready_stmts_end = i;
4494 /* Now we choose operands for the next statement. A nonzero
4495 value in ready_stmts_end means that we should use
4496 the result of already generated statements as new operand. */
4497 if (ready_stmts_end > 0)
4499 op1 = gimple_assign_lhs (stmts[stmt_index++]);
4500 if (ready_stmts_end > stmt_index)
4501 op2 = gimple_assign_lhs (stmts[stmt_index++]);
4502 else if (op_index >= 0)
4504 operand_entry *oe = ops[op_index--];
4505 stmt2 = oe->stmt_to_insert;
4510 gcc_assert (stmt_index < i);
4511 op2 = gimple_assign_lhs (stmts[stmt_index++]);
4514 if (stmt_index >= ready_stmts_end)
4515 ready_stmts_end = 0;
4520 swap_ops_for_binary_stmt (ops, op_index - 2, NULL);
4521 operand_entry *oe2 = ops[op_index--];
4522 operand_entry *oe1 = ops[op_index--];
4524 stmt2 = oe2->stmt_to_insert;
4526 stmt1 = oe1->stmt_to_insert;
4529 /* If we emit the last statement then we should put
4530 operands into the last statement. It will also become the lhs of the whole expression. */
4532 if (op_index < 0 && stmt_index == i)
4535 if (dump_file && (dump_flags & TDF_DETAILS))
4537 fprintf (dump_file, "Transforming ");
4538 print_gimple_stmt (dump_file, stmts[i], 0);
4541 /* If the stmt that defines operand has to be inserted, insert it before the use. */
4544 insert_stmt_before_use (stmts[i], stmt1);
4546 insert_stmt_before_use (stmts[i], stmt2);
4547 stmt1 = stmt2 = NULL;
4549 /* We keep original statement only for the last one. All
4550 others are recreated. */
4551 if (i == stmt_num - 1)
4553 gimple_assign_set_rhs1 (stmts[i], op1);
4554 gimple_assign_set_rhs2 (stmts[i], op2);
4555 update_stmt (stmts[i]);
4559 stmts[i] = build_and_add_sum (TREE_TYPE (last_rhs1), op1, op2, opcode);
4561 if (dump_file && (dump_flags & TDF_DETAILS))
4563 fprintf (dump_file, " into ");
4564 print_gimple_stmt (dump_file, stmts[i], 0);
4568 remove_visited_stmt_chain (last_rhs1);
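/* Illustrative sketch, not part of this file: the source-level effect of
   a width-2 parallel rewrite of a four-operand chain.  The serial form
   is one dependent chain of three adds; the parallel form exposes two
   independent adds that can execute in the same cycle.  Names are made
   up for the example.  */

static int
serial_sum (int a, int b, int c, int d)
{
  return ((a + b) + c) + d;     /* dependency depth 3 */
}

static int
parallel_sum (int a, int b, int c, int d)
{
  int t1 = a + b;               /* independent of ...  */
  int t2 = c + d;               /* ... this add */
  return t1 + t2;               /* dependency depth 2 */
}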
4571 /* Transform STMT, which is really (A + B) + (C + D) into the left
4572 linear form, ((A+B)+C)+D.
4573 Recurse on D if necessary. */
4576 linearize_expr (gimple *stmt)
4578 gimple_stmt_iterator gsi;
4579 gimple *binlhs = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
4580 gimple *binrhs = SSA_NAME_DEF_STMT (gimple_assign_rhs2 (stmt));
4581 gimple *oldbinrhs = binrhs;
4582 enum tree_code rhscode = gimple_assign_rhs_code (stmt);
4583 gimple *newbinrhs = NULL;
4584 struct loop *loop = loop_containing_stmt (stmt);
4585 tree lhs = gimple_assign_lhs (stmt);
4587 gcc_assert (is_reassociable_op (binlhs, rhscode, loop)
4588 && is_reassociable_op (binrhs, rhscode, loop));
4590 gsi = gsi_for_stmt (stmt);
4592 gimple_assign_set_rhs2 (stmt, gimple_assign_rhs1 (binrhs));
4593 binrhs = gimple_build_assign (make_ssa_name (TREE_TYPE (lhs)),
4594 gimple_assign_rhs_code (binrhs),
4595 gimple_assign_lhs (binlhs),
4596 gimple_assign_rhs2 (binrhs));
4597 gimple_assign_set_rhs1 (stmt, gimple_assign_lhs (binrhs));
4598 gsi_insert_before (&gsi, binrhs, GSI_SAME_STMT);
4599 gimple_set_uid (binrhs, gimple_uid (stmt));
4601 if (TREE_CODE (gimple_assign_rhs2 (stmt)) == SSA_NAME)
4602 newbinrhs = SSA_NAME_DEF_STMT (gimple_assign_rhs2 (stmt));
4604 if (dump_file && (dump_flags & TDF_DETAILS))
4606 fprintf (dump_file, "Linearized: ");
4607 print_gimple_stmt (dump_file, stmt, 0);
4610 reassociate_stats.linearized++;
4613 gsi = gsi_for_stmt (oldbinrhs);
4614 reassoc_remove_stmt (&gsi);
4615 release_defs (oldbinrhs);
4617 gimple_set_visited (stmt, true);
4618 gimple_set_visited (binlhs, true);
4619 gimple_set_visited (binrhs, true);
4621 /* Tail recurse on the new rhs if it still needs reassociation. */
4622 if (newbinrhs && is_reassociable_op (newbinrhs, rhscode, loop))
4623 /* ??? This should probably be linearize_expr (newbinrhs) but I don't
4624 want to change the algorithm while converting to tuples. */
4625 linearize_expr (stmt);
4628 /* If LHS has a single immediate use that is a GIMPLE_ASSIGN statement, return
4629 it. Otherwise, return NULL. */
4632 get_single_immediate_use (tree lhs)
4634 use_operand_p immuse;
4637 if (TREE_CODE (lhs) == SSA_NAME
4638 && single_imm_use (lhs, &immuse, &immusestmt)
4639 && is_gimple_assign (immusestmt))
4645 /* Recursively negate the value of TONEGATE, and return the SSA_NAME
4646 representing the negated value. Insertions of any necessary
4647 instructions go before GSI.
4648 This function is recursive in that, if you hand it "a_5" as the
4649 value to negate, and a_5 is defined by "a_5 = b_3 + b_4", it will
4650 transform b_3 + b_4 into a_5 = -b_3 + -b_4. */
4653 negate_value (tree tonegate, gimple_stmt_iterator *gsip)
4655 gimple *negatedefstmt = NULL;
4656 tree resultofnegate;
4657 gimple_stmt_iterator gsi;
4660 /* If we are trying to negate a name, defined by an add, negate the
4661 add operands instead. */
4662 if (TREE_CODE (tonegate) == SSA_NAME)
4663 negatedefstmt = SSA_NAME_DEF_STMT (tonegate);
4664 if (TREE_CODE (tonegate) == SSA_NAME
4665 && is_gimple_assign (negatedefstmt)
4666 && TREE_CODE (gimple_assign_lhs (negatedefstmt)) == SSA_NAME
4667 && has_single_use (gimple_assign_lhs (negatedefstmt))
4668 && gimple_assign_rhs_code (negatedefstmt) == PLUS_EXPR)
4670 tree rhs1 = gimple_assign_rhs1 (negatedefstmt);
4671 tree rhs2 = gimple_assign_rhs2 (negatedefstmt);
4672 tree lhs = gimple_assign_lhs (negatedefstmt);
4675 gsi = gsi_for_stmt (negatedefstmt);
4676 rhs1 = negate_value (rhs1, &gsi);
4678 gsi = gsi_for_stmt (negatedefstmt);
4679 rhs2 = negate_value (rhs2, &gsi);
4681 gsi = gsi_for_stmt (negatedefstmt);
4682 lhs = make_ssa_name (TREE_TYPE (lhs));
4683 gimple_set_visited (negatedefstmt, true);
4684 g = gimple_build_assign (lhs, PLUS_EXPR, rhs1, rhs2);
4685 gimple_set_uid (g, gimple_uid (negatedefstmt));
4686 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
4690 tonegate = fold_build1 (NEGATE_EXPR, TREE_TYPE (tonegate), tonegate);
4691 resultofnegate = force_gimple_operand_gsi (gsip, tonegate, true,
4692 NULL_TREE, true, GSI_SAME_STMT);
4694 uid = gimple_uid (gsi_stmt (gsi));
4695 for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
4697 gimple *stmt = gsi_stmt (gsi);
4698 if (gimple_uid (stmt) != 0)
4700 gimple_set_uid (stmt, uid);
4702 return resultofnegate;
4705 /* Return true if we should break up the subtract in STMT into an add
4706 with negate. This is true when the subtract operands are really
4707 adds, or the subtract itself is used in an add expression. In
4708 either case, breaking up the subtract into an add with negate
4709 exposes the adds to reassociation. */
4712 should_break_up_subtract (gimple *stmt)
4714 tree lhs = gimple_assign_lhs (stmt);
4715 tree binlhs = gimple_assign_rhs1 (stmt);
4716 tree binrhs = gimple_assign_rhs2 (stmt);
4718 struct loop *loop = loop_containing_stmt (stmt);
4720 if (TREE_CODE (binlhs) == SSA_NAME
4721 && is_reassociable_op (SSA_NAME_DEF_STMT (binlhs), PLUS_EXPR, loop))
4724 if (TREE_CODE (binrhs) == SSA_NAME
4725 && is_reassociable_op (SSA_NAME_DEF_STMT (binrhs), PLUS_EXPR, loop))
4728 if (TREE_CODE (lhs) == SSA_NAME
4729 && (immusestmt = get_single_immediate_use (lhs))
4730 && is_gimple_assign (immusestmt)
4731 && (gimple_assign_rhs_code (immusestmt) == PLUS_EXPR
4732 || (gimple_assign_rhs_code (immusestmt) == MINUS_EXPR
4733 && gimple_assign_rhs1 (immusestmt) == lhs)
4734 || gimple_assign_rhs_code (immusestmt) == MULT_EXPR))
4739 /* Transform STMT from A - B into A + -B. */
4742 break_up_subtract (gimple *stmt, gimple_stmt_iterator *gsip)
4744 tree rhs1 = gimple_assign_rhs1 (stmt);
4745 tree rhs2 = gimple_assign_rhs2 (stmt);
4747 if (dump_file && (dump_flags & TDF_DETAILS))
4749 fprintf (dump_file, "Breaking up subtract ");
4750 print_gimple_stmt (dump_file, stmt, 0);
4753 rhs2 = negate_value (rhs2, gsip);
4754 gimple_assign_set_rhs_with_ops (gsip, PLUS_EXPR, rhs1, rhs2);
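/* Illustrative sketch, not part of this file: the source-level effect of
   should_break_up_subtract/break_up_subtract together with negate_value.
   Turning the subtract into an add of a negate puts every operand into a
   single PLUS chain, where the usual reassociation, sorting and
   cancellation can see them.  Names are made up for the example.  */

static int
subtract_as_written (int a, int b, int c)
{
  return (a - b) + c;           /* the subtract hides the add chain */
}

static int
subtract_broken_up (int a, int b, int c)
{
  return (a + -b) + c;          /* one flat chain: a + -b + c */
}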
4758 /* Determine whether STMT is a builtin call that raises an SSA name
4759 to an integer power and has only one use. If so, and this is early
4760 reassociation and unsafe math optimizations are permitted, place
4761 the SSA name in *BASE and the exponent in *EXPONENT, and return TRUE.
4762 If any of these conditions does not hold, return FALSE. */
4765 acceptable_pow_call (gcall *stmt, tree *base, HOST_WIDE_INT *exponent)
4768 REAL_VALUE_TYPE c, cint;
4770 switch (gimple_call_combined_fn (stmt))
4773 if (flag_errno_math)
4776 *base = gimple_call_arg (stmt, 0);
4777 arg1 = gimple_call_arg (stmt, 1);
4779 if (TREE_CODE (arg1) != REAL_CST)
4782 c = TREE_REAL_CST (arg1);
4784 if (REAL_EXP (&c) > HOST_BITS_PER_WIDE_INT)
4787 *exponent = real_to_integer (&c);
4788 real_from_integer (&cint, VOIDmode, *exponent, SIGNED);
4789 if (!real_identical (&c, &cint))
4795 *base = gimple_call_arg (stmt, 0);
4796 arg1 = gimple_call_arg (stmt, 1);
4798 if (!tree_fits_shwi_p (arg1))
4801 *exponent = tree_to_shwi (arg1);
4808 /* Expanding negative exponents is generally unproductive, so we don't
4809 complicate matters with those. Exponents of zero and one should
4810 have been handled by expression folding. */
4811 if (*exponent < 2 || TREE_CODE (*base) != SSA_NAME)
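/* Illustrative sketch, not part of this file: the kind of check the
   function above performs on a pow () exponent, modelled on a plain
   double instead of REAL_VALUE_TYPE.  An exponent qualifies only if it
   is an exact, reasonably small integer of at least 2, so pow (x, 4.0)
   qualifies while pow (x, 3.5) and pow (x, 1.0) do not.  The name and
   the range bound are made up.  */

static int
model_acceptable_exponent (double exponent, long long *out)
{
  /* Keep the cast below well defined; the real code instead compares
     REAL_EXP against HOST_BITS_PER_WIDE_INT.  */
  if (exponent < 2 || exponent > 1e15)
    return 0;

  long long n = (long long) exponent;
  if ((double) n != exponent)   /* not an exact integer */
    return 0;

  *out = n;
  return 1;
}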
4817 /* Try to derive and add operand entry for OP to *OPS. Return false if something prevents that and true otherwise. */
4821 try_special_add_to_ops (vec<operand_entry *> *ops,
4822 enum tree_code code,
4823 tree op, gimple *def_stmt)
4825 tree base = NULL_TREE;
4826 HOST_WIDE_INT exponent = 0;
4828 if (TREE_CODE (op) != SSA_NAME
4829 || ! has_single_use (op))
4832 if (code == MULT_EXPR
4833 && reassoc_insert_powi_p
4834 && flag_unsafe_math_optimizations
4835 && is_gimple_call (def_stmt)
4836 && acceptable_pow_call (as_a <gcall *> (def_stmt), &base, &exponent))
4838 add_repeat_to_ops_vec (ops, base, exponent);
4839 gimple_set_visited (def_stmt, true);
4842 else if (code == MULT_EXPR
4843 && is_gimple_assign (def_stmt)
4844 && gimple_assign_rhs_code (def_stmt) == NEGATE_EXPR
4845 && !HONOR_SNANS (TREE_TYPE (op))
4846 && (!HONOR_SIGNED_ZEROS (TREE_TYPE (op))
4847 || !COMPLEX_FLOAT_TYPE_P (TREE_TYPE (op))))
4849 tree rhs1 = gimple_assign_rhs1 (def_stmt);
4850 tree cst = build_minus_one_cst (TREE_TYPE (op));
4851 add_to_ops_vec (ops, rhs1);
4852 add_to_ops_vec (ops, cst);
4853 gimple_set_visited (def_stmt, true);
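/* Illustrative sketch, not part of this file: the identity behind the
   NEGATE_EXPR case above.  A negated multiplicand is recorded as the
   plain operand plus a separate -1 factor, so the rest of the pass can
   treat the negation like any other constant in the multiply chain.
   Names are made up for the example.  */

static int
negated_factor_as_written (int x, int y)
{
  return x * -y;
}

static int
negated_factor_as_ops (int x, int y)
{
  return x * y * -1;            /* operands x, y and -1 */
}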
4860 /* Recursively linearize a binary expression that is the RHS of STMT.
4861 Place the operands of the expression tree in the vector named OPS. */
4864 linearize_expr_tree (vec<operand_entry *> *ops, gimple *stmt,
4865 bool is_associative, bool set_visited)
4867 tree binlhs = gimple_assign_rhs1 (stmt);
4868 tree binrhs = gimple_assign_rhs2 (stmt);
4869 gimple *binlhsdef = NULL, *binrhsdef = NULL;
4870 bool binlhsisreassoc = false;
4871 bool binrhsisreassoc = false;
4872 enum tree_code rhscode = gimple_assign_rhs_code (stmt);
4873 struct loop *loop = loop_containing_stmt (stmt);
4876 gimple_set_visited (stmt, true);
4878 if (TREE_CODE (binlhs) == SSA_NAME)
4880 binlhsdef = SSA_NAME_DEF_STMT (binlhs);
4881 binlhsisreassoc = (is_reassociable_op (binlhsdef, rhscode, loop)
4882 && !stmt_could_throw_p (binlhsdef));
4885 if (TREE_CODE (binrhs) == SSA_NAME)
4887 binrhsdef = SSA_NAME_DEF_STMT (binrhs);
4888 binrhsisreassoc = (is_reassociable_op (binrhsdef, rhscode, loop)
4889 && !stmt_could_throw_p (binrhsdef));
4892 /* If the LHS is not reassociable, but the RHS is, we need to swap
4893 them. If neither is reassociable, there is nothing we can do, so
4894 just put them in the ops vector. If the LHS is reassociable,
4895 linearize it. If both are reassociable, then linearize the RHS and the LHS. */
4898 if (!binlhsisreassoc)
4900 /* If this is not an associative operation like division, give up. */
4901 if (!is_associative)
4903 add_to_ops_vec (ops, binrhs);
4907 if (!binrhsisreassoc)
4909 if (!try_special_add_to_ops (ops, rhscode, binrhs, binrhsdef))
4910 add_to_ops_vec (ops, binrhs);
4912 if (!try_special_add_to_ops (ops, rhscode, binlhs, binlhsdef))
4913 add_to_ops_vec (ops, binlhs);
4918 if (dump_file && (dump_flags & TDF_DETAILS))
4920 fprintf (dump_file, "swapping operands of ");
4921 print_gimple_stmt (dump_file, stmt, 0);
4924 swap_ssa_operands (stmt,
4925 gimple_assign_rhs1_ptr (stmt),
4926 gimple_assign_rhs2_ptr (stmt));
4929 if (dump_file && (dump_flags & TDF_DETAILS))
4931 fprintf (dump_file, " is now ");
4932 print_gimple_stmt (dump_file, stmt, 0);
4935 /* We want to make it so the lhs is always the reassociative op, so swap. */
4937 std::swap (binlhs, binrhs);
4939 else if (binrhsisreassoc)
4941 linearize_expr (stmt);
4942 binlhs = gimple_assign_rhs1 (stmt);
4943 binrhs = gimple_assign_rhs2 (stmt);
4946 gcc_assert (TREE_CODE (binrhs) != SSA_NAME
4947 || !is_reassociable_op (SSA_NAME_DEF_STMT (binrhs),
4949 linearize_expr_tree (ops, SSA_NAME_DEF_STMT (binlhs),
4950 is_associative, set_visited);
4952 if (!try_special_add_to_ops (ops, rhscode, binrhs, binrhsdef))
4953 add_to_ops_vec (ops, binrhs);
4956 /* Repropagate the negates back into subtracts, since no other pass
4957 currently does it. */
4960 repropagate_negates (void)
4965 FOR_EACH_VEC_ELT (plus_negates, i, negate)
4967 gimple *user = get_single_immediate_use (negate);
4969 if (!user || !is_gimple_assign (user))
4972 /* The negate operand can be either operand of a PLUS_EXPR
4973 (it can be the LHS if the RHS is a constant for example).
4975 Force the negate operand to the RHS of the PLUS_EXPR, then
4976 transform the PLUS_EXPR into a MINUS_EXPR. */
4977 if (gimple_assign_rhs_code (user) == PLUS_EXPR)
4979 /* If the negated operand appears on the LHS of the
4980 PLUS_EXPR, exchange the operands of the PLUS_EXPR
4981 to force the negated operand to the RHS of the PLUS_EXPR. */
4982 if (gimple_assign_rhs1 (user) == negate)
4984 swap_ssa_operands (user,
4985 gimple_assign_rhs1_ptr (user),
4986 gimple_assign_rhs2_ptr (user));
4989 /* Now transform the PLUS_EXPR into a MINUS_EXPR and replace
4990 the RHS of the PLUS_EXPR with the operand of the NEGATE_EXPR. */
4991 if (gimple_assign_rhs2 (user) == negate)
4993 tree rhs1 = gimple_assign_rhs1 (user);
4994 tree rhs2 = gimple_assign_rhs1 (SSA_NAME_DEF_STMT (negate));
4995 gimple_stmt_iterator gsi = gsi_for_stmt (user);
4996 gimple_assign_set_rhs_with_ops (&gsi, MINUS_EXPR, rhs1, rhs2);
5000 else if (gimple_assign_rhs_code (user) == MINUS_EXPR)
5002 if (gimple_assign_rhs1 (user) == negate)
5007 /* We have x = -a; y = x - b, which we transform into x = a + b; y = -x.
5010 This pushes down the negate which we possibly can merge
5011 into some other operation, hence insert it into the
5012 plus_negates vector. */
5013 gimple *feed = SSA_NAME_DEF_STMT (negate);
5014 tree a = gimple_assign_rhs1 (feed);
5015 tree b = gimple_assign_rhs2 (user);
5016 gimple_stmt_iterator gsi = gsi_for_stmt (feed);
5017 gimple_stmt_iterator gsi2 = gsi_for_stmt (user);
5018 tree x = make_ssa_name (TREE_TYPE (gimple_assign_lhs (feed)));
5019 gimple *g = gimple_build_assign (x, PLUS_EXPR, a, b);
5020 gsi_insert_before (&gsi2, g, GSI_SAME_STMT);
5021 gimple_assign_set_rhs_with_ops (&gsi2, NEGATE_EXPR, x);
5022 user = gsi_stmt (gsi2);
5024 reassoc_remove_stmt (&gsi);
5025 release_defs (feed);
5026 plus_negates.safe_push (gimple_assign_lhs (user));
5030 /* Transform "x = -a; y = b - x" into "y = b + a", getting
5031 rid of one operation. */
5032 gimple *feed = SSA_NAME_DEF_STMT (negate);
5033 tree a = gimple_assign_rhs1 (feed);
5034 tree rhs1 = gimple_assign_rhs1 (user);
5035 gimple_stmt_iterator gsi = gsi_for_stmt (user);
5036 gimple_assign_set_rhs_with_ops (&gsi, PLUS_EXPR, rhs1, a);
5037 update_stmt (gsi_stmt (gsi));
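/* Illustrative sketch, not part of this file: the source-level effect of
   the two MINUS_EXPR cases above.  A negate feeding the right-hand side
   of a subtract simply cancels; a negate feeding the left-hand side is
   pushed down into a fresh negate of a sum, which is queued on
   plus_negates for another round.  Names are made up for the example.  */

static int
negate_cancelled (int a, int b)
{
  int x = -a;
  return b - x;                 /* becomes b + a */
}

static int
negate_pushed_down (int a, int b)
{
  int x = -a;
  return x - b;                 /* becomes -(a + b) */
}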
5043 /* Returns true if OP is of a type for which we can do reassociation.
5044 That is for integral or non-saturating fixed-point types, and for
5045 floating point type when associative-math is enabled. */
5048 can_reassociate_p (tree op)
5050 tree type = TREE_TYPE (op);
5051 if (TREE_CODE (op) == SSA_NAME && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op))
5053 if ((ANY_INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_WRAPS (type))
5054 || NON_SAT_FIXED_POINT_TYPE_P (type)
5055 || (flag_associative_math && FLOAT_TYPE_P (type)))
5060 /* Break up subtract operations in block BB.
5062 We do this top down because we don't know whether the subtract is
5063 part of a possible chain of reassociation except at the top.
5072 we want to break up k = t - q, but we won't until we've transformed q
5073 = b - r, which won't be broken up until we transform b = c - d.
5075 En passant, clear the GIMPLE visited flag on every statement
5076 and set UIDs within each basic block. */
5079 break_up_subtract_bb (basic_block bb)
5081 gimple_stmt_iterator gsi;
5083 unsigned int uid = 1;
5085 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
5087 gimple *stmt = gsi_stmt (gsi);
5088 gimple_set_visited (stmt, false);
5089 gimple_set_uid (stmt, uid++);
5091 if (!is_gimple_assign (stmt)
5092 || !can_reassociate_p (gimple_assign_lhs (stmt)))
5095 /* Look for simple gimple subtract operations. */
5096 if (gimple_assign_rhs_code (stmt) == MINUS_EXPR)
5098 if (!can_reassociate_p (gimple_assign_rhs1 (stmt))
5099 || !can_reassociate_p (gimple_assign_rhs2 (stmt)))
5102 /* Check for a subtract used only in an addition. If this
5103 is the case, transform it into add of a negate for better
5104 reassociation. IE transform C = A-B into C = A + -B if C
5105 is only used in an addition. */
5106 if (should_break_up_subtract (stmt))
5107 break_up_subtract (stmt, &gsi);
5109 else if (gimple_assign_rhs_code (stmt) == NEGATE_EXPR
5110 && can_reassociate_p (gimple_assign_rhs1 (stmt)))
5111 plus_negates.safe_push (gimple_assign_lhs (stmt));
5113 for (son = first_dom_son (CDI_DOMINATORS, bb);
5115 son = next_dom_son (CDI_DOMINATORS, son))
5116 break_up_subtract_bb (son);
5119 /* Used for repeated factor analysis. */
5120 struct repeat_factor
5122 /* An SSA name that occurs in a multiply chain. */
5125 /* Cached rank of the factor. */
5128 /* Number of occurrences of the factor in the chain. */
5129 HOST_WIDE_INT count;
5131 /* An SSA name representing the product of this factor and
5132 all factors appearing later in the repeated factor vector. */
5137 static vec<repeat_factor> repeat_factor_vec;
5139 /* Used for sorting the repeat factor vector. Sort primarily by
5140 ascending occurrence count, secondarily by descending rank. */
5143 compare_repeat_factors (const void *x1, const void *x2)
5145 const repeat_factor *rf1 = (const repeat_factor *) x1;
5146 const repeat_factor *rf2 = (const repeat_factor *) x2;
5148 if (rf1->count != rf2->count)
5149 return rf1->count - rf2->count;
5151 return rf2->rank - rf1->rank;
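/* Illustrative sketch, not part of this file: how a comparator with the
   ordering above behaves under the C library qsort, using a stand-in
   struct that keeps only the two keys.  Factors with fewer occurrences
   sort first; ties are broken by higher rank first.  This variant avoids
   the overflow a plain subtraction could cause on wide counts.  All
   names are made up.  */

struct model_factor
{
  long count;
  long rank;
};

static int
model_compare_factors (const void *x1, const void *x2)
{
  const struct model_factor *a = (const struct model_factor *) x1;
  const struct model_factor *b = (const struct model_factor *) x2;

  if (a->count != b->count)
    return a->count < b->count ? -1 : 1;        /* ascending count */
  if (a->rank != b->rank)
    return a->rank > b->rank ? -1 : 1;          /* descending rank */
  return 0;
}

/* Usage: qsort (factors, n, sizeof (struct model_factor),
                 model_compare_factors);  */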
5154 /* Look for repeated operands in OPS in the multiply tree rooted at
5155 STMT. Replace them with an optimal sequence of multiplies and powi
5156 builtin calls, and remove the used operands from OPS. Return an
5157 SSA name representing the value of the replacement sequence. */
5160 attempt_builtin_powi (gimple *stmt, vec<operand_entry *> *ops)
5162 unsigned i, j, vec_len;
5165 repeat_factor *rf1, *rf2;
5166 repeat_factor rfnew;
5167 tree result = NULL_TREE;
5168 tree target_ssa, iter_result;
5169 tree type = TREE_TYPE (gimple_get_lhs (stmt));
5170 tree powi_fndecl = mathfn_built_in (type, BUILT_IN_POWI);
5171 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
5172 gimple *mul_stmt, *pow_stmt;
5174 /* Nothing to do if BUILT_IN_POWI doesn't exist for this type and target. */
5179 /* Allocate the repeated factor vector. */
5180 repeat_factor_vec.create (10);
5182 /* Scan the OPS vector for all SSA names in the product and build
5183 up a vector of occurrence counts for each factor. */
5184 FOR_EACH_VEC_ELT (*ops, i, oe)
5186 if (TREE_CODE (oe->op) == SSA_NAME)
5188 FOR_EACH_VEC_ELT (repeat_factor_vec, j, rf1)
5190 if (rf1->factor == oe->op)
5192 rf1->count += oe->count;
5197 if (j >= repeat_factor_vec.length ())
5199 rfnew.factor = oe->op;
5200 rfnew.rank = oe->rank;
5201 rfnew.count = oe->count;
5202 rfnew.repr = NULL_TREE;
5203 repeat_factor_vec.safe_push (rfnew);
5208 /* Sort the repeated factor vector by (a) increasing occurrence count,
5209 and (b) decreasing rank. */
5210 repeat_factor_vec.qsort (compare_repeat_factors);
5212 /* It is generally best to combine as many base factors as possible
5213 into a product before applying __builtin_powi to the result.
5214 However, the sort order chosen for the repeated factor vector
5215 allows us to cache partial results for the product of the base
5216 factors for subsequent use. When we already have a cached partial
5217 result from a previous iteration, it is best to make use of it
5218 before looking for another __builtin_pow opportunity.
5220 As an example, consider x * x * y * y * y * z * z * z * z.
5221 We want to first compose the product x * y * z, raise it to the
5222 second power, then multiply this by y * z, and finally multiply
5223 by z. This can be done in 5 multiplies provided we cache y * z
5224 for use in both expressions:
5232 If we instead ignored the cached y * z and first multiplied by
5233 the __builtin_pow opportunity z * z, we would get an inferior six-multiply sequence. */
5242 vec_len = repeat_factor_vec.length ();
5244 /* Repeatedly look for opportunities to create a builtin_powi call. */
5247 HOST_WIDE_INT power;
5249 /* First look for the largest cached product of factors from
5250 preceding iterations. If found, create a builtin_powi for
5251 it if the minimum occurrence count for its factors is at
5252 least 2, or just use this cached product as our next
5253 multiplicand if the minimum occurrence count is 1. */
5254 FOR_EACH_VEC_ELT (repeat_factor_vec, j, rf1)
5256 if (rf1->repr && rf1->count > 0)
5266 iter_result = rf1->repr;
5268 if (dump_file && (dump_flags & TDF_DETAILS))
5272 fputs ("Multiplying by cached product ", dump_file);
5273 for (elt = j; elt < vec_len; elt++)
5275 rf = &repeat_factor_vec[elt];
5276 print_generic_expr (dump_file, rf->factor);
5277 if (elt < vec_len - 1)
5278 fputs (" * ", dump_file);
5280 fputs ("\n", dump_file);
5285 iter_result = make_temp_ssa_name (type, NULL, "reassocpow");
5286 pow_stmt = gimple_build_call (powi_fndecl, 2, rf1->repr,
5287 build_int_cst (integer_type_node,
5289 gimple_call_set_lhs (pow_stmt, iter_result);
5290 gimple_set_location (pow_stmt, gimple_location (stmt));
5291 gimple_set_uid (pow_stmt, gimple_uid (stmt));
5292 gsi_insert_before (&gsi, pow_stmt, GSI_SAME_STMT);
5294 if (dump_file && (dump_flags & TDF_DETAILS))
5298 fputs ("Building __builtin_pow call for cached product (",
5300 for (elt = j; elt < vec_len; elt++)
5302 rf = &repeat_factor_vec[elt];
5303 print_generic_expr (dump_file, rf->factor);
5304 if (elt < vec_len - 1)
5305 fputs (" * ", dump_file);
5307 fprintf (dump_file, ")^" HOST_WIDE_INT_PRINT_DEC "\n",
5314 /* Otherwise, find the first factor in the repeated factor
5315 vector whose occurrence count is at least 2. If no such
5316 factor exists, there are no builtin_powi opportunities remaining. */
5318 FOR_EACH_VEC_ELT (repeat_factor_vec, j, rf1)
5320 if (rf1->count >= 2)
5329 if (dump_file && (dump_flags & TDF_DETAILS))
5333 fputs ("Building __builtin_pow call for (", dump_file);
5334 for (elt = j; elt < vec_len; elt++)
5336 rf = &repeat_factor_vec[elt];
5337 print_generic_expr (dump_file, rf->factor);
5338 if (elt < vec_len - 1)
5339 fputs (" * ", dump_file);
5341 fprintf (dump_file, ")^" HOST_WIDE_INT_PRINT_DEC "\n", power);
5344 reassociate_stats.pows_created++;
5346 /* Visit each element of the vector in reverse order (so that
5347 high-occurrence elements are visited first, and within the
5348 same occurrence count, lower-ranked elements are visited
5349 first). Form a linear product of all elements in this order
5350 whose occurrence count is at least that of element J.
5351 Record the SSA name representing the product of each element
5352 with all subsequent elements in the vector. */
5353 if (j == vec_len - 1)
5354 rf1->repr = rf1->factor;
5357 for (ii = vec_len - 2; ii >= (int)j; ii--)
5361 rf1 = &repeat_factor_vec[ii];
5362 rf2 = &repeat_factor_vec[ii + 1];
5364 /* Init the last factor's representative to be itself. */
5366 rf2->repr = rf2->factor;
5371 target_ssa = make_temp_ssa_name (type, NULL, "reassocpow");
5372 mul_stmt = gimple_build_assign (target_ssa, MULT_EXPR,
5374 gimple_set_location (mul_stmt, gimple_location (stmt));
5375 gimple_set_uid (mul_stmt, gimple_uid (stmt));
5376 gsi_insert_before (&gsi, mul_stmt, GSI_SAME_STMT);
5377 rf1->repr = target_ssa;
5379 /* Don't reprocess the multiply we just introduced. */
5380 gimple_set_visited (mul_stmt, true);
5384 /* Form a call to __builtin_powi for the maximum product
5385 just formed, raised to the power obtained earlier. */
5386 rf1 = &repeat_factor_vec[j];
5387 iter_result = make_temp_ssa_name (type, NULL, "reassocpow");
5388 pow_stmt = gimple_build_call (powi_fndecl, 2, rf1->repr,
5389 build_int_cst (integer_type_node,
5391 gimple_call_set_lhs (pow_stmt, iter_result);
5392 gimple_set_location (pow_stmt, gimple_location (stmt));
5393 gimple_set_uid (pow_stmt, gimple_uid (stmt));
5394 gsi_insert_before (&gsi, pow_stmt, GSI_SAME_STMT);
5397 /* If we previously formed at least one other builtin_powi call,
5398 form the product of this one and those others. */
5401 tree new_result = make_temp_ssa_name (type, NULL, "reassocpow");
5402 mul_stmt = gimple_build_assign (new_result, MULT_EXPR,
5403 result, iter_result);
5404 gimple_set_location (mul_stmt, gimple_location (stmt));
5405 gimple_set_uid (mul_stmt, gimple_uid (stmt));
5406 gsi_insert_before (&gsi, mul_stmt, GSI_SAME_STMT);
5407 gimple_set_visited (mul_stmt, true);
5408 result = new_result;
5411 result = iter_result;
5413 /* Decrement the occurrence count of each element in the product
5414 by the count found above, and remove this many copies of each factor from OPS. */
5416 for (i = j; i < vec_len; i++)
5421 rf1 = &repeat_factor_vec[i];
5422 rf1->count -= power;
5424 FOR_EACH_VEC_ELT_REVERSE (*ops, n, oe)
5426 if (oe->op == rf1->factor)
5430 ops->ordered_remove (n);
5446 /* At this point all elements in the repeated factor vector have a
5447 remaining occurrence count of 0 or 1, and those with a count of 1
5448 don't have cached representatives. Re-sort the ops vector and release the repeated factor vector. */
5450 ops->qsort (sort_by_operand_rank);
5451 repeat_factor_vec.release ();
5453 /* Return the final product computed herein. Note that there may
5454 still be some elements with single occurrence count left in OPS;
5455 those will be handled by the normal reassociation logic. */
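/* Illustrative sketch, not part of this file: the five-multiply schedule
   that the caching comment near the top of attempt_builtin_powi
   describes for x * x * y * y * y * z * z * z * z, written out on plain
   doubles.  The cached y * z product is reused, and the squaring stands
   in for the __builtin_powi call the pass would emit.  */

static double
five_multiply_schedule (double x, double y, double z)
{
  double t1 = y * z;            /* cached partial product */
  double t2 = t1 * x;           /* x * y * z */
  double t3 = t2 * t2;          /* (x*y*z)^2 = x^2 * y^2 * z^2 */
  double t4 = t3 * t1;          /* ... * y*z = x^2 * y^3 * z^3 */
  return t4 * z;                /* ... * z   = x^2 * y^3 * z^4 */
}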
5459 /* Attempt to optimize
5460 CST1 * copysign (CST2, y) -> copysign (CST1 * CST2, y) if CST1 > 0, or
5461 CST1 * copysign (CST2, y) -> -copysign (CST1 * CST2, y) if CST1 < 0. */
5464 attempt_builtin_copysign (vec<operand_entry *> *ops)
5468 unsigned int length = ops->length ();
5469 tree cst = ops->last ()->op;
5471 if (length == 1 || TREE_CODE (cst) != REAL_CST)
5474 FOR_EACH_VEC_ELT (*ops, i, oe)
5476 if (TREE_CODE (oe->op) == SSA_NAME
5477 && has_single_use (oe->op))
5479 gimple *def_stmt = SSA_NAME_DEF_STMT (oe->op);
5480 if (gcall *old_call = dyn_cast <gcall *> (def_stmt))
5483 switch (gimple_call_combined_fn (old_call))
5486 arg0 = gimple_call_arg (old_call, 0);
5487 arg1 = gimple_call_arg (old_call, 1);
5488 /* The first argument of copysign must be a constant,
5489 otherwise there's nothing to do. */
5490 if (TREE_CODE (arg0) == REAL_CST)
5492 tree type = TREE_TYPE (arg0);
5493 tree mul = const_binop (MULT_EXPR, type, cst, arg0);
5494 /* If we couldn't fold to a single constant, skip it.
5495 That happens e.g. for inexact multiplication when -frounding-math is in effect. */
5497 if (mul == NULL_TREE)
5499 /* Instead of adjusting OLD_CALL, let's build a new
5500 call to not leak the LHS and prevent keeping bogus
5501 debug statements. DCE will clean up the old call. */
5503 if (gimple_call_internal_p (old_call))
5504 new_call = gimple_build_call_internal
5505 (IFN_COPYSIGN, 2, mul, arg1);
5507 new_call = gimple_build_call
5508 (gimple_call_fndecl (old_call), 2, mul, arg1);
5509 tree lhs = make_ssa_name (type);
5510 gimple_call_set_lhs (new_call, lhs);
5511 gimple_set_location (new_call,
5512 gimple_location (old_call));
5513 insert_stmt_after (new_call, old_call);
5514 /* We've used the constant, get rid of it. */
5516 bool cst1_neg = real_isneg (TREE_REAL_CST_PTR (cst));
5517 /* Handle the CST1 < 0 case by negating the result. */
5520 tree negrhs = make_ssa_name (TREE_TYPE (lhs));
5522 = gimple_build_assign (negrhs, NEGATE_EXPR, lhs);
5523 insert_stmt_after (negate_stmt, new_call);
5528 if (dump_file && (dump_flags & TDF_DETAILS))
5530 fprintf (dump_file, "Optimizing copysign: ");
5531 print_generic_expr (dump_file, cst);
5532 fprintf (dump_file, " * COPYSIGN (");
5533 print_generic_expr (dump_file, arg0);
5534 fprintf (dump_file, ", ");
5535 print_generic_expr (dump_file, arg1);
5536 fprintf (dump_file, ") into %sCOPYSIGN (",
5537 cst1_neg ? "-" : "");
5538 print_generic_expr (dump_file, mul);
5539 fprintf (dump_file, ", ");
5540 print_generic_expr (dump_file, arg1);
5541 fprintf (dump_file, "\n");
5554 /* Transform STMT at *GSI into a copy by replacing its rhs with NEW_RHS. */
5557 transform_stmt_to_copy (gimple_stmt_iterator *gsi, gimple *stmt, tree new_rhs)
5561 if (dump_file && (dump_flags & TDF_DETAILS))
5563 fprintf (dump_file, "Transforming ");
5564 print_gimple_stmt (dump_file, stmt, 0);
5567 rhs1 = gimple_assign_rhs1 (stmt);
5568 gimple_assign_set_rhs_from_tree (gsi, new_rhs);
5570 remove_visited_stmt_chain (rhs1);
5572 if (dump_file && (dump_flags & TDF_DETAILS))
5574 fprintf (dump_file, " into ");
5575 print_gimple_stmt (dump_file, stmt, 0);
5579 /* Transform STMT at *GSI into a multiply of RHS1 and RHS2. */
5582 transform_stmt_to_multiply (gimple_stmt_iterator *gsi, gimple *stmt,
5583 tree rhs1, tree rhs2)
5585 if (dump_file && (dump_flags & TDF_DETAILS))
5587 fprintf (dump_file, "Transforming ");
5588 print_gimple_stmt (dump_file, stmt, 0);
5591 gimple_assign_set_rhs_with_ops (gsi, MULT_EXPR, rhs1, rhs2);
5592 update_stmt (gsi_stmt (*gsi));
5593 remove_visited_stmt_chain (rhs1);
5595 if (dump_file && (dump_flags & TDF_DETAILS))
5597 fprintf (dump_file, " into ");
5598 print_gimple_stmt (dump_file, stmt, 0);
5602 /* Reassociate expressions in basic block BB and its post-dominator as children.
5605 Bubble up return status from maybe_optimize_range_tests. */
5608 reassociate_bb (basic_block bb)
5610 gimple_stmt_iterator gsi;
5612 gimple *stmt = last_stmt (bb);
5613 bool cfg_cleanup_needed = false;
5615 if (stmt && !gimple_visited_p (stmt))
5616 cfg_cleanup_needed |= maybe_optimize_range_tests (stmt);
5618 for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
5620 stmt = gsi_stmt (gsi);
5622 if (is_gimple_assign (stmt)
5623 && !stmt_could_throw_p (stmt))
5625 tree lhs, rhs1, rhs2;
5626 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
5628 /* If this is not a gimple binary expression, there is
5629 nothing for us to do with it. */
5630 if (get_gimple_rhs_class (rhs_code) != GIMPLE_BINARY_RHS)
5633 /* If this was part of an already processed statement,
5634 we don't need to touch it again. */
5635 if (gimple_visited_p (stmt))
5637 /* This statement might have become dead because of previous reassociations. */
5639 if (has_zero_uses (gimple_get_lhs (stmt)))
5641 reassoc_remove_stmt (&gsi);
5642 release_defs (stmt);
5643 /* We might end up removing the last stmt above which
5644 places the iterator to the end of the sequence.
5645 Reset it to the last stmt in this case which might
5646 be the end of the sequence as well if we removed
5647 the last statement of the sequence. In which case
5648 we need to bail out. */
5649 if (gsi_end_p (gsi))
5651 gsi = gsi_last_bb (bb);
5652 if (gsi_end_p (gsi))
5659 lhs = gimple_assign_lhs (stmt);
5660 rhs1 = gimple_assign_rhs1 (stmt);
5661 rhs2 = gimple_assign_rhs2 (stmt);
5663 /* For non-bit or min/max operations we can't associate
5664 all types. Verify that here. */
5665 if (rhs_code != BIT_IOR_EXPR
5666 && rhs_code != BIT_AND_EXPR
5667 && rhs_code != BIT_XOR_EXPR
5668 && rhs_code != MIN_EXPR
5669 && rhs_code != MAX_EXPR
5670 && (!can_reassociate_p (lhs)
5671 || !can_reassociate_p (rhs1)
5672 || !can_reassociate_p (rhs2)))
5675 if (associative_tree_code (rhs_code))
5677 auto_vec<operand_entry *> ops;
5678 tree powi_result = NULL_TREE;
5679 bool is_vector = VECTOR_TYPE_P (TREE_TYPE (lhs));
5681 /* There may be no immediate uses left by the time we
5682 get here because we may have eliminated them all. */
5683 if (TREE_CODE (lhs) == SSA_NAME && has_zero_uses (lhs))
5686 gimple_set_visited (stmt, true);
5687 linearize_expr_tree (&ops, stmt, true, true);
5688 ops.qsort (sort_by_operand_rank);
5689 int orig_len = ops.length ();
5690 optimize_ops_list (rhs_code, &ops);
5691 if (undistribute_ops_list (rhs_code, &ops,
5692 loop_containing_stmt (stmt)))
5694 ops.qsort (sort_by_operand_rank);
5695 optimize_ops_list (rhs_code, &ops);
5698 if (rhs_code == PLUS_EXPR
5699 && transform_add_to_multiply (&ops))
5700 ops.qsort (sort_by_operand_rank);
5702 if (rhs_code == BIT_IOR_EXPR || rhs_code == BIT_AND_EXPR)
5705 optimize_vec_cond_expr (rhs_code, &ops);
5707 optimize_range_tests (rhs_code, &ops);
5710 if (rhs_code == MULT_EXPR && !is_vector)
5712 attempt_builtin_copysign (&ops);
5714 if (reassoc_insert_powi_p
5715 && flag_unsafe_math_optimizations)
5716 powi_result = attempt_builtin_powi (stmt, &ops);
5719 operand_entry *last;
5720 bool negate_result = false;
5721 if (ops.length () > 1
5722 && rhs_code == MULT_EXPR)
5725 if ((integer_minus_onep (last->op)
5726 || real_minus_onep (last->op))
5727 && !HONOR_SNANS (TREE_TYPE (lhs))
5728 && (!HONOR_SIGNED_ZEROS (TREE_TYPE (lhs))
5729 || !COMPLEX_FLOAT_TYPE_P (TREE_TYPE (lhs))))
5732 negate_result = true;
5737 /* If the operand vector is now empty, all operands were
5738 consumed by the __builtin_powi optimization. */
5739 if (ops.length () == 0)
5740 transform_stmt_to_copy (&gsi, stmt, powi_result);
5741 else if (ops.length () == 1)
5743 tree last_op = ops.last ()->op;
5745 /* If the stmt that defines operand has to be inserted, insert it before the use. */
5747 if (ops.last ()->stmt_to_insert)
5748 insert_stmt_before_use (stmt, ops.last ()->stmt_to_insert);
5750 transform_stmt_to_multiply (&gsi, stmt, last_op,
5753 transform_stmt_to_copy (&gsi, stmt, last_op);
5757 machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
5758 int ops_num = ops.length ();
5759 int width = get_reassociation_width (ops_num, rhs_code, mode);
5761 if (dump_file && (dump_flags & TDF_DETAILS))
5763 "Width = %d was chosen for reassociation\n", width);
5766 /* For binary bit operations, if the last operand in
5767 OPS is a constant, move it to the front. This
5768 helps ensure that we generate (X & Y) & C rather
5769 than (X & C) & Y. The former will often match
5770 a canonical bit test when we get to RTL. */
5771 if ((rhs_code == BIT_AND_EXPR
5772 || rhs_code == BIT_IOR_EXPR
5773 || rhs_code == BIT_XOR_EXPR)
5774 && TREE_CODE (ops.last ()->op) == INTEGER_CST)
5775 std::swap (*ops[0], *ops[ops_num - 1]);
&& ops.length () > 3)
5779 rewrite_expr_tree_parallel (as_a <gassign *> (stmt),
5783 /* When there are three operands left, we want
5784 to make sure the ones that get the double
5785 binary op are chosen wisely. */
5786 int len = ops.length ();
5788 swap_ops_for_binary_stmt (ops, len - 3, stmt);
5790 new_lhs = rewrite_expr_tree (stmt, 0, ops,
5796 /* If we combined some repeated factors into a
5797 __builtin_powi call, multiply that result by the
5798 reassociated operands. */
5801 gimple *mul_stmt, *lhs_stmt = SSA_NAME_DEF_STMT (lhs);
5802 tree type = TREE_TYPE (lhs);
5803 tree target_ssa = make_temp_ssa_name (type, NULL,
5805 gimple_set_lhs (lhs_stmt, target_ssa);
5806 update_stmt (lhs_stmt);
5809 target_ssa = new_lhs;
5812 mul_stmt = gimple_build_assign (lhs, MULT_EXPR,
5813 powi_result, target_ssa);
5814 gimple_set_location (mul_stmt, gimple_location (stmt));
5815 gimple_set_uid (mul_stmt, gimple_uid (stmt));
5816 gsi_insert_after (&gsi, mul_stmt, GSI_NEW_STMT);
5822 stmt = SSA_NAME_DEF_STMT (lhs);
5823 tree tmp = make_ssa_name (TREE_TYPE (lhs));
5824 gimple_set_lhs (stmt, tmp);
5827 gassign *neg_stmt = gimple_build_assign (lhs, NEGATE_EXPR,
5829 gimple_set_uid (neg_stmt, gimple_uid (stmt));
5830 gsi_insert_after (&gsi, neg_stmt, GSI_NEW_STMT);
5836 for (son = first_dom_son (CDI_POST_DOMINATORS, bb);
5838 son = next_dom_son (CDI_POST_DOMINATORS, son))
5839 cfg_cleanup_needed |= reassociate_bb (son);
5841 return cfg_cleanup_needed;
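/* Illustrative sketch, not part of this file: why reassociate_bb moves a
   trailing INTEGER_CST to the front of the operand vector for bit
   operations.  After the rewrite the constant is applied last, so RTL
   expansion sees the (X & Y) & C shape that matches a canonical bit
   test.  Names are made up for the example.  */

static int
bit_test_constant_early (int x, int y)
{
  return (x & 0x10) & y;        /* shape the reordering avoids */
}

static int
bit_test_constant_last (int x, int y)
{
  return (x & y) & 0x10;        /* canonical bit-test shape */
}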
5844 /* Add jumps around shifts for range tests turned into bit tests.
5845 For each SSA_NAME VAR we have code like:
5846 VAR = ...; // final stmt of range comparison
5847 // bit test here...;
5848 OTHERVAR = ...; // final stmt of the bit test sequence
5849 RES = VAR | OTHERVAR;
5850 Turn the above into:
5857 // bit test here...;
5860 # RES = PHI<1(l1), OTHERVAR(l2)>; */
5868 FOR_EACH_VEC_ELT (reassoc_branch_fixups, i, var)
5870 gimple *def_stmt = SSA_NAME_DEF_STMT (var);
5873 bool ok = single_imm_use (var, &use, &use_stmt);
5875 && is_gimple_assign (use_stmt)
5876 && gimple_assign_rhs_code (use_stmt) == BIT_IOR_EXPR
5877 && gimple_bb (def_stmt) == gimple_bb (use_stmt));
5879 basic_block cond_bb = gimple_bb (def_stmt);
5880 basic_block then_bb = split_block (cond_bb, def_stmt)->dest;
5881 basic_block merge_bb = split_block (then_bb, use_stmt)->dest;
5883 gimple_stmt_iterator gsi = gsi_for_stmt (def_stmt);
5884 gimple *g = gimple_build_cond (NE_EXPR, var,
5885 build_zero_cst (TREE_TYPE (var)),
5886 NULL_TREE, NULL_TREE);
5887 location_t loc = gimple_location (use_stmt);
5888 gimple_set_location (g, loc);
5889 gsi_insert_after (&gsi, g, GSI_NEW_STMT);
5891 edge etrue = make_edge (cond_bb, merge_bb, EDGE_TRUE_VALUE);
5892 etrue->probability = profile_probability::even ();
5893 etrue->count = cond_bb->count.apply_scale (1, 2);
5894 edge efalse = find_edge (cond_bb, then_bb);
5895 efalse->flags = EDGE_FALSE_VALUE;
5896 efalse->probability -= etrue->probability;
5897 efalse->count -= etrue->count;
5898 then_bb->count -= etrue->count;
5900 tree othervar = NULL_TREE;
5901 if (gimple_assign_rhs1 (use_stmt) == var)
5902 othervar = gimple_assign_rhs2 (use_stmt);
5903 else if (gimple_assign_rhs2 (use_stmt) == var)
5904 othervar = gimple_assign_rhs1 (use_stmt);
5907 tree lhs = gimple_assign_lhs (use_stmt);
5908 gphi *phi = create_phi_node (lhs, merge_bb);
5909 add_phi_arg (phi, build_one_cst (TREE_TYPE (lhs)), etrue, loc);
5910 add_phi_arg (phi, othervar, single_succ_edge (then_bb), loc);
5911 gsi = gsi_for_stmt (use_stmt);
5912 gsi_remove (&gsi, true);
5914 set_immediate_dominator (CDI_DOMINATORS, merge_bb, cond_bb);
5915 set_immediate_dominator (CDI_POST_DOMINATORS, cond_bb, merge_bb);
5917 reassoc_branch_fixups.release ();
5920 void dump_ops_vector (FILE *file, vec<operand_entry *> ops);
5921 void debug_ops_vector (vec<operand_entry *> ops);
5923 /* Dump the operand entry vector OPS to FILE. */
5926 dump_ops_vector (FILE *file, vec<operand_entry *> ops)
5931 FOR_EACH_VEC_ELT (ops, i, oe)
5933 fprintf (file, "Op %d -> rank: %d, tree: ", i, oe->rank);
5934 print_generic_expr (file, oe->op);
5935 fprintf (file, "\n");
5939 /* Dump the operand entry vector OPS to STDERR. */
5942 debug_ops_vector (vec<operand_entry *> ops)
5944 dump_ops_vector (stderr, ops);
5947 /* Bubble up return status from reassociate_bb. */
5952 break_up_subtract_bb (ENTRY_BLOCK_PTR_FOR_FN (cfun));
5953 return reassociate_bb (EXIT_BLOCK_PTR_FOR_FN (cfun));
5956 /* Initialize the reassociation pass. */
5963 int *bbs = XNEWVEC (int, n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS);
5965 /* Find the loops, so that we can prevent moving calculations in them. */
5967 loop_optimizer_init (AVOID_CFG_MODIFICATIONS);
5969 memset (&reassociate_stats, 0, sizeof (reassociate_stats));
5971 next_operand_entry_id = 0;
5973 /* Reverse RPO (Reverse Post Order) will give us something where
5974 deeper loops come later. */
5975 pre_and_rev_post_order_compute (NULL, bbs, false);
5976 bb_rank = XCNEWVEC (long, last_basic_block_for_fn (cfun));
5977 operand_rank = new hash_map<tree, long>;
5979 /* Give each default definition a distinct rank. This includes
5980 parameters and the static chain. Walk backwards over all
5981 SSA names so that we get proper rank ordering according
5982 to tree_swap_operands_p. */
5983 for (i = num_ssa_names - 1; i > 0; --i)
5985 tree name = ssa_name (i);
5986 if (name && SSA_NAME_IS_DEFAULT_DEF (name))
5987 insert_operand_rank (name, ++rank);
5990 /* Set up rank for each BB */
5991 for (i = 0; i < n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS; i++)
5992 bb_rank[bbs[i]] = ++rank << 16;
5995 calculate_dominance_info (CDI_POST_DOMINATORS);
5996 plus_negates = vNULL;
5999 /* Cleanup after the reassociation pass, and print stats if requested. */
6005 statistics_counter_event (cfun, "Linearized",
6006 reassociate_stats.linearized);
6007 statistics_counter_event (cfun, "Constants eliminated",
6008 reassociate_stats.constants_eliminated);
6009 statistics_counter_event (cfun, "Ops eliminated",
6010 reassociate_stats.ops_eliminated);
6011 statistics_counter_event (cfun, "Statements rewritten",
6012 reassociate_stats.rewritten);
6013 statistics_counter_event (cfun, "Built-in pow[i] calls encountered",
6014 reassociate_stats.pows_encountered);
6015 statistics_counter_event (cfun, "Built-in powi calls created",
6016 reassociate_stats.pows_created);
6018 delete operand_rank;
6019 operand_entry_pool.release ();
6021 plus_negates.release ();
6022 free_dominance_info (CDI_POST_DOMINATORS);
6023 loop_optimizer_finalize ();
6026 /* Gate and execute functions for Reassociation. If INSERT_POWI_P, enable
6027 insertion of __builtin_powi calls.
6029 Returns TODO_cfg_cleanup if a CFG cleanup pass is desired due to
6030 optimization of a gimple conditional. Otherwise returns zero. */
6033 execute_reassoc (bool insert_powi_p)
6035 reassoc_insert_powi_p = insert_powi_p;
6039 bool cfg_cleanup_needed;
6040 cfg_cleanup_needed = do_reassoc ();
6041 repropagate_negates ();
6045 return cfg_cleanup_needed ? TODO_cleanup_cfg : 0;
6050 const pass_data pass_data_reassoc =
6052 GIMPLE_PASS, /* type */
6053 "reassoc", /* name */
6054 OPTGROUP_NONE, /* optinfo_flags */
6055 TV_TREE_REASSOC, /* tv_id */
6056 ( PROP_cfg | PROP_ssa ), /* properties_required */
6057 0, /* properties_provided */
6058 0, /* properties_destroyed */
6059 0, /* todo_flags_start */
6060 TODO_update_ssa_only_virtuals, /* todo_flags_finish */
6063 class pass_reassoc : public gimple_opt_pass
6066 pass_reassoc (gcc::context *ctxt)
6067 : gimple_opt_pass (pass_data_reassoc, ctxt), insert_powi_p (false)
6070 /* opt_pass methods: */
6071 opt_pass * clone () { return new pass_reassoc (m_ctxt); }
6072 void set_pass_param (unsigned int n, bool param)
6074 gcc_assert (n == 0);
6075 insert_powi_p = param;
6077 virtual bool gate (function *) { return flag_tree_reassoc != 0; }
6078 virtual unsigned int execute (function *)
6079 { return execute_reassoc (insert_powi_p); }
6082 /* Enable insertion of __builtin_powi calls during execute_reassoc. See
6083 point 3a in the pass header comment. */
6085 }; // class pass_reassoc
6090 make_pass_reassoc (gcc::context *ctxt)
6092 return new pass_reassoc (ctxt);