/* Global, SSA-based optimizations using mathematical identities.
   Copyright (C) 2005-2024 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* Currently, the only mini-pass in this file tries to CSE reciprocal
   operations.  These are common in sequences such as this one:

	modulus = sqrt(x*x + y*y + z*z);
	x = x / modulus;
	y = y / modulus;
	z = z / modulus;

   that can be optimized to

	modulus = sqrt(x*x + y*y + z*z);
	rmodulus = 1.0 / modulus;
	x = x * rmodulus;
	y = y * rmodulus;
	z = z * rmodulus;

   We do this for loop invariant divisors, and with this pass whenever
   we notice that a division has the same divisor multiple times.

   Of course, like in PRE, we don't insert a division if a dominator
   already has one.  However, this cannot be done as an extension of
   PRE for several reasons.

   First of all, with some experiments it was found out that the
   transformation is not always useful if there are only two divisions
   by the same divisor.  This is probably because modern processors
   can pipeline the divisions; on older, in-order processors it should
   still be effective to optimize two divisions by the same number.
   We make this a param, and it shall be called N in the remainder of
   this comment.

   Second, if trapping math is active, we have less freedom on where
   to insert divisions: we can only do so in basic blocks that already
   contain one.  (If divisions don't trap, instead, we can insert
   divisions elsewhere, which will be in blocks that are common dominators
   of those that have the division).

   We really don't want to compute the reciprocal unless a division will
   be found.  To do this, we won't insert the division in a basic block
   that has fewer than N divisions *post-dominating* it.

   The algorithm constructs a subset of the dominator tree, holding the
   blocks containing the divisions and the common dominators to them,
   and walks it twice.  The first walk is in post-order, and it annotates
   each block with the number of divisions that post-dominate it: this
   gives information on where divisions can be inserted profitably.
   The second walk is in pre-order, and it inserts divisions as explained
   above, and replaces divisions by multiplications.

   In the best case, the cost of the pass is O(n_statements).  In the
   worst case, the cost is due to creating the dominator tree subset,
   with a cost of O(n_basic_blocks ^ 2); however this can only happen
   for n_statements / n_basic_blocks statements.  So, the amortized cost
   of creating the dominator tree subset is O(n_basic_blocks) and the
   worst-case cost of the pass is O(n_statements * n_basic_blocks).

   More practically, the cost will be small because there are few
   divisions, and they tend to be in the same basic block, so insert_bb
   is called very few times.

   If we did this using domwalk.cc, an efficient implementation would have
   to work on all the variables in a single pass, because we could not
   work on just a subset of the dominator tree, as we do now, and the
   cost would also be something like O(n_statements * n_basic_blocks).
   The data structures would be more complex in order to work on all the
   variables in a single pass.  */
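/* An illustrative sketch of the insertion rule above (added for
   exposition; not from the original sources).  Consider a divisor d
   computed in B0, with one division in each arm of a conditional:

       B0:  d = ...;  if (p) goto B1; else goto B2;
       B1:  x = a / d;
       B2:  y = b / d;

   Neither B1 nor B2 post-dominates B0, so with N = 2 no block has two
   divisions post-dominating it and no reciprocal is inserted: on any
   single execution path only one division runs.  If instead B1 alone
   contained two divisions by d, the reciprocal 1.0 / d would be
   computed once at the top of B1 and both divisions rewritten into
   multiplications, subject to the trapping-math restriction above.  */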
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "alloc-pool.h"
#include "tree-pass.h"
#include "optabs-tree.h"
#include "gimple-pretty-print.h"
#include "fold-const.h"
#include "gimple-iterator.h"
#include "gimple-fold.h"
#include "gimplify.h"
#include "gimplify-me.h"
#include "stor-layout.h"
#include "tree-cfg.h"
#include "tree-dfa.h"
#include "tree-ssa.h"
#include "builtins.h"
#include "internal-fn.h"
#include "case-cfn-macros.h"
#include "optabs-libfuncs.h"
#include "targhooks.h"
#include "tree-ssa-math-opts.h"
/* This structure represents one basic block that either computes a
   division, or is a common dominator for basic blocks that compute a
   division.  */
struct occurrence
{
  /* The basic block represented by this structure.  */
  basic_block bb = basic_block();

  /* If non-NULL, the SSA_NAME holding the definition for a reciprocal
     inserted in BB.  */
  tree recip_def = tree();

  /* If non-NULL, the SSA_NAME holding the definition for a squared
     reciprocal inserted in BB.  */
  tree square_recip_def = tree();

  /* If non-NULL, the GIMPLE_ASSIGN for a reciprocal computation that
     was inserted in BB.  */
  gimple *recip_def_stmt = nullptr;

  /* Pointer to a list of "struct occurrence"s for blocks dominated
     by BB.  */
  struct occurrence *children = nullptr;

  /* Pointer to the next "struct occurrence"s in the list of blocks
     sharing a common dominator.  */
  struct occurrence *next = nullptr;

  /* The number of divisions that are in BB before compute_merit.  The
     number of divisions that are in BB or post-dominate it after
     compute_merit.  */
  int num_divisions = 0;

  /* True if the basic block has a division, false if it is a common
     dominator for basic blocks that do.  If it is false and trapping
     math is active, BB is not a candidate for inserting a reciprocal.  */
  bool bb_has_division = false;

  /* Construct a struct occurrence for basic block BB, and whose
     children list is headed by CHILDREN.  */
  occurrence (basic_block bb, struct occurrence *children)
  : bb (bb), children (children)
  {
    bb->aux = this;
  }

  /* Destroy a struct occurrence and remove it from its basic block.  */
  ~occurrence ()
  {
    bb->aux = nullptr;
  }

  /* Allocate memory for a struct occurrence from OCC_POOL.  */
  static void* operator new (size_t);

  /* Return memory for a struct occurrence to OCC_POOL.  */
  static void operator delete (void*, size_t);
};
static struct
{
  /* Number of 1.0/X ops inserted.  */
  int rdivs_inserted;

  /* Number of 1.0/FUNC ops inserted.  */
  int rfuncs_inserted;
} reciprocal_stats;

static struct
{
  /* Number of cexpi calls inserted.  */
  int inserted;

  /* Number of conversions removed.  */
  int conv_removed;
} sincos_stats;

static struct
{
  /* Number of widening multiplication ops inserted.  */
  int widen_mults_inserted;

  /* Number of integer multiply-and-accumulate ops inserted.  */
  int maccs_inserted;

  /* Number of fp fused multiply-add ops inserted.  */
  int fmas_inserted;

  /* Number of divmod calls inserted.  */
  int divmod_calls_inserted;

  /* Number of highpart multiplication ops inserted.  */
  int highpart_mults_inserted;
} widen_mul_stats;
/* The instance of "struct occurrence" representing the highest
   interesting block in the dominator tree.  */
static struct occurrence *occ_head;

/* Allocation pool for getting instances of "struct occurrence".  */
static object_allocator<occurrence> *occ_pool;

void*
occurrence::operator new (size_t n)
{
  gcc_assert (n == sizeof(occurrence));
  return occ_pool->allocate_raw ();
}

void
occurrence::operator delete (void *occ, size_t n)
{
  gcc_assert (n == sizeof(occurrence));
  occ_pool->remove_raw (occ);
}
/* Insert NEW_OCC into our subset of the dominator tree.  P_HEAD points to a
   list of "struct occurrence"s, one per basic block, having IDOM as
   their common dominator.

   We try to insert NEW_OCC as deep as possible in the tree, and we also
   insert any other block that is a common dominator for BB and one
   block already in the tree.  */

static void
insert_bb (struct occurrence *new_occ, basic_block idom,
	   struct occurrence **p_head)
{
  struct occurrence *occ, **p_occ;

  for (p_occ = p_head; (occ = *p_occ) != NULL; )
    {
      basic_block bb = new_occ->bb, occ_bb = occ->bb;
      basic_block dom = nearest_common_dominator (CDI_DOMINATORS, occ_bb, bb);
      if (dom == bb)
	{
	  /* BB dominates OCC_BB.  OCC becomes NEW_OCC's child: remove OCC
	     from its list.  */
	  *p_occ = occ->next;
	  occ->next = new_occ->children;
	  new_occ->children = occ;

	  /* Try the next block (it may as well be dominated by BB).  */
	}

      else if (dom == occ_bb)
	{
	  /* OCC_BB dominates BB.  Tail recurse to look deeper.  */
	  insert_bb (new_occ, dom, &occ->children);
	  return;
	}

      else if (dom != idom)
	{
	  gcc_assert (!dom->aux);

	  /* There is a dominator between IDOM and BB, add it and make
	     two children out of NEW_OCC and OCC.  First, remove OCC from
	     its list.  */
	  *p_occ = occ->next;
	  new_occ->next = occ;
	  occ->next = NULL;

	  /* None of the previous blocks has DOM as a dominator: if we tail
	     recursed, we would reexamine them uselessly.  Just switch BB with
	     DOM, and go on looking for blocks dominated by DOM.  */
	  new_occ = new occurrence (dom, new_occ);
	}

      else
	{
	  /* Nothing special, go on with the next element.  */
	  p_occ = &occ->next;
	}
    }

  /* No place was found as a child of IDOM.  Make BB a sibling of IDOM.  */
  new_occ->next = *p_head;
  *p_head = new_occ;
}
/* Register that we found a division in BB.
   IMPORTANCE is a measure of how much weighting to give
   that division.  Use IMPORTANCE = 2 to register a single
   division.  If the division is going to be found multiple
   times use 1 (as it is with squares).  */
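/* For illustration (added for exposition; not from the original
   sources): execute_cse_reciprocals_1 registers a plain division
   "a / x" once with IMPORTANCE = 2, but reaches a division by a
   square "a / (x * x)" twice while walking the square's uses and
   registers it with IMPORTANCE = 1 each time.  Either way a division
   contributes a total weight of 2 to num_divisions, which is why
   insert_reciprocals halves the count before comparing it against
   the threshold.  */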
static inline void
register_division_in (basic_block bb, int importance)
{
  struct occurrence *occ;

  occ = (struct occurrence *) bb->aux;
  if (!occ)
    {
      occ = new occurrence (bb, NULL);
      insert_bb (occ, ENTRY_BLOCK_PTR_FOR_FN (cfun), &occ_head);
    }

  occ->bb_has_division = true;
  occ->num_divisions += importance;
}
/* Compute the number of divisions that postdominate each block in OCC and
   its children.  */

static void
compute_merit (struct occurrence *occ)
{
  struct occurrence *occ_child;
  basic_block dom = occ->bb;

  for (occ_child = occ->children; occ_child; occ_child = occ_child->next)
    {
      basic_block bb;
      if (occ_child->children)
	compute_merit (occ_child);

      if (flag_exceptions)
	/* Don't clobber the EH landing pad!  Leave it the way it is.  */
	bb = single_noncomplex_succ (dom);
      else
	bb = dom;

      if (dominated_by_p (CDI_POST_DOMINATORS, bb, occ_child->bb))
	occ->num_divisions += occ_child->num_divisions;
    }
}
/* Return whether USE_STMT is a floating-point division by DEF.  */
static inline bool
is_division_by (gimple *use_stmt, tree def)
{
  return is_gimple_assign (use_stmt)
	 && gimple_assign_rhs_code (use_stmt) == RDIV_EXPR
	 && gimple_assign_rhs2 (use_stmt) == def
	 /* Do not recognize x / x as valid division, as we are getting
	    confused later by replacing all immediate uses of x in such
	    a stmt.  */
	 && gimple_assign_rhs1 (use_stmt) != def
	 && !stmt_can_throw_internal (cfun, use_stmt);
}
/* Return TRUE if USE_STMT is a multiplication of DEF by A.  */
static inline bool
is_mult_by (gimple *use_stmt, tree def, tree a)
{
  if (gimple_code (use_stmt) == GIMPLE_ASSIGN
      && gimple_assign_rhs_code (use_stmt) == MULT_EXPR)
    {
      tree op0 = gimple_assign_rhs1 (use_stmt);
      tree op1 = gimple_assign_rhs2 (use_stmt);

      return (op0 == def && op1 == a)
	      || (op0 == a && op1 == def);
    }
  return false;
}
/* Return whether USE_STMT is DEF * DEF.  */
static inline bool
is_square_of (gimple *use_stmt, tree def)
{
  return is_mult_by (use_stmt, def, def);
}
/* Return whether USE_STMT is a floating-point division by
   DEF * DEF.  */
static inline bool
is_division_by_square (gimple *use_stmt, tree def)
{
  if (gimple_code (use_stmt) == GIMPLE_ASSIGN
      && gimple_assign_rhs_code (use_stmt) == RDIV_EXPR
      && gimple_assign_rhs1 (use_stmt) != gimple_assign_rhs2 (use_stmt)
      && !stmt_can_throw_internal (cfun, use_stmt))
    {
      tree denominator = gimple_assign_rhs2 (use_stmt);
      if (TREE_CODE (denominator) == SSA_NAME)
	return is_square_of (SSA_NAME_DEF_STMT (denominator), def);
    }
  return false;
}
/* Walk the subset of the dominator tree rooted at OCC, setting the
   RECIP_DEF field to a definition of 1.0 / DEF that can be used in
   the given basic block.  The field may be left NULL, of course,
   if it is not possible or profitable to do the optimization.

   DEF_BSI is an iterator pointing at the statement defining DEF.
   If RECIP_DEF is set, a dominator already has a computation that can
   be used.

   If should_insert_square_recip is set, then this also inserts
   the square of the reciprocal immediately after the definition
   of the reciprocal.  */

static void
insert_reciprocals (gimple_stmt_iterator *def_gsi, struct occurrence *occ,
		    tree def, tree recip_def, tree square_recip_def,
		    int should_insert_square_recip, int threshold)
{
  tree type;
  gassign *new_stmt, *new_square_stmt;
  gimple_stmt_iterator gsi;
  struct occurrence *occ_child;

  if (!recip_def
      && (occ->bb_has_division || !flag_trapping_math)
      /* Divide by two as all divisions are counted twice in
	 the costing loop.  */
      && occ->num_divisions / 2 >= threshold)
    {
      /* Make a variable with the replacement and substitute it.  */
      type = TREE_TYPE (def);
      recip_def = create_tmp_reg (type, "reciptmp");
      new_stmt = gimple_build_assign (recip_def, RDIV_EXPR,
				      build_one_cst (type), def);

      if (should_insert_square_recip)
	{
	  square_recip_def = create_tmp_reg (type, "powmult_reciptmp");
	  new_square_stmt = gimple_build_assign (square_recip_def, MULT_EXPR,
						 recip_def, recip_def);
	}

      if (occ->bb_has_division)
	{
	  /* Case 1: insert before an existing division.  */
	  gsi = gsi_after_labels (occ->bb);
	  while (!gsi_end_p (gsi)
		 && (!is_division_by (gsi_stmt (gsi), def))
		 && (!is_division_by_square (gsi_stmt (gsi), def)))
	    gsi_next (&gsi);

	  gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
	  if (should_insert_square_recip)
	    gsi_insert_before (&gsi, new_square_stmt, GSI_SAME_STMT);
	}
      else if (def_gsi && occ->bb == gsi_bb (*def_gsi))
	{
	  /* Case 2: insert right after the definition.  Note that this will
	     never happen if the definition statement can throw, because in
	     that case the sole successor of the statement's basic block will
	     dominate all the uses as well.  */
	  gsi_insert_after (def_gsi, new_stmt, GSI_NEW_STMT);
	  if (should_insert_square_recip)
	    gsi_insert_after (def_gsi, new_square_stmt, GSI_NEW_STMT);
	}
      else
	{
	  /* Case 3: insert in a basic block not containing defs/uses.  */
	  gsi = gsi_after_labels (occ->bb);
	  gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
	  if (should_insert_square_recip)
	    gsi_insert_before (&gsi, new_square_stmt, GSI_SAME_STMT);
	}

      reciprocal_stats.rdivs_inserted++;

      occ->recip_def_stmt = new_stmt;
    }

  occ->recip_def = recip_def;
  occ->square_recip_def = square_recip_def;
  for (occ_child = occ->children; occ_child; occ_child = occ_child->next)
    insert_reciprocals (def_gsi, occ_child, def, recip_def,
			square_recip_def, should_insert_square_recip,
			threshold);
}
/* Replace occurrences of expr / (x * x) with expr * ((1 / x) * (1 / x)).
   Take as argument the use for (x * x).  */

static inline void
replace_reciprocal_squares (use_operand_p use_p)
{
  gimple *use_stmt = USE_STMT (use_p);
  basic_block bb = gimple_bb (use_stmt);
  struct occurrence *occ = (struct occurrence *) bb->aux;

  if (optimize_bb_for_speed_p (bb) && occ->square_recip_def
      && occ->recip_def)
    {
      gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
      gimple_assign_set_rhs_code (use_stmt, MULT_EXPR);
      gimple_assign_set_rhs2 (use_stmt, occ->square_recip_def);
      SET_USE (use_p, occ->square_recip_def);
      fold_stmt_inplace (&gsi);
      update_stmt (use_stmt);
    }
}
/* Replace the division at USE_P with a multiplication by the reciprocal, if
   possible.  */

static inline void
replace_reciprocal (use_operand_p use_p)
{
  gimple *use_stmt = USE_STMT (use_p);
  basic_block bb = gimple_bb (use_stmt);
  struct occurrence *occ = (struct occurrence *) bb->aux;

  if (optimize_bb_for_speed_p (bb)
      && occ->recip_def && use_stmt != occ->recip_def_stmt)
    {
      gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
      gimple_assign_set_rhs_code (use_stmt, MULT_EXPR);
      SET_USE (use_p, occ->recip_def);
      fold_stmt_inplace (&gsi);
      update_stmt (use_stmt);
    }
}
/* Free OCC and return one more "struct occurrence" to be freed.  */

static struct occurrence *
free_bb (struct occurrence *occ)
{
  struct occurrence *child, *next;

  /* First get the two pointers hanging off OCC.  */
  next = occ->next;
  child = occ->children;
  delete occ;

  /* Now ensure that we don't recurse unless it is necessary.  */
  if (!child)
    return next;
  else
    {
      while (next)
	next = free_bb (next);

      return child;
    }
}
/* Transform sequences like
   t = sqrt (a)
   x = 1.0 / t;
   r1 = x * x;
   r2 = a * x;
   into:
   t = sqrt (a)
   r1 = 1.0 / a;
   r2 = sqrt (a);
   x = r1 * r2;
   depending on the uses of x, r1, r2.  This removes one multiplication and
   allows the sqrt and division operations to execute in parallel.
   DEF_GSI is the gsi of the initial division by sqrt that defines
   DEF (x in the example above).  */

static void
optimize_recip_sqrt (gimple_stmt_iterator *def_gsi, tree def)
{
  gimple *use_stmt;
  imm_use_iterator use_iter;
  gimple *stmt = gsi_stmt (*def_gsi);
  tree x = def;
  tree orig_sqrt_ssa_name = gimple_assign_rhs2 (stmt);
  tree div_rhs1 = gimple_assign_rhs1 (stmt);

  if (TREE_CODE (orig_sqrt_ssa_name) != SSA_NAME
      || TREE_CODE (div_rhs1) != REAL_CST
      || !real_equal (&TREE_REAL_CST (div_rhs1), &dconst1))
    return;

  gcall *sqrt_stmt
    = dyn_cast <gcall *> (SSA_NAME_DEF_STMT (orig_sqrt_ssa_name));

  if (!sqrt_stmt || !gimple_call_lhs (sqrt_stmt))
    return;

  switch (gimple_call_combined_fn (sqrt_stmt))
    {
    CASE_CFN_SQRT:
    CASE_CFN_SQRT_FN:
      break;

    default:
      return;
    }
  tree a = gimple_call_arg (sqrt_stmt, 0);

  /* We have 'a' and 'x'.  Now analyze the uses of 'x'.  */

  /* Statements that use x in x * x.  */
  auto_vec<gimple *> sqr_stmts;
  /* Statements that use x in a * x.  */
  auto_vec<gimple *> mult_stmts;
  bool has_other_use = false;
  bool mult_on_main_path = false;

  FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, x)
    {
      if (is_gimple_debug (use_stmt))
	continue;
      if (is_square_of (use_stmt, x))
	{
	  sqr_stmts.safe_push (use_stmt);
	  if (gimple_bb (use_stmt) == gimple_bb (stmt))
	    mult_on_main_path = true;
	}
      else if (is_mult_by (use_stmt, x, a))
	{
	  mult_stmts.safe_push (use_stmt);
	  if (gimple_bb (use_stmt) == gimple_bb (stmt))
	    mult_on_main_path = true;
	}
      else
	has_other_use = true;
    }

  /* In the x * x and a * x cases we just rewire stmt operands or
     remove multiplications.  In the has_other_use case we introduce
     a multiplication so make sure we don't introduce a multiplication
     on a path where there was none.  */
  if (has_other_use && !mult_on_main_path)
    return;

  if (sqr_stmts.is_empty () && mult_stmts.is_empty ())
    return;

  /* If x = 1.0 / sqrt (a) has uses other than those optimized here we want
     to be able to compose it from the sqr and mult cases.  */
  if (has_other_use && (sqr_stmts.is_empty () || mult_stmts.is_empty ()))
    return;

  if (dump_file)
    {
      fprintf (dump_file, "Optimizing reciprocal sqrt multiplications of\n");
      print_gimple_stmt (dump_file, sqrt_stmt, 0, TDF_NONE);
      print_gimple_stmt (dump_file, stmt, 0, TDF_NONE);
      fprintf (dump_file, "\n");
    }

  bool delete_div = !has_other_use;
  tree sqr_ssa_name = NULL_TREE;
  if (!sqr_stmts.is_empty ())
    {
      /* r1 = x * x.  Transform the original
	 x = 1.0 / t
	 into
	 tmp1 = 1.0 / a
	 r1 = tmp1.  */

      sqr_ssa_name
	= make_temp_ssa_name (TREE_TYPE (a), NULL, "recip_sqrt_sqr");

      if (dump_file)
	{
	  fprintf (dump_file, "Replacing original division\n");
	  print_gimple_stmt (dump_file, stmt, 0, TDF_NONE);
	  fprintf (dump_file, "with new division\n");
	}
      stmt
	= gimple_build_assign (sqr_ssa_name, gimple_assign_rhs_code (stmt),
			       gimple_assign_rhs1 (stmt), a);
      gsi_insert_before (def_gsi, stmt, GSI_SAME_STMT);
      gsi_remove (def_gsi, true);
      *def_gsi = gsi_for_stmt (stmt);
      fold_stmt_inplace (def_gsi);
      update_stmt (stmt);

      if (dump_file)
	print_gimple_stmt (dump_file, stmt, 0, TDF_NONE);

      delete_div = false;
      gimple *sqr_stmt;
      unsigned int i;
      FOR_EACH_VEC_ELT (sqr_stmts, i, sqr_stmt)
	{
	  gimple_stmt_iterator gsi2 = gsi_for_stmt (sqr_stmt);
	  gimple_assign_set_rhs_from_tree (&gsi2, sqr_ssa_name);
	  update_stmt (sqr_stmt);
	}
    }
  if (!mult_stmts.is_empty ())
    {
      /* r2 = a * x.  Transform this into:
	 r2 = t (The original sqrt (a)).  */
      unsigned int i;
      gimple *mult_stmt = NULL;
      FOR_EACH_VEC_ELT (mult_stmts, i, mult_stmt)
	{
	  gimple_stmt_iterator gsi2 = gsi_for_stmt (mult_stmt);

	  if (dump_file)
	    {
	      fprintf (dump_file, "Replacing squaring multiplication\n");
	      print_gimple_stmt (dump_file, mult_stmt, 0, TDF_NONE);
	      fprintf (dump_file, "with assignment\n");
	    }
	  gimple_assign_set_rhs_from_tree (&gsi2, orig_sqrt_ssa_name);
	  fold_stmt_inplace (&gsi2);
	  update_stmt (mult_stmt);
	  if (dump_file)
	    print_gimple_stmt (dump_file, mult_stmt, 0, TDF_NONE);
	}
    }

  if (has_other_use)
    {
      /* Using the two temporaries tmp1, tmp2 from above
	 the original x is now:
	 x = tmp1 * tmp2.  */
      gcc_assert (orig_sqrt_ssa_name);
      gcc_assert (sqr_ssa_name);

      gimple *new_stmt
	= gimple_build_assign (x, MULT_EXPR,
			       orig_sqrt_ssa_name, sqr_ssa_name);
      gsi_insert_after (def_gsi, new_stmt, GSI_NEW_STMT);
    }
  else if (delete_div)
    {
      /* Remove the original division.  */
      gimple_stmt_iterator gsi2 = gsi_for_stmt (stmt);
      gsi_remove (&gsi2, true);
      release_defs (stmt);
    }
  else
    release_ssa_name (x);
}
/* Look for floating-point divisions among DEF's uses, and try to
   replace them by multiplications with the reciprocal.  Add
   as many statements computing the reciprocal as needed.

   DEF must be a GIMPLE register of a floating-point type.  */

static void
execute_cse_reciprocals_1 (gimple_stmt_iterator *def_gsi, tree def)
{
  use_operand_p use_p, square_use_p;
  imm_use_iterator use_iter, square_use_iter;
  tree square_def;
  struct occurrence *occ;
  int count = 0;
  int threshold;
  int square_recip_count = 0;
  int sqrt_recip_count = 0;

  gcc_assert (FLOAT_TYPE_P (TREE_TYPE (def)) && TREE_CODE (def) == SSA_NAME);
  threshold = targetm.min_divisions_for_recip_mul (TYPE_MODE (TREE_TYPE (def)));

  /* If DEF is a square (x * x), count the number of divisions by x.
     If there are more divisions by x than by (DEF * DEF), prefer to optimize
     the reciprocal of x instead of DEF.  This improves cases where the same
     code divides both by x and by (x * x): reciprocal optimization of x
     results in 1 division rather than 2 or 3.  */
  gimple *def_stmt = SSA_NAME_DEF_STMT (def);
  if (is_gimple_assign (def_stmt)
      && gimple_assign_rhs_code (def_stmt) == MULT_EXPR
      && TREE_CODE (gimple_assign_rhs1 (def_stmt)) == SSA_NAME
      && gimple_assign_rhs1 (def_stmt) == gimple_assign_rhs2 (def_stmt))
    {
      tree op0 = gimple_assign_rhs1 (def_stmt);

      FOR_EACH_IMM_USE_FAST (use_p, use_iter, op0)
	{
	  gimple *use_stmt = USE_STMT (use_p);
	  if (is_division_by (use_stmt, op0))
	    sqrt_recip_count++;
	}
    }

  FOR_EACH_IMM_USE_FAST (use_p, use_iter, def)
    {
      gimple *use_stmt = USE_STMT (use_p);
      if (is_division_by (use_stmt, def))
	{
	  register_division_in (gimple_bb (use_stmt), 2);
	  count++;
	}

      if (is_square_of (use_stmt, def))
	{
	  square_def = gimple_assign_lhs (use_stmt);
	  FOR_EACH_IMM_USE_FAST (square_use_p, square_use_iter, square_def)
	    {
	      gimple *square_use_stmt = USE_STMT (square_use_p);
	      if (is_division_by (square_use_stmt, square_def))
		{
		  /* This is executed twice for each division by a square.  */
		  register_division_in (gimple_bb (square_use_stmt), 1);
		  square_recip_count++;
		}
	    }
	}
    }

  /* Square reciprocals were counted twice above.  */
  square_recip_count /= 2;

  /* If it is more profitable to optimize 1 / x, don't optimize 1 / (x * x).  */
  if (sqrt_recip_count > square_recip_count)
    goto out;

  /* Do the expensive part only if we can hope to optimize something.  */
  if (count + square_recip_count >= threshold && count >= 1)
    {
      gimple *use_stmt;
      for (occ = occ_head; occ; occ = occ->next)
	{
	  compute_merit (occ);
	  insert_reciprocals (def_gsi, occ, def, NULL, NULL,
			      square_recip_count, threshold);
	}

      FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, def)
	{
	  if (is_division_by (use_stmt, def))
	    {
	      FOR_EACH_IMM_USE_ON_STMT (use_p, use_iter)
		replace_reciprocal (use_p);
	    }
	  else if (square_recip_count > 0 && is_square_of (use_stmt, def))
	    {
	      FOR_EACH_IMM_USE_ON_STMT (use_p, use_iter)
		{
		  /* Find all uses of the square that are divisions and
		     replace them by multiplications with the inverse.  */
		  imm_use_iterator square_iterator;
		  gimple *powmult_use_stmt = USE_STMT (use_p);
		  tree powmult_def_name = gimple_assign_lhs (powmult_use_stmt);

		  FOR_EACH_IMM_USE_STMT (powmult_use_stmt,
					 square_iterator, powmult_def_name)
		    FOR_EACH_IMM_USE_ON_STMT (square_use_p, square_iterator)
		      {
			gimple *powmult_use_stmt = USE_STMT (square_use_p);
			if (is_division_by (powmult_use_stmt, powmult_def_name))
			  replace_reciprocal_squares (square_use_p);
		      }
		}
	    }
	}
    }

out:
  for (occ = occ_head; occ; )
    occ = free_bb (occ);

  occ_head = NULL;
}
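/* A sketch of the overall effect (added for exposition; not from the
   original sources).  On -freciprocal-math input such as

     x_3 = a_1 / d_2;
     y_5 = b_4 / d_2;
     z_7 = c_6 / d_2;

   the pass produces, once the target's min_divisions_for_recip_mul
   threshold is met,

     reciptmp_8 = 1.0 / d_2;
     x_3 = a_1 * reciptmp_8;
     y_5 = b_4 * reciptmp_8;
     z_7 = c_6 * reciptmp_8;

   trading three divisions for one division and three multiplications.  */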
/* Return an internal function that implements the reciprocal of CALL,
   or IFN_LAST if there is no such function that the target supports.  */

internal_fn
internal_fn_reciprocal (gcall *call)
{
  internal_fn ifn;

  switch (gimple_call_combined_fn (call))
    {
    CASE_CFN_SQRT:
    CASE_CFN_SQRT_FN:
      ifn = IFN_RSQRT;
      break;

    default:
      return IFN_LAST;
    }

  tree_pair types = direct_internal_fn_types (ifn, call);
  if (!direct_internal_fn_supported_p (ifn, types, OPTIMIZE_FOR_SPEED))
    return IFN_LAST;

  return ifn;
}
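/* For illustration (added for exposition; not from the original
   sources): given

     t_3 = sqrtf (b_2);
     y_5 = a_4 / t_3;

   the a/func(b) scan in pass_cse_reciprocals::execute below uses
   internal_fn_reciprocal to rewrite the call and the division into

     t_3 = .RSQRT (b_2);
     y_5 = a_4 * t_3;

   when the target supports IFN_RSQRT for the mode and t_3 has no uses
   other than divisions.  */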
/* Go through all the floating-point SSA_NAMEs, and call
   execute_cse_reciprocals_1 on each of them.  */
namespace {

const pass_data pass_data_cse_reciprocals =
{
  GIMPLE_PASS, /* type */
  "recip", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_RECIP, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};

class pass_cse_reciprocals : public gimple_opt_pass
{
public:
  pass_cse_reciprocals (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_cse_reciprocals, ctxt)
  {}

  /* opt_pass methods: */
  bool gate (function *) final override
  {
    return optimize && flag_reciprocal_math;
  }

  unsigned int execute (function *) final override;

}; // class pass_cse_reciprocals
unsigned int
pass_cse_reciprocals::execute (function *fun)
{
  basic_block bb;
  tree arg;

  occ_pool = new object_allocator<occurrence> ("dominators for recip");

  memset (&reciprocal_stats, 0, sizeof (reciprocal_stats));
  calculate_dominance_info (CDI_DOMINATORS);
  calculate_dominance_info (CDI_POST_DOMINATORS);

  if (flag_checking)
    FOR_EACH_BB_FN (bb, fun)
      gcc_assert (!bb->aux);

  for (arg = DECL_ARGUMENTS (fun->decl); arg; arg = DECL_CHAIN (arg))
    if (FLOAT_TYPE_P (TREE_TYPE (arg))
	&& is_gimple_reg (arg))
      {
	tree name = ssa_default_def (fun, arg);
	if (name)
	  execute_cse_reciprocals_1 (NULL, name);
      }

  FOR_EACH_BB_FN (bb, fun)
    {
      tree def;

      for (gphi_iterator gsi = gsi_start_phis (bb); !gsi_end_p (gsi);
	   gsi_next (&gsi))
	{
	  gphi *phi = gsi.phi ();
	  def = PHI_RESULT (phi);
	  if (! virtual_operand_p (def)
	      && FLOAT_TYPE_P (TREE_TYPE (def)))
	    execute_cse_reciprocals_1 (NULL, def);
	}

      for (gimple_stmt_iterator gsi = gsi_after_labels (bb); !gsi_end_p (gsi);
	   gsi_next (&gsi))
	{
	  gimple *stmt = gsi_stmt (gsi);

	  if (gimple_has_lhs (stmt)
	      && (def = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_DEF)) != NULL
	      && FLOAT_TYPE_P (TREE_TYPE (def))
	      && TREE_CODE (def) == SSA_NAME)
	    {
	      execute_cse_reciprocals_1 (&gsi, def);
	      stmt = gsi_stmt (gsi);
	      if (flag_unsafe_math_optimizations
		  && is_gimple_assign (stmt)
		  && gimple_assign_lhs (stmt) == def
		  && !stmt_can_throw_internal (cfun, stmt)
		  && gimple_assign_rhs_code (stmt) == RDIV_EXPR)
		optimize_recip_sqrt (&gsi, def);
	    }
	}

      if (optimize_bb_for_size_p (bb))
	continue;

      /* Scan for a/func(b) and convert it to reciprocal a*rfunc(b).  */
      for (gimple_stmt_iterator gsi = gsi_after_labels (bb); !gsi_end_p (gsi);
	   gsi_next (&gsi))
	{
	  gimple *stmt = gsi_stmt (gsi);

	  if (is_gimple_assign (stmt)
	      && gimple_assign_rhs_code (stmt) == RDIV_EXPR)
	    {
	      tree arg1 = gimple_assign_rhs2 (stmt);
	      gimple *stmt1;

	      if (TREE_CODE (arg1) != SSA_NAME)
		continue;

	      stmt1 = SSA_NAME_DEF_STMT (arg1);

	      if (is_gimple_call (stmt1)
		  && gimple_call_lhs (stmt1))
		{
		  bool fail;
		  imm_use_iterator ui;
		  use_operand_p use_p;
		  tree fndecl = NULL_TREE;

		  gcall *call = as_a <gcall *> (stmt1);
		  internal_fn ifn = internal_fn_reciprocal (call);
		  if (ifn == IFN_LAST)
		    {
		      fndecl = gimple_call_fndecl (call);
		      if (!fndecl
			  || !fndecl_built_in_p (fndecl, BUILT_IN_MD))
			continue;
		      fndecl = targetm.builtin_reciprocal (fndecl);
		      if (!fndecl)
			continue;
		    }

		  /* Check that all uses of the SSA name are divisions,
		     otherwise replacing the defining statement will do
		     the wrong thing.  */
		  fail = false;
		  FOR_EACH_IMM_USE_FAST (use_p, ui, arg1)
		    {
		      gimple *stmt2 = USE_STMT (use_p);
		      if (is_gimple_debug (stmt2))
			continue;
		      if (!is_gimple_assign (stmt2)
			  || gimple_assign_rhs_code (stmt2) != RDIV_EXPR
			  || gimple_assign_rhs1 (stmt2) == arg1
			  || gimple_assign_rhs2 (stmt2) != arg1)
			{
			  fail = true;
			  break;
			}
		    }
		  if (fail)
		    continue;

		  gimple_replace_ssa_lhs (call, arg1);
		  if (gimple_call_internal_p (call) != (ifn != IFN_LAST))
		    {
		      auto_vec<tree, 4> args;
		      for (unsigned int i = 0;
			   i < gimple_call_num_args (call); i++)
			args.safe_push (gimple_call_arg (call, i));
		      gcall *stmt2;
		      if (ifn == IFN_LAST)
			stmt2 = gimple_build_call_vec (fndecl, args);
		      else
			stmt2 = gimple_build_call_internal_vec (ifn, args);
		      gimple_call_set_lhs (stmt2, arg1);
		      gimple_move_vops (stmt2, call);
		      gimple_call_set_nothrow (stmt2,
					       gimple_call_nothrow_p (call));
		      gimple_stmt_iterator gsi2 = gsi_for_stmt (call);
		      gsi_replace (&gsi2, stmt2, true);
		    }
		  else
		    {
		      if (ifn == IFN_LAST)
			gimple_call_set_fndecl (call, fndecl);
		      else
			gimple_call_set_internal_fn (call, ifn);
		    }
		  reciprocal_stats.rfuncs_inserted++;

		  FOR_EACH_IMM_USE_STMT (stmt, ui, arg1)
		    {
		      gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
		      gimple_assign_set_rhs_code (stmt, MULT_EXPR);
		      fold_stmt_inplace (&gsi);
		      update_stmt (stmt);
		    }
		}
	    }
	}
    }

  statistics_counter_event (fun, "reciprocal divs inserted",
			    reciprocal_stats.rdivs_inserted);
  statistics_counter_event (fun, "reciprocal functions inserted",
			    reciprocal_stats.rfuncs_inserted);

  free_dominance_info (CDI_DOMINATORS);
  free_dominance_info (CDI_POST_DOMINATORS);
  delete occ_pool;
  return 0;
}
} // anon namespace

gimple_opt_pass *
make_pass_cse_reciprocals (gcc::context *ctxt)
{
  return new pass_cse_reciprocals (ctxt);
}
/* If NAME is the result of a type conversion, look for other
   equivalent dominating or dominated conversions, and replace all
   uses with the earliest dominating name, removing the redundant
   conversions.  Return the prevailing name.  */
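/* For illustration (added for exposition; not from the original
   sources): with two equivalent casts of the same source,

     _1 = (double) f_2;
     ...
     _3 = (double) f_2;
     use (_3);

   the dominated conversion is removed and its uses rewired:

     _1 = (double) f_2;
     ...
     use (_1);  */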
static tree
execute_cse_conv_1 (tree name, bool *cfg_changed)
{
  if (SSA_NAME_IS_DEFAULT_DEF (name)
      || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
    return name;

  gimple *def_stmt = SSA_NAME_DEF_STMT (name);

  if (!gimple_assign_cast_p (def_stmt))
    return name;

  tree src = gimple_assign_rhs1 (def_stmt);

  if (TREE_CODE (src) != SSA_NAME)
    return name;

  imm_use_iterator use_iter;
  gimple *use_stmt;

  /* Find the earliest dominating def.  */
  FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, src)
    {
      if (use_stmt == def_stmt
	  || !gimple_assign_cast_p (use_stmt))
	continue;

      tree lhs = gimple_assign_lhs (use_stmt);

      if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs)
	  || (gimple_assign_rhs1 (use_stmt)
	      != gimple_assign_rhs1 (def_stmt))
	  || !types_compatible_p (TREE_TYPE (name), TREE_TYPE (lhs)))
	continue;

      bool use_dominates;
      if (gimple_bb (def_stmt) == gimple_bb (use_stmt))
	{
	  gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
	  while (!gsi_end_p (gsi) && gsi_stmt (gsi) != def_stmt)
	    gsi_next (&gsi);
	  use_dominates = !gsi_end_p (gsi);
	}
      else if (dominated_by_p (CDI_DOMINATORS, gimple_bb (use_stmt),
			       gimple_bb (def_stmt)))
	use_dominates = false;
      else if (dominated_by_p (CDI_DOMINATORS, gimple_bb (def_stmt),
			       gimple_bb (use_stmt)))
	use_dominates = true;
      else
	continue;

      if (use_dominates)
	{
	  std::swap (name, lhs);
	  std::swap (def_stmt, use_stmt);
	}
    }

  /* Now go through all uses of SRC again, replacing the equivalent
     dominated conversions.  We may replace defs that were not
     dominated by the then-prevailing defs when we first visited
     them.  */
  FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, src)
    {
      if (use_stmt == def_stmt
	  || !gimple_assign_cast_p (use_stmt))
	continue;

      tree lhs = gimple_assign_lhs (use_stmt);

      if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs)
	  || (gimple_assign_rhs1 (use_stmt)
	      != gimple_assign_rhs1 (def_stmt))
	  || !types_compatible_p (TREE_TYPE (name), TREE_TYPE (lhs)))
	continue;

      basic_block use_bb = gimple_bb (use_stmt);
      if (gimple_bb (def_stmt) == use_bb
	  || dominated_by_p (CDI_DOMINATORS, use_bb, gimple_bb (def_stmt)))
	{
	  sincos_stats.conv_removed++;

	  gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
	  replace_uses_by (lhs, name);
	  if (gsi_remove (&gsi, true)
	      && gimple_purge_dead_eh_edges (use_bb))
	    *cfg_changed = true;
	  release_defs (use_stmt);
	}
    }

  return name;
}
/* Records an occurrence at statement USE_STMT in the vector of
   statements STMTS if it is dominated by *TOP_BB or dominates it or
   this basic block is not yet initialized.  Returns true if the
   occurrence was pushed on the vector.  Adjusts *TOP_BB to be the
   basic block dominating all statements in the vector.  */

static bool
maybe_record_sincos (vec<gimple *> *stmts,
		     basic_block *top_bb, gimple *use_stmt)
{
  basic_block use_bb = gimple_bb (use_stmt);
  if (*top_bb
      && (*top_bb == use_bb
	  || dominated_by_p (CDI_DOMINATORS, use_bb, *top_bb)))
    stmts->safe_push (use_stmt);
  else if (!*top_bb
	   || dominated_by_p (CDI_DOMINATORS, *top_bb, use_bb))
    {
      stmts->safe_push (use_stmt);
      *top_bb = use_bb;
    }
  else
    return false;

  return true;
}
/* Look for sin, cos and cexpi calls with the same argument NAME and
   create a single call to cexpi CSEing the result in this case.
   We first walk over all immediate uses of the argument collecting
   statements that we can CSE in a vector and in a second pass replace
   the statement rhs with a REALPART or IMAGPART expression on the
   result of the cexpi call we insert before the use statement that
   dominates all other candidates.  */
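/* For illustration (added for exposition; not from the original
   sources):

     s_2 = sin (x_1);
     c_3 = cos (x_1);

   becomes

     sincostmp_4 = cexpi (x_1);
     s_2 = IMAGPART_EXPR <sincostmp_4>;
     c_3 = REALPART_EXPR <sincostmp_4>;

   so the sine/cosine pair is computed by a single call.  */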
static bool
execute_cse_sincos_1 (tree name)
{
  gimple_stmt_iterator gsi;
  imm_use_iterator use_iter;
  tree fndecl, res, type = NULL_TREE;
  gimple *def_stmt, *use_stmt, *stmt;
  int seen_cos = 0, seen_sin = 0, seen_cexpi = 0;
  auto_vec<gimple *> stmts;
  basic_block top_bb = NULL;
  int i;
  bool cfg_changed = false;

  name = execute_cse_conv_1 (name, &cfg_changed);

  FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, name)
    {
      if (gimple_code (use_stmt) != GIMPLE_CALL
	  || !gimple_call_lhs (use_stmt))
	continue;

      switch (gimple_call_combined_fn (use_stmt))
	{
	CASE_CFN_COS:
	  seen_cos |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
	  break;

	CASE_CFN_SIN:
	  seen_sin |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
	  break;

	CASE_CFN_CEXPI:
	  seen_cexpi |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
	  break;

	default:;
	  continue;
	}

      tree t = mathfn_built_in_type (gimple_call_combined_fn (use_stmt));
      if (!type)
	{
	  type = t;
	  t = TREE_TYPE (name);
	}
      /* This checks that NAME has the right type in the first round,
	 and, in subsequent rounds, that the built_in type is the same
	 type, or a compatible type.  */
      if (type != t && !types_compatible_p (type, t))
	return false;
    }
  if (seen_cos + seen_sin + seen_cexpi <= 1)
    return false;

  /* Simply insert cexpi at the beginning of top_bb but not earlier than
     the name def statement.  */
  fndecl = mathfn_built_in (type, BUILT_IN_CEXPI);
  if (!fndecl)
    return false;
  stmt = gimple_build_call (fndecl, 1, name);
  res = make_temp_ssa_name (TREE_TYPE (TREE_TYPE (fndecl)), stmt, "sincostmp");
  gimple_call_set_lhs (stmt, res);

  def_stmt = SSA_NAME_DEF_STMT (name);
  if (!SSA_NAME_IS_DEFAULT_DEF (name)
      && gimple_code (def_stmt) != GIMPLE_PHI
      && gimple_bb (def_stmt) == top_bb)
    {
      gsi = gsi_for_stmt (def_stmt);
      gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
    }
  else
    {
      gsi = gsi_after_labels (top_bb);
      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
    }
  sincos_stats.inserted++;

  /* And adjust the recorded old call sites.  */
  for (i = 0; stmts.iterate (i, &use_stmt); ++i)
    {
      tree rhs = NULL_TREE;

      switch (gimple_call_combined_fn (use_stmt))
	{
	CASE_CFN_COS:
	  rhs = fold_build1 (REALPART_EXPR, type, res);
	  break;

	CASE_CFN_SIN:
	  rhs = fold_build1 (IMAGPART_EXPR, type, res);
	  break;

	CASE_CFN_CEXPI:
	  rhs = res;
	  break;

	default:
	  gcc_unreachable ();
	}

      /* Replace call with a copy.  */
      stmt = gimple_build_assign (gimple_call_lhs (use_stmt), rhs);

      gsi = gsi_for_stmt (use_stmt);
      gsi_replace (&gsi, stmt, true);
      if (gimple_purge_dead_eh_edges (gimple_bb (stmt)))
	cfg_changed = true;
    }

  return cfg_changed;
}
/* To evaluate powi(x,n), the floating point value x raised to the
   constant integer exponent n, we use a hybrid algorithm that
   combines the "window method" with look-up tables.  For an
   introduction to exponentiation algorithms and "addition chains",
   see section 4.6.3, "Evaluation of Powers" of Donald E. Knuth,
   "Seminumerical Algorithms", Vol. 2, "The Art of Computer Programming",
   3rd Edition, 1998, and Daniel M. Gordon, "A Survey of Fast Exponentiation
   Methods", Journal of Algorithms, Vol. 27, pp. 129-146, 1998.  */

/* Provide a default value for POWI_MAX_MULTS, the maximum number of
   multiplications to inline before calling the system library's pow
   function.  powi(x,n) requires at worst 2*bits(n)-2 multiplications,
   so this default never requires calling pow, powf or powl.  */

#ifndef POWI_MAX_MULTS
#define POWI_MAX_MULTS  (2*HOST_BITS_PER_WIDE_INT-2)
#endif

/* The size of the "optimal power tree" lookup table.  All
   exponents less than this value are simply looked up in the
   powi_table below.  This threshold is also used to size the
   cache of pseudo registers that hold intermediate results.  */
#define POWI_TABLE_SIZE 256

/* The size, in bits, of the window used in the "window method"
   exponentiation algorithm.  This is equivalent to a radix of
   (1<<POWI_WINDOW_SIZE) in the corresponding "m-ary method".  */
#define POWI_WINDOW_SIZE 3
/* The following table is an efficient representation of an
   "optimal power tree".  For each value, i, the corresponding
   value, j, in the table states that an optimal evaluation
   sequence for calculating pow(x,i) can be found by evaluating
   pow(x,j)*pow(x,i-j).  An optimal power tree for the first
   100 integers is given in Knuth's "Seminumerical algorithms".  */

static const unsigned char powi_table[POWI_TABLE_SIZE] =
  {
      0,   1,   1,   2,   2,   3,   3,   4,  /*   0 -   7 */
      4,   6,   5,   6,   6,  10,   7,   9,  /*   8 -  15 */
      8,  16,   9,  16,  10,  12,  11,  13,  /*  16 -  23 */
     12,  17,  13,  18,  14,  24,  15,  26,  /*  24 -  31 */
     16,  17,  17,  19,  18,  33,  19,  26,  /*  32 -  39 */
     20,  25,  21,  40,  22,  27,  23,  44,  /*  40 -  47 */
     24,  32,  25,  34,  26,  29,  27,  44,  /*  48 -  55 */
     28,  31,  29,  34,  30,  60,  31,  36,  /*  56 -  63 */
     32,  64,  33,  34,  34,  46,  35,  37,  /*  64 -  71 */
     36,  65,  37,  50,  38,  48,  39,  69,  /*  72 -  79 */
     40,  49,  41,  43,  42,  51,  43,  58,  /*  80 -  87 */
     44,  64,  45,  47,  46,  59,  47,  76,  /*  88 -  95 */
     48,  65,  49,  66,  50,  67,  51,  66,  /*  96 - 103 */
     52,  70,  53,  74,  54, 104,  55,  74,  /* 104 - 111 */
     56,  64,  57,  69,  58,  78,  59,  68,  /* 112 - 119 */
     60,  61,  61,  80,  62,  75,  63,  68,  /* 120 - 127 */
     64,  65,  65, 128,  66, 129,  67,  90,  /* 128 - 135 */
     68,  73,  69, 131,  70,  94,  71,  88,  /* 136 - 143 */
     72, 128,  73,  98,  74, 132,  75, 121,  /* 144 - 151 */
     76, 102,  77, 124,  78, 132,  79, 106,  /* 152 - 159 */
     80,  97,  81, 160,  82,  99,  83, 134,  /* 160 - 167 */
     84,  86,  85,  95,  86, 160,  87, 100,  /* 168 - 175 */
     88, 113,  89,  98,  90, 107,  91, 122,  /* 176 - 183 */
     92, 111,  93, 102,  94, 126,  95, 150,  /* 184 - 191 */
     96, 128,  97, 130,  98, 133,  99, 195,  /* 192 - 199 */
    100, 128, 101, 123, 102, 164, 103, 138,  /* 200 - 207 */
    104, 145, 105, 146, 106, 109, 107, 149,  /* 208 - 215 */
    108, 200, 109, 146, 110, 170, 111, 157,  /* 216 - 223 */
    112, 128, 113, 130, 114, 182, 115, 132,  /* 224 - 231 */
    116, 200, 117, 132, 118, 158, 119, 206,  /* 232 - 239 */
    120, 240, 121, 162, 122, 147, 123, 152,  /* 240 - 247 */
    124, 166, 125, 214, 126, 138, 127, 153,  /* 248 - 255 */
  };
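/* A worked example (added for exposition; not from the original
   sources): powi_table[5] == 3, so x**5 is evaluated as
   x**3 * x**2; powi_table[3] == 2 and powi_table[2] == 1 complete
   the chain:

     t2 = x * x
     t3 = t2 * x
     t5 = t3 * t2

   three multiplications in total, which is what
   powi_lookup_cost (5, cache) below returns for a fresh cache.  */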
/* Return the number of multiplications required to calculate
   powi(x,n) where n is less than POWI_TABLE_SIZE.  This is a
   subroutine of powi_cost.  CACHE is an array indicating
   which exponents have already been calculated.  */

static int
powi_lookup_cost (unsigned HOST_WIDE_INT n, bool *cache)
{
  /* If we've already calculated this exponent, then this evaluation
     doesn't require any additional multiplications.  */
  if (cache[n])
    return 0;

  cache[n] = true;
  return powi_lookup_cost (n - powi_table[n], cache)
	 + powi_lookup_cost (powi_table[n], cache) + 1;
}
/* Return the number of multiplications required to calculate
   powi(x,n) for an arbitrary x, given the exponent N.  This
   function needs to be kept in sync with powi_as_mults below.  */

static int
powi_cost (HOST_WIDE_INT n)
{
  bool cache[POWI_TABLE_SIZE];
  unsigned HOST_WIDE_INT digit;
  unsigned HOST_WIDE_INT val;
  int result;

  if (n == 0)
    return 0;

  /* Ignore the reciprocal when calculating the cost.  */
  val = absu_hwi (n);

  /* Initialize the exponent cache.  */
  memset (cache, 0, POWI_TABLE_SIZE * sizeof (bool));
  cache[1] = true;

  result = 0;

  while (val >= POWI_TABLE_SIZE)
    {
      if (val & 1)
	{
	  digit = val & ((1 << POWI_WINDOW_SIZE) - 1);
	  result += powi_lookup_cost (digit, cache)
		    + POWI_WINDOW_SIZE + 1;
	  val >>= POWI_WINDOW_SIZE;
	}
      else
	{
	  val >>= 1;
	  result++;
	}
    }

  return result + powi_lookup_cost (val, cache);
}
/* Recursive subroutine of powi_as_mults.  This function takes the
   array, CACHE, of already calculated exponents and an exponent N and
   returns a tree that corresponds to CACHE[1]**N, with type TYPE.  */

static tree
powi_as_mults_1 (gimple_stmt_iterator *gsi, location_t loc, tree type,
		 unsigned HOST_WIDE_INT n, tree *cache)
{
  tree op0, op1, ssa_target;
  unsigned HOST_WIDE_INT digit;
  gassign *mult_stmt;

  if (n < POWI_TABLE_SIZE && cache[n])
    return cache[n];

  ssa_target = make_temp_ssa_name (type, NULL, "powmult");

  if (n < POWI_TABLE_SIZE)
    {
      cache[n] = ssa_target;
      op0 = powi_as_mults_1 (gsi, loc, type, n - powi_table[n], cache);
      op1 = powi_as_mults_1 (gsi, loc, type, powi_table[n], cache);
    }
  else if (n & 1)
    {
      digit = n & ((1 << POWI_WINDOW_SIZE) - 1);
      op0 = powi_as_mults_1 (gsi, loc, type, n - digit, cache);
      op1 = powi_as_mults_1 (gsi, loc, type, digit, cache);
    }
  else
    {
      op0 = powi_as_mults_1 (gsi, loc, type, n >> 1, cache);
      op1 = op0;
    }

  mult_stmt = gimple_build_assign (ssa_target, MULT_EXPR, op0, op1);
  gimple_set_location (mult_stmt, loc);
  gsi_insert_before (gsi, mult_stmt, GSI_SAME_STMT);

  return ssa_target;
}
/* Convert ARG0**N to a tree of multiplications of ARG0 with itself.
   This function needs to be kept in sync with powi_cost above.  */

tree
powi_as_mults (gimple_stmt_iterator *gsi, location_t loc,
	       tree arg0, HOST_WIDE_INT n)
{
  tree cache[POWI_TABLE_SIZE], result, type = TREE_TYPE (arg0);
  gassign *div_stmt;
  tree target;

  if (n == 0)
    return build_one_cst (type);

  memset (cache, 0, sizeof (cache));
  cache[1] = arg0;

  result = powi_as_mults_1 (gsi, loc, type, absu_hwi (n), cache);
  if (n >= 0)
    return result;

  /* If the original exponent was negative, reciprocate the result.  */
  target = make_temp_ssa_name (type, NULL, "powmult");
  div_stmt = gimple_build_assign (target, RDIV_EXPR,
				  build_real (type, dconst1), result);
  gimple_set_location (div_stmt, loc);
  gsi_insert_before (gsi, div_stmt, GSI_SAME_STMT);

  return target;
}
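/* For illustration (added for exposition; not from the original
   sources), powi_as_mults with ARG0 = x and N = -2 emits

     powmult_1 = x * x;
     powmult_2 = 1.0 / powmult_1;

   computing x**|N| first and then reciprocating the result, exactly
   the negative-exponent path above.  */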
/* ARG0 and N are the two arguments to a powi builtin in GSI with
   location info LOC.  If the arguments are appropriate, create an
   equivalent sequence of statements prior to GSI using an optimal
   number of multiplications, and return an expression holding the
   result.  */

static tree
gimple_expand_builtin_powi (gimple_stmt_iterator *gsi, location_t loc,
			    tree arg0, HOST_WIDE_INT n)
{
  if ((n >= -1 && n <= 2)
      || (optimize_function_for_speed_p (cfun)
	  && powi_cost (n) <= POWI_MAX_MULTS))
    return powi_as_mults (gsi, loc, arg0, n);

  return NULL_TREE;
}
/* Build a gimple call statement that calls FN with argument ARG.
   Set the lhs of the call statement to a fresh SSA name.  Insert the
   statement prior to GSI's current position, and return the fresh
   SSA name.  */

static tree
build_and_insert_call (gimple_stmt_iterator *gsi, location_t loc,
		       tree fn, tree arg)
{
  gcall *call_stmt;
  tree ssa_target;

  call_stmt = gimple_build_call (fn, 1, arg);
  ssa_target = make_temp_ssa_name (TREE_TYPE (arg), NULL, "powroot");
  gimple_set_lhs (call_stmt, ssa_target);
  gimple_set_location (call_stmt, loc);
  gsi_insert_before (gsi, call_stmt, GSI_SAME_STMT);

  return ssa_target;
}
/* Build a gimple binary operation with the given CODE and arguments
   ARG0, ARG1, assigning the result to a new SSA name for variable
   TARGET.  Insert the statement prior to GSI's current position, and
   return the fresh SSA name.  */

static tree
build_and_insert_binop (gimple_stmt_iterator *gsi, location_t loc,
			const char *name, enum tree_code code,
			tree arg0, tree arg1)
{
  tree result = make_temp_ssa_name (TREE_TYPE (arg0), NULL, name);
  gassign *stmt = gimple_build_assign (result, code, arg0, arg1);
  gimple_set_location (stmt, loc);
  gsi_insert_before (gsi, stmt, GSI_SAME_STMT);

  return result;
}
/* Build a gimple reference operation with the given CODE and argument
   ARG, assigning the result to a new SSA name of TYPE with NAME.
   Insert the statement prior to GSI's current position, and return
   the fresh SSA name.  */

static inline tree
build_and_insert_ref (gimple_stmt_iterator *gsi, location_t loc, tree type,
		      const char *name, enum tree_code code, tree arg0)
{
  tree result = make_temp_ssa_name (type, NULL, name);
  gimple *stmt = gimple_build_assign (result, build1 (code, type, arg0));
  gimple_set_location (stmt, loc);
  gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
  return result;
}
/* Build a gimple assignment to cast VAL to TYPE.  Insert the statement
   prior to GSI's current position, and return the fresh SSA name.  */

static tree
build_and_insert_cast (gimple_stmt_iterator *gsi, location_t loc,
		       tree type, tree val)
{
  tree result = make_ssa_name (type);
  gassign *stmt = gimple_build_assign (result, NOP_EXPR, val);
  gimple_set_location (stmt, loc);
  gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
  return result;
}
struct pow_synth_sqrt_info
{
  bool *factors;
  unsigned int deepest;
  unsigned int num_mults;
};
/* Return true iff the real value C can be represented as a
   sum of powers of 0.5 up to N.  That is:
   C == SUM<i from 1..N> (a[i]*(0.5**i)) where a[i] is either 0 or 1.
   Record in INFO the various parameters of the synthesis algorithm such
   as the factors a[i], the maximum 0.5 power and the number of
   multiplications that will be required.  */
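/* A worked example (added for exposition; not from the original
   sources): C = 0.625 with N >= 3 gives 0.625 == 0.5**1 + 0.5**3, so
   factors = {1, 0, 1}, deepest == 3 and num_mults == 1: one
   multiplication joins the two sqrt chains, as in
   pow (x, 0.625) == sqrt (x) * sqrt (sqrt (sqrt (x))).  */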
static bool
representable_as_half_series_p (REAL_VALUE_TYPE c, unsigned n,
				struct pow_synth_sqrt_info *info)
{
  REAL_VALUE_TYPE factor = dconsthalf;
  REAL_VALUE_TYPE remainder = c;

  info->deepest = 0;
  info->num_mults = 0;
  memset (info->factors, 0, n * sizeof (bool));

  for (unsigned i = 0; i < n; i++)
    {
      REAL_VALUE_TYPE res;

      /* If something inexact happened bail out now.  */
      if (real_arithmetic (&res, MINUS_EXPR, &remainder, &factor))
	return false;

      /* We have hit zero.  The number is representable as a sum
	 of powers of 0.5.  */
      if (real_equal (&res, &dconst0))
	{
	  info->factors[i] = true;
	  info->deepest = i + 1;
	  return true;
	}
      else if (!REAL_VALUE_NEGATIVE (res))
	{
	  remainder = res;
	  info->factors[i] = true;
	  info->num_mults++;
	}
      else
	info->factors[i] = false;

      real_arithmetic (&factor, MULT_EXPR, &factor, &dconsthalf);
    }
  return false;
}
/* Return the tree corresponding to FN being applied
   to ARG N times at GSI and LOC.
   Look up previous results from CACHE if need be.
   cache[0] should contain just plain ARG i.e. FN applied to ARG 0 times.  */

static tree
get_fn_chain (tree arg, unsigned int n, gimple_stmt_iterator *gsi,
	      tree fn, location_t loc, tree *cache)
{
  tree res = cache[n];
  if (!res)
    {
      tree prev = get_fn_chain (arg, n - 1, gsi, fn, loc, cache);
      res = build_and_insert_call (gsi, loc, fn, prev);
      cache[n] = res;
    }

  return res;
}
/* Print to STREAM the repeated application of function FNAME to ARG
   N times.  So, for FNAME = "foo", ARG = "x", N = 2 it would print:
   foo (foo (x)).  */

static void
print_nested_fn (FILE* stream, const char *fname, const char* arg,
		 unsigned int n)
{
  if (n == 0)
    fprintf (stream, "%s", arg);
  else
    {
      fprintf (stream, "%s (", fname);
      print_nested_fn (stream, fname, arg, n - 1);
      fprintf (stream, ")");
    }
}
/* Print to STREAM the fractional sequence of sqrt chains
   applied to ARG, described by INFO.  Used for the dump file.  */

static void
dump_fractional_sqrt_sequence (FILE *stream, const char *arg,
			       struct pow_synth_sqrt_info *info)
{
  for (unsigned int i = 0; i < info->deepest; i++)
    {
      bool is_set = info->factors[i];
      if (is_set)
	{
	  print_nested_fn (stream, "sqrt", arg, i + 1);
	  if (i != info->deepest - 1)
	    fprintf (stream, " * ");
	}
    }
}
/* Print to STREAM a representation of raising ARG to an integer
   power N.  Used for the dump file.  */

static void
dump_integer_part (FILE *stream, const char* arg, HOST_WIDE_INT n)
{
  if (n > 1)
    fprintf (stream, "powi (%s, " HOST_WIDE_INT_PRINT_DEC ")", arg, n);
  else if (n == 1)
    fprintf (stream, "%s", arg);
}
/* Attempt to synthesize a POW[F] (ARG0, ARG1) call using chains of
   square roots.  Place at GSI and LOC.  Limit the maximum depth
   of the sqrt chains to MAX_DEPTH.  Return the tree holding the
   result of the expanded sequence or NULL_TREE if the expansion failed.

   This routine assumes that ARG1 is a real number with a fractional part
   (the integer exponent case will have been handled earlier in
   gimple_expand_builtin_pow).

   For ARG1 > 0.0:
   * For ARG1 composed of a whole part WHOLE_PART and a fractional part
     FRAC_PART i.e. WHOLE_PART == floor (ARG1) and
     FRAC_PART == ARG1 - WHOLE_PART:
     Produce POWI (ARG0, WHOLE_PART) * POW (ARG0, FRAC_PART) where
     POW (ARG0, FRAC_PART) is expanded as a product of square root chains
     if it can be expressed as such, that is if FRAC_PART satisfies:
     FRAC_PART == <SUM from i = 1 until MAX_DEPTH> (a[i] * (0.5**i))
     where integer a[i] is either 0 or 1.

     Example:
     POW (x, 3.625) == POWI (x, 3) * POW (x, 0.625)
       --> POWI (x, 3) * SQRT (x) * SQRT (SQRT (SQRT (x)))

   For ARG1 < 0.0 there are two approaches:
   * (A) Expand to 1.0 / POW (ARG0, -ARG1) where POW (ARG0, -ARG1)
	 is calculated as above.

     Example:
     POW (x, -5.625) == 1.0 / POW (x, 5.625)
       --> 1.0 / (POWI (x, 5) * SQRT (x) * SQRT (SQRT (SQRT (x))))

   * (B) : WHOLE_PART := - ceil (abs (ARG1))
	   FRAC_PART  := ARG1 - WHOLE_PART
	   and expand to POW (x, FRAC_PART) / POWI (x, WHOLE_PART).

     Example:
     POW (x, -5.875) == POW (x, 0.125) / POWI (X, 6)
       --> SQRT (SQRT (SQRT (x))) / (POWI (x, 6))

   For ARG1 < 0.0 we choose between (A) and (B) depending on
   how many multiplications we'd have to do.
   So, for the example in (B): POW (x, -5.875), if we were to
   follow algorithm (A) we would produce:
   1.0 / POWI (X, 5) * SQRT (X) * SQRT (SQRT (X)) * SQRT (SQRT (SQRT (X)))
   which contains more multiplications than approach (B).

   Hopefully, this approach will eliminate potentially expensive POW library
   calls when unsafe floating point math is enabled and allow the compiler to
   further optimise the multiplies, square roots and divides produced by this
   function.  */

static tree
expand_pow_as_sqrts (gimple_stmt_iterator *gsi, location_t loc,
		     tree arg0, tree arg1, HOST_WIDE_INT max_depth)
{
  tree type = TREE_TYPE (arg0);
  machine_mode mode = TYPE_MODE (type);
  tree sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT);
  bool one_over = true;

  if (!sqrtfn)
    return NULL_TREE;

  if (TREE_CODE (arg1) != REAL_CST)
    return NULL_TREE;

  REAL_VALUE_TYPE exp_init = TREE_REAL_CST (arg1);

  gcc_assert (max_depth > 0);
  tree *cache = XALLOCAVEC (tree, max_depth + 1);

  struct pow_synth_sqrt_info synth_info;
  synth_info.factors = XALLOCAVEC (bool, max_depth + 1);
  synth_info.deepest = 0;
  synth_info.num_mults = 0;

  bool neg_exp = REAL_VALUE_NEGATIVE (exp_init);
  REAL_VALUE_TYPE exp = real_value_abs (&exp_init);

  /* The whole and fractional parts of exp.  */
  REAL_VALUE_TYPE whole_part;
  REAL_VALUE_TYPE frac_part;

  real_floor (&whole_part, mode, &exp);
  real_arithmetic (&frac_part, MINUS_EXPR, &exp, &whole_part);

  REAL_VALUE_TYPE ceil_whole = dconst0;
  REAL_VALUE_TYPE ceil_fract = dconst0;

  if (neg_exp)
    {
      real_ceil (&ceil_whole, mode, &exp);
      real_arithmetic (&ceil_fract, MINUS_EXPR, &ceil_whole, &exp);
    }

  if (!representable_as_half_series_p (frac_part, max_depth, &synth_info))
    return NULL_TREE;

  /* Check whether it's more profitable to not use 1.0 / ...  */
  if (neg_exp)
    {
      struct pow_synth_sqrt_info alt_synth_info;
      alt_synth_info.factors = XALLOCAVEC (bool, max_depth + 1);
      alt_synth_info.deepest = 0;
      alt_synth_info.num_mults = 0;

      if (representable_as_half_series_p (ceil_fract, max_depth,
					  &alt_synth_info)
	  && alt_synth_info.deepest <= synth_info.deepest
	  && alt_synth_info.num_mults < synth_info.num_mults)
	{
	  whole_part = ceil_whole;
	  frac_part = ceil_fract;
	  synth_info.deepest = alt_synth_info.deepest;
	  synth_info.num_mults = alt_synth_info.num_mults;
	  memcpy (synth_info.factors, alt_synth_info.factors,
		  (max_depth + 1) * sizeof (bool));
	  one_over = false;
	}
    }

  HOST_WIDE_INT n = real_to_integer (&whole_part);
  REAL_VALUE_TYPE cint;
  real_from_integer (&cint, VOIDmode, n, SIGNED);

  if (!real_identical (&whole_part, &cint))
    return NULL_TREE;

  if (powi_cost (n) + synth_info.num_mults > POWI_MAX_MULTS)
    return NULL_TREE;

  memset (cache, 0, (max_depth + 1) * sizeof (tree));

  tree integer_res = n == 0 ? build_real (type, dconst1) : arg0;

  /* Calculate the integer part of the exponent.  */
  if (n > 1)
    {
      integer_res = gimple_expand_builtin_powi (gsi, loc, arg0, n);
      if (!integer_res)
	return NULL_TREE;
    }

  if (dump_file)
    {
      char string[64];

      real_to_decimal (string, &exp_init, sizeof (string), 0, 1);
      fprintf (dump_file, "synthesizing pow (x, %s) as:\n", string);

      if (neg_exp)
	{
	  if (one_over)
	    {
	      fprintf (dump_file, "1.0 / (");
	      dump_integer_part (dump_file, "x", n);
	      if (n > 0)
		fprintf (dump_file, " * ");
	      dump_fractional_sqrt_sequence (dump_file, "x", &synth_info);
	      fprintf (dump_file, ")");
	    }
	  else
	    {
	      dump_fractional_sqrt_sequence (dump_file, "x", &synth_info);
	      fprintf (dump_file, " / (");
	      dump_integer_part (dump_file, "x", n);
	      fprintf (dump_file, ")");
	    }
	}
      else
	{
	  dump_fractional_sqrt_sequence (dump_file, "x", &synth_info);
	  if (n > 0)
	    fprintf (dump_file, " * ");
	  dump_integer_part (dump_file, "x", n);
	}

      fprintf (dump_file, "\ndeepest sqrt chain: %d\n", synth_info.deepest);
    }

  tree fract_res = NULL_TREE;
  cache[0] = arg0;

  /* Calculate the fractional part of the exponent.  */
  for (unsigned i = 0; i < synth_info.deepest; i++)
    {
      if (synth_info.factors[i])
	{
	  tree sqrt_chain = get_fn_chain (arg0, i + 1, gsi, sqrtfn, loc, cache);

	  if (!fract_res)
	    fract_res = sqrt_chain;
	  else
	    fract_res = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
						fract_res, sqrt_chain);
	}
    }

  tree res = NULL_TREE;

  if (neg_exp)
    {
      if (one_over)
	{
	  if (n > 0)
	    res = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
					  fract_res, integer_res);
	  else
	    res = fract_res;

	  res = build_and_insert_binop (gsi, loc, "powrootrecip", RDIV_EXPR,
					build_real (type, dconst1), res);
	}
      else
	{
	  res = build_and_insert_binop (gsi, loc, "powroot", RDIV_EXPR,
					fract_res, integer_res);
	}
    }
  else
    res = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
				  fract_res, integer_res);
  return res;
}
/* ARG0 and ARG1 are the two arguments to a pow builtin call in GSI
   with location info LOC.  If possible, create an equivalent and
   less expensive sequence of statements prior to GSI, and return an
   expression holding the result.  */

static tree
gimple_expand_builtin_pow (gimple_stmt_iterator *gsi, location_t loc,
			   tree arg0, tree arg1)
{
  REAL_VALUE_TYPE c, cint, dconst1_3, dconst1_4, dconst1_6;
  REAL_VALUE_TYPE c2, dconst3;
  HOST_WIDE_INT n;
  tree type, sqrtfn, cbrtfn, sqrt_arg0, result, cbrt_x, powi_cbrt_x;
  machine_mode mode;
  bool speed_p = optimize_bb_for_speed_p (gsi_bb (*gsi));
  bool hw_sqrt_exists, c_is_int, c2_is_int;

  dconst1_4 = dconst1;
  SET_REAL_EXP (&dconst1_4, REAL_EXP (&dconst1_4) - 2);

  /* If the exponent isn't a constant, there's nothing of interest
     to be done.  */
  if (TREE_CODE (arg1) != REAL_CST)
    return NULL_TREE;

  /* Don't perform the operation if flag_signaling_nans is on
     and the operand is a signaling NaN.  */
  if (HONOR_SNANS (TYPE_MODE (TREE_TYPE (arg1)))
      && ((TREE_CODE (arg0) == REAL_CST
	   && REAL_VALUE_ISSIGNALING_NAN (TREE_REAL_CST (arg0)))
	  || REAL_VALUE_ISSIGNALING_NAN (TREE_REAL_CST (arg1))))
    return NULL_TREE;

  /* If the exponent is equivalent to an integer, expand to an optimal
     multiplication sequence when profitable.  */
  c = TREE_REAL_CST (arg1);
  n = real_to_integer (&c);
  real_from_integer (&cint, VOIDmode, n, SIGNED);
  c_is_int = real_identical (&c, &cint);

  if (c_is_int
      && ((n >= -1 && n <= 2)
	  || (flag_unsafe_math_optimizations
	      && speed_p
	      && powi_cost (n) <= POWI_MAX_MULTS)))
    return gimple_expand_builtin_powi (gsi, loc, arg0, n);

  /* Attempt various optimizations using sqrt and cbrt.  */
  type = TREE_TYPE (arg0);
  mode = TYPE_MODE (type);
  sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT);

  /* Optimize pow(x,0.5) = sqrt(x).  This replacement is always safe
     unless signed zeros must be maintained.  pow(-0,0.5) = +0, while
     sqrt(-0) = -0.  */
  if (sqrtfn
      && real_equal (&c, &dconsthalf)
      && !HONOR_SIGNED_ZEROS (mode))
    return build_and_insert_call (gsi, loc, sqrtfn, arg0);

  hw_sqrt_exists = optab_handler (sqrt_optab, mode) != CODE_FOR_nothing;

  /* Optimize pow(x,1./3.) = cbrt(x).  This requires unsafe math
     optimizations since 1./3. is not exactly representable.  If x
     is negative and finite, the correct value of pow(x,1./3.) is
     a NaN with the "invalid" exception raised, because the value
     of 1./3. actually has an even denominator.  The correct value
     of cbrt(x) is a negative real value.  */
  cbrtfn = mathfn_built_in (type, BUILT_IN_CBRT);
  dconst1_3 = real_value_truncate (mode, dconst_third ());

  if (flag_unsafe_math_optimizations
      && cbrtfn
      && (!HONOR_NANS (mode) || tree_expr_nonnegative_p (arg0))
      && real_equal (&c, &dconst1_3))
    return build_and_insert_call (gsi, loc, cbrtfn, arg0);

  /* Optimize pow(x,1./6.) = cbrt(sqrt(x)).  Don't do this optimization
     if we don't have a hardware sqrt insn.  */
  dconst1_6 = dconst1_3;
  SET_REAL_EXP (&dconst1_6, REAL_EXP (&dconst1_6) - 1);

  if (flag_unsafe_math_optimizations
      && sqrtfn
      && cbrtfn
      && (!HONOR_NANS (mode) || tree_expr_nonnegative_p (arg0))
      && speed_p
      && hw_sqrt_exists
      && real_equal (&c, &dconst1_6))
    {
      /* sqrt(x)  */
      sqrt_arg0 = build_and_insert_call (gsi, loc, sqrtfn, arg0);

      /* cbrt(sqrt(x))  */
      return build_and_insert_call (gsi, loc, cbrtfn, sqrt_arg0);
    }

  /* Attempt to expand the POW as a product of square root chains.
     Expand the 0.25 case even when optimising for size.  */
  if (flag_unsafe_math_optimizations
      && sqrtfn
      && hw_sqrt_exists
      && (speed_p || real_equal (&c, &dconst1_4))
      && !HONOR_SIGNED_ZEROS (mode))
    {
      unsigned int max_depth = speed_p
			       ? param_max_pow_sqrt_depth
			       : 2;

      tree expand_with_sqrts
	= expand_pow_as_sqrts (gsi, loc, arg0, arg1, max_depth);

      if (expand_with_sqrts)
	return expand_with_sqrts;
    }

  real_arithmetic (&c2, MULT_EXPR, &c, &dconst2);
  n = real_to_integer (&c2);
  real_from_integer (&cint, VOIDmode, n, SIGNED);
  c2_is_int = real_identical (&c2, &cint);

  /* Optimize pow(x,c), where 3c = n for some nonzero integer n, into

       powi(x, n/3) * powi(cbrt(x), n%3),                    n > 0;
       1.0 / (powi(x, abs(n)/3) * powi(cbrt(x), abs(n)%3)),  n < 0.

     Do not calculate the first factor when n/3 = 0.  As cbrt(x) is
     different from pow(x, 1./3.) due to rounding and behavior with
     negative x, we need to constrain this transformation to unsafe
     math and positive x or finite math.  */
  real_from_integer (&dconst3, VOIDmode, 3, SIGNED);
  real_arithmetic (&c2, MULT_EXPR, &c, &dconst3);
  real_round (&c2, mode, &c2);
  n = real_to_integer (&c2);
  real_from_integer (&cint, VOIDmode, n, SIGNED);
  real_arithmetic (&c2, RDIV_EXPR, &cint, &dconst3);
  real_convert (&c2, mode, &c2);

  if (flag_unsafe_math_optimizations
      && cbrtfn
      && (!HONOR_NANS (mode) || tree_expr_nonnegative_p (arg0))
      && real_identical (&c2, &c)
      && !c2_is_int
      && optimize_function_for_speed_p (cfun)
      && powi_cost (n / 3) <= POWI_MAX_MULTS)
    {
      tree powi_x_ndiv3 = NULL_TREE;

      /* Attempt to fold powi(arg0, abs(n/3)) into multiplies.  If not
	 possible or profitable, give up.  Skip the degenerate case when
	 abs(n) < 3, where the result is always 1.  */
      if (absu_hwi (n) >= 3)
	{
	  powi_x_ndiv3 = gimple_expand_builtin_powi (gsi, loc, arg0,
						     abs_hwi (n / 3));
	  if (!powi_x_ndiv3)
	    return NULL_TREE;
	}

      /* Calculate powi(cbrt(x), n%3).  Don't use gimple_expand_builtin_powi
	 as that creates an unnecessary variable.  Instead, just produce
	 either cbrt(x) or cbrt(x) * cbrt(x).  */
      cbrt_x = build_and_insert_call (gsi, loc, cbrtfn, arg0);

      if (absu_hwi (n) % 3 == 1)
	powi_cbrt_x = cbrt_x;
      else
	powi_cbrt_x = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
					      cbrt_x, cbrt_x);

      /* Multiply the two subexpressions, unless powi(x,abs(n)/3) = 1.  */
      if (absu_hwi (n) < 3)
	result = powi_cbrt_x;
      else
	result = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
					 powi_x_ndiv3, powi_cbrt_x);

      /* If n is negative, reciprocate the result.  */
      if (n < 0)
	result = build_and_insert_binop (gsi, loc, "powroot", RDIV_EXPR,
					 build_real (type, dconst1), result);

      return result;
    }

  /* No optimizations succeeded.  */
  return NULL_TREE;
}
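
/* A sketch of the cube-root case above (temporaries invented for
   exposition): pow (x, 2./3.) has 3c = 2, so n/3 = 0 and n%3 = 2, giving

     t = __builtin_cbrt (x);
     res = t * t;

   while pow (x, -2./3.) additionally reciprocates: res = 1.0 / (t * t).  */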
/* ARG is the argument to a cabs builtin call in GSI with location info
   LOC.  Create a sequence of statements prior to GSI that calculates
   sqrt(R*R + I*I), where R and I are the real and imaginary components
   of ARG, respectively.  Return an expression holding the result.  */

static tree
gimple_expand_builtin_cabs (gimple_stmt_iterator *gsi, location_t loc, tree arg)
{
  tree real_part, imag_part, addend1, addend2, sum, result;
  tree type = TREE_TYPE (TREE_TYPE (arg));
  tree sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT);
  machine_mode mode = TYPE_MODE (type);

  if (!flag_unsafe_math_optimizations
      || !optimize_bb_for_speed_p (gimple_bb (gsi_stmt (*gsi)))
      || !sqrtfn
      || optab_handler (sqrt_optab, mode) == CODE_FOR_nothing)
    return NULL_TREE;

  real_part = build_and_insert_ref (gsi, loc, type, "cabs",
				    REALPART_EXPR, arg);
  addend1 = build_and_insert_binop (gsi, loc, "cabs", MULT_EXPR,
				    real_part, real_part);
  imag_part = build_and_insert_ref (gsi, loc, type, "cabs",
				    IMAGPART_EXPR, arg);
  addend2 = build_and_insert_binop (gsi, loc, "cabs", MULT_EXPR,
				    imag_part, imag_part);
  sum = build_and_insert_binop (gsi, loc, "cabs", PLUS_EXPR, addend1, addend2);
  result = build_and_insert_call (gsi, loc, sqrtfn, sum);

  return result;
}
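
/* Illustrative effect (temporaries invented): for complex double z,
   cabs (z) expands under unsafe math to

     r = __real z;
     i = __imag z;
     res = __builtin_sqrt (r * r + i * i);

   The guard on flag_unsafe_math_optimizations matters: the open-coded
   form can overflow in r * r even when the mathematical result is
   representable.  */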
/* Go through all calls to sin, cos and cexpi and call execute_cse_sincos_1
   on the SSA_NAME argument of each of them.  */
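
/* For example (SSA names invented), the pass rewrites

     s_1 = sin (x_5);
     c_2 = cos (x_5);

   into a single cexpi call whose parts are extracted:

     t_7 = __builtin_cexpi (x_5);
     s_1 = IMAGPART_EXPR <t_7>;
     c_2 = REALPART_EXPR <t_7>;  */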
namespace {

const pass_data pass_data_cse_sincos =
{
  GIMPLE_PASS, /* type */
  "sincos", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_SINCOS, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};

class pass_cse_sincos : public gimple_opt_pass
{
public:
  pass_cse_sincos (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_cse_sincos, ctxt)
  {}

  /* opt_pass methods: */
  bool gate (function *) final override
  {
    return optimize;
  }

  unsigned int execute (function *) final override;
}; // class pass_cse_sincos

unsigned int
pass_cse_sincos::execute (function *fun)
{
  basic_block bb;
  bool cfg_changed = false;

  calculate_dominance_info (CDI_DOMINATORS);
  memset (&sincos_stats, 0, sizeof (sincos_stats));

  FOR_EACH_BB_FN (bb, fun)
    {
      gimple_stmt_iterator gsi;

      for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple *stmt = gsi_stmt (gsi);

	  if (is_gimple_call (stmt)
	      && gimple_call_lhs (stmt))
	    {
	      tree arg;
	      switch (gimple_call_combined_fn (stmt))
		{
		CASE_CFN_COS:
		CASE_CFN_SIN:
		CASE_CFN_CEXPI:
		  arg = gimple_call_arg (stmt, 0);
		  /* Make sure we have either sincos or cexp.  */
		  if (!targetm.libc_has_function (function_c99_math_complex,
						  TREE_TYPE (arg))
		      && !targetm.libc_has_function (function_sincos,
						     TREE_TYPE (arg)))
		    break;

		  if (TREE_CODE (arg) == SSA_NAME)
		    cfg_changed |= execute_cse_sincos_1 (arg);
		  break;

		default:
		  break;
		}
	    }
	}
    }

  statistics_counter_event (fun, "sincos statements inserted",
			    sincos_stats.inserted);
  statistics_counter_event (fun, "conv statements removed",
			    sincos_stats.conv_removed);

  return cfg_changed ? TODO_cleanup_cfg : 0;
}

} // anon namespace

gimple_opt_pass *
make_pass_cse_sincos (gcc::context *ctxt)
{
  return new pass_cse_sincos (ctxt);
}
/* Expand powi(x,n) into an optimal number of multiplies, when n is a constant.
   Also expand CABS.  */
namespace {

const pass_data pass_data_expand_powcabs =
{
  GIMPLE_PASS, /* type */
  "powcabs", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_POWCABS, /* tv_id */
  PROP_ssa, /* properties_required */
  PROP_gimple_opt_math, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};

class pass_expand_powcabs : public gimple_opt_pass
{
public:
  pass_expand_powcabs (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_expand_powcabs, ctxt)
  {}

  /* opt_pass methods: */
  bool gate (function *) final override
  {
    return optimize;
  }

  unsigned int execute (function *) final override;
}; // class pass_expand_powcabs

unsigned int
pass_expand_powcabs::execute (function *fun)
{
  basic_block bb;
  bool cfg_changed = false;

  calculate_dominance_info (CDI_DOMINATORS);

  FOR_EACH_BB_FN (bb, fun)
    {
      gimple_stmt_iterator gsi;
      bool cleanup_eh = false;

      for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple *stmt = gsi_stmt (gsi);

	  /* Only the last stmt in a bb could throw, no need to call
	     gimple_purge_dead_eh_edges if we change something in the middle
	     of a basic block.  */
	  cleanup_eh = false;

	  if (is_gimple_call (stmt)
	      && gimple_call_lhs (stmt))
	    {
	      tree arg0, arg1, result;
	      HOST_WIDE_INT n;
	      location_t loc;

	      switch (gimple_call_combined_fn (stmt))
		{
		CASE_CFN_POW:
		  arg0 = gimple_call_arg (stmt, 0);
		  arg1 = gimple_call_arg (stmt, 1);

		  loc = gimple_location (stmt);
		  result = gimple_expand_builtin_pow (&gsi, loc, arg0, arg1);

		  if (result)
		    {
		      tree lhs = gimple_get_lhs (stmt);
		      gassign *new_stmt = gimple_build_assign (lhs, result);
		      gimple_set_location (new_stmt, loc);
		      unlink_stmt_vdef (stmt);
		      gsi_replace (&gsi, new_stmt, true);
		      cleanup_eh = true;
		      if (gimple_vdef (stmt))
			release_ssa_name (gimple_vdef (stmt));
		    }
		  break;

		CASE_CFN_POWI:
		  arg0 = gimple_call_arg (stmt, 0);
		  arg1 = gimple_call_arg (stmt, 1);
		  loc = gimple_location (stmt);

		  if (real_minus_onep (arg0))
		    {
		      tree t0, t1, cond, one, minus_one;
		      gassign *stmt;

		      t0 = TREE_TYPE (arg0);
		      t1 = TREE_TYPE (arg1);
		      one = build_real (t0, dconst1);
		      minus_one = build_real (t0, dconstm1);

		      cond = make_temp_ssa_name (t1, NULL, "powi_cond");
		      stmt = gimple_build_assign (cond, BIT_AND_EXPR,
						  arg1, build_int_cst (t1, 1));
		      gimple_set_location (stmt, loc);
		      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);

		      result = make_temp_ssa_name (t0, NULL, "powi");
		      stmt = gimple_build_assign (result, COND_EXPR, cond,
						  minus_one, one);
		      gimple_set_location (stmt, loc);
		      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
		    }
		  else
		    {
		      if (!tree_fits_shwi_p (arg1))
			break;

		      n = tree_to_shwi (arg1);
		      result = gimple_expand_builtin_powi (&gsi, loc, arg0, n);
		    }

		  if (result)
		    {
		      tree lhs = gimple_get_lhs (stmt);
		      gassign *new_stmt = gimple_build_assign (lhs, result);
		      gimple_set_location (new_stmt, loc);
		      unlink_stmt_vdef (stmt);
		      gsi_replace (&gsi, new_stmt, true);
		      cleanup_eh = true;
		      if (gimple_vdef (stmt))
			release_ssa_name (gimple_vdef (stmt));
		    }
		  break;

		CASE_CFN_CABS:
		  arg0 = gimple_call_arg (stmt, 0);
		  loc = gimple_location (stmt);
		  result = gimple_expand_builtin_cabs (&gsi, loc, arg0);

		  if (result)
		    {
		      tree lhs = gimple_get_lhs (stmt);
		      gassign *new_stmt = gimple_build_assign (lhs, result);
		      gimple_set_location (new_stmt, loc);
		      unlink_stmt_vdef (stmt);
		      gsi_replace (&gsi, new_stmt, true);
		      cleanup_eh = true;
		      if (gimple_vdef (stmt))
			release_ssa_name (gimple_vdef (stmt));
		    }
		  break;

		default:
		  break;
		}
	    }
	}
      if (cleanup_eh)
	cfg_changed |= gimple_purge_dead_eh_edges (bb);
    }

  return cfg_changed ? TODO_cleanup_cfg : 0;
}
} // anon namespace

gimple_opt_pass *
make_pass_expand_powcabs (gcc::context *ctxt)
{
  return new pass_expand_powcabs (ctxt);
}
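
/* A sketch of the powi(-1, n) special case handled above (names
   invented): res = __builtin_powi (-1.0, n_3) becomes a parity test
   instead of a call:

     powi_cond_4 = n_3 & 1;
     powi_5 = powi_cond_4 ? -1.0 : 1.0;  */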
/* Return true if stmt is a type conversion operation that can be stripped
   when used in a widening multiply operation.  */

static bool
widening_mult_conversion_strippable_p (tree result_type, gimple *stmt)
{
  enum tree_code rhs_code = gimple_assign_rhs_code (stmt);

  if (TREE_CODE (result_type) == INTEGER_TYPE)
    {
      tree op_type;
      tree inner_op_type;

      if (!CONVERT_EXPR_CODE_P (rhs_code))
	return false;

      op_type = TREE_TYPE (gimple_assign_lhs (stmt));

      /* If the type of OP has the same precision as the result, then
	 we can strip this conversion.  The multiply operation will be
	 selected to create the correct extension as a by-product.  */
      if (TYPE_PRECISION (result_type) == TYPE_PRECISION (op_type))
	return true;

      /* We can also strip a conversion if it preserves the signed-ness of
	 the operation and doesn't narrow the range.  */
      inner_op_type = TREE_TYPE (gimple_assign_rhs1 (stmt));

      /* If the inner-most type is unsigned, then we can strip any
	 intermediate widening operation.  If it's signed, then the
	 intermediate widening operation must also be signed.  */
      if ((TYPE_UNSIGNED (inner_op_type)
	   || TYPE_UNSIGNED (op_type) == TYPE_UNSIGNED (inner_op_type))
	  && TYPE_PRECISION (op_type) > TYPE_PRECISION (inner_op_type))
	return true;

      return false;
    }

  return rhs_code == FIXED_CONVERT_EXPR;
}
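
/* For example (types chosen for illustration): in

     unsigned short a;
     unsigned int b = a;
     unsigned long long c = (unsigned long long) b * d;

   the cast from A to B only widens and preserves signedness, so it can
   be stripped; the widening multiply will recreate the extension as a
   by-product.  */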
/* Return true if RHS is a suitable operand for a widening multiplication,
   assuming a target type of TYPE.
   There are two cases:

     - RHS makes some value at least twice as wide.  Store that value
       in *NEW_RHS_OUT if so, and store its type in *TYPE_OUT.

     - RHS is an integer constant.  Store that value in *NEW_RHS_OUT if so,
       but leave *TYPE_OUT untouched.  */

static bool
is_widening_mult_rhs_p (tree type, tree rhs, tree *type_out,
			tree *new_rhs_out)
{
  gimple *stmt;
  tree type1, rhs1;

  if (TREE_CODE (rhs) == SSA_NAME)
    {
      /* Use tree_non_zero_bits to see if this operand is zero_extended
	 for unsigned widening multiplications or non-negative for
	 signed widening multiplications.  */
      if (TREE_CODE (type) == INTEGER_TYPE
	  && (TYPE_PRECISION (type) & 1) == 0
	  && int_mode_for_size (TYPE_PRECISION (type) / 2, 1).exists ())
	{
	  unsigned int prec = TYPE_PRECISION (type);
	  unsigned int hprec = prec / 2;
	  wide_int bits = wide_int::from (tree_nonzero_bits (rhs), prec,
					  TYPE_SIGN (TREE_TYPE (rhs)));
	  if (TYPE_UNSIGNED (type)
	      && wi::bit_and (bits, wi::mask (hprec, true, prec)) == 0)
	    {
	      *type_out = build_nonstandard_integer_type (hprec, true);
	      /* X & MODE_MASK can be simplified to (T)X.  */
	      stmt = SSA_NAME_DEF_STMT (rhs);
	      if (is_gimple_assign (stmt)
		  && gimple_assign_rhs_code (stmt) == BIT_AND_EXPR
		  && TREE_CODE (gimple_assign_rhs2 (stmt)) == INTEGER_CST
		  && wi::to_wide (gimple_assign_rhs2 (stmt))
		     == wi::mask (hprec, false, prec))
		*new_rhs_out = gimple_assign_rhs1 (stmt);
	      else
		*new_rhs_out = rhs;
	      return true;
	    }
	  else if (!TYPE_UNSIGNED (type)
		   && wi::bit_and (bits, wi::mask (hprec - 1, true, prec)) == 0)
	    {
	      *type_out = build_nonstandard_integer_type (hprec, false);
	      *new_rhs_out = rhs;
	      return true;
	    }
	}

      stmt = SSA_NAME_DEF_STMT (rhs);
      if (is_gimple_assign (stmt))
	{
	  if (widening_mult_conversion_strippable_p (type, stmt))
	    {
	      rhs1 = gimple_assign_rhs1 (stmt);

	      if (TREE_CODE (rhs1) == INTEGER_CST)
		{
		  *new_rhs_out = rhs1;
		  *type_out = NULL;
		  return true;
		}
	    }
	  else
	    rhs1 = rhs;
	}
      else
	rhs1 = rhs;

      type1 = TREE_TYPE (rhs1);

      if (TREE_CODE (type1) != TREE_CODE (type)
	  || TYPE_PRECISION (type1) * 2 > TYPE_PRECISION (type))
	return false;

      *new_rhs_out = rhs1;
      *type_out = type1;
      return true;
    }

  if (TREE_CODE (rhs) == INTEGER_CST)
    {
      *new_rhs_out = rhs;
      *type_out = NULL;
      return true;
    }

  return false;
}
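
/* For example (SSA names invented): with a 32-bit TYPE, an operand
   defined by

     _1 = x_2 & 65535;

   has no nonzero bits above the low half, so it qualifies; *TYPE_OUT
   becomes a 16-bit unsigned type and the BIT_AND can be bypassed, since
   a cast of x_2 to that type yields the same value.  */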
/* Return true if STMT performs a widening multiplication, assuming the
   output type is TYPE.  If so, store the unwidened types of the operands
   in *TYPE1_OUT and *TYPE2_OUT respectively.  Also fill *RHS1_OUT and
   *RHS2_OUT such that converting those operands to types *TYPE1_OUT
   and *TYPE2_OUT would give the operands of the multiplication.  */

static bool
is_widening_mult_p (gimple *stmt,
		    tree *type1_out, tree *rhs1_out,
		    tree *type2_out, tree *rhs2_out)
{
  tree type = TREE_TYPE (gimple_assign_lhs (stmt));

  if (TREE_CODE (type) == INTEGER_TYPE)
    {
      if (TYPE_OVERFLOW_TRAPS (type))
	return false;
    }
  else if (TREE_CODE (type) != FIXED_POINT_TYPE)
    return false;

  if (!is_widening_mult_rhs_p (type, gimple_assign_rhs1 (stmt), type1_out,
			       rhs1_out))
    return false;

  if (!is_widening_mult_rhs_p (type, gimple_assign_rhs2 (stmt), type2_out,
			       rhs2_out))
    return false;

  if (*type1_out == NULL)
    {
      if (*type2_out == NULL || !int_fits_type_p (*rhs1_out, *type2_out))
	return false;
      *type1_out = *type2_out;
    }

  if (*type2_out == NULL)
    {
      if (!int_fits_type_p (*rhs2_out, *type1_out))
	return false;
      *type2_out = *type1_out;
    }

  /* Ensure that the larger of the two operands comes first.  */
  if (TYPE_PRECISION (*type1_out) < TYPE_PRECISION (*type2_out))
    {
      std::swap (*type1_out, *type2_out);
      std::swap (*rhs1_out, *rhs2_out);
    }

  return true;
}
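
/* For example (C-level view): on a target with a 32x32->64 multiply,

     int32_t a, b;
     int64_t r = (int64_t) a * (int64_t) b;

   is recognized here and later becomes a single WIDEN_MULT_EXPR instead
   of two sign extensions followed by a 64-bit multiply.  */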
/* Check to see if the CALL statement is an invocation of copysign
   with 1. being the first argument.  */

static bool
is_copysign_call_with_1 (gimple *call)
{
  gcall *c = dyn_cast <gcall *> (call);
  if (!c)
    return false;

  enum combined_fn code = gimple_call_combined_fn (c);

  if (code == CFN_LAST)
    return false;

  if (builtin_fn_p (code))
    {
      switch (as_builtin_fn (code))
	{
	CASE_FLT_FN (BUILT_IN_COPYSIGN):
	CASE_FLT_FN_FLOATN_NX (BUILT_IN_COPYSIGN):
	  return real_onep (gimple_call_arg (c, 0));
	default:
	  return false;
	}
    }

  if (internal_fn_p (code))
    {
      switch (as_internal_fn (code))
	{
	case IFN_COPYSIGN:
	  return real_onep (gimple_call_arg (c, 0));
	default:
	  return false;
	}
    }

  return false;
}
/* Try to expand the pattern x * copysign (1, y) into xorsign (x, y).
   This only happens when the xorsign optab is defined.  If the pattern
   is not a xorsign pattern or if expansion fails, FALSE is returned;
   otherwise TRUE is returned.  */

static bool
convert_expand_mult_copysign (gimple *stmt, gimple_stmt_iterator *gsi)
{
  tree treeop0, treeop1, lhs, type;
  location_t loc = gimple_location (stmt);
  lhs = gimple_assign_lhs (stmt);
  treeop0 = gimple_assign_rhs1 (stmt);
  treeop1 = gimple_assign_rhs2 (stmt);
  type = TREE_TYPE (lhs);
  machine_mode mode = TYPE_MODE (type);

  if (HONOR_SNANS (type))
    return false;

  if (TREE_CODE (treeop0) == SSA_NAME && TREE_CODE (treeop1) == SSA_NAME)
    {
      gimple *call0 = SSA_NAME_DEF_STMT (treeop0);
      if (!has_single_use (treeop0) || !is_copysign_call_with_1 (call0))
	{
	  call0 = SSA_NAME_DEF_STMT (treeop1);
	  if (!has_single_use (treeop1) || !is_copysign_call_with_1 (call0))
	    return false;

	  treeop1 = treeop0;
	}
      if (optab_handler (xorsign_optab, mode) == CODE_FOR_nothing)
	return false;

      gcall *c = as_a <gcall *> (call0);
      treeop0 = gimple_call_arg (c, 1);

      gcall *call_stmt
	= gimple_build_call_internal (IFN_XORSIGN, 2, treeop1, treeop0);
      gimple_set_lhs (call_stmt, lhs);
      gimple_set_location (call_stmt, loc);
      gsi_replace (gsi, call_stmt, true);
      return true;
    }

  return false;
}
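
/* Illustrative effect (names invented): for float x, y,

     res = x * __builtin_copysignf (1.0f, y);

   becomes a single sign-bit operation when the target provides xorsign:

     res = .XORSIGN (x, y);  */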
/* Process a single gimple statement STMT, which has a MULT_EXPR as
   its rhs, and try to convert it into a WIDEN_MULT_EXPR.  The return
   value is true iff we converted the statement.  */

static bool
convert_mult_to_widen (gimple *stmt, gimple_stmt_iterator *gsi)
{
  tree lhs, rhs1, rhs2, type, type1, type2;
  enum insn_code handler;
  scalar_int_mode to_mode, from_mode, actual_mode;
  optab op;
  int actual_precision;
  location_t loc = gimple_location (stmt);
  bool from_unsigned1, from_unsigned2;

  lhs = gimple_assign_lhs (stmt);
  type = TREE_TYPE (lhs);
  if (TREE_CODE (type) != INTEGER_TYPE)
    return false;

  if (!is_widening_mult_p (stmt, &type1, &rhs1, &type2, &rhs2))
    return false;

  /* If either of rhs1 and rhs2 is subject to abnormal coalescing,
     avoid the transform.  */
  if ((TREE_CODE (rhs1) == SSA_NAME
       && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs1))
      || (TREE_CODE (rhs2) == SSA_NAME
	  && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs2)))
    return false;

  to_mode = SCALAR_INT_TYPE_MODE (type);
  from_mode = SCALAR_INT_TYPE_MODE (type1);
  if (to_mode == from_mode)
    return false;

  from_unsigned1 = TYPE_UNSIGNED (type1);
  from_unsigned2 = TYPE_UNSIGNED (type2);

  if (from_unsigned1 && from_unsigned2)
    op = umul_widen_optab;
  else if (!from_unsigned1 && !from_unsigned2)
    op = smul_widen_optab;
  else
    op = usmul_widen_optab;

  handler = find_widening_optab_handler_and_mode (op, to_mode, from_mode,
						  &actual_mode);

  if (handler == CODE_FOR_nothing)
    {
      if (op != smul_widen_optab)
	{
	  /* We can use a signed multiply with unsigned types as long as
	     there is a wider mode to use, or it is the smaller of the two
	     types that is unsigned.  Note that type1 >= type2, always.  */
	  if ((TYPE_UNSIGNED (type1)
	       && TYPE_PRECISION (type1) == GET_MODE_PRECISION (from_mode))
	      || (TYPE_UNSIGNED (type2)
		  && TYPE_PRECISION (type2) == GET_MODE_PRECISION (from_mode)))
	    {
	      if (!GET_MODE_WIDER_MODE (from_mode).exists (&from_mode)
		  || GET_MODE_SIZE (to_mode) <= GET_MODE_SIZE (from_mode))
		return false;
	    }

	  op = smul_widen_optab;
	  handler = find_widening_optab_handler_and_mode (op, to_mode,
							  from_mode,
							  &actual_mode);

	  if (handler == CODE_FOR_nothing)
	    return false;

	  from_unsigned1 = from_unsigned2 = false;
	}
      else
	{
	  /* Expand can synthesize smul_widen_optab if the target
	     supports umul_widen_optab.  */
	  op = umul_widen_optab;
	  handler = find_widening_optab_handler_and_mode (op, to_mode,
							  from_mode,
							  &actual_mode);
	  if (handler == CODE_FOR_nothing)
	    return false;
	}
    }

  /* Ensure that the inputs to the handler are in the correct precision
     for the opcode.  This will be the full mode size.  */
  actual_precision = GET_MODE_PRECISION (actual_mode);
  if (2 * actual_precision > TYPE_PRECISION (type))
    return false;
  if (actual_precision != TYPE_PRECISION (type1)
      || from_unsigned1 != TYPE_UNSIGNED (type1)
      || (TREE_TYPE (rhs1) != type1
	  && TREE_CODE (rhs1) != INTEGER_CST))
    rhs1 = build_and_insert_cast (gsi, loc,
				  build_nonstandard_integer_type
				    (actual_precision, from_unsigned1), rhs1);
  if (actual_precision != TYPE_PRECISION (type2)
      || from_unsigned2 != TYPE_UNSIGNED (type2)
      || (TREE_TYPE (rhs2) != type2
	  && TREE_CODE (rhs2) != INTEGER_CST))
    rhs2 = build_and_insert_cast (gsi, loc,
				  build_nonstandard_integer_type
				    (actual_precision, from_unsigned2), rhs2);

  /* Handle constants.  */
  if (TREE_CODE (rhs1) == INTEGER_CST)
    rhs1 = fold_convert (type1, rhs1);
  if (TREE_CODE (rhs2) == INTEGER_CST)
    rhs2 = fold_convert (type2, rhs2);

  gimple_assign_set_rhs1 (stmt, rhs1);
  gimple_assign_set_rhs2 (stmt, rhs2);
  gimple_assign_set_rhs_code (stmt, WIDEN_MULT_EXPR);
  update_stmt (stmt);
  widen_mul_stats.widen_mults_inserted++;
  return true;
}
/* Process a single gimple statement STMT, which is found at the
   iterator GSI and has either a PLUS_EXPR or a MINUS_EXPR as its
   rhs (given by CODE), and try to convert it into a
   WIDEN_MULT_PLUS_EXPR or a WIDEN_MULT_MINUS_EXPR.  The return value
   is true iff we converted the statement.  */

static bool
convert_plusminus_to_widen (gimple_stmt_iterator *gsi, gimple *stmt,
			    enum tree_code code)
{
  gimple *rhs1_stmt = NULL, *rhs2_stmt = NULL;
  gimple *conv1_stmt = NULL, *conv2_stmt = NULL, *conv_stmt;
  tree type, type1, type2, optype;
  tree lhs, rhs1, rhs2, mult_rhs1, mult_rhs2, add_rhs;
  enum tree_code rhs1_code = ERROR_MARK, rhs2_code = ERROR_MARK;
  optab this_optab;
  enum tree_code wmult_code;
  enum insn_code handler;
  scalar_mode to_mode, from_mode, actual_mode;
  location_t loc = gimple_location (stmt);
  int actual_precision;
  bool from_unsigned1, from_unsigned2;

  lhs = gimple_assign_lhs (stmt);
  type = TREE_TYPE (lhs);
  if (TREE_CODE (type) != INTEGER_TYPE
      && TREE_CODE (type) != FIXED_POINT_TYPE)
    return false;

  if (code == MINUS_EXPR)
    wmult_code = WIDEN_MULT_MINUS_EXPR;
  else
    wmult_code = WIDEN_MULT_PLUS_EXPR;

  rhs1 = gimple_assign_rhs1 (stmt);
  rhs2 = gimple_assign_rhs2 (stmt);

  if (TREE_CODE (rhs1) == SSA_NAME)
    {
      rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
      if (is_gimple_assign (rhs1_stmt))
	rhs1_code = gimple_assign_rhs_code (rhs1_stmt);
    }

  if (TREE_CODE (rhs2) == SSA_NAME)
    {
      rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
      if (is_gimple_assign (rhs2_stmt))
	rhs2_code = gimple_assign_rhs_code (rhs2_stmt);
    }

  /* Allow for one conversion statement between the multiply
     and addition/subtraction statement.  If there are more than
     one conversions then we assume they would invalidate this
     transformation.  If that's not the case then they should have
     been folded before now.  */
  if (CONVERT_EXPR_CODE_P (rhs1_code))
    {
      conv1_stmt = rhs1_stmt;
      rhs1 = gimple_assign_rhs1 (rhs1_stmt);
      if (TREE_CODE (rhs1) == SSA_NAME)
	{
	  rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
	  if (is_gimple_assign (rhs1_stmt))
	    rhs1_code = gimple_assign_rhs_code (rhs1_stmt);
	}
      else
	return false;
    }
  if (CONVERT_EXPR_CODE_P (rhs2_code))
    {
      conv2_stmt = rhs2_stmt;
      rhs2 = gimple_assign_rhs1 (rhs2_stmt);
      if (TREE_CODE (rhs2) == SSA_NAME)
	{
	  rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
	  if (is_gimple_assign (rhs2_stmt))
	    rhs2_code = gimple_assign_rhs_code (rhs2_stmt);
	}
      else
	return false;
    }

  /* If code is WIDEN_MULT_EXPR then it would seem unnecessary to call
     is_widening_mult_p, but we still need the rhs returns.

     It might also appear that it would be sufficient to use the existing
     operands of the widening multiply, but that would limit the choice of
     multiply-and-accumulate instructions.

     If the widened-multiplication result has more than one uses, it is
     probably wiser not to do the conversion.  Also restrict this operation
     to single basic block to avoid moving the multiply to a different block
     with a higher execution frequency.  */
  if (code == PLUS_EXPR
      && (rhs1_code == MULT_EXPR || rhs1_code == WIDEN_MULT_EXPR))
    {
      if (!has_single_use (rhs1)
	  || gimple_bb (rhs1_stmt) != gimple_bb (stmt)
	  || !is_widening_mult_p (rhs1_stmt, &type1, &mult_rhs1,
				  &type2, &mult_rhs2))
	return false;
      add_rhs = rhs2;
      conv_stmt = conv1_stmt;
    }
  else if (rhs2_code == MULT_EXPR || rhs2_code == WIDEN_MULT_EXPR)
    {
      if (!has_single_use (rhs2)
	  || gimple_bb (rhs2_stmt) != gimple_bb (stmt)
	  || !is_widening_mult_p (rhs2_stmt, &type1, &mult_rhs1,
				  &type2, &mult_rhs2))
	return false;
      add_rhs = rhs1;
      conv_stmt = conv2_stmt;
    }
  else
    return false;

  to_mode = SCALAR_TYPE_MODE (type);
  from_mode = SCALAR_TYPE_MODE (type1);
  if (to_mode == from_mode)
    return false;

  from_unsigned1 = TYPE_UNSIGNED (type1);
  from_unsigned2 = TYPE_UNSIGNED (type2);
  optype = type1;

  /* There's no such thing as a mixed sign madd yet, so use a wider mode.  */
  if (from_unsigned1 != from_unsigned2)
    {
      if (!INTEGRAL_TYPE_P (type))
	return false;
      /* We can use a signed multiply with unsigned types as long as
	 there is a wider mode to use, or it is the smaller of the two
	 types that is unsigned.  Note that type1 >= type2, always.  */
      if ((from_unsigned1
	   && TYPE_PRECISION (type1) == GET_MODE_PRECISION (from_mode))
	  || (from_unsigned2
	      && TYPE_PRECISION (type2) == GET_MODE_PRECISION (from_mode)))
	{
	  if (!GET_MODE_WIDER_MODE (from_mode).exists (&from_mode)
	      || GET_MODE_SIZE (from_mode) >= GET_MODE_SIZE (to_mode))
	    return false;
	}

      from_unsigned1 = from_unsigned2 = false;
      optype = build_nonstandard_integer_type (GET_MODE_PRECISION (from_mode),
					       false);
    }

  /* If there was a conversion between the multiply and addition
     then we need to make sure it fits a multiply-and-accumulate.
     There should be a single mode change which does not change the
     value.  */
  if (conv_stmt)
    {
      /* We use the original, unmodified data types for this.  */
      tree from_type = TREE_TYPE (gimple_assign_rhs1 (conv_stmt));
      tree to_type = TREE_TYPE (gimple_assign_lhs (conv_stmt));
      int data_size = TYPE_PRECISION (type1) + TYPE_PRECISION (type2);
      bool is_unsigned = TYPE_UNSIGNED (type1) && TYPE_UNSIGNED (type2);

      if (TYPE_PRECISION (from_type) > TYPE_PRECISION (to_type))
	{
	  /* Conversion is a truncate.  */
	  if (TYPE_PRECISION (to_type) < data_size)
	    return false;
	}
      else if (TYPE_PRECISION (from_type) < TYPE_PRECISION (to_type))
	{
	  /* Conversion is an extend.  Check it's the right sort.  */
	  if (TYPE_UNSIGNED (from_type) != is_unsigned
	      && !(is_unsigned && TYPE_PRECISION (from_type) > data_size))
	    return false;
	}
      /* else convert is a no-op for our purposes.  */
    }

  /* Verify that the machine can perform a widening multiply
     accumulate in this mode/signedness combination, otherwise
     this transformation is likely to pessimize code.  */
  this_optab = optab_for_tree_code (wmult_code, optype, optab_default);
  handler = find_widening_optab_handler_and_mode (this_optab, to_mode,
						  from_mode, &actual_mode);

  if (handler == CODE_FOR_nothing)
    return false;

  /* Ensure that the inputs to the handler are in the correct precision
     for the opcode.  This will be the full mode size.  */
  actual_precision = GET_MODE_PRECISION (actual_mode);
  if (actual_precision != TYPE_PRECISION (type1)
      || from_unsigned1 != TYPE_UNSIGNED (type1))
    mult_rhs1 = build_and_insert_cast (gsi, loc,
				       build_nonstandard_integer_type
					 (actual_precision, from_unsigned1),
				       mult_rhs1);
  if (actual_precision != TYPE_PRECISION (type2)
      || from_unsigned2 != TYPE_UNSIGNED (type2))
    mult_rhs2 = build_and_insert_cast (gsi, loc,
				       build_nonstandard_integer_type
					 (actual_precision, from_unsigned2),
				       mult_rhs2);

  if (!useless_type_conversion_p (type, TREE_TYPE (add_rhs)))
    add_rhs = build_and_insert_cast (gsi, loc, type, add_rhs);

  /* Handle constants.  */
  if (TREE_CODE (mult_rhs1) == INTEGER_CST)
    mult_rhs1 = fold_convert (type1, mult_rhs1);
  if (TREE_CODE (mult_rhs2) == INTEGER_CST)
    mult_rhs2 = fold_convert (type2, mult_rhs2);

  gimple_assign_set_rhs_with_ops (gsi, wmult_code, mult_rhs1, mult_rhs2,
				  add_rhs);
  update_stmt (gsi_stmt (*gsi));
  widen_mul_stats.maccs_inserted++;
  return true;
}
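
/* For example (C-level view): on targets with a widening multiply-add,

     int32_t a, b;
     int64_t acc;
     acc = acc + (int64_t) a * b;

   becomes a single WIDEN_MULT_PLUS_EXPR instead of a widening multiply
   followed by a separate 64-bit addition.  */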
/* Given a result MUL_RESULT which is a result of a multiplication of OP1 and
   OP2 and which we know is used in statements that can be, together with the
   multiplication, converted to FMAs, perform the transformation.  */

static void
convert_mult_to_fma_1 (tree mul_result, tree op1, tree op2)
{
  gimple *use_stmt;
  imm_use_iterator imm_iter;
  tree type = TREE_TYPE (mul_result);

  FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, mul_result)
    {
      gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
      tree addop, mulop1 = op1, result = mul_result;
      bool negate_p = false;
      gimple_seq seq = NULL;

      if (is_gimple_debug (use_stmt))
	continue;

      if (is_gimple_assign (use_stmt)
	  && gimple_assign_rhs_code (use_stmt) == NEGATE_EXPR)
	{
	  result = gimple_assign_lhs (use_stmt);
	  use_operand_p use_p;
	  gimple *neguse_stmt;
	  single_imm_use (gimple_assign_lhs (use_stmt), &use_p, &neguse_stmt);
	  gsi_remove (&gsi, true);
	  release_defs (use_stmt);

	  use_stmt = neguse_stmt;
	  gsi = gsi_for_stmt (use_stmt);
	  negate_p = true;
	}

      tree cond, else_value, ops[3], len, bias;
      tree_code code;
      if (!can_interpret_as_conditional_op_p (use_stmt, &cond, &code,
					      ops, &else_value,
					      &len, &bias))
	gcc_unreachable ();
      addop = ops[0] == result ? ops[1] : ops[0];

      if (code == MINUS_EXPR)
	{
	  if (ops[0] == result)
	    /* a * b - c -> a * b + (-c)  */
	    addop = gimple_build (&seq, NEGATE_EXPR, type, addop);
	  else
	    /* a - b * c -> (-b) * c + a */
	    negate_p = !negate_p;
	}

      if (negate_p)
	mulop1 = gimple_build (&seq, NEGATE_EXPR, type, mulop1);

      if (seq)
	gsi_insert_seq_before (&gsi, seq, GSI_SAME_STMT);

      gcall *fma_stmt;
      if (len)
	fma_stmt
	  = gimple_build_call_internal (IFN_COND_LEN_FMA, 7, cond, mulop1, op2,
					addop, else_value, len, bias);
      else if (cond)
	fma_stmt = gimple_build_call_internal (IFN_COND_FMA, 5, cond, mulop1,
					       op2, addop, else_value);
      else
	fma_stmt = gimple_build_call_internal (IFN_FMA, 3, mulop1, op2, addop);
      gimple_set_lhs (fma_stmt, gimple_get_lhs (use_stmt));
      gimple_call_set_nothrow (fma_stmt, !stmt_can_throw_internal (cfun,
								   use_stmt));
      gsi_replace (&gsi, fma_stmt, true);
      /* Follow all SSA edges so that we generate FMS, FNMA and FNMS
	 regardless of where the negation occurs.  */
      gimple *orig_stmt = gsi_stmt (gsi);
      if (fold_stmt (&gsi, follow_all_ssa_edges))
	{
	  if (maybe_clean_or_replace_eh_stmt (orig_stmt, gsi_stmt (gsi)))
	    gcc_unreachable ();
	  update_stmt (gsi_stmt (gsi));
	}

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Generated FMA ");
	  print_gimple_stmt (dump_file, gsi_stmt (gsi), 0, TDF_NONE);
	  fprintf (dump_file, "\n");
	}

      /* If the FMA result is negated in a single use, fold the negation
	 too.  */
      orig_stmt = gsi_stmt (gsi);
      use_operand_p use_p;
      gimple *neg_stmt;
      if (is_gimple_call (orig_stmt)
	  && gimple_call_internal_p (orig_stmt)
	  && gimple_call_lhs (orig_stmt)
	  && TREE_CODE (gimple_call_lhs (orig_stmt)) == SSA_NAME
	  && single_imm_use (gimple_call_lhs (orig_stmt), &use_p, &neg_stmt)
	  && is_gimple_assign (neg_stmt)
	  && gimple_assign_rhs_code (neg_stmt) == NEGATE_EXPR
	  && !stmt_could_throw_p (cfun, neg_stmt))
	{
	  gsi = gsi_for_stmt (neg_stmt);
	  if (fold_stmt (&gsi, follow_all_ssa_edges))
	    {
	      if (maybe_clean_or_replace_eh_stmt (neg_stmt, gsi_stmt (gsi)))
		gcc_unreachable ();
	      update_stmt (gsi_stmt (gsi));
	      if (dump_file && (dump_flags & TDF_DETAILS))
		{
		  fprintf (dump_file, "Folded FMA negation ");
		  print_gimple_stmt (dump_file, gsi_stmt (gsi), 0, TDF_NONE);
		  fprintf (dump_file, "\n");
		}
	    }
	}

      widen_mul_stats.fmas_inserted++;
    }
}
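
/* Illustrative effect (names invented): a multiplication whose uses are
   additions or subtractions folds into the FMA family:

     t = a * b;  r = t + c;   =>  r = .FMA (a, b, c);
     t = a * b;  s = c - t;   =>  s = .FNMA (a, b, c);

   where the second form arises from rewriting c - a*b as (-a) * b + c.  */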
/* Data necessary to perform the actual transformation from a multiplication
   and an addition to an FMA after the decision has been taken that it should
   be done, and to then delete the multiplication statement from the
   function IL.  */

struct fma_transformation_info
{
  gimple *mul_stmt;
  tree mul_result;
  tree op1;
  tree op2;
};
/* Structure containing the current state of FMA deferring, i.e. whether we are
   deferring, whether to continue deferring, and all data necessary to come
   back and perform all deferred transformations.  */

class fma_deferring_state
{
public:
  /* Class constructor.  Pass true as PERFORM_DEFERRING in order to actually
     do any deferring.  */

  fma_deferring_state (bool perform_deferring)
    : m_candidates (), m_mul_result_set (), m_initial_phi (NULL),
      m_last_result (NULL_TREE), m_deferring_p (perform_deferring) {}

  /* List of FMA candidates for which the transformation has been determined
     possible but which we at this point of BB analysis do not consider
     beneficial.  */
  auto_vec<fma_transformation_info, 8> m_candidates;

  /* Set of results of multiplication that are part of an already deferred FMA
     candidate.  */
  hash_set<tree> m_mul_result_set;

  /* The PHI that supposedly feeds back result of a FMA to another over loop
     boundary.  */
  gphi *m_initial_phi;

  /* Result of the last produced FMA candidate or NULL if there has not been
     one.  */
  tree m_last_result;

  /* If true, deferring might still be profitable.  If false, transform all
     candidates and no longer defer.  */
  bool m_deferring_p;
};
/* Transform all deferred FMA candidates and mark STATE as no longer
   deferring.  */

static void
cancel_fma_deferring (fma_deferring_state *state)
{
  if (!state->m_deferring_p)
    return;

  for (unsigned i = 0; i < state->m_candidates.length (); i++)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "Generating deferred FMA\n");

      const fma_transformation_info &fti = state->m_candidates[i];
      convert_mult_to_fma_1 (fti.mul_result, fti.op1, fti.op2);

      gimple_stmt_iterator gsi = gsi_for_stmt (fti.mul_stmt);
      gsi_remove (&gsi, true);
      release_defs (fti.mul_stmt);
    }
  state->m_deferring_p = false;
}
/* If OP is an SSA name defined by a PHI node, return the PHI statement.
   Otherwise return NULL.  */

static gphi *
result_of_phi (tree op)
{
  if (TREE_CODE (op) != SSA_NAME)
    return NULL;

  return dyn_cast <gphi *> (SSA_NAME_DEF_STMT (op));
}
/* After processing statements of a BB and recording STATE, return true if the
   initial phi is fed by the last FMA candidate result or one such result from
   previously processed BBs marked in LAST_RESULT_SET.  */

static bool
last_fma_candidate_feeds_initial_phi (fma_deferring_state *state,
				      hash_set<tree> *last_result_set)
{
  ssa_op_iter iter;
  use_operand_p use;
  FOR_EACH_PHI_ARG (use, state->m_initial_phi, iter, SSA_OP_USE)
    {
      tree t = USE_FROM_PTR (use);
      if (t == state->m_last_result
	  || last_result_set->contains (t))
	return true;
    }

  return false;
}
/* Combine the multiplication at MUL_STMT with operands MULOP1 and MULOP2
   with uses in additions and subtractions to form fused multiply-add
   operations.  Returns true if successful and MUL_STMT should be removed.
   If MUL_COND is nonnull, the multiplication in MUL_STMT is conditional
   on MUL_COND, otherwise it is unconditional.

   If STATE indicates that we are deferring FMA transformation, that means
   that we do not produce FMAs for basic blocks which look like:

    <bb 6>
    # accumulator_111 = PHI <0.0(5), accumulator_66(6)>
    _65 = _14 * _16;
    accumulator_66 = _65 + accumulator_111;

   or its unrolled version, i.e. with several FMA candidates that feed result
   of one into the addend of another.  Instead, we add them to a list in STATE
   and if we later discover an FMA candidate that is not part of such a chain,
   we go back and perform all deferred past candidates.  */
static bool
convert_mult_to_fma (gimple *mul_stmt, tree op1, tree op2,
		     fma_deferring_state *state, tree mul_cond = NULL_TREE,
		     tree mul_len = NULL_TREE, tree mul_bias = NULL_TREE)
{
  tree mul_result = gimple_get_lhs (mul_stmt);
  /* If there isn't a LHS then this can't be an FMA.  There can be no LHS
     if the statement was left just for the side-effects.  */
  if (!mul_result)
    return false;
  tree type = TREE_TYPE (mul_result);
  gimple *use_stmt, *neguse_stmt;
  use_operand_p use_p;
  imm_use_iterator imm_iter;

  if (FLOAT_TYPE_P (type)
      && flag_fp_contract_mode != FP_CONTRACT_FAST)
    return false;

  /* We don't want to do bitfield reduction ops.  */
  if (INTEGRAL_TYPE_P (type)
      && (!type_has_mode_precision_p (type) || TYPE_OVERFLOW_TRAPS (type)))
    return false;

  /* If the target doesn't support it, don't generate it.  We assume that
     if fma isn't available then fms, fnma or fnms are not either.  */
  optimization_type opt_type = bb_optimization_type (gimple_bb (mul_stmt));
  if (!direct_internal_fn_supported_p (IFN_FMA, type, opt_type))
    return false;

  /* If the multiplication has zero uses, it is kept around probably because
     of -fnon-call-exceptions.  Don't optimize it away in that case,
     it is DCE job.  */
  if (has_zero_uses (mul_result))
    return false;

  bool check_defer
    = (state->m_deferring_p
       && maybe_le (tree_to_poly_int64 (TYPE_SIZE (type)),
		    param_avoid_fma_max_bits));
  bool defer = check_defer;
  bool seen_negate_p = false;

  /* There is no numerical difference between fused and unfused integer FMAs,
     and the assumption below that FMA is as cheap as addition is unlikely
     to be true, especially if the multiplication occurs multiple times on
     the same chain.  E.g., for something like:

	 (((a * b) + c) >> 1) + (a * b)

     we do not want to duplicate the a * b into two additions, not least
     because the result is not a natural FMA chain.  */
  if (ANY_INTEGRAL_TYPE_P (type)
      && !has_single_use (mul_result))
    return false;

  if (!dbg_cnt (form_fma))
    return false;

  /* Make sure that the multiplication statement becomes dead after
     the transformation, thus that all uses are transformed to FMAs.
     This means we assume that an FMA operation has the same cost
     as an addition.  */
  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, mul_result)
    {
      tree result = mul_result;
      bool negate_p = false;

      use_stmt = USE_STMT (use_p);

      if (is_gimple_debug (use_stmt))
	continue;

      /* For now restrict this operation to single basic blocks.  In theory
	 we would want to support sinking the multiplication in
	 m = a*b;
	 if ()
	   ma = m + c;
	 else
	   d = m;
	 to form a fma in the then block and sink the multiplication to the
	 else block.  */
      if (gimple_bb (use_stmt) != gimple_bb (mul_stmt))
	return false;

      /* A negate on the multiplication leads to FNMA.  */
      if (is_gimple_assign (use_stmt)
	  && gimple_assign_rhs_code (use_stmt) == NEGATE_EXPR)
	{
	  ssa_op_iter iter;
	  use_operand_p usep;

	  /* If (due to earlier missed optimizations) we have two
	     negates of the same value, treat them as equivalent
	     to a single negate with multiple uses.  */
	  if (seen_negate_p)
	    return false;

	  result = gimple_assign_lhs (use_stmt);

	  /* Make sure the negate statement becomes dead with this
	     single transformation.  */
	  if (!single_imm_use (gimple_assign_lhs (use_stmt),
			       &use_p, &neguse_stmt))
	    return false;

	  /* Make sure the multiplication isn't also used on that stmt.  */
	  FOR_EACH_PHI_OR_STMT_USE (usep, neguse_stmt, iter, SSA_OP_USE)
	    if (USE_FROM_PTR (usep) == mul_result)
	      return false;

	  /* Re-validate.  */
	  use_stmt = neguse_stmt;
	  if (gimple_bb (use_stmt) != gimple_bb (mul_stmt))
	    return false;

	  negate_p = seen_negate_p = true;
	}

      tree cond, else_value, ops[3], len, bias;
      tree_code code;
      if (!can_interpret_as_conditional_op_p (use_stmt, &cond, &code, ops,
					      &else_value, &len, &bias))
	return false;

      switch (code)
	{
	case MINUS_EXPR:
	  if (ops[1] == result)
	    negate_p = !negate_p;
	  break;
	case PLUS_EXPR:
	  break;
	default:
	  /* FMA can only be formed from PLUS and MINUS.  */
	  return false;
	}

      if (len)
	{
	  /* For COND_LEN_* operations, we may have dummy mask which is
	     the all true mask.  Such TREE type may be mul_cond != cond
	     but we still consider they are equal.  */
	  if (mul_cond && cond != mul_cond
	      && !(integer_truep (mul_cond) && integer_truep (cond)))
	    return false;

	  if (else_value == result)
	    return false;

	  if (!direct_internal_fn_supported_p (IFN_COND_LEN_FMA, type,
					       opt_type))
	    return false;

	  if (mul_len)
	    {
	      poly_int64 mul_value, value;
	      if (poly_int_tree_p (mul_len, &mul_value)
		  && poly_int_tree_p (len, &value)
		  && maybe_ne (mul_value, value))
		return false;
	      else if (mul_len != len)
		return false;

	      if (wi::to_widest (mul_bias) != wi::to_widest (bias))
		return false;
	    }
	}
      else
	{
	  if (mul_cond && cond != mul_cond)
	    return false;

	  if (cond)
	    {
	      if (cond == result || else_value == result)
		return false;
	      if (!direct_internal_fn_supported_p (IFN_COND_FMA, type,
						   opt_type))
		return false;
	    }
	}

      /* If the subtrahend (OPS[1]) is computed by a MULT_EXPR that
	 we'll visit later, we might be able to get a more profitable
	 match with fnma.
	 OTOH, if we don't, a negate / fma pair has likely lower latency
	 than a mult / subtract pair.  */
      if (code == MINUS_EXPR
	  && !negate_p
	  && ops[0] == result
	  && !direct_internal_fn_supported_p (IFN_FMS, type, opt_type)
	  && direct_internal_fn_supported_p (IFN_FNMA, type, opt_type)
	  && TREE_CODE (ops[1]) == SSA_NAME
	  && has_single_use (ops[1]))
	{
	  gimple *stmt2 = SSA_NAME_DEF_STMT (ops[1]);
	  if (is_gimple_assign (stmt2)
	      && gimple_assign_rhs_code (stmt2) == MULT_EXPR)
	    return false;
	}

      /* We can't handle a * b + a * b.  */
      if (ops[0] == ops[1])
	return false;
      /* If deferring, make sure we are not looking at an instruction that
	 wouldn't have existed if we were not.  */
      if (state->m_deferring_p
	  && (state->m_mul_result_set.contains (ops[0])
	      || state->m_mul_result_set.contains (ops[1])))
	return false;

      if (check_defer)
	{
	  tree use_lhs = gimple_get_lhs (use_stmt);
	  if (state->m_last_result)
	    {
	      if (ops[1] == state->m_last_result
		  || ops[0] == state->m_last_result)
		defer = true;
	      else
		defer = false;
	    }
	  else
	    {
	      gcc_checking_assert (!state->m_initial_phi);
	      gphi *phi;
	      if (ops[0] == result)
		phi = result_of_phi (ops[1]);
	      else
		{
		  gcc_assert (ops[1] == result);
		  phi = result_of_phi (ops[0]);
		}

	      if (phi)
		{
		  state->m_initial_phi = phi;
		  defer = true;
		}
	      else
		defer = false;
	    }

	  state->m_last_result = use_lhs;
	  check_defer = false;
	}
      else
	defer = false;

      /* While it is possible to validate whether or not the exact form that
	 we've recognized is available in the backend, the assumption is that
	 if the deferring logic above did not trigger, the transformation is
	 never a loss.  For instance, suppose the target only has the plain FMA
	 pattern available.  Consider a*b-c -> fma(a,b,-c): we've exchanged
	 MUL+SUB for FMA+NEG, which is still two operations.  Consider
	 -(a*b)-c -> fma(-a,b,-c): we still have 3 operations, but in the FMA
	 form the two NEGs are independent and could be run in parallel.  */
    }

  if (defer)
    {
      fma_transformation_info fti;
      fti.mul_stmt = mul_stmt;
      fti.mul_result = mul_result;
      fti.op1 = op1;
      fti.op2 = op2;
      state->m_candidates.safe_push (fti);
      state->m_mul_result_set.add (mul_result);

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Deferred generating FMA for multiplication ");
	  print_gimple_stmt (dump_file, mul_stmt, 0, TDF_NONE);
	  fprintf (dump_file, "\n");
	}

      return false;
    }
  else
    {
      if (state->m_deferring_p)
	cancel_fma_deferring (state);
      convert_mult_to_fma_1 (mul_result, op1, op2);
      return true;
    }
}
/* Helper function of match_arith_overflow.  For MUL_OVERFLOW, if we have
   a check for non-zero like:
   _1 = x_4(D) * y_5(D);
   *res_7(D) = _1;
   if (x_4(D) != 0)
     goto <bb 3>; [50.00%]
   else
     goto <bb 4>; [50.00%]

   <bb 3> [local count: 536870913]:
   _2 = _1 / x_4(D);
   _9 = _2 != y_5(D);
   _10 = (int) _9;

   <bb 4> [local count: 1073741824]:
   # iftmp.0_3 = PHI <_10(3), 0(2)>
   then in addition to using .MUL_OVERFLOW (x_4(D), y_5(D)) we can also
   optimize the x_4(D) != 0 condition to 1.  */
static void
maybe_optimize_guarding_check (vec<gimple *> &mul_stmts, gimple *cond_stmt,
			       gimple *div_stmt, bool *cfg_changed)
{
  basic_block bb = gimple_bb (cond_stmt);
  if (gimple_bb (div_stmt) != bb || !single_pred_p (bb))
    return;
  edge pred_edge = single_pred_edge (bb);
  basic_block pred_bb = pred_edge->src;
  if (EDGE_COUNT (pred_bb->succs) != 2)
    return;
  edge other_edge = EDGE_SUCC (pred_bb, EDGE_SUCC (pred_bb, 0) == pred_edge);
  edge other_succ_edge = NULL;
  if (gimple_code (cond_stmt) == GIMPLE_COND)
    {
      if (EDGE_COUNT (bb->succs) != 2)
	return;
      other_succ_edge = EDGE_SUCC (bb, 0);
      if (gimple_cond_code (cond_stmt) == NE_EXPR)
	{
	  if (other_succ_edge->flags & EDGE_TRUE_VALUE)
	    other_succ_edge = EDGE_SUCC (bb, 1);
	}
      else if (other_succ_edge->flags & EDGE_FALSE_VALUE)
	other_succ_edge = EDGE_SUCC (bb, 0);
      if (other_edge->dest != other_succ_edge->dest)
	return;
    }
  else if (!single_succ_p (bb) || other_edge->dest != single_succ (bb))
    return;
  gcond *zero_cond = safe_dyn_cast <gcond *> (*gsi_last_bb (pred_bb));
  if (zero_cond == NULL
      || (gimple_cond_code (zero_cond)
	  != ((pred_edge->flags & EDGE_TRUE_VALUE) ? NE_EXPR : EQ_EXPR))
      || !integer_zerop (gimple_cond_rhs (zero_cond)))
    return;
  tree zero_cond_lhs = gimple_cond_lhs (zero_cond);
  if (TREE_CODE (zero_cond_lhs) != SSA_NAME)
    return;
  if (gimple_assign_rhs2 (div_stmt) != zero_cond_lhs)
    {
      /* Allow the divisor to be result of a same precision cast
	 from zero_cond_lhs.  */
      tree rhs2 = gimple_assign_rhs2 (div_stmt);
      if (TREE_CODE (rhs2) != SSA_NAME)
	return;
      gimple *g = SSA_NAME_DEF_STMT (rhs2);
      if (!gimple_assign_cast_p (g)
	  || gimple_assign_rhs1 (g) != gimple_cond_lhs (zero_cond)
	  || !INTEGRAL_TYPE_P (TREE_TYPE (zero_cond_lhs))
	  || (TYPE_PRECISION (TREE_TYPE (zero_cond_lhs))
	      != TYPE_PRECISION (TREE_TYPE (rhs2))))
	return;
    }
  gimple_stmt_iterator gsi = gsi_after_labels (bb);
  mul_stmts.quick_push (div_stmt);
  if (is_gimple_debug (gsi_stmt (gsi)))
    gsi_next_nondebug (&gsi);
  unsigned cast_count = 0;
  while (gsi_stmt (gsi) != cond_stmt)
    {
      /* If original mul_stmt has a single use, allow it in the same bb,
	 we are looking then just at __builtin_mul_overflow_p.
	 Though, in that case the original mul_stmt will be replaced
	 by .MUL_OVERFLOW, REALPART_EXPR and IMAGPART_EXPR stmts.  */
      gimple *mul_stmt;
      unsigned int i;
      bool ok = false;
      FOR_EACH_VEC_ELT (mul_stmts, i, mul_stmt)
	{
	  if (gsi_stmt (gsi) == mul_stmt)
	    {
	      ok = true;
	      break;
	    }
	}
      if (!ok && gimple_assign_cast_p (gsi_stmt (gsi)) && ++cast_count < 4)
	ok = true;
      if (!ok)
	return;
      gsi_next_nondebug (&gsi);
    }
  if (gimple_code (cond_stmt) == GIMPLE_COND)
    {
      basic_block succ_bb = other_edge->dest;
      for (gphi_iterator gpi = gsi_start_phis (succ_bb); !gsi_end_p (gpi);
	   gsi_next (&gpi))
	{
	  gphi *phi = gpi.phi ();
	  tree v1 = gimple_phi_arg_def (phi, other_edge->dest_idx);
	  tree v2 = gimple_phi_arg_def (phi, other_succ_edge->dest_idx);
	  if (!operand_equal_p (v1, v2, 0))
	    return;
	}
    }
  else
    {
      tree lhs = gimple_assign_lhs (cond_stmt);
      if (!lhs || !INTEGRAL_TYPE_P (TREE_TYPE (lhs)))
	return;
      gsi_next_nondebug (&gsi);
      if (!gsi_end_p (gsi))
	{
	  if (gimple_assign_rhs_code (cond_stmt) == COND_EXPR)
	    return;
	  gimple *cast_stmt = gsi_stmt (gsi);
	  if (!gimple_assign_cast_p (cast_stmt))
	    return;
	  tree new_lhs = gimple_assign_lhs (cast_stmt);
	  gsi_next_nondebug (&gsi);
	  if (!gsi_end_p (gsi)
	      || !new_lhs
	      || !INTEGRAL_TYPE_P (TREE_TYPE (new_lhs))
	      || TYPE_PRECISION (TREE_TYPE (new_lhs)) <= 1)
	    return;
	  lhs = new_lhs;
	}
      edge succ_edge = single_succ_edge (bb);
      basic_block succ_bb = succ_edge->dest;
      gsi = gsi_start_phis (succ_bb);
      if (gsi_end_p (gsi))
	return;
      gphi *phi = as_a <gphi *> (gsi_stmt (gsi));
      gsi_next (&gsi);
      if (!gsi_end_p (gsi))
	return;
      if (gimple_phi_arg_def (phi, succ_edge->dest_idx) != lhs)
	return;
      tree other_val = gimple_phi_arg_def (phi, other_edge->dest_idx);
      if (gimple_assign_rhs_code (cond_stmt) == COND_EXPR)
	{
	  tree cond = gimple_assign_rhs1 (cond_stmt);
	  if (TREE_CODE (cond) == NE_EXPR)
	    {
	      if (!operand_equal_p (other_val,
				    gimple_assign_rhs3 (cond_stmt), 0))
		return;
	    }
	  else if (!operand_equal_p (other_val,
				     gimple_assign_rhs2 (cond_stmt), 0))
	    return;
	}
      else if (gimple_assign_rhs_code (cond_stmt) == NE_EXPR)
	{
	  if (!integer_zerop (other_val))
	    return;
	}
      else if (!integer_onep (other_val))
	return;
    }
  if (pred_edge->flags & EDGE_TRUE_VALUE)
    gimple_cond_make_true (zero_cond);
  else
    gimple_cond_make_false (zero_cond);
  update_stmt (zero_cond);
  *cfg_changed = true;
}
/* Helper function for arith_overflow_check_p.  Return true
   if VAL1 is equal to VAL2 cast to corresponding integral type
   with other signedness or vice versa.  */

static bool
arith_cast_equal_p (tree val1, tree val2)
{
  if (TREE_CODE (val1) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
    return wi::eq_p (wi::to_wide (val1), wi::to_wide (val2));
  else if (TREE_CODE (val1) != SSA_NAME || TREE_CODE (val2) != SSA_NAME)
    return false;
  if (gimple_assign_cast_p (SSA_NAME_DEF_STMT (val1))
      && gimple_assign_rhs1 (SSA_NAME_DEF_STMT (val1)) == val2)
    return true;
  if (gimple_assign_cast_p (SSA_NAME_DEF_STMT (val2))
      && gimple_assign_rhs1 (SSA_NAME_DEF_STMT (val2)) == val1)
    return true;
  return false;
}
/* Helper function of match_arith_overflow.  Return 1
   if USE_STMT is unsigned overflow check ovf != 0 for
   STMT, -1 if USE_STMT is unsigned overflow check ovf == 0
   and 0 otherwise.  */

static int
arith_overflow_check_p (gimple *stmt, gimple *cast_stmt, gimple *&use_stmt,
			tree maxval, tree *other)
{
  enum tree_code ccode = ERROR_MARK;
  tree crhs1 = NULL_TREE, crhs2 = NULL_TREE;
  enum tree_code code = gimple_assign_rhs_code (stmt);
  tree lhs = gimple_assign_lhs (cast_stmt ? cast_stmt : stmt);
  tree rhs1 = gimple_assign_rhs1 (stmt);
  tree rhs2 = gimple_assign_rhs2 (stmt);
  tree multop = NULL_TREE, divlhs = NULL_TREE;
  gimple *cur_use_stmt = use_stmt;

  if (code == MULT_EXPR)
    {
      if (!is_gimple_assign (use_stmt))
	return 0;
      if (gimple_assign_rhs_code (use_stmt) != TRUNC_DIV_EXPR)
	return 0;
      if (gimple_assign_rhs1 (use_stmt) != lhs)
	return 0;
      if (cast_stmt)
	{
	  if (arith_cast_equal_p (gimple_assign_rhs2 (use_stmt), rhs1))
	    multop = rhs2;
	  else if (arith_cast_equal_p (gimple_assign_rhs2 (use_stmt), rhs2))
	    multop = rhs1;
	  else
	    return 0;
	}
      else if (gimple_assign_rhs2 (use_stmt) == rhs1)
	multop = rhs2;
      else if (operand_equal_p (gimple_assign_rhs2 (use_stmt), rhs2, 0))
	multop = rhs1;
      else
	return 0;
      if (stmt_ends_bb_p (use_stmt))
	return 0;
      divlhs = gimple_assign_lhs (use_stmt);
      if (!divlhs)
	return 0;
      use_operand_p use;
      if (!single_imm_use (divlhs, &use, &cur_use_stmt))
	return 0;
      if (cast_stmt && gimple_assign_cast_p (cur_use_stmt))
	{
	  tree cast_lhs = gimple_assign_lhs (cur_use_stmt);
	  if (INTEGRAL_TYPE_P (TREE_TYPE (cast_lhs))
	      && TYPE_UNSIGNED (TREE_TYPE (cast_lhs))
	      && (TYPE_PRECISION (TREE_TYPE (cast_lhs))
		  == TYPE_PRECISION (TREE_TYPE (divlhs)))
	      && single_imm_use (cast_lhs, &use, &cur_use_stmt))
	    {
	      cast_stmt = NULL;
	      divlhs = cast_lhs;
	    }
	  else
	    return 0;
	}
    }
  if (gimple_code (cur_use_stmt) == GIMPLE_COND)
    {
      ccode = gimple_cond_code (cur_use_stmt);
      crhs1 = gimple_cond_lhs (cur_use_stmt);
      crhs2 = gimple_cond_rhs (cur_use_stmt);
    }
  else if (is_gimple_assign (cur_use_stmt))
    {
      if (gimple_assign_rhs_class (cur_use_stmt) == GIMPLE_BINARY_RHS)
	{
	  ccode = gimple_assign_rhs_code (cur_use_stmt);
	  crhs1 = gimple_assign_rhs1 (cur_use_stmt);
	  crhs2 = gimple_assign_rhs2 (cur_use_stmt);
	}
      else if (gimple_assign_rhs_code (cur_use_stmt) == COND_EXPR)
	{
	  tree cond = gimple_assign_rhs1 (cur_use_stmt);
	  if (COMPARISON_CLASS_P (cond))
	    {
	      ccode = TREE_CODE (cond);
	      crhs1 = TREE_OPERAND (cond, 0);
	      crhs2 = TREE_OPERAND (cond, 1);
	    }
	  else
	    return 0;
	}
      else
	return 0;
    }
  else
    return 0;

  if (TREE_CODE_CLASS (ccode) != tcc_comparison)
    return 0;

  switch (ccode)
    {
    case GT_EXPR:
    case LE_EXPR:
      if (maxval)
	{
	  /* r = a + b; r > maxval or r <= maxval  */
	  if (crhs1 == lhs
	      && TREE_CODE (crhs2) == INTEGER_CST
	      && tree_int_cst_equal (crhs2, maxval))
	    return ccode == GT_EXPR ? 1 : -1;
	  break;
	}
      /* r = a - b; r > a or r <= a
	 r = a + b; a > r or a <= r or b > r or b <= r.  */
      if ((code == MINUS_EXPR && crhs1 == lhs && crhs2 == rhs1)
	  || (code == PLUS_EXPR && (crhs1 == rhs1 || crhs1 == rhs2)
	      && crhs2 == lhs))
	return ccode == GT_EXPR ? 1 : -1;
      /* r = ~a; b > r or b <= r.  */
      if (code == BIT_NOT_EXPR && crhs2 == lhs)
	{
	  if (other)
	    *other = crhs1;
	  return ccode == GT_EXPR ? 1 : -1;
	}
      break;
    case LT_EXPR:
    case GE_EXPR:
      if (maxval)
	break;
      /* r = a - b; a < r or a >= r
	 r = a + b; r < a or r >= a or r < b or r >= b.  */
      if ((code == MINUS_EXPR && crhs1 == rhs1 && crhs2 == lhs)
	  || (code == PLUS_EXPR && crhs1 == lhs
	      && (crhs2 == rhs1 || crhs2 == rhs2)))
	return ccode == LT_EXPR ? 1 : -1;
      /* r = ~a; r < b or r >= b.  */
      if (code == BIT_NOT_EXPR && crhs1 == lhs)
	{
	  if (other)
	    *other = crhs2;
	  return ccode == LT_EXPR ? 1 : -1;
	}
      break;
    case EQ_EXPR:
    case NE_EXPR:
      /* r = a * b; _1 = r / a; _1 == b
	 r = a * b; _1 = r / b; _1 == a
	 r = a * b; _1 = r / a; _1 != b
	 r = a * b; _1 = r / b; _1 != a.  */
      if (code == MULT_EXPR)
	{
	  if (cast_stmt)
	    {
	      if ((crhs1 == divlhs && arith_cast_equal_p (crhs2, multop))
		  || (crhs2 == divlhs && arith_cast_equal_p (crhs1, multop)))
		{
		  use_stmt = cur_use_stmt;
		  return ccode == NE_EXPR ? 1 : -1;
		}
	    }
	  else if ((crhs1 == divlhs && operand_equal_p (crhs2, multop, 0))
		   || (crhs2 == divlhs && crhs1 == multop))
	    {
	      use_stmt = cur_use_stmt;
	      return ccode == NE_EXPR ? 1 : -1;
	    }
	}
      break;
    default:
      break;
    }
  return 0;
}
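
/* For example (SSA names invented): for unsigned a, b, the sequence

     r_1 = a_2 + b_3;
     if (r_1 < a_2)

   compares the result against an addend, so it is classified as an
   overflow check (1 for the LT form above) and can later be rewritten
   to test IMAGPART_EXPR of .ADD_OVERFLOW (a_2, b_3).  */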
/* Recognize for unsigned x
   x = y - z;
   if (x > y)
   where there are other uses of x and replace it with
   _7 = .SUB_OVERFLOW (y, z);
   x = REALPART_EXPR <_7>;
   _8 = IMAGPART_EXPR <_7>;
   if (_8)
   and similarly for addition.

   Also recognize:
   yc = (type) y;
   zc = (type) z;
   x = yc + zc;
   if (x > max)
   where y and z have unsigned types with maximum max
   and there are other uses of x and all of those cast x
   back to that unsigned type and again replace it with
   _7 = .ADD_OVERFLOW (y, z);
   _9 = REALPART_EXPR <_7>;
   _8 = IMAGPART_EXPR <_7>;
   if (_8)
   and replace (utype) x with _9.

   Also recognize:
   x = ~z;
   if (y > x)
   and replace it with
   _7 = .ADD_OVERFLOW (y, z);
   _8 = IMAGPART_EXPR <_7>;
   if (_8)

   And also recognize:
   z = x * y;
   if (x != 0)
     goto <bb 3>; [50.00%]
   else
     goto <bb 4>; [50.00%]

   <bb 3> [local count: 536870913]:
   _2 = z / x;
   _9 = _2 != y;
   _10 = (int) _9;

   <bb 4> [local count: 1073741824]:
   # iftmp.0_3 = PHI <_10(3), 0(2)>
   and replace it with
   _7 = .MUL_OVERFLOW (x, y);
   z = REALPART_EXPR <_7>;
   _8 = IMAGPART_EXPR <_7>;
   _9 = _8 != 0;
   iftmp.0_3 = (int) _9;  */

static bool
match_arith_overflow (gimple_stmt_iterator *gsi, gimple *stmt,
		      enum tree_code code, bool *cfg_changed)
{
  tree lhs = gimple_assign_lhs (stmt);
  tree type = TREE_TYPE (lhs);
  use_operand_p use_p;
  imm_use_iterator iter;
  bool use_seen = false;
  bool ovf_use_seen = false;
  gimple *use_stmt;
  gimple *add_stmt = NULL;
  bool add_first = false;
  gimple *cond_stmt = NULL;
  gimple *cast_stmt = NULL;
  tree cast_lhs = NULL_TREE;

  gcc_checking_assert (code == PLUS_EXPR
		       || code == MINUS_EXPR
		       || code == MULT_EXPR
		       || code == BIT_NOT_EXPR);
  if (!INTEGRAL_TYPE_P (type)
      || !TYPE_UNSIGNED (type)
      || has_zero_uses (lhs)
      || (code != PLUS_EXPR
	  && code != MULT_EXPR
	  && optab_handler (code == MINUS_EXPR ? usubv4_optab : uaddv4_optab,
			    TYPE_MODE (type)) == CODE_FOR_nothing))
    return false;

  tree rhs1 = gimple_assign_rhs1 (stmt);
  tree rhs2 = gimple_assign_rhs2 (stmt);
  FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
    {
      use_stmt = USE_STMT (use_p);
      if (is_gimple_debug (use_stmt))
	continue;

      tree other = NULL_TREE;
      if (arith_overflow_check_p (stmt, NULL, use_stmt, NULL_TREE, &other))
	{
	  if (code == BIT_NOT_EXPR)
	    {
	      gcc_assert (other);
	      if (TREE_CODE (other) != SSA_NAME)
		return false;
	      if (rhs2 == NULL)
		rhs2 = other;
	      else
		return false;
	      cond_stmt = use_stmt;
	    }
	  ovf_use_seen = true;
	}
      else
	{
	  use_seen = true;
	  if (code == MULT_EXPR
	      && cast_stmt == NULL
	      && gimple_assign_cast_p (use_stmt))
	    {
	      cast_lhs = gimple_assign_lhs (use_stmt);
	      if (INTEGRAL_TYPE_P (TREE_TYPE (cast_lhs))
		  && !TYPE_UNSIGNED (TREE_TYPE (cast_lhs))
		  && (TYPE_PRECISION (TREE_TYPE (cast_lhs))
		      == TYPE_PRECISION (TREE_TYPE (lhs))))
		cast_stmt = use_stmt;
	      else
		cast_lhs = NULL_TREE;
	    }
	}
      if (ovf_use_seen && use_seen)
	break;
    }

  if (!ovf_use_seen
      && code == MULT_EXPR
      && cast_stmt)
    {
      if (TREE_CODE (rhs1) != SSA_NAME
	  || (TREE_CODE (rhs2) != SSA_NAME && TREE_CODE (rhs2) != INTEGER_CST))
	return false;
      FOR_EACH_IMM_USE_FAST (use_p, iter, cast_lhs)
	{
	  use_stmt = USE_STMT (use_p);
	  if (is_gimple_debug (use_stmt))
	    continue;

	  if (arith_overflow_check_p (stmt, cast_stmt, use_stmt,
				      NULL_TREE, NULL))
	    ovf_use_seen = true;
	}
    }
  else
    {
      cast_stmt = NULL;
      cast_lhs = NULL_TREE;
    }

  tree maxval = NULL_TREE;
  if (!ovf_use_seen
      || (code != MULT_EXPR && (code == BIT_NOT_EXPR ? use_seen : !use_seen))
      || (code == PLUS_EXPR
	  && optab_handler (uaddv4_optab,
			    TYPE_MODE (type)) == CODE_FOR_nothing)
      || (code == MULT_EXPR
	  && optab_handler (cast_stmt ? mulv4_optab : umulv4_optab,
			    TYPE_MODE (type)) == CODE_FOR_nothing
	  && (use_seen
	      || cast_stmt
	      || !can_mult_highpart_p (TYPE_MODE (type), true))))
    {
      if (code != PLUS_EXPR)
	return false;
      if (TREE_CODE (rhs1) != SSA_NAME
	  || !gimple_assign_cast_p (SSA_NAME_DEF_STMT (rhs1)))
	return false;
      rhs1 = gimple_assign_rhs1 (SSA_NAME_DEF_STMT (rhs1));
      tree type1 = TREE_TYPE (rhs1);
      if (!INTEGRAL_TYPE_P (type1)
	  || !TYPE_UNSIGNED (type1)
	  || TYPE_PRECISION (type1) >= TYPE_PRECISION (type)
	  || (TYPE_PRECISION (type1)
	      != GET_MODE_BITSIZE (SCALAR_INT_TYPE_MODE (type1))))
	return false;
      if (TREE_CODE (rhs2) == INTEGER_CST)
	{
	  if (wi::ne_p (wi::rshift (wi::to_wide (rhs2),
				    TYPE_PRECISION (type1),
				    UNSIGNED), 0))
	    return false;
	  rhs2 = fold_convert (type1, rhs2);
	}
      else
	{
	  if (TREE_CODE (rhs2) != SSA_NAME
	      || !gimple_assign_cast_p (SSA_NAME_DEF_STMT (rhs2)))
	    return false;
	  rhs2 = gimple_assign_rhs1 (SSA_NAME_DEF_STMT (rhs2));
	  tree type2 = TREE_TYPE (rhs2);
	  if (!INTEGRAL_TYPE_P (type2)
	      || !TYPE_UNSIGNED (type2)
	      || TYPE_PRECISION (type2) >= TYPE_PRECISION (type)
	      || (TYPE_PRECISION (type2)
		  != GET_MODE_BITSIZE (SCALAR_INT_TYPE_MODE (type2))))
	    return false;
	}
      if (TYPE_PRECISION (type1) >= TYPE_PRECISION (TREE_TYPE (rhs2)))
	type = type1;
      else
	type = TREE_TYPE (rhs2);

      if (TREE_CODE (type) != INTEGER_TYPE
	  || optab_handler (uaddv4_optab,
			    TYPE_MODE (type)) == CODE_FOR_nothing)
	return false;

      maxval = wide_int_to_tree (type, wi::max_value (TYPE_PRECISION (type),
						      UNSIGNED));
      ovf_use_seen = false;
      use_seen = false;
      basic_block use_bb = NULL;
      FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
	{
	  use_stmt = USE_STMT (use_p);
	  if (is_gimple_debug (use_stmt))
	    continue;

	  if (arith_overflow_check_p (stmt, NULL, use_stmt, maxval, NULL))
	    {
	      ovf_use_seen = true;
	      use_bb = gimple_bb (use_stmt);
	    }
	  else
	    {
	      if (!gimple_assign_cast_p (use_stmt)
		  || gimple_assign_rhs_code (use_stmt) == VIEW_CONVERT_EXPR)
		return false;
	      tree use_lhs = gimple_assign_lhs (use_stmt);
	      if (!INTEGRAL_TYPE_P (TREE_TYPE (use_lhs))
		  || (TYPE_PRECISION (TREE_TYPE (use_lhs))
		      > TYPE_PRECISION (type)))
		return false;
	      use_seen = true;
	    }
	}
      if (!ovf_use_seen)
	return false;
      if (!useless_type_conversion_p (type, TREE_TYPE (rhs1)))
	{
	  tree new_rhs1 = make_ssa_name (type);
	  gimple *g = gimple_build_assign (new_rhs1, NOP_EXPR, rhs1);
	  gsi_insert_before (gsi, g, GSI_SAME_STMT);
	  rhs1 = new_rhs1;
	}
      else if (!useless_type_conversion_p (type, TREE_TYPE (rhs2)))
	{
	  tree new_rhs2 = make_ssa_name (type);
	  gimple *g = gimple_build_assign (new_rhs2, NOP_EXPR, rhs2);
	  gsi_insert_before (gsi, g, GSI_SAME_STMT);
	  rhs2 = new_rhs2;
	}
      else if (!use_seen)
	{
	  /* If there are no uses of the wider addition, check if
	     forwprop has not created a narrower addition.
	     Require it to be in the same bb as the overflow check.  */
	  FOR_EACH_IMM_USE_FAST (use_p, iter, rhs1)
	    {
	      use_stmt = USE_STMT (use_p);
	      if (is_gimple_debug (use_stmt))
		continue;

	      if (use_stmt == stmt)
		continue;

	      if (!is_gimple_assign (use_stmt)
		  || gimple_bb (use_stmt) != use_bb
		  || gimple_assign_rhs_code (use_stmt) != PLUS_EXPR)
		continue;

	      if (gimple_assign_rhs1 (use_stmt) == rhs1)
		{
		  if (!operand_equal_p (gimple_assign_rhs2 (use_stmt),
					rhs2, 0))
		    continue;
		}
	      else if (gimple_assign_rhs2 (use_stmt) == rhs1)
		{
		  if (gimple_assign_rhs1 (use_stmt) != rhs2)
		    continue;
		}
	      else
		continue;

	      add_stmt = use_stmt;
	      break;
	    }
	  if (add_stmt == NULL)
	    return false;

	  /* If stmt and add_stmt are in the same bb, we need to find out
	     which one is earlier.  If they are in different bbs, we've
	     checked add_stmt is in the same bb as one of the uses of the
	     stmt lhs, so stmt needs to dominate add_stmt too.  */
	  if (gimple_bb (stmt) == gimple_bb (add_stmt))
	    {
	      gimple_stmt_iterator gsif = *gsi;
	      gimple_stmt_iterator gsib = *gsi;
	      int i;
	      /* Search both forward and backward from stmt and have a small
		 upper bound.  */
	      for (i = 0; i < 128; i++)
		{
		  if (!gsi_end_p (gsib))
		    {
		      gsi_prev_nondebug (&gsib);
		      if (gsi_stmt (gsib) == add_stmt)
			{
			  add_first = true;
			  break;
			}
		    }
		  else if (gsi_end_p (gsif))
		    break;
		  if (!gsi_end_p (gsif))
		    {
		      gsi_next_nondebug (&gsif);
		      if (gsi_stmt (gsif) == add_stmt)
			break;
		    }
		}
	      if (i == 128)
		return false;
	      if (add_first)
		*gsi = gsi_for_stmt (add_stmt);
	    }
	}
    }

  if (code == BIT_NOT_EXPR)
    *gsi = gsi_for_stmt (cond_stmt);

  auto_vec<gimple *, 8> mul_stmts;
  if (code == MULT_EXPR && cast_stmt)
    {
      type = TREE_TYPE (cast_lhs);
      gimple *g = SSA_NAME_DEF_STMT (rhs1);
      if (gimple_assign_cast_p (g)
	  && useless_type_conversion_p (type,
					TREE_TYPE (gimple_assign_rhs1 (g)))
	  && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (gimple_assign_rhs1 (g)))
	rhs1 = gimple_assign_rhs1 (g);
      else
	{
	  g = gimple_build_assign (make_ssa_name (type), NOP_EXPR, rhs1);
	  gsi_insert_before (gsi, g, GSI_SAME_STMT);
	  rhs1 = gimple_assign_lhs (g);
	  mul_stmts.quick_push (g);
	}
      if (TREE_CODE (rhs2) == INTEGER_CST)
	rhs2 = fold_convert (type, rhs2);
      else
	{
	  g = SSA_NAME_DEF_STMT (rhs2
);
4387 if (gimple_assign_cast_p (g
)
4388 && useless_type_conversion_p (type
,
4389 TREE_TYPE (gimple_assign_rhs1 (g
)))
4390 && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (gimple_assign_rhs1 (g
)))
4391 rhs2
= gimple_assign_rhs1 (g
);
4394 g
= gimple_build_assign (make_ssa_name (type
), NOP_EXPR
, rhs2
);
4395 gsi_insert_before (gsi
, g
, GSI_SAME_STMT
);
4396 rhs2
= gimple_assign_lhs (g
);
4397 mul_stmts
.quick_push (g
);
4401 tree ctype
= build_complex_type (type
);
4402 gcall
*g
= gimple_build_call_internal (code
== MULT_EXPR
4404 : code
!= MINUS_EXPR
4405 ? IFN_ADD_OVERFLOW
: IFN_SUB_OVERFLOW
,
4407 tree ctmp
= make_ssa_name (ctype
);
4408 gimple_call_set_lhs (g
, ctmp
);
4409 gsi_insert_before (gsi
, g
, GSI_SAME_STMT
);
4410 tree new_lhs
= (maxval
|| cast_stmt
) ? make_ssa_name (type
) : lhs
;
4412 if (code
!= BIT_NOT_EXPR
)
4414 g2
= gimple_build_assign (new_lhs
, REALPART_EXPR
,
4415 build1 (REALPART_EXPR
, type
, ctmp
));
4416 if (maxval
|| cast_stmt
)
4418 gsi_insert_before (gsi
, g2
, GSI_SAME_STMT
);
4420 *gsi
= gsi_for_stmt (stmt
);
4423 gsi_replace (gsi
, g2
, true);
4424 if (code
== MULT_EXPR
)
4426 mul_stmts
.quick_push (g
);
4427 mul_stmts
.quick_push (g2
);
4430 g2
= gimple_build_assign (lhs
, NOP_EXPR
, new_lhs
);
4431 gsi_replace (gsi
, g2
, true);
4432 mul_stmts
.quick_push (g2
);
4436 tree ovf
= make_ssa_name (type
);
4437 g2
= gimple_build_assign (ovf
, IMAGPART_EXPR
,
4438 build1 (IMAGPART_EXPR
, type
, ctmp
));
4439 if (code
!= BIT_NOT_EXPR
)
4440 gsi_insert_after (gsi
, g2
, GSI_NEW_STMT
);
4442 gsi_insert_before (gsi
, g2
, GSI_SAME_STMT
);
4443 if (code
== MULT_EXPR
)
4444 mul_stmts
.quick_push (g2
);
4446 FOR_EACH_IMM_USE_STMT (use_stmt
, iter
, cast_lhs
? cast_lhs
: lhs
)
4448 if (is_gimple_debug (use_stmt
))
4451 gimple
*orig_use_stmt
= use_stmt
;
4452 int ovf_use
= arith_overflow_check_p (stmt
, cast_stmt
, use_stmt
,
4456 gcc_assert (code
!= BIT_NOT_EXPR
);
4459 tree use_lhs
= gimple_assign_lhs (use_stmt
);
4460 gimple_assign_set_rhs1 (use_stmt
, new_lhs
);
4461 if (useless_type_conversion_p (TREE_TYPE (use_lhs
),
4462 TREE_TYPE (new_lhs
)))
4463 gimple_assign_set_rhs_code (use_stmt
, SSA_NAME
);
4464 update_stmt (use_stmt
);
4468 if (gimple_code (use_stmt
) == GIMPLE_COND
)
4470 gcond
*cond_stmt
= as_a
<gcond
*> (use_stmt
);
4471 gimple_cond_set_lhs (cond_stmt
, ovf
);
4472 gimple_cond_set_rhs (cond_stmt
, build_int_cst (type
, 0));
4473 gimple_cond_set_code (cond_stmt
, ovf_use
== 1 ? NE_EXPR
: EQ_EXPR
);
4477 gcc_checking_assert (is_gimple_assign (use_stmt
));
4478 if (gimple_assign_rhs_class (use_stmt
) == GIMPLE_BINARY_RHS
)
4480 gimple_assign_set_rhs1 (use_stmt
, ovf
);
4481 gimple_assign_set_rhs2 (use_stmt
, build_int_cst (type
, 0));
4482 gimple_assign_set_rhs_code (use_stmt
,
4483 ovf_use
== 1 ? NE_EXPR
: EQ_EXPR
);
4487 gcc_checking_assert (gimple_assign_rhs_code (use_stmt
)
4489 tree cond
= build2 (ovf_use
== 1 ? NE_EXPR
: EQ_EXPR
,
4490 boolean_type_node
, ovf
,
4491 build_int_cst (type
, 0));
4492 gimple_assign_set_rhs1 (use_stmt
, cond
);
4495 update_stmt (use_stmt
);
4496 if (code
== MULT_EXPR
&& use_stmt
!= orig_use_stmt
)
4498 gimple_stmt_iterator gsi2
= gsi_for_stmt (orig_use_stmt
);
4499 maybe_optimize_guarding_check (mul_stmts
, use_stmt
, orig_use_stmt
,
4503 if (single_imm_use (gimple_assign_lhs (orig_use_stmt
), &use
,
4505 && gimple_assign_cast_p (cast_stmt
))
4507 gimple_stmt_iterator gsi3
= gsi_for_stmt (cast_stmt
);
4508 gsi_remove (&gsi3
, true);
4509 release_ssa_name (gimple_assign_lhs (cast_stmt
));
4511 gsi_remove (&gsi2
, true);
4512 release_ssa_name (gimple_assign_lhs (orig_use_stmt
));
4517 gimple_stmt_iterator gsi2
= gsi_for_stmt (stmt
);
4518 gsi_remove (&gsi2
, true);
4521 gimple
*g
= gimple_build_assign (gimple_assign_lhs (add_stmt
),
4523 gsi2
= gsi_for_stmt (add_stmt
);
4524 gsi_replace (&gsi2
, g
, true);
4527 else if (code
== BIT_NOT_EXPR
)
4529 *gsi
= gsi_for_stmt (stmt
);
4530 gsi_remove (gsi
, true);
4531 release_ssa_name (lhs
);
/* Helper of match_uaddc_usubc.  Look through an integral cast
   which should preserve [0, 1] range value (unless source has
   1-bit signed type) and the cast has single use.  */
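
/* E.g. (illustration only) given

     _2 = (unsigned int) _1;

   where _1 has a [0, 1] value and the cast is the single use, this
   returns the defining statement of _1 instead of the cast itself.  */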
static gimple *
uaddc_cast (gimple *g)
{
  if (!gimple_assign_cast_p (g))
    return g;
  tree op = gimple_assign_rhs1 (g);
  if (TREE_CODE (op) == SSA_NAME
      && INTEGRAL_TYPE_P (TREE_TYPE (op))
      && (TYPE_PRECISION (TREE_TYPE (op)) > 1
          || TYPE_UNSIGNED (TREE_TYPE (op)))
      && has_single_use (gimple_assign_lhs (g)))
    return SSA_NAME_DEF_STMT (op);
  return g;
}
/* Helper of match_uaddc_usubc.  Look through a NE_EXPR
   comparison with 0 which also preserves [0, 1] value range.  */
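
/* E.g. (illustration only) given

     _3 = _2 != 0;

   the value of _3 stays in [0, 1] whenever _2 is in [0, 1], so this
   returns the defining statement of _2.  */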
static gimple *
uaddc_ne0 (gimple *g)
{
  if (is_gimple_assign (g)
      && gimple_assign_rhs_code (g) == NE_EXPR
      && integer_zerop (gimple_assign_rhs2 (g))
      && TREE_CODE (gimple_assign_rhs1 (g)) == SSA_NAME
      && has_single_use (gimple_assign_lhs (g)))
    return SSA_NAME_DEF_STMT (gimple_assign_rhs1 (g));
  return g;
}
/* Return true if G is {REAL,IMAG}PART_EXPR PART with SSA_NAME
   operand.  */

static bool
uaddc_is_cplxpart (gimple *g, tree_code part)
{
  return (is_gimple_assign (g)
          && gimple_assign_rhs_code (g) == part
          && TREE_CODE (TREE_OPERAND (gimple_assign_rhs1 (g), 0)) == SSA_NAME);
}
/* Try to match e.g.
   _29 = .ADD_OVERFLOW (_3, _4);
   _30 = REALPART_EXPR <_29>;
   _31 = IMAGPART_EXPR <_29>;
   _32 = .ADD_OVERFLOW (_30, _38);
   _33 = REALPART_EXPR <_32>;
   _34 = IMAGPART_EXPR <_32>;
   _35 = _31 + _34;
   as
   _36 = .UADDC (_3, _4, _38);
   _33 = REALPART_EXPR <_36>;
   _35 = IMAGPART_EXPR <_36>;
   or
   _22 = .SUB_OVERFLOW (_6, _5);
   _23 = REALPART_EXPR <_22>;
   _24 = IMAGPART_EXPR <_22>;
   _25 = .SUB_OVERFLOW (_23, _37);
   _26 = REALPART_EXPR <_25>;
   _27 = IMAGPART_EXPR <_25>;
   _28 = _24 | _27;
   as
   _29 = .USUBC (_6, _5, _37);
   _26 = REALPART_EXPR <_29>;
   _28 = IMAGPART_EXPR <_29>;
   provided _38 or _37 above have [0, 1] range
   and _3, _4 and _30 or _6, _5 and _23 are unsigned
   integral types with the same precision.  Whether + or | or ^ is
   used on the IMAGPART_EXPR results doesn't matter, with one of
   added or subtracted operands in [0, 1] range at most one
   .ADD_OVERFLOW or .SUB_OVERFLOW will indicate overflow.  */
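
/* Illustration only (not from the original sources): a double-word
   addition such as

     void
     add128 (unsigned long long a[2], unsigned long long b[2],
             unsigned long long r[2])
     {
       unsigned long long lo = a[0] + b[0];
       unsigned long long carry = lo < a[0];
       r[0] = lo;
       r[1] = a[1] + b[1] + carry;
     }

   is a typical way such chains arise: the low limb becomes .ADD_OVERFLOW
   and the high limb the carry-propagating addition that is rewritten to
   .UADDC when the target provides the uaddc5 named pattern.  */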
static bool
match_uaddc_usubc (gimple_stmt_iterator *gsi, gimple *stmt, tree_code code)
{
  tree rhs[4];
  rhs[0] = gimple_assign_rhs1 (stmt);
  rhs[1] = gimple_assign_rhs2 (stmt);
  rhs[2] = NULL_TREE;
  rhs[3] = NULL_TREE;
  tree type = TREE_TYPE (rhs[0]);
  if (!INTEGRAL_TYPE_P (type) || !TYPE_UNSIGNED (type))
    return false;

  auto_vec<gimple *, 2> temp_stmts;
  if (code != BIT_IOR_EXPR && code != BIT_XOR_EXPR)
    {
      /* If overflow flag is ignored on the MSB limb, we can end up with
         the most significant limb handled as r = op1 + op2 + ovf1 + ovf2;
         or r = op1 - op2 - ovf1 - ovf2; or various equivalent expressions
         thereof.  Handle those like the ovf = ovf1 + ovf2; case to recognize
         the limb below the MSB, but also create another .UADDC/.USUBC call
         for the most significant limb.

         First look through assignments with the same rhs code as CODE,
         with the exception that subtraction of a constant is canonicalized
         into addition of its negation.  rhs[0] will be minuend for
         subtractions and one of addends for addition, all other assigned
         rhs[i] operands will be subtrahends or other addends.  */
      while (TREE_CODE (rhs[0]) == SSA_NAME && !rhs[3])
        {
          gimple *g = SSA_NAME_DEF_STMT (rhs[0]);
          if (has_single_use (rhs[0])
              && is_gimple_assign (g)
              && (gimple_assign_rhs_code (g) == code
                  || (code == MINUS_EXPR
                      && gimple_assign_rhs_code (g) == PLUS_EXPR
                      && TREE_CODE (gimple_assign_rhs2 (g)) == INTEGER_CST)))
            {
              tree r2 = gimple_assign_rhs2 (g);
              if (gimple_assign_rhs_code (g) != code)
                {
                  r2 = const_unop (NEGATE_EXPR, TREE_TYPE (r2), r2);
                  if (!r2)
                    break;
                }
              rhs[0] = gimple_assign_rhs1 (g);
              tree &r = rhs[2] ? rhs[3] : rhs[2];
              r = r2;
              temp_stmts.quick_push (g);
            }
          else
            break;
        }
      for (int i = 1; i <= 2; ++i)
        while (rhs[i] && TREE_CODE (rhs[i]) == SSA_NAME && !rhs[3])
          {
            gimple *g = SSA_NAME_DEF_STMT (rhs[i]);
            if (has_single_use (rhs[i])
                && is_gimple_assign (g)
                && gimple_assign_rhs_code (g) == PLUS_EXPR)
              {
                rhs[i] = gimple_assign_rhs1 (g);
                if (rhs[2])
                  rhs[3] = gimple_assign_rhs2 (g);
                else
                  rhs[2] = gimple_assign_rhs2 (g);
                temp_stmts.quick_push (g);
              }
            else
              break;
          }
      /* If there are just 3 addends or one minuend and two subtrahends,
         check for UADDC or USUBC being pattern recognized earlier.
         Say r = op1 + op2 + ovf1 + ovf2; where the (ovf1 + ovf2) part
         got pattern matched earlier as __imag__ .UADDC (arg1, arg2, arg3)
         etc.  */
      if (rhs[2] && !rhs[3])
        {
          for (int i = (code == MINUS_EXPR ? 1 : 0); i < 3; ++i)
            if (TREE_CODE (rhs[i]) == SSA_NAME)
              {
                gimple *im = uaddc_cast (SSA_NAME_DEF_STMT (rhs[i]));
                im = uaddc_ne0 (im);
                if (uaddc_is_cplxpart (im, IMAGPART_EXPR))
                  {
                    /* We found one of the 3 addends or 2 subtrahends to be
                       __imag__ of something, verify it is .UADDC/.USUBC.  */
                    tree rhs1 = gimple_assign_rhs1 (im);
                    gimple *ovf = SSA_NAME_DEF_STMT (TREE_OPERAND (rhs1, 0));
                    tree ovf_lhs = NULL_TREE;
                    tree ovf_arg1 = NULL_TREE, ovf_arg2 = NULL_TREE;
                    if (gimple_call_internal_p (ovf, code == PLUS_EXPR
                                                     ? IFN_ADD_OVERFLOW
                                                     : IFN_SUB_OVERFLOW))
                      {
                        /* Or verify it is .ADD_OVERFLOW/.SUB_OVERFLOW.
                           This is for the case of 2 chained .UADDC/.USUBC,
                           where the first one uses 0 carry-in and the second
                           one ignores the carry-out.
                           So, something like:
                           _16 = .ADD_OVERFLOW (_1, _2);
                           _17 = REALPART_EXPR <_16>;
                           _18 = IMAGPART_EXPR <_16>;
                           _15 = _3 + _4;
                           _12 = _15 + _18;
                           where the first 3 statements come from the lower
                           limb addition and the last 2 from the higher limb
                           which ignores carry-out.  */
                        ovf_lhs = gimple_call_lhs (ovf);
                        tree ovf_lhs_type = TREE_TYPE (TREE_TYPE (ovf_lhs));
                        ovf_arg1 = gimple_call_arg (ovf, 0);
                        ovf_arg2 = gimple_call_arg (ovf, 1);
                        /* In that case we need to punt if the types don't
                           match.  */
                        if (!types_compatible_p (type, ovf_lhs_type)
                            || !types_compatible_p (type, TREE_TYPE (ovf_arg1))
                            || !types_compatible_p (type,
                                                    TREE_TYPE (ovf_arg2)))
                          ovf_lhs = NULL_TREE;
                        else
                          {
                            for (int i = (code == PLUS_EXPR ? 1 : 0);
                                 i >= 0; --i)
                              {
                                tree r = gimple_call_arg (ovf, i);
                                if (TREE_CODE (r) != SSA_NAME)
                                  continue;
                                if (uaddc_is_cplxpart (SSA_NAME_DEF_STMT (r),
                                                       REALPART_EXPR))
                                  {
                                    /* Punt if one of the args which isn't
                                       subtracted isn't __real__; that could
                                       then prevent better match later.
                                       E.g.:
                                       _3 = .ADD_OVERFLOW (_1, _2);
                                       _4 = REALPART_EXPR <_3>;
                                       _5 = IMAGPART_EXPR <_3>;
                                       _7 = .ADD_OVERFLOW (_4, _6);
                                       _8 = REALPART_EXPR <_7>;
                                       _9 = IMAGPART_EXPR <_7>;
                                       _12 = _10 + _11;
                                       _13 = _12 + _9;
                                       _14 = _13 + _5;
                                       We want to match this when called on
                                       the last stmt as a pair of .UADDC calls,
                                       but without this check we could turn
                                       that prematurely on _13 = _12 + _9;
                                       stmt into .UADDC with 0 carry-in just
                                       on the second .ADD_OVERFLOW call and
                                       another replacing the _12 and _13
                                       additions.  */
                                    ovf_lhs = NULL_TREE;
                                    break;
                                  }
                              }
                          }
                        if (ovf_lhs)
                          {
                            use_operand_p use_p;
                            imm_use_iterator iter;
                            tree re_lhs = NULL_TREE;
                            FOR_EACH_IMM_USE_FAST (use_p, iter, ovf_lhs)
                              {
                                gimple *use_stmt = USE_STMT (use_p);
                                if (is_gimple_debug (use_stmt))
                                  continue;
                                if (use_stmt == im)
                                  continue;
                                if (!uaddc_is_cplxpart (use_stmt,
                                                        REALPART_EXPR))
                                  {
                                    ovf_lhs = NULL_TREE;
                                    break;
                                  }
                                re_lhs = gimple_assign_lhs (use_stmt);
                              }
                            if (ovf_lhs && re_lhs)
                              {
                                FOR_EACH_IMM_USE_FAST (use_p, iter, re_lhs)
                                  {
                                    gimple *use_stmt = USE_STMT (use_p);
                                    if (is_gimple_debug (use_stmt))
                                      continue;
                                    internal_fn ifn
                                      = gimple_call_internal_fn (ovf);
                                    /* Punt if the __real__ of lhs is used
                                       in the same .*_OVERFLOW call.
                                       E.g.:
                                       _3 = .ADD_OVERFLOW (_1, _2);
                                       _4 = REALPART_EXPR <_3>;
                                       _5 = IMAGPART_EXPR <_3>;
                                       _7 = .ADD_OVERFLOW (_4, _6);
                                       _8 = REALPART_EXPR <_7>;
                                       _9 = IMAGPART_EXPR <_7>;
                                       _12 = _10 + _11;
                                       _13 = _12 + _5;
                                       _14 = _13 + _9;
                                       We want to match this when called on
                                       the last stmt as a pair of .UADDC calls,
                                       but without this check we could turn
                                       that prematurely on _13 = _12 + _5;
                                       stmt into .UADDC with 0 carry-in just
                                       on the first .ADD_OVERFLOW call and
                                       another replacing the _12 and _13
                                       additions.  */
                                    if (gimple_call_internal_p (use_stmt, ifn))
                                      {
                                        ovf_lhs = NULL_TREE;
                                        break;
                                      }
                                  }
                              }
                          }
                      }
                    if ((ovf_lhs
                         || gimple_call_internal_p (ovf,
                                                    code == PLUS_EXPR
                                                    ? IFN_UADDC : IFN_USUBC))
                        && (optab_handler (code == PLUS_EXPR
                                           ? uaddc5_optab : usubc5_optab,
                                           TYPE_MODE (type))
                            != CODE_FOR_nothing))
                      {
                        /* And in that case build another .UADDC/.USUBC
                           call for the most significant limb addition.
                           Overflow bit is ignored here.  */
                        if (i != 2)
                          std::swap (rhs[i], rhs[2]);
                        gimple *g
                          = gimple_build_call_internal (code == PLUS_EXPR
                                                        ? IFN_UADDC
                                                        : IFN_USUBC,
                                                        3, rhs[0], rhs[1],
                                                        rhs[2]);
                        tree nlhs = make_ssa_name (build_complex_type (type));
                        gimple_call_set_lhs (g, nlhs);
                        gsi_insert_before (gsi, g, GSI_SAME_STMT);
                        tree ilhs = gimple_assign_lhs (stmt);
                        g = gimple_build_assign (ilhs, REALPART_EXPR,
                                                 build1 (REALPART_EXPR,
                                                         TREE_TYPE (ilhs),
                                                         nlhs));
                        gsi_replace (gsi, g, true);
                        /* And if it is initialized from result of __imag__
                           of .{ADD,SUB}_OVERFLOW call, replace that
                           call with .U{ADD,SUB}C call with the same arguments,
                           just 0 added as third argument.  This isn't strictly
                           necessary, .ADD_OVERFLOW (x, y) and .UADDC (x, y, 0)
                           produce the same result, but may result in better
                           generated code on some targets where the backend can
                           better prepare in how the result will be used.  */
                        if (ovf_lhs)
                          {
                            tree zero = build_zero_cst (type);
                            g = gimple_build_call_internal (code == PLUS_EXPR
                                                            ? IFN_UADDC
                                                            : IFN_USUBC,
                                                            3, ovf_arg1,
                                                            ovf_arg2, zero);
                            gimple_call_set_lhs (g, ovf_lhs);
                            gimple_stmt_iterator gsi2 = gsi_for_stmt (ovf);
                            gsi_replace (&gsi2, g, true);
                          }
                        return true;
                      }
                  }
              }
          return false;
        }
      if (code == MINUS_EXPR && !rhs[2])
        return false;
      if (code == MINUS_EXPR)
        /* Code below expects rhs[0] and rhs[1] to have the IMAGPART_EXPRs.
           So, for MINUS_EXPR swap the single added rhs operand (others are
           subtracted) to rhs[3].  */
        std::swap (rhs[0], rhs[3]);
    }
  /* Walk from both operands of STMT (for +/- even sometimes from
     all the 4 addends or 3 subtrahends), see through casts and != 0
     statements which would preserve [0, 1] range of values and
     check which is initialized from __imag__.  */
  gimple *im1 = NULL, *im2 = NULL;
  for (int i = 0; i < (code == MINUS_EXPR ? 3 : 4); i++)
    if (rhs[i] && TREE_CODE (rhs[i]) == SSA_NAME)
      {
        gimple *im = uaddc_cast (SSA_NAME_DEF_STMT (rhs[i]));
        im = uaddc_ne0 (im);
        if (uaddc_is_cplxpart (im, IMAGPART_EXPR))
          {
            if (im1 == NULL)
              {
                im1 = im;
                if (i != 0)
                  std::swap (rhs[0], rhs[i]);
              }
            else
              {
                im2 = im;
                if (i != 1)
                  std::swap (rhs[1], rhs[i]);
                break;
              }
          }
      }
  /* If we don't find at least two, punt.  */
  if (!im2)
    return false;
  /* Check they are __imag__ of .ADD_OVERFLOW or .SUB_OVERFLOW call results,
     either both .ADD_OVERFLOW or both .SUB_OVERFLOW and that we have
     uaddc5/usubc5 named pattern for the corresponding mode.  */
  gimple *ovf1
    = SSA_NAME_DEF_STMT (TREE_OPERAND (gimple_assign_rhs1 (im1), 0));
  gimple *ovf2
    = SSA_NAME_DEF_STMT (TREE_OPERAND (gimple_assign_rhs1 (im2), 0));
  internal_fn ifn;
  if (!is_gimple_call (ovf1)
      || !gimple_call_internal_p (ovf1)
      || ((ifn = gimple_call_internal_fn (ovf1)) != IFN_ADD_OVERFLOW
          && ifn != IFN_SUB_OVERFLOW)
      || !gimple_call_internal_p (ovf2, ifn)
      || optab_handler (ifn == IFN_ADD_OVERFLOW ? uaddc5_optab : usubc5_optab,
                        TYPE_MODE (type)) == CODE_FOR_nothing
      || (rhs[2]
          && optab_handler (code == PLUS_EXPR ? uaddc5_optab : usubc5_optab,
                            TYPE_MODE (type)) == CODE_FOR_nothing))
    return false;
  tree arg1, arg2, arg3 = NULL_TREE;
  gimple *re1 = NULL, *re2 = NULL;
  /* On one of the two calls, one of the .ADD_OVERFLOW/.SUB_OVERFLOW arguments
     should be initialized from __real__ of the other of the two calls.
     Though, for .SUB_OVERFLOW, it has to be the first argument, not the
     second one.  */
  for (int i = (ifn == IFN_ADD_OVERFLOW ? 1 : 0); i >= 0; --i)
    for (gimple *ovf = ovf1; ovf; ovf = (ovf == ovf1 ? ovf2 : NULL))
      {
        tree arg = gimple_call_arg (ovf, i);
        if (TREE_CODE (arg) != SSA_NAME)
          continue;
        re1 = SSA_NAME_DEF_STMT (arg);
        if (uaddc_is_cplxpart (re1, REALPART_EXPR)
            && (SSA_NAME_DEF_STMT (TREE_OPERAND (gimple_assign_rhs1 (re1), 0))
                == (ovf == ovf1 ? ovf2 : ovf1)))
          {
            if (ovf == ovf1)
              {
                /* Make sure ovf2 is the .*_OVERFLOW call with argument
                   initialized from __real__ of ovf1.  */
                std::swap (rhs[0], rhs[1]);
                std::swap (im1, im2);
                std::swap (ovf1, ovf2);
              }
            arg3 = gimple_call_arg (ovf, 1 - i);
            i = -1;
            break;
          }
      }
  if (!arg3)
    return false;
  arg1 = gimple_call_arg (ovf1, 0);
  arg2 = gimple_call_arg (ovf1, 1);
  if (!types_compatible_p (type, TREE_TYPE (arg1)))
    return false;
  int kind[2] = { 0, 0 };
  tree arg_im[2] = { NULL_TREE, NULL_TREE };
  /* At least one of arg2 and arg3 should have type compatible
     with arg1/rhs[0], and the other one should have value in [0, 1]
     range.  If both are in [0, 1] range and type compatible with
     arg1/rhs[0], try harder to find after looking through casts,
     != 0 comparisons which one is initialized to __imag__ of
     .{ADD,SUB}_OVERFLOW or .U{ADD,SUB}C call results.  */
  for (int i = 0; i < 2; ++i)
    {
      tree arg = i == 0 ? arg2 : arg3;
      if (types_compatible_p (type, TREE_TYPE (arg)))
        kind[i] = 1;
      if (!INTEGRAL_TYPE_P (TREE_TYPE (arg))
          || (TYPE_PRECISION (TREE_TYPE (arg)) == 1
              && !TYPE_UNSIGNED (TREE_TYPE (arg))))
        continue;
      if (tree_zero_one_valued_p (arg))
        kind[i] |= 2;
      if (TREE_CODE (arg) == SSA_NAME)
        {
          gimple *g = SSA_NAME_DEF_STMT (arg);
          if (gimple_assign_cast_p (g))
            {
              tree op = gimple_assign_rhs1 (g);
              if (TREE_CODE (op) == SSA_NAME
                  && INTEGRAL_TYPE_P (TREE_TYPE (op)))
                g = SSA_NAME_DEF_STMT (op);
            }
          g = uaddc_ne0 (g);
          if (!uaddc_is_cplxpart (g, IMAGPART_EXPR))
            continue;
          arg_im[i] = gimple_assign_lhs (g);
          g = SSA_NAME_DEF_STMT (TREE_OPERAND (gimple_assign_rhs1 (g), 0));
          if (!is_gimple_call (g) || !gimple_call_internal_p (g))
            continue;
          switch (gimple_call_internal_fn (g))
            {
            case IFN_ADD_OVERFLOW:
            case IFN_SUB_OVERFLOW:
            case IFN_UADDC:
            case IFN_USUBC:
              break;
            default:
              continue;
            }
          kind[i] |= 4;
        }
    }
  /* Make arg2 the one with compatible type and arg3 the one
     with [0, 1] range.  If both is true for both operands,
     prefer as arg3 result of __imag__ of some ifn.  */
  if ((kind[0] & 1) == 0 || ((kind[1] & 1) != 0 && kind[0] > kind[1]))
    {
      std::swap (arg2, arg3);
      std::swap (kind[0], kind[1]);
      std::swap (arg_im[0], arg_im[1]);
    }
  if ((kind[0] & 1) == 0 || (kind[1] & 6) == 0)
    return false;
  if (!has_single_use (gimple_assign_lhs (im1))
      || !has_single_use (gimple_assign_lhs (im2))
      || !has_single_use (gimple_assign_lhs (re1))
      || num_imm_uses (gimple_call_lhs (ovf1)) != 2)
    return false;
  /* Check that ovf2's result is used in __real__ and set re2
     to that statement.  */
  use_operand_p use_p;
  imm_use_iterator iter;
  tree lhs = gimple_call_lhs (ovf2);
  FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
    {
      gimple *use_stmt = USE_STMT (use_p);
      if (is_gimple_debug (use_stmt))
        continue;
      if (use_stmt == im2)
        continue;
      if (re2)
        return false;
      if (!uaddc_is_cplxpart (use_stmt, REALPART_EXPR))
        return false;
      re2 = use_stmt;
    }
  /* Build .UADDC/.USUBC call which will be placed before the stmt.  */
  gimple_stmt_iterator gsi2 = gsi_for_stmt (ovf2);
  gimple *g;
  if ((kind[1] & 4) != 0 && types_compatible_p (type, TREE_TYPE (arg_im[1])))
    arg3 = arg_im[1];
  if ((kind[1] & 1) == 0)
    {
      if (TREE_CODE (arg3) == INTEGER_CST)
        arg3 = fold_convert (type, arg3);
      else
        {
          g = gimple_build_assign (make_ssa_name (type), NOP_EXPR, arg3);
          gsi_insert_before (&gsi2, g, GSI_SAME_STMT);
          arg3 = gimple_assign_lhs (g);
        }
    }
  g = gimple_build_call_internal (ifn == IFN_ADD_OVERFLOW
                                  ? IFN_UADDC : IFN_USUBC,
                                  3, arg1, arg2, arg3);
  tree nlhs = make_ssa_name (TREE_TYPE (lhs));
  gimple_call_set_lhs (g, nlhs);
  gsi_insert_before (&gsi2, g, GSI_SAME_STMT);
  /* In the case where stmt is | or ^ of two overflow flags
     or addition of those, replace stmt with __imag__ of the above
     added call.  In case of arg1 + arg2 + (ovf1 + ovf2) or
     arg1 - arg2 - (ovf1 + ovf2) just emit it before stmt.  */
  tree ilhs = rhs[2] ? make_ssa_name (type) : gimple_assign_lhs (stmt);
  g = gimple_build_assign (ilhs, IMAGPART_EXPR,
                           build1 (IMAGPART_EXPR, TREE_TYPE (ilhs), nlhs));
  if (rhs[2])
    {
      gsi_insert_before (gsi, g, GSI_SAME_STMT);
      /* Remove some further statements which can't be kept in the IL because
         they can use SSA_NAMEs whose setter is going to be removed too.  */
      for (gimple *g2 : temp_stmts)
        {
          gsi2 = gsi_for_stmt (g2);
          gsi_remove (&gsi2, true);
        }
    }
  else
    gsi_replace (gsi, g, true);
  /* Remove some statements which can't be kept in the IL because they
     use SSA_NAME whose setter is going to be removed too.  */
  tree rhs1 = rhs[1];
  for (int i = 0; i < 2; i++)
    if (rhs1 == gimple_assign_lhs (im2))
      break;
    else
      {
        g = SSA_NAME_DEF_STMT (rhs1);
        rhs1 = gimple_assign_rhs1 (g);
        gsi2 = gsi_for_stmt (g);
        gsi_remove (&gsi2, true);
      }
  gcc_checking_assert (rhs1 == gimple_assign_lhs (im2));
  gsi2 = gsi_for_stmt (im2);
  gsi_remove (&gsi2, true);
  /* Replace the re2 statement with __real__ of the newly added
     .UADDC/.USUBC call.  */
  if (re2)
    {
      gsi2 = gsi_for_stmt (re2);
      tree rlhs = gimple_assign_lhs (re2);
      g = gimple_build_assign (rlhs, REALPART_EXPR,
                               build1 (REALPART_EXPR, TREE_TYPE (rlhs), nlhs));
      gsi_replace (&gsi2, g, true);
    }
  if (rhs[2])
    {
      /* If this is the arg1 + arg2 + (ovf1 + ovf2) or
         arg1 - arg2 - (ovf1 + ovf2) case for the most significant limb,
         replace stmt with __real__ of another .UADDC/.USUBC call which
         handles the most significant limb.  Overflow flag from this is
         ignored.  */
      g = gimple_build_call_internal (code == PLUS_EXPR
                                      ? IFN_UADDC : IFN_USUBC,
                                      3, rhs[3], rhs[2], ilhs);
      nlhs = make_ssa_name (TREE_TYPE (lhs));
      gimple_call_set_lhs (g, nlhs);
      gsi_insert_before (gsi, g, GSI_SAME_STMT);
      ilhs = gimple_assign_lhs (stmt);
      g = gimple_build_assign (ilhs, REALPART_EXPR,
                               build1 (REALPART_EXPR, TREE_TYPE (ilhs), nlhs));
      gsi_replace (gsi, g, true);
    }
  if (TREE_CODE (arg3) == SSA_NAME)
    {
      /* When pattern recognizing the second least significant limb
         above (i.e. first pair of .{ADD,SUB}_OVERFLOW calls for one limb),
         check if the [0, 1] range argument (i.e. carry in) isn't the
         result of another .{ADD,SUB}_OVERFLOW call (one handling the
         least significant limb).  Again look through casts and != 0.  */
      gimple *im3 = SSA_NAME_DEF_STMT (arg3);
      for (int i = 0; i < 2; ++i)
        {
          gimple *im4 = uaddc_cast (im3);
          if (im4 == im3)
            break;
          else
            im3 = im4;
        }
      im3 = uaddc_ne0 (im3);
      if (uaddc_is_cplxpart (im3, IMAGPART_EXPR))
        {
          gimple *ovf3
            = SSA_NAME_DEF_STMT (TREE_OPERAND (gimple_assign_rhs1 (im3), 0));
          if (gimple_call_internal_p (ovf3, ifn))
            {
              lhs = gimple_call_lhs (ovf3);
              arg1 = gimple_call_arg (ovf3, 0);
              arg2 = gimple_call_arg (ovf3, 1);
              if (types_compatible_p (type, TREE_TYPE (TREE_TYPE (lhs)))
                  && types_compatible_p (type, TREE_TYPE (arg1))
                  && types_compatible_p (type, TREE_TYPE (arg2)))
                {
                  /* And if it is initialized from result of __imag__
                     of .{ADD,SUB}_OVERFLOW call, replace that
                     call with .U{ADD,SUB}C call with the same arguments,
                     just 0 added as third argument.  This isn't strictly
                     necessary, .ADD_OVERFLOW (x, y) and .UADDC (x, y, 0)
                     produce the same result, but may result in better
                     generated code on some targets where the backend can
                     better prepare in how the result will be used.  */
                  g = gimple_build_call_internal (ifn == IFN_ADD_OVERFLOW
                                                  ? IFN_UADDC : IFN_USUBC,
                                                  3, arg1, arg2,
                                                  build_zero_cst (type));
                  gimple_call_set_lhs (g, lhs);
                  gsi2 = gsi_for_stmt (ovf3);
                  gsi_replace (&gsi2, g, true);
                }
            }
        }
    }
  return true;
}
/* Replace .POPCOUNT (x) == 1 or .POPCOUNT (x) != 1 with
   (x ^ (x - 1)) > x - 1 or (x ^ (x - 1)) <= x - 1 (or, when x is
   known non-zero, (x & (x - 1)) == 0 or (x & (x - 1)) != 0) if
   .POPCOUNT isn't a direct optab.  */
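
/* Illustration only (not from the original sources):

     int
     is_power_of_two (unsigned int x)
     {
       return __builtin_popcount (x) == 1;
     }

   On targets without a popcount pattern this avoids a libgcc popcount
   call: (x ^ (x - 1)) > x - 1 holds exactly when x has a single bit set,
   and when x is known non-zero the cheaper (x & (x - 1)) == 0 is used.  */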
static void
match_single_bit_test (gimple_stmt_iterator *gsi, gimple *stmt)
{
  tree clhs, crhs;
  enum tree_code code;
  if (gimple_code (stmt) == GIMPLE_COND)
    {
      clhs = gimple_cond_lhs (stmt);
      crhs = gimple_cond_rhs (stmt);
      code = gimple_cond_code (stmt);
    }
  else
    {
      clhs = gimple_assign_rhs1 (stmt);
      crhs = gimple_assign_rhs2 (stmt);
      code = gimple_assign_rhs_code (stmt);
    }
  if (code != EQ_EXPR && code != NE_EXPR)
    return;
  if (TREE_CODE (clhs) != SSA_NAME || !integer_onep (crhs))
    return;
  gimple *call = SSA_NAME_DEF_STMT (clhs);
  combined_fn cfn = gimple_call_combined_fn (call);
  switch (cfn)
    {
    CASE_CFN_POPCOUNT:
      break;
    default:
      return;
    }
  if (!has_single_use (clhs))
    return;
  tree arg = gimple_call_arg (call, 0);
  tree type = TREE_TYPE (arg);
  if (!INTEGRAL_TYPE_P (type))
    return;
  bool nonzero_arg = tree_expr_nonzero_p (arg);
  if (direct_internal_fn_supported_p (IFN_POPCOUNT, type, OPTIMIZE_FOR_BOTH))
    {
      /* Tell expand_POPCOUNT the popcount result is only used in equality
         comparison with one, so that it can decide based on rtx costs.  */
      gimple *g = gimple_build_call_internal (IFN_POPCOUNT, 2, arg,
                                              nonzero_arg ? integer_zero_node
                                              : integer_one_node);
      gimple_call_set_lhs (g, gimple_call_lhs (call));
      gimple_stmt_iterator gsi2 = gsi_for_stmt (call);
      gsi_replace (&gsi2, g, true);
      return;
    }
  tree argm1 = make_ssa_name (type);
  gimple *g = gimple_build_assign (argm1, PLUS_EXPR, arg,
                                   build_int_cst (type, -1));
  gsi_insert_before (gsi, g, GSI_SAME_STMT);
  g = gimple_build_assign (make_ssa_name (type),
                           nonzero_arg ? BIT_AND_EXPR : BIT_XOR_EXPR,
                           arg, argm1);
  gsi_insert_before (gsi, g, GSI_SAME_STMT);
  tree_code cmpcode;
  if (nonzero_arg)
    {
      argm1 = build_zero_cst (type);
      cmpcode = code;
    }
  else
    cmpcode = code == EQ_EXPR ? GT_EXPR : LE_EXPR;
  if (gcond *cond = dyn_cast <gcond *> (stmt))
    {
      gimple_cond_set_lhs (cond, gimple_assign_lhs (g));
      gimple_cond_set_rhs (cond, argm1);
      gimple_cond_set_code (cond, cmpcode);
    }
  else
    {
      gimple_assign_set_rhs1 (stmt, gimple_assign_lhs (g));
      gimple_assign_set_rhs2 (stmt, argm1);
      gimple_assign_set_rhs_code (stmt, cmpcode);
    }
  update_stmt (stmt);
  gimple_stmt_iterator gsi2 = gsi_for_stmt (call);
  gsi_remove (&gsi2, true);
  release_defs (call);
}
/* Return true if target has support for divmod.  */

static bool
target_supports_divmod_p (optab divmod_optab, optab div_optab,
                          machine_mode mode)
{
  /* If target supports hardware divmod insn, use it for divmod.  */
  if (optab_handler (divmod_optab, mode) != CODE_FOR_nothing)
    return true;

  /* Check if libfunc for divmod is available.  */
  rtx libfunc = optab_libfunc (divmod_optab, mode);
  if (libfunc != NULL_RTX)
    {
      /* If optab_handler exists for div_optab, perhaps in a wider mode,
         we don't want to use the libfunc even if it exists for given mode.  */
      machine_mode div_mode;
      FOR_EACH_MODE_FROM (div_mode, mode)
        if (optab_handler (div_optab, div_mode) != CODE_FOR_nothing)
          return false;

      /* If the target provides the divmod libfunc expander, the libfunc
         can be used.  */
      return targetm.expand_divmod_libfunc != NULL;
    }

  return false;
}
/* Check if stmt is candidate for divmod transform.  */

static bool
divmod_candidate_p (gassign *stmt)
{
  tree type = TREE_TYPE (gimple_assign_lhs (stmt));
  machine_mode mode = TYPE_MODE (type);
  optab divmod_optab, div_optab;

  if (TYPE_UNSIGNED (type))
    {
      divmod_optab = udivmod_optab;
      div_optab = udiv_optab;
    }
  else
    {
      divmod_optab = sdivmod_optab;
      div_optab = sdiv_optab;
    }

  tree op1 = gimple_assign_rhs1 (stmt);
  tree op2 = gimple_assign_rhs2 (stmt);

  /* Disable the transform if either is a constant, since division-by-constant
     may have specialized expansion.  */
  if (CONSTANT_CLASS_P (op1))
    return false;

  if (CONSTANT_CLASS_P (op2))
    {
      if (integer_pow2p (op2))
        return false;

      if (element_precision (type) <= HOST_BITS_PER_WIDE_INT
          && element_precision (type) <= BITS_PER_WORD)
        return false;

      /* If the divisor is not power of 2 and the precision wider than
         HWI, expand_divmod punts on that, so in that case it is better
         to use divmod optab or libfunc.  Similarly if choose_multiplier
         might need pre/post shifts of BITS_PER_WORD or more.  */
    }

  /* Exclude the case where TYPE_OVERFLOW_TRAPS (type) as that should
     expand using the [su]divv optabs.  */
  if (TYPE_OVERFLOW_TRAPS (type))
    return false;

  if (!target_supports_divmod_p (divmod_optab, div_optab, mode))
    return false;

  return true;
}
/* This function looks for:
   t1 = a TRUNC_DIV_EXPR b;
   t2 = a TRUNC_MOD_EXPR b;
   and transforms it to the following sequence:
   complex_tmp = DIVMOD (a, b);
   t1 = REALPART_EXPR(complex_tmp);
   t2 = IMAGPART_EXPR(complex_tmp);
   For conditions enabling the transform see divmod_candidate_p().

   The pass has three parts:
   1) Find top_stmt which is trunc_div or trunc_mod stmt and dominates all
      other trunc_div_expr and trunc_mod_expr stmts.
   2) Add top_stmt and all trunc_div and trunc_mod stmts dominated by top_stmt
      to stmts vector.
   3) Insert DIVMOD call just before top_stmt and update entries in
      stmts vector to use return value of DIVMOD (REALPART_EXPR for div,
      IMAGPART_EXPR for mod).  */
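
/* Illustration only (not from the original sources):

     void
     div_and_mod (int a, int b, int *q, int *r)
     {
       *q = a / b;
       *r = a % b;
     }

   is a typical input; both statements share the same operands, so they
   are rewritten to one DIVMOD internal call plus a REALPART_EXPR and an
   IMAGPART_EXPR of its complex result, as described above.  */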
static bool
convert_to_divmod (gassign *stmt)
{
  if (stmt_can_throw_internal (cfun, stmt)
      || !divmod_candidate_p (stmt))
    return false;

  tree op1 = gimple_assign_rhs1 (stmt);
  tree op2 = gimple_assign_rhs2 (stmt);

  imm_use_iterator use_iter;
  gimple *use_stmt;
  auto_vec<gimple *> stmts;

  gimple *top_stmt = stmt;
  basic_block top_bb = gimple_bb (stmt);

  /* Part 1: Try to set top_stmt to "topmost" stmt that dominates
     at-least stmt and possibly other trunc_div/trunc_mod stmts
     having same operands as stmt.  */

  FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, op1)
    {
      if (is_gimple_assign (use_stmt)
          && (gimple_assign_rhs_code (use_stmt) == TRUNC_DIV_EXPR
              || gimple_assign_rhs_code (use_stmt) == TRUNC_MOD_EXPR)
          && operand_equal_p (op1, gimple_assign_rhs1 (use_stmt), 0)
          && operand_equal_p (op2, gimple_assign_rhs2 (use_stmt), 0))
        {
          if (stmt_can_throw_internal (cfun, use_stmt))
            continue;

          basic_block bb = gimple_bb (use_stmt);

          if (bb == top_bb)
            {
              if (gimple_uid (use_stmt) < gimple_uid (top_stmt))
                top_stmt = use_stmt;
            }
          else if (dominated_by_p (CDI_DOMINATORS, top_bb, bb))
            {
              top_bb = bb;
              top_stmt = use_stmt;
            }
        }
    }

  tree top_op1 = gimple_assign_rhs1 (top_stmt);
  tree top_op2 = gimple_assign_rhs2 (top_stmt);

  stmts.safe_push (top_stmt);
  bool div_seen = (gimple_assign_rhs_code (top_stmt) == TRUNC_DIV_EXPR);

  /* Part 2: Add all trunc_div/trunc_mod statements dominated by top_bb
     to stmts vector.  The 2nd loop will always add stmt to stmts vector, since
     gimple_bb (top_stmt) dominates gimple_bb (stmt), so the
     2nd loop ends up adding at-least single trunc_mod_expr stmt.  */

  FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, top_op1)
    {
      if (is_gimple_assign (use_stmt)
          && (gimple_assign_rhs_code (use_stmt) == TRUNC_DIV_EXPR
              || gimple_assign_rhs_code (use_stmt) == TRUNC_MOD_EXPR)
          && operand_equal_p (top_op1, gimple_assign_rhs1 (use_stmt), 0)
          && operand_equal_p (top_op2, gimple_assign_rhs2 (use_stmt), 0))
        {
          if (use_stmt == top_stmt
              || stmt_can_throw_internal (cfun, use_stmt)
              || !dominated_by_p (CDI_DOMINATORS, gimple_bb (use_stmt),
                                  top_bb))
            continue;

          stmts.safe_push (use_stmt);
          if (gimple_assign_rhs_code (use_stmt) == TRUNC_DIV_EXPR)
            div_seen = true;
        }
    }

  if (!div_seen)
    return false;

  /* Part 3: Create libcall to internal fn DIVMOD:
     divmod_tmp = DIVMOD (op1, op2).  */

  gcall *call_stmt = gimple_build_call_internal (IFN_DIVMOD, 2, op1, op2);
  tree res = make_temp_ssa_name (build_complex_type (TREE_TYPE (op1)),
                                 call_stmt, "divmod_tmp");
  gimple_call_set_lhs (call_stmt, res);
  /* We rejected throwing statements above.  */
  gimple_call_set_nothrow (call_stmt, true);

  /* Insert the call before top_stmt.  */
  gimple_stmt_iterator top_stmt_gsi = gsi_for_stmt (top_stmt);
  gsi_insert_before (&top_stmt_gsi, call_stmt, GSI_SAME_STMT);

  widen_mul_stats.divmod_calls_inserted++;

  /* Update all statements in stmts vector:
     lhs = op1 TRUNC_DIV_EXPR op2 -> lhs = REALPART_EXPR<divmod_tmp>
     lhs = op1 TRUNC_MOD_EXPR op2 -> lhs = IMAGPART_EXPR<divmod_tmp>.  */

  for (unsigned i = 0; stmts.iterate (i, &use_stmt); ++i)
    {
      tree new_rhs;

      switch (gimple_assign_rhs_code (use_stmt))
        {
        case TRUNC_DIV_EXPR:
          new_rhs = fold_build1 (REALPART_EXPR, TREE_TYPE (op1), res);
          break;

        case TRUNC_MOD_EXPR:
          new_rhs = fold_build1 (IMAGPART_EXPR, TREE_TYPE (op1), res);
          break;

        default:
          gcc_unreachable ();
        }

      gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
      gimple_assign_set_rhs_from_tree (&gsi, new_rhs);
      update_stmt (use_stmt);
    }

  return true;
}
/* Process a single gimple assignment STMT, which has a RSHIFT_EXPR as
   its rhs, and try to convert it into a MULT_HIGHPART_EXPR.  The return
   value is true iff we converted the statement.  */
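
/* Illustration only (not from the original sources): on a 32-bit target

     unsigned int
     mulhi (unsigned int a, unsigned int b)
     {
       return ((unsigned long long) a * b) >> 32;
     }

   becomes a WIDEN_MULT_EXPR whose result is only used shifted right by
   32 bits, which this function turns into MULT_HIGHPART_EXPR when the
   umul_highpart optab exists for the narrower mode.  */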
static bool
convert_mult_to_highpart (gassign *stmt, gimple_stmt_iterator *gsi)
{
  tree lhs = gimple_assign_lhs (stmt);
  tree stype = TREE_TYPE (lhs);
  tree sarg0 = gimple_assign_rhs1 (stmt);
  tree sarg1 = gimple_assign_rhs2 (stmt);

  if (TREE_CODE (stype) != INTEGER_TYPE
      || TREE_CODE (sarg1) != INTEGER_CST
      || TREE_CODE (sarg0) != SSA_NAME
      || !tree_fits_uhwi_p (sarg1)
      || !has_single_use (sarg0))
    return false;

  gassign *def = dyn_cast <gassign *> (SSA_NAME_DEF_STMT (sarg0));
  if (!def)
    return false;

  enum tree_code mcode = gimple_assign_rhs_code (def);
  if (mcode == NOP_EXPR)
    {
      tree tmp = gimple_assign_rhs1 (def);
      if (TREE_CODE (tmp) != SSA_NAME || !has_single_use (tmp))
        return false;
      def = dyn_cast <gassign *> (SSA_NAME_DEF_STMT (tmp));
      if (!def)
        return false;
      mcode = gimple_assign_rhs_code (def);
    }

  if (mcode != WIDEN_MULT_EXPR
      || gimple_bb (def) != gimple_bb (stmt))
    return false;

  tree mtype = TREE_TYPE (gimple_assign_lhs (def));
  if (TREE_CODE (mtype) != INTEGER_TYPE
      || TYPE_PRECISION (mtype) != TYPE_PRECISION (stype))
    return false;

  tree mop1 = gimple_assign_rhs1 (def);
  tree mop2 = gimple_assign_rhs2 (def);
  tree optype = TREE_TYPE (mop1);
  bool unsignedp = TYPE_UNSIGNED (optype);
  unsigned int prec = TYPE_PRECISION (optype);

  if (unsignedp != TYPE_UNSIGNED (mtype)
      || TYPE_PRECISION (mtype) != 2 * prec)
    return false;

  unsigned HOST_WIDE_INT bits = tree_to_uhwi (sarg1);
  if (bits < prec || bits >= 2 * prec)
    return false;

  /* For the time being, require operands to have the same sign.  */
  if (unsignedp != TYPE_UNSIGNED (TREE_TYPE (mop2)))
    return false;

  machine_mode mode = TYPE_MODE (optype);
  optab tab = unsignedp ? umul_highpart_optab : smul_highpart_optab;
  if (optab_handler (tab, mode) == CODE_FOR_nothing)
    return false;

  location_t loc = gimple_location (stmt);
  tree highpart1 = build_and_insert_binop (gsi, loc, "highparttmp",
                                           MULT_HIGHPART_EXPR, mop1, mop2);
  tree highpart2 = highpart1;
  tree ntype = optype;

  if (TYPE_UNSIGNED (stype) != TYPE_UNSIGNED (optype))
    {
      ntype = TYPE_UNSIGNED (stype) ? unsigned_type_for (optype)
                                    : signed_type_for (optype);
      highpart2 = build_and_insert_cast (gsi, loc, ntype, highpart1);
    }
  if (bits > prec)
    highpart2 = build_and_insert_binop (gsi, loc, "highparttmp",
                                        RSHIFT_EXPR, highpart2,
                                        build_int_cst (ntype, bits - prec));

  gassign *new_stmt = gimple_build_assign (lhs, NOP_EXPR, highpart2);
  gsi_replace (gsi, new_stmt, true);

  widen_mul_stats.highpart_mults_inserted++;
  return true;
}
/* If target has spaceship<MODE>3 expander, pattern recognize
   <bb 2> [local count: 1073741824]:
   if (a_2(D) == b_3(D))
     goto <bb 6>; [34.00%]
   else
     goto <bb 3>; [66.00%]

   <bb 3> [local count: 708669601]:
   if (a_2(D) < b_3(D))
     goto <bb 6>; [1.04%]
   else
     goto <bb 4>; [98.96%]

   <bb 4> [local count: 701299439]:
   if (a_2(D) > b_3(D))
     goto <bb 5>; [48.89%]
   else
     goto <bb 6>; [51.11%]

   <bb 5> [local count: 342865295]:

   <bb 6> [local count: 1073741824]:
   and turn it into:
   <bb 2> [local count: 1073741824]:
   _1 = .SPACESHIP (a_2(D), b_3(D));
   if (_1 == 0)
     goto <bb 6>; [34.00%]
   else
     goto <bb 3>; [66.00%]

   <bb 3> [local count: 708669601]:
   if (_1 == -1)
     goto <bb 6>; [1.04%]
   else
     goto <bb 4>; [98.96%]

   <bb 4> [local count: 701299439]:
   if (_1 == 1)
     goto <bb 5>; [48.89%]
   else
     goto <bb 6>; [51.11%]

   <bb 5> [local count: 342865295]:

   <bb 6> [local count: 1073741824]:
   so that the backend can emit optimal comparison and
   conditional jump sequence.  */
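
/* Illustration only (not from the original sources): the GIMPLE above
   typically results from C++ code such as

     auto
     cmp (double a, double b)
     {
       return a <=> b;  // std::partial_ordering
     }

   where the front end lowers operator<=> into exactly the ==/</>
   comparison chain that is pattern matched here.  */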
static void
optimize_spaceship (gcond *stmt)
{
  enum tree_code code = gimple_cond_code (stmt);
  if (code != EQ_EXPR && code != NE_EXPR)
    return;
  tree arg1 = gimple_cond_lhs (stmt);
  tree arg2 = gimple_cond_rhs (stmt);
  if (!SCALAR_FLOAT_TYPE_P (TREE_TYPE (arg1))
      || optab_handler (spaceship_optab,
                        TYPE_MODE (TREE_TYPE (arg1))) == CODE_FOR_nothing
      || operand_equal_p (arg1, arg2, 0))
    return;

  basic_block bb0 = gimple_bb (stmt), bb1, bb2 = NULL;
  edge em1 = NULL, e1 = NULL, e2 = NULL;
  bb1 = EDGE_SUCC (bb0, 1)->dest;
  if (((EDGE_SUCC (bb0, 0)->flags & EDGE_TRUE_VALUE) != 0) ^ (code == EQ_EXPR))
    bb1 = EDGE_SUCC (bb0, 0)->dest;

  gcond *g = safe_dyn_cast <gcond *> (*gsi_last_bb (bb1));
  if (g == NULL
      || !single_pred_p (bb1)
      || (operand_equal_p (gimple_cond_lhs (g), arg1, 0)
          ? !operand_equal_p (gimple_cond_rhs (g), arg2, 0)
          : (!operand_equal_p (gimple_cond_lhs (g), arg2, 0)
             || !operand_equal_p (gimple_cond_rhs (g), arg1, 0)))
      || !cond_only_block_p (bb1))
    return;

  enum tree_code ccode = (operand_equal_p (gimple_cond_lhs (g), arg1, 0)
                          ? LT_EXPR : GT_EXPR);
  switch (gimple_cond_code (g))
    {
    case LT_EXPR:
    case LE_EXPR:
      break;
    case GT_EXPR:
    case GE_EXPR:
      ccode = ccode == LT_EXPR ? GT_EXPR : LT_EXPR;
      break;
    default:
      return;
    }

  for (int i = 0; i < 2; ++i)
    {
      /* With NaNs, </<=/>/>= are false, so we need to look for the
         third comparison on the false edge from whatever non-equality
         comparison the second comparison is.  */
      if (HONOR_NANS (TREE_TYPE (arg1))
          && (EDGE_SUCC (bb1, i)->flags & EDGE_TRUE_VALUE) != 0)
        continue;

      bb2 = EDGE_SUCC (bb1, i)->dest;
      g = safe_dyn_cast <gcond *> (*gsi_last_bb (bb2));
      if (g == NULL
          || !single_pred_p (bb2)
          || (operand_equal_p (gimple_cond_lhs (g), arg1, 0)
              ? !operand_equal_p (gimple_cond_rhs (g), arg2, 0)
              : (!operand_equal_p (gimple_cond_lhs (g), arg2, 0)
                 || !operand_equal_p (gimple_cond_rhs (g), arg1, 0)))
          || !cond_only_block_p (bb2)
          || EDGE_SUCC (bb2, 0)->dest == EDGE_SUCC (bb2, 1)->dest)
        continue;

      enum tree_code ccode2
        = (operand_equal_p (gimple_cond_lhs (g), arg1, 0) ? LT_EXPR
                                                          : GT_EXPR);
      switch (gimple_cond_code (g))
        {
        case LT_EXPR:
        case LE_EXPR:
          break;
        case GT_EXPR:
        case GE_EXPR:
          ccode2 = ccode2 == LT_EXPR ? GT_EXPR : LT_EXPR;
          break;
        default:
          continue;
        }
      if (HONOR_NANS (TREE_TYPE (arg1)) && ccode == ccode2)
        continue;

      if ((ccode == LT_EXPR)
          ^ ((EDGE_SUCC (bb1, i)->flags & EDGE_TRUE_VALUE) != 0))
        {
          em1 = EDGE_SUCC (bb1, 1 - i);
          e1 = EDGE_SUCC (bb2, 0);
          e2 = EDGE_SUCC (bb2, 1);
          if ((ccode2 == LT_EXPR) ^ ((e1->flags & EDGE_TRUE_VALUE) == 0))
            std::swap (e1, e2);
        }
      else
        {
          e1 = EDGE_SUCC (bb1, 1 - i);
          em1 = EDGE_SUCC (bb2, 0);
          e2 = EDGE_SUCC (bb2, 1);
          if ((ccode2 != LT_EXPR) ^ ((em1->flags & EDGE_TRUE_VALUE) == 0))
            std::swap (em1, e2);
        }
      break;
    }

  if (em1 == NULL)
    {
      if ((ccode == LT_EXPR)
          ^ ((EDGE_SUCC (bb1, 0)->flags & EDGE_TRUE_VALUE) != 0))
        {
          em1 = EDGE_SUCC (bb1, 1);
          e1 = EDGE_SUCC (bb1, 0);
          e2 = (e1->flags & EDGE_TRUE_VALUE) ? em1 : e1;
        }
      else
        {
          em1 = EDGE_SUCC (bb1, 0);
          e1 = EDGE_SUCC (bb1, 1);
          e2 = (e1->flags & EDGE_TRUE_VALUE) ? em1 : e1;
        }
    }

  gcall *gc = gimple_build_call_internal (IFN_SPACESHIP, 2, arg1, arg2);
  tree lhs = make_ssa_name (integer_type_node);
  gimple_call_set_lhs (gc, lhs);
  gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
  gsi_insert_before (&gsi, gc, GSI_SAME_STMT);

  gimple_cond_set_lhs (stmt, lhs);
  gimple_cond_set_rhs (stmt, integer_zero_node);
  update_stmt (stmt);

  gcond *cond = as_a <gcond *> (*gsi_last_bb (bb1));
  gimple_cond_set_lhs (cond, lhs);
  if (em1->src == bb1 && e2 != em1)
    {
      gimple_cond_set_rhs (cond, integer_minus_one_node);
      gimple_cond_set_code (cond, (em1->flags & EDGE_TRUE_VALUE)
                                  ? EQ_EXPR : NE_EXPR);
    }
  else
    {
      gcc_assert (e1->src == bb1 && e2 != e1);
      gimple_cond_set_rhs (cond, integer_one_node);
      gimple_cond_set_code (cond, (e1->flags & EDGE_TRUE_VALUE)
                                  ? EQ_EXPR : NE_EXPR);
    }
  update_stmt (cond);

  if (e2 != e1 && e2 != em1)
    {
      cond = as_a <gcond *> (*gsi_last_bb (bb2));
      gimple_cond_set_lhs (cond, lhs);
      if (em1->src == bb2)
        gimple_cond_set_rhs (cond, integer_minus_one_node);
      else
        {
          gcc_assert (e1->src == bb2);
          gimple_cond_set_rhs (cond, integer_one_node);
        }
      gimple_cond_set_code (cond,
                            (e2->flags & EDGE_TRUE_VALUE) ? NE_EXPR
                                                          : EQ_EXPR);
      update_stmt (cond);
    }

  wide_int wm1 = wi::minus_one (TYPE_PRECISION (integer_type_node));
  wide_int w2 = wi::two (TYPE_PRECISION (integer_type_node));
  value_range vr (TREE_TYPE (lhs), wm1, w2);
  set_range_info (lhs, vr);
}
/* Find integer multiplications where the operands are extended from
   smaller types, and replace the MULT_EXPR with a WIDEN_MULT_EXPR
   or MULT_HIGHPART_EXPR where appropriate.  */
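
/* Illustration only (not from the original sources):

     long long
     widen (int a, int b)
     {
       return (long long) a * b;
     }

   is the canonical WIDEN_MULT_EXPR opportunity: both multiplication
   operands are extended from a type of half the result precision.  */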
namespace {

const pass_data pass_data_optimize_widening_mul =
{
  GIMPLE_PASS, /* type */
  "widening_mul", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_WIDEN_MUL, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};

class pass_optimize_widening_mul : public gimple_opt_pass
{
public:
  pass_optimize_widening_mul (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_optimize_widening_mul, ctxt)
  {}

  /* opt_pass methods: */
  bool gate (function *) final override
    {
      return flag_expensive_optimizations && optimize;
    }

  unsigned int execute (function *) final override;

}; // class pass_optimize_widening_mul
/* Walker class to perform the transformation in reverse dominance order.  */

class math_opts_dom_walker : public dom_walker
{
public:
  /* Constructor, CFG_CHANGED is a pointer to a boolean flag that will be set
     if walking modifies the CFG.  */
  math_opts_dom_walker (bool *cfg_changed_p)
    : dom_walker (CDI_DOMINATORS), m_last_result_set (),
      m_cfg_changed_p (cfg_changed_p) {}

  /* The actual actions performed in the walk.  */
  void after_dom_children (basic_block) final override;

  /* Set of results of chains of multiply and add statement combinations that
     were not transformed into FMAs because of active deferring.  */
  hash_set<tree> m_last_result_set;

  /* Pointer to a flag of the user that needs to be set if CFG has been
     modified.  */
  bool *m_cfg_changed_p;
};
void
math_opts_dom_walker::after_dom_children (basic_block bb)
{
  gimple_stmt_iterator gsi;

  fma_deferring_state fma_state (param_avoid_fma_max_bits > 0);

  for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi);)
    {
      gimple *stmt = gsi_stmt (gsi);
      enum tree_code code;

      if (is_gimple_assign (stmt))
        {
          code = gimple_assign_rhs_code (stmt);
          switch (code)
            {
            case MULT_EXPR:
              if (!convert_mult_to_widen (stmt, &gsi)
                  && !convert_expand_mult_copysign (stmt, &gsi)
                  && convert_mult_to_fma (stmt,
                                          gimple_assign_rhs1 (stmt),
                                          gimple_assign_rhs2 (stmt),
                                          &fma_state))
                {
                  gsi_remove (&gsi, true);
                  release_defs (stmt);
                  continue;
                }
              match_arith_overflow (&gsi, stmt, code, m_cfg_changed_p);
              break;

            case PLUS_EXPR:
            case MINUS_EXPR:
              if (!convert_plusminus_to_widen (&gsi, stmt, code))
                {
                  match_arith_overflow (&gsi, stmt, code, m_cfg_changed_p);
                  if (gsi_stmt (gsi) == stmt)
                    match_uaddc_usubc (&gsi, stmt, code);
                }
              break;

            case BIT_NOT_EXPR:
              if (match_arith_overflow (&gsi, stmt, code, m_cfg_changed_p))
                continue;
              break;

            case TRUNC_MOD_EXPR:
              convert_to_divmod (as_a <gassign *> (stmt));
              break;

            case RSHIFT_EXPR:
              convert_mult_to_highpart (as_a <gassign *> (stmt), &gsi);
              break;

            case BIT_IOR_EXPR:
            case BIT_XOR_EXPR:
              match_uaddc_usubc (&gsi, stmt, code);
              break;

            case EQ_EXPR:
            case NE_EXPR:
              match_single_bit_test (&gsi, stmt);
              break;

            default:;
            }
        }
      else if (is_gimple_call (stmt))
        {
          switch (gimple_call_combined_fn (stmt))
            {
            CASE_CFN_POW:
              if (gimple_call_lhs (stmt)
                  && TREE_CODE (gimple_call_arg (stmt, 1)) == REAL_CST
                  && real_equal (&TREE_REAL_CST (gimple_call_arg (stmt, 1)),
                                 &dconst2)
                  && convert_mult_to_fma (stmt,
                                          gimple_call_arg (stmt, 0),
                                          gimple_call_arg (stmt, 0),
                                          &fma_state))
                {
                  unlink_stmt_vdef (stmt);
                  if (gsi_remove (&gsi, true)
                      && gimple_purge_dead_eh_edges (bb))
                    *m_cfg_changed_p = true;
                  release_defs (stmt);
                  continue;
                }
              break;

            case CFN_COND_MUL:
              if (convert_mult_to_fma (stmt,
                                       gimple_call_arg (stmt, 1),
                                       gimple_call_arg (stmt, 2),
                                       &fma_state,
                                       gimple_call_arg (stmt, 0)))
                {
                  gsi_remove (&gsi, true);
                  release_defs (stmt);
                  continue;
                }
              break;

            case CFN_COND_LEN_MUL:
              if (convert_mult_to_fma (stmt,
                                       gimple_call_arg (stmt, 1),
                                       gimple_call_arg (stmt, 2),
                                       &fma_state,
                                       gimple_call_arg (stmt, 0),
                                       gimple_call_arg (stmt, 4),
                                       gimple_call_arg (stmt, 5)))
                {
                  gsi_remove (&gsi, true);
                  release_defs (stmt);
                  continue;
                }
              break;

            case CFN_LAST:
              cancel_fma_deferring (&fma_state);
              break;

            default:
              break;
            }
        }
      else if (gimple_code (stmt) == GIMPLE_COND)
        {
          match_single_bit_test (&gsi, stmt);
          optimize_spaceship (as_a <gcond *> (stmt));
        }
      gsi_next (&gsi);
    }
  if (fma_state.m_deferring_p
      && fma_state.m_initial_phi)
    {
      gcc_checking_assert (fma_state.m_last_result);
      if (!last_fma_candidate_feeds_initial_phi (&fma_state,
                                                 &m_last_result_set))
        cancel_fma_deferring (&fma_state);
      else
        m_last_result_set.add (fma_state.m_last_result);
    }
}
unsigned int
pass_optimize_widening_mul::execute (function *fun)
{
  bool cfg_changed = false;

  memset (&widen_mul_stats, 0, sizeof (widen_mul_stats));
  calculate_dominance_info (CDI_DOMINATORS);
  renumber_gimple_stmt_uids (cfun);

  math_opts_dom_walker (&cfg_changed).walk (ENTRY_BLOCK_PTR_FOR_FN (cfun));

  statistics_counter_event (fun, "widening multiplications inserted",
                            widen_mul_stats.widen_mults_inserted);
  statistics_counter_event (fun, "widening maccs inserted",
                            widen_mul_stats.maccs_inserted);
  statistics_counter_event (fun, "fused multiply-adds inserted",
                            widen_mul_stats.fmas_inserted);
  statistics_counter_event (fun, "divmod calls inserted",
                            widen_mul_stats.divmod_calls_inserted);
  statistics_counter_event (fun, "highpart multiplications inserted",
                            widen_mul_stats.highpart_mults_inserted);

  return cfg_changed ? TODO_cleanup_cfg : 0;
}

} // anon namespace

gimple_opt_pass *
make_pass_optimize_widening_mul (gcc::context *ctxt)
{
  return new pass_optimize_widening_mul (ctxt);
}