/* Global, SSA-based optimizations using mathematical identities.
   Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* Currently, the only mini-pass in this file tries to CSE reciprocal
   operations.  These are common in sequences such as this one:

	modulus = sqrt(x*x + y*y + z*z);
	x = x / modulus;
	y = y / modulus;
	z = z / modulus;

   that can be optimized to

	modulus = sqrt(x*x + y*y + z*z);
	rmodulus = 1.0 / modulus;
	x = x * rmodulus;
	y = y * rmodulus;
	z = z * rmodulus;

   We do this for loop invariant divisors, and with this pass whenever
   we notice that a division has the same divisor multiple times.

   Of course, like in PRE, we don't insert a division if a dominator
   already has one.  However, this cannot be done as an extension of
   PRE for several reasons.

   First of all, with some experiments it was found out that the
   transformation is not always useful if there are only two divisions
   by the same divisor.  This is probably because modern processors
   can pipeline the divisions; on older, in-order processors it should
   still be effective to optimize two divisions by the same number.
   We make this a param, and it shall be called N in the remainder of
   this comment.

   Second, if trapping math is active, we have less freedom on where
   to insert divisions: we can only do so in basic blocks that already
   contain one.  (If divisions don't trap, instead, we can insert
   divisions elsewhere, which will be in blocks that are common dominators
   of those that have the division).

   We really don't want to compute the reciprocal unless a division will
   be found.  To do this, we won't insert the division in a basic block
   that has less than N divisions *post-dominating* it.

   The algorithm constructs a subset of the dominator tree, holding the
   blocks containing the divisions and the common dominators to them,
   and walks it twice.  The first walk is in post-order, and it annotates
   each block with the number of divisions that post-dominate it: this
   gives information on where divisions can be inserted profitably.
   The second walk is in pre-order, and it inserts divisions as explained
   above, and replaces divisions by multiplications.

   In the best case, the cost of the pass is O(n_statements).  In the
   worst-case, the cost is due to creating the dominator tree subset,
   with a cost of O(n_basic_blocks ^ 2); however this can only happen
   for n_statements / n_basic_blocks statements.  So, the amortized cost
   of creating the dominator tree subset is O(n_basic_blocks) and the
   worst-case cost of the pass is O(n_statements * n_basic_blocks).

   More practically, the cost will be small because there are few
   divisions, and they tend to be in the same basic block, so insert_bb
   is called very few times.

   If we did this using domwalk.c, an efficient implementation would have
   to work on all the variables in a single pass, because we could not
   work on just a subset of the dominator tree, as we do now, and the
   cost would also be something like O(n_statements * n_basic_blocks).
   The data structures would be more complex in order to work on all the
   variables in a single pass.  */
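
/* As an illustrative sketch (not part of the original sources): with
   N = 2 and non-trapping math, a straight-line sequence

	BB1:  x = a / b;
	BB2:  y = c / b;	(BB2 post-dominates BB1)

   annotates BB1 with two post-dominating divisions, so a single
   reciprocal t = 1.0 / b is emitted in BB1 and both statements become
   multiplications.  If the two divisions instead sit in the THEN and
   ELSE arms of a conditional, neither arm post-dominates the common
   dominator; with trapping math the reciprocal may then only be placed
   in the arms themselves, where no block reaches the threshold, and
   the code is left alone.  */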
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "flags.h"
#include "tree.h"
#include "tree-flow.h"
#include "tree-pass.h"
#include "alloc-pool.h"
#include "basic-block.h"
#include "target.h"
#include "gimple-pretty-print.h"

/* FIXME: RTL headers have to be included here for optabs.  */
#include "rtl.h"		/* Because optabs.h wants enum rtx_code.  */
#include "expr.h"		/* Because optabs.h wants sepops.  */
#include "optabs.h"
/* This structure represents one basic block that either computes a
   division, or is a common dominator for basic blocks that compute a
   division.  */
struct occurrence {
  /* The basic block represented by this structure.  */
  basic_block bb;

  /* If non-NULL, the SSA_NAME holding the definition for a reciprocal
     inserted in BB.  */
  tree recip_def;

  /* If non-NULL, the GIMPLE_ASSIGN for a reciprocal computation that
     was inserted in BB.  */
  gimple recip_def_stmt;

  /* Pointer to a list of "struct occurrence"s for blocks dominated
     by BB.  */
  struct occurrence *children;

  /* Pointer to the next "struct occurrence"s in the list of blocks
     sharing a common dominator.  */
  struct occurrence *next;

  /* The number of divisions that are in BB before compute_merit.  The
     number of divisions that are in BB or post-dominate it after
     compute_merit.  */
  int num_divisions;

  /* True if the basic block has a division, false if it is a common
     dominator for basic blocks that do.  If it is false and trapping
     math is active, BB is not a candidate for inserting a reciprocal.  */
  bool bb_has_division;
};
static struct
{
  /* Number of 1.0/X ops inserted.  */
  int rdivs_inserted;

  /* Number of 1.0/FUNC ops inserted.  */
  int rfuncs_inserted;
} reciprocal_stats;

static struct
{
  /* Number of cexpi calls inserted.  */
  int inserted;
} sincos_stats;

static struct
{
  /* Number of hand-written 16-bit bswaps found.  */
  int found_16bit;

  /* Number of hand-written 32-bit bswaps found.  */
  int found_32bit;

  /* Number of hand-written 64-bit bswaps found.  */
  int found_64bit;
} bswap_stats;

static struct
{
  /* Number of widening multiplication ops inserted.  */
  int widen_mults_inserted;

  /* Number of integer multiply-and-accumulate ops inserted.  */
  int maccs_inserted;

  /* Number of fp fused multiply-add ops inserted.  */
  int fmas_inserted;
} widen_mul_stats;
/* The instance of "struct occurrence" representing the highest
   interesting block in the dominator tree.  */
static struct occurrence *occ_head;

/* Allocation pool for getting instances of "struct occurrence".  */
static alloc_pool occ_pool;
/* Allocate and return a new struct occurrence for basic block BB, and
   whose children list is headed by CHILDREN.  */
static struct occurrence *
occ_new (basic_block bb, struct occurrence *children)
{
  struct occurrence *occ;

  bb->aux = occ = (struct occurrence *) pool_alloc (occ_pool);
  memset (occ, 0, sizeof (struct occurrence));

  occ->bb = bb;
  occ->children = children;
  return occ;
}
/* Insert NEW_OCC into our subset of the dominator tree.  P_HEAD points to a
   list of "struct occurrence"s, one per basic block, having IDOM as
   their common dominator.

   We try to insert NEW_OCC as deep as possible in the tree, and we also
   insert any other block that is a common dominator for BB and one
   block already in the tree.  */

static void
insert_bb (struct occurrence *new_occ, basic_block idom,
	   struct occurrence **p_head)
{
  struct occurrence *occ, **p_occ;

  for (p_occ = p_head; (occ = *p_occ) != NULL; )
    {
      basic_block bb = new_occ->bb, occ_bb = occ->bb;
      basic_block dom = nearest_common_dominator (CDI_DOMINATORS, occ_bb, bb);
      if (dom == bb)
	{
	  /* BB dominates OCC_BB.  OCC becomes NEW_OCC's child: remove OCC
	     from its list.  */
	  *p_occ = occ->next;
	  occ->next = new_occ->children;
	  new_occ->children = occ;

	  /* Try the next block (it may as well be dominated by BB).  */
	}

      else if (dom == occ_bb)
	{
	  /* OCC_BB dominates BB.  Tail recurse to look deeper.  */
	  insert_bb (new_occ, dom, &occ->children);
	  return;
	}

      else if (dom != idom)
	{
	  gcc_assert (!dom->aux);

	  /* There is a dominator between IDOM and BB, add it and make
	     two children out of NEW_OCC and OCC.  First, remove OCC from
	     its list.  */
	  *p_occ = occ->next;
	  new_occ->next = occ;
	  occ->next = NULL;

	  /* None of the previous blocks has DOM as a dominator: if we tail
	     recursed, we would reexamine them uselessly. Just switch BB with
	     DOM, and go on looking for blocks dominated by DOM.  */
	  new_occ = occ_new (dom, new_occ);
	}

      else
	{
	  /* Nothing special, go on with the next element.  */
	  p_occ = &occ->next;
	}
    }

  /* No place was found as a child of IDOM.  Make BB a sibling of IDOM.  */
  new_occ->next = *p_head;
  *p_head = new_occ;
}
/* Register that we found a division in BB.  */

static inline void
register_division_in (basic_block bb)
{
  struct occurrence *occ;

  occ = (struct occurrence *) bb->aux;
  if (!occ)
    {
      occ = occ_new (bb, NULL);
      insert_bb (occ, ENTRY_BLOCK_PTR, &occ_head);
    }

  occ->bb_has_division = true;
  occ->num_divisions++;
}
/* Compute the number of divisions that postdominate each block in OCC and
   its children.  */

static void
compute_merit (struct occurrence *occ)
{
  struct occurrence *occ_child;
  basic_block dom = occ->bb;

  for (occ_child = occ->children; occ_child; occ_child = occ_child->next)
    {
      basic_block bb;
      if (occ_child->children)
	compute_merit (occ_child);

      if (flag_exceptions)
	/* Don't clobber the EH landing pad!  */
	bb = single_noncomplex_succ (dom);
      else
	bb = dom;

      if (dominated_by_p (CDI_POST_DOMINATORS, bb, occ_child->bb))
	occ->num_divisions += occ_child->num_divisions;
    }
}
/* Return whether USE_STMT is a floating-point division by DEF.  */
static inline bool
is_division_by (gimple use_stmt, tree def)
{
  return is_gimple_assign (use_stmt)
	 && gimple_assign_rhs_code (use_stmt) == RDIV_EXPR
	 && gimple_assign_rhs2 (use_stmt) == def
	 /* Do not recognize x / x as valid division, as we are getting
	    confused later by replacing all immediate uses of x in such
	    a stmt.  */
	 && gimple_assign_rhs1 (use_stmt) != def;
}
/* Walk the subset of the dominator tree rooted at OCC, setting the
   RECIP_DEF field to a definition of 1.0 / DEF that can be used in
   the given basic block.  The field may be left NULL, of course,
   if it is not possible or profitable to do the optimization.

   DEF_BSI is an iterator pointing at the statement defining DEF.
   If RECIP_DEF is set, a dominator already has a computation that can
   be used.  */

static void
insert_reciprocals (gimple_stmt_iterator *def_gsi, struct occurrence *occ,
		    tree def, tree recip_def, int threshold)
{
  tree type;
  gimple new_stmt;
  gimple_stmt_iterator gsi;
  struct occurrence *occ_child;

  if (!recip_def
      && (occ->bb_has_division || !flag_trapping_math)
      && occ->num_divisions >= threshold)
    {
      /* Make a variable with the replacement and substitute it.  */
      type = TREE_TYPE (def);
      recip_def = create_tmp_reg (type, "reciptmp");
      new_stmt = gimple_build_assign_with_ops (RDIV_EXPR, recip_def,
					       build_one_cst (type), def);

      if (occ->bb_has_division)
	{
	  /* Case 1: insert before an existing division.  */
	  gsi = gsi_after_labels (occ->bb);
	  while (!gsi_end_p (gsi) && !is_division_by (gsi_stmt (gsi), def))
	    gsi_next (&gsi);

	  gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
	}
      else if (def_gsi && occ->bb == def_gsi->bb)
	{
	  /* Case 2: insert right after the definition.  Note that this will
	     never happen if the definition statement can throw, because in
	     that case the sole successor of the statement's basic block will
	     dominate all the uses as well.  */
	  gsi_insert_after (def_gsi, new_stmt, GSI_NEW_STMT);
	}
      else
	{
	  /* Case 3: insert in a basic block not containing defs/uses.  */
	  gsi = gsi_after_labels (occ->bb);
	  gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
	}

      reciprocal_stats.rdivs_inserted++;

      occ->recip_def_stmt = new_stmt;
    }

  occ->recip_def = recip_def;
  for (occ_child = occ->children; occ_child; occ_child = occ_child->next)
    insert_reciprocals (def_gsi, occ_child, def, recip_def, threshold);
}
/* Replace the division at USE_P with a multiplication by the reciprocal, if
   possible.  */

static inline void
replace_reciprocal (use_operand_p use_p)
{
  gimple use_stmt = USE_STMT (use_p);
  basic_block bb = gimple_bb (use_stmt);
  struct occurrence *occ = (struct occurrence *) bb->aux;

  if (optimize_bb_for_speed_p (bb)
      && occ->recip_def && use_stmt != occ->recip_def_stmt)
    {
      gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
      gimple_assign_set_rhs_code (use_stmt, MULT_EXPR);
      SET_USE (use_p, occ->recip_def);
      fold_stmt_inplace (&gsi);
      update_stmt (use_stmt);
    }
}
/* Free OCC and return one more "struct occurrence" to be freed.  */

static struct occurrence *
free_bb (struct occurrence *occ)
{
  struct occurrence *child, *next;

  /* First get the two pointers hanging off OCC.  */
  next = occ->next;
  child = occ->children;
  occ->bb->aux = NULL;
  pool_free (occ_pool, occ);

  /* Now ensure that we don't recurse unless it is necessary.  */
  if (!child)
    return next;
  else
    {
      while (next)
	next = free_bb (next);

      return child;
    }
}
/* Look for floating-point divisions among DEF's uses, and try to
   replace them by multiplications with the reciprocal.  Add
   as many statements computing the reciprocal as needed.

   DEF must be a GIMPLE register of a floating-point type.  */

static void
execute_cse_reciprocals_1 (gimple_stmt_iterator *def_gsi, tree def)
{
  use_operand_p use_p;
  imm_use_iterator use_iter;
  struct occurrence *occ;
  int count = 0, threshold;

  gcc_assert (FLOAT_TYPE_P (TREE_TYPE (def)) && is_gimple_reg (def));

  FOR_EACH_IMM_USE_FAST (use_p, use_iter, def)
    {
      gimple use_stmt = USE_STMT (use_p);
      if (is_division_by (use_stmt, def))
	{
	  register_division_in (gimple_bb (use_stmt));
	  count++;
	}
    }

  /* Do the expensive part only if we can hope to optimize something.  */
  threshold = targetm.min_divisions_for_recip_mul (TYPE_MODE (TREE_TYPE (def)));
  if (count >= threshold)
    {
      gimple use_stmt;
      for (occ = occ_head; occ; occ = occ->next)
	{
	  compute_merit (occ);
	  insert_reciprocals (def_gsi, occ, def, NULL, threshold);
	}

      FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, def)
	{
	  if (is_division_by (use_stmt, def))
	    {
	      FOR_EACH_IMM_USE_ON_STMT (use_p, use_iter)
		replace_reciprocal (use_p);
	    }
	}
    }

  for (occ = occ_head; occ; )
    occ = free_bb (occ);

  occ_head = NULL;
}
static bool
gate_cse_reciprocals (void)
{
  return optimize && flag_reciprocal_math;
}
/* Go through all the floating-point SSA_NAMEs, and call
   execute_cse_reciprocals_1 on each of them.  */
static unsigned int
execute_cse_reciprocals (void)
{
  basic_block bb;
  tree arg;

  occ_pool = create_alloc_pool ("dominators for recip",
				sizeof (struct occurrence),
				n_basic_blocks / 3 + 1);

  memset (&reciprocal_stats, 0, sizeof (reciprocal_stats));
  calculate_dominance_info (CDI_DOMINATORS);
  calculate_dominance_info (CDI_POST_DOMINATORS);

#ifdef ENABLE_CHECKING
  FOR_EACH_BB (bb)
    gcc_assert (!bb->aux);
#endif

  for (arg = DECL_ARGUMENTS (cfun->decl); arg; arg = DECL_CHAIN (arg))
    if (FLOAT_TYPE_P (TREE_TYPE (arg))
	&& is_gimple_reg (arg))
      {
	tree name = ssa_default_def (cfun, arg);
	if (name)
	  execute_cse_reciprocals_1 (NULL, name);
      }

  FOR_EACH_BB (bb)
    {
      gimple_stmt_iterator gsi;
      gimple phi;
      tree def;

      for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  phi = gsi_stmt (gsi);
	  def = PHI_RESULT (phi);
	  if (! virtual_operand_p (def)
	      && FLOAT_TYPE_P (TREE_TYPE (def)))
	    execute_cse_reciprocals_1 (NULL, def);
	}

      for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple stmt = gsi_stmt (gsi);

	  if (gimple_has_lhs (stmt)
	      && (def = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_DEF)) != NULL
	      && FLOAT_TYPE_P (TREE_TYPE (def))
	      && TREE_CODE (def) == SSA_NAME)
	    execute_cse_reciprocals_1 (&gsi, def);
	}

      if (optimize_bb_for_size_p (bb))
	continue;

      /* Scan for a/func(b) and convert it to reciprocal a*rfunc(b).  */
      for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple stmt = gsi_stmt (gsi);
	  tree fndecl;

	  if (is_gimple_assign (stmt)
	      && gimple_assign_rhs_code (stmt) == RDIV_EXPR)
	    {
	      tree arg1 = gimple_assign_rhs2 (stmt);
	      gimple stmt1;

	      if (TREE_CODE (arg1) != SSA_NAME)
		continue;

	      stmt1 = SSA_NAME_DEF_STMT (arg1);

	      if (is_gimple_call (stmt1)
		  && gimple_call_lhs (stmt1)
		  && (fndecl = gimple_call_fndecl (stmt1))
		  && (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
		      || DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD))
		{
		  enum built_in_function code;
		  bool md_code, fail;
		  imm_use_iterator ui;
		  use_operand_p use_p;

		  code = DECL_FUNCTION_CODE (fndecl);
		  md_code = DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD;

		  fndecl = targetm.builtin_reciprocal (code, md_code, false);
		  if (!fndecl)
		    continue;

		  /* Check that all uses of the SSA name are divisions,
		     otherwise replacing the defining statement will do
		     the wrong thing.  */
		  fail = false;
		  FOR_EACH_IMM_USE_FAST (use_p, ui, arg1)
		    {
		      gimple stmt2 = USE_STMT (use_p);
		      if (is_gimple_debug (stmt2))
			continue;
		      if (!is_gimple_assign (stmt2)
			  || gimple_assign_rhs_code (stmt2) != RDIV_EXPR
			  || gimple_assign_rhs1 (stmt2) == arg1
			  || gimple_assign_rhs2 (stmt2) != arg1)
			{
			  fail = true;
			  break;
			}
		    }
		  if (fail)
		    continue;

		  gimple_replace_lhs (stmt1, arg1);
		  gimple_call_set_fndecl (stmt1, fndecl);
		  update_stmt (stmt1);
		  reciprocal_stats.rfuncs_inserted++;

		  FOR_EACH_IMM_USE_STMT (stmt, ui, arg1)
		    {
		      gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
		      gimple_assign_set_rhs_code (stmt, MULT_EXPR);
		      fold_stmt_inplace (&gsi);
		      update_stmt (stmt);
		    }
		}
	    }
	}
    }

  statistics_counter_event (cfun, "reciprocal divs inserted",
			    reciprocal_stats.rdivs_inserted);
  statistics_counter_event (cfun, "reciprocal functions inserted",
			    reciprocal_stats.rfuncs_inserted);

  free_dominance_info (CDI_DOMINATORS);
  free_dominance_info (CDI_POST_DOMINATORS);
  free_alloc_pool (occ_pool);
  return 0;
}
struct gimple_opt_pass pass_cse_reciprocals =
{
 {
  GIMPLE_PASS,
  "recip",				/* name */
  gate_cse_reciprocals,			/* gate */
  execute_cse_reciprocals,		/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_NONE,				/* tv_id */
  PROP_ssa,				/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  TODO_update_ssa | TODO_verify_ssa
    | TODO_verify_stmts			/* todo_flags_finish */
 }
};
/* Records an occurrence at statement USE_STMT in the vector of trees
   STMTS if it is dominated by *TOP_BB or dominates it or this basic block
   is not yet initialized.  Returns true if the occurrence was pushed on
   the vector.  Adjusts *TOP_BB to be the basic block dominating all
   statements in the vector.  */

static bool
maybe_record_sincos (VEC(gimple, heap) **stmts,
		     basic_block *top_bb, gimple use_stmt)
{
  basic_block use_bb = gimple_bb (use_stmt);
  if (*top_bb
      && (*top_bb == use_bb
	  || dominated_by_p (CDI_DOMINATORS, use_bb, *top_bb)))
    VEC_safe_push (gimple, heap, *stmts, use_stmt);
  else if (!*top_bb
	   || dominated_by_p (CDI_DOMINATORS, *top_bb, use_bb))
    {
      VEC_safe_push (gimple, heap, *stmts, use_stmt);
      *top_bb = use_bb;
    }
  else
    return false;

  return true;
}
/* Look for sin, cos and cexpi calls with the same argument NAME and
   create a single call to cexpi CSEing the result in this case.
   We first walk over all immediate uses of the argument collecting
   statements that we can CSE in a vector and in a second pass replace
   the statement rhs with a REALPART or IMAGPART expression on the
   result of the cexpi call we insert before the use statement that
   dominates all other candidates.  */

static bool
execute_cse_sincos_1 (tree name)
{
  gimple_stmt_iterator gsi;
  imm_use_iterator use_iter;
  tree fndecl, res, type;
  gimple def_stmt, use_stmt, stmt;
  int seen_cos = 0, seen_sin = 0, seen_cexpi = 0;
  VEC(gimple, heap) *stmts = NULL;
  basic_block top_bb = NULL;
  int i;
  bool cfg_changed = false;

  type = TREE_TYPE (name);
  FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, name)
    {
      if (gimple_code (use_stmt) != GIMPLE_CALL
	  || !gimple_call_lhs (use_stmt)
	  || !(fndecl = gimple_call_fndecl (use_stmt))
	  || DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_NORMAL)
	continue;

      switch (DECL_FUNCTION_CODE (fndecl))
	{
	CASE_FLT_FN (BUILT_IN_COS):
	  seen_cos |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
	  break;

	CASE_FLT_FN (BUILT_IN_SIN):
	  seen_sin |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
	  break;

	CASE_FLT_FN (BUILT_IN_CEXPI):
	  seen_cexpi |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
	  break;

	default:;
	}
    }

  if (seen_cos + seen_sin + seen_cexpi <= 1)
    {
      VEC_free(gimple, heap, stmts);
      return false;
    }

  /* Simply insert cexpi at the beginning of top_bb but not earlier than
     the name def statement.  */
  fndecl = mathfn_built_in (type, BUILT_IN_CEXPI);
  if (!fndecl)
    return false;
  stmt = gimple_build_call (fndecl, 1, name);
  res = make_temp_ssa_name (TREE_TYPE (TREE_TYPE (fndecl)), stmt, "sincostmp");
  gimple_call_set_lhs (stmt, res);

  def_stmt = SSA_NAME_DEF_STMT (name);
  if (!SSA_NAME_IS_DEFAULT_DEF (name)
      && gimple_code (def_stmt) != GIMPLE_PHI
      && gimple_bb (def_stmt) == top_bb)
    {
      gsi = gsi_for_stmt (def_stmt);
      gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
    }
  else
    {
      gsi = gsi_after_labels (top_bb);
      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
    }
  sincos_stats.inserted++;

  /* And adjust the recorded old call sites.  */
  for (i = 0; VEC_iterate(gimple, stmts, i, use_stmt); ++i)
    {
      tree rhs = NULL;
      fndecl = gimple_call_fndecl (use_stmt);

      switch (DECL_FUNCTION_CODE (fndecl))
	{
	CASE_FLT_FN (BUILT_IN_COS):
	  rhs = fold_build1 (REALPART_EXPR, type, res);
	  break;

	CASE_FLT_FN (BUILT_IN_SIN):
	  rhs = fold_build1 (IMAGPART_EXPR, type, res);
	  break;

	CASE_FLT_FN (BUILT_IN_CEXPI):
	  rhs = res;
	  break;

	default:
	  gcc_unreachable ();
	}

	/* Replace call with a copy.  */
	stmt = gimple_build_assign (gimple_call_lhs (use_stmt), rhs);

	gsi = gsi_for_stmt (use_stmt);
	gsi_replace (&gsi, stmt, true);
	if (gimple_purge_dead_eh_edges (gimple_bb (stmt)))
	  cfg_changed = true;
    }

  VEC_free(gimple, heap, stmts);

  return cfg_changed;
}
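
/* As an illustrative sketch (not from a testcase): for

     s = sinf (x);
     c = cosf (x);

   the two recorded calls are replaced by

     sincostmp = cexpif (x);
     s = IMAGPART_EXPR <sincostmp>;
     c = REALPART_EXPR <sincostmp>;

   so a single cexpi (or sincos) call feeds both results.  */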
/* To evaluate powi(x,n), the floating point value x raised to the
   constant integer exponent n, we use a hybrid algorithm that
   combines the "window method" with look-up tables.  For an
   introduction to exponentiation algorithms and "addition chains",
   see section 4.6.3, "Evaluation of Powers" of Donald E. Knuth,
   "Seminumerical Algorithms", Vol. 2, "The Art of Computer Programming",
   3rd Edition, 1998, and Daniel M. Gordon, "A Survey of Fast Exponentiation
   Methods", Journal of Algorithms, Vol. 27, pp. 129-146, 1998.  */

/* Provide a default value for POWI_MAX_MULTS, the maximum number of
   multiplications to inline before calling the system library's pow
   function.  powi(x,n) requires at worst 2*bits(n)-2 multiplications,
   so this default never requires calling pow, powf or powl.  */
#ifndef POWI_MAX_MULTS
#define POWI_MAX_MULTS  (2*HOST_BITS_PER_WIDE_INT-2)
#endif
/* The size of the "optimal power tree" lookup table.  All
   exponents less than this value are simply looked up in the
   powi_table below.  This threshold is also used to size the
   cache of pseudo registers that hold intermediate results.  */
#define POWI_TABLE_SIZE 256

/* The size, in bits, of the window used in the "window method"
   exponentiation algorithm.  This is equivalent to a radix of
   (1<<POWI_WINDOW_SIZE) in the corresponding "m-ary method".  */
#define POWI_WINDOW_SIZE 3
/* The following table is an efficient representation of an
   "optimal power tree".  For each value, i, the corresponding
   value, j, in the table states that an optimal evaluation
   sequence for calculating pow(x,i) can be found by evaluating
   pow(x,j)*pow(x,i-j).  An optimal power tree for the first
   100 integers is given in Knuth's "Seminumerical algorithms".  */

static const unsigned char powi_table[POWI_TABLE_SIZE] =
  {
      0,   1,   1,   2,   2,   3,   3,   4,  /*   0 -   7 */
      4,   6,   5,   6,   6,  10,   7,   9,  /*   8 -  15 */
      8,  16,   9,  16,  10,  12,  11,  13,  /*  16 -  23 */
     12,  17,  13,  18,  14,  24,  15,  26,  /*  24 -  31 */
     16,  17,  17,  19,  18,  33,  19,  26,  /*  32 -  39 */
     20,  25,  21,  40,  22,  27,  23,  44,  /*  40 -  47 */
     24,  32,  25,  34,  26,  29,  27,  44,  /*  48 -  55 */
     28,  31,  29,  34,  30,  60,  31,  36,  /*  56 -  63 */
     32,  64,  33,  34,  34,  46,  35,  37,  /*  64 -  71 */
     36,  65,  37,  50,  38,  48,  39,  69,  /*  72 -  79 */
     40,  49,  41,  43,  42,  51,  43,  58,  /*  80 -  87 */
     44,  64,  45,  47,  46,  59,  47,  76,  /*  88 -  95 */
     48,  65,  49,  66,  50,  67,  51,  66,  /*  96 - 103 */
     52,  70,  53,  74,  54, 104,  55,  74,  /* 104 - 111 */
     56,  64,  57,  69,  58,  78,  59,  68,  /* 112 - 119 */
     60,  61,  61,  80,  62,  75,  63,  68,  /* 120 - 127 */
     64,  65,  65, 128,  66, 129,  67,  90,  /* 128 - 135 */
     68,  73,  69, 131,  70,  94,  71,  88,  /* 136 - 143 */
     72, 128,  73,  98,  74, 132,  75, 121,  /* 144 - 151 */
     76, 102,  77, 124,  78, 132,  79, 106,  /* 152 - 159 */
     80,  97,  81, 160,  82,  99,  83, 134,  /* 160 - 167 */
     84,  86,  85,  95,  86, 160,  87, 100,  /* 168 - 175 */
     88, 113,  89,  98,  90, 107,  91, 122,  /* 176 - 183 */
     92, 111,  93, 102,  94, 126,  95, 150,  /* 184 - 191 */
     96, 128,  97, 130,  98, 133,  99, 195,  /* 192 - 199 */
    100, 128, 101, 123, 102, 164, 103, 138,  /* 200 - 207 */
    104, 145, 105, 146, 106, 109, 107, 149,  /* 208 - 215 */
    108, 200, 109, 146, 110, 170, 111, 157,  /* 216 - 223 */
    112, 128, 113, 130, 114, 182, 115, 132,  /* 224 - 231 */
    116, 200, 117, 132, 118, 158, 119, 206,  /* 232 - 239 */
    120, 240, 121, 162, 122, 147, 123, 152,  /* 240 - 247 */
    124, 166, 125, 214, 126, 138, 127, 153,  /* 248 - 255 */
  };
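
/* For example, powi_table[15] is 9, so x**15 is evaluated as
   x**15 = x**9 * x**6; recursing, powi_table[9] = 6 and powi_table[6] = 3
   give x**9 = x**6 * x**3 and x**6 = x**3 * x**3, and finally
   x**3 = x**2 * x and x**2 = x * x.  Sharing the repeated powers, this
   takes 5 multiplications, one fewer than binary exponentiation.  */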
/* Return the number of multiplications required to calculate
   powi(x,n) where n is less than POWI_TABLE_SIZE.  This is a
   subroutine of powi_cost.  CACHE is an array indicating
   which exponents have already been calculated.  */

static int
powi_lookup_cost (unsigned HOST_WIDE_INT n, bool *cache)
{
  /* If we've already calculated this exponent, then this evaluation
     doesn't require any additional multiplications.  */
  if (cache[n])
    return 0;

  cache[n] = true;
  return powi_lookup_cost (n - powi_table[n], cache)
	 + powi_lookup_cost (powi_table[n], cache) + 1;
}
/* Return the number of multiplications required to calculate
   powi(x,n) for an arbitrary x, given the exponent N.  This
   function needs to be kept in sync with powi_as_mults below.  */

static int
powi_cost (HOST_WIDE_INT n)
{
  bool cache[POWI_TABLE_SIZE];
  unsigned HOST_WIDE_INT digit;
  unsigned HOST_WIDE_INT val;
  int result;

  if (n == 0)
    return 0;

  /* Ignore the reciprocal when calculating the cost.  */
  val = (n < 0) ? -n : n;

  /* Initialize the exponent cache.  */
  memset (cache, 0, POWI_TABLE_SIZE * sizeof (bool));
  cache[1] = true;

  result = 0;

  while (val >= POWI_TABLE_SIZE)
    {
      if (val & 1)
	{
	  digit = val & ((1 << POWI_WINDOW_SIZE) - 1);
	  result += powi_lookup_cost (digit, cache)
		    + POWI_WINDOW_SIZE + 1;
	  val >>= POWI_WINDOW_SIZE;
	}
      else
	{
	  val >>= 1;
	  result += 1;
	}
    }

  return result + powi_lookup_cost (val, cache);
}
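
/* A worked example (an illustrative sketch): for n = 1234 =
   0b10011010010, val is even, so one squaring shifts out the low zero
   bit (val = 617, result = 1).  617 is odd, so its low
   POWI_WINDOW_SIZE bits (0b001) are peeled off at the cost of a
   table-driven multiply count for that digit plus POWI_WINDOW_SIZE + 1
   (result = 5), leaving val = 77.  As 77 < POWI_TABLE_SIZE, the
   remainder is costed by powi_lookup_cost directly.  */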
/* Recursive subroutine of powi_as_mults.  This function takes the
   array, CACHE, of already calculated exponents and an exponent N and
   returns a tree that corresponds to CACHE[1]**N, with type TYPE.  */

static tree
powi_as_mults_1 (gimple_stmt_iterator *gsi, location_t loc, tree type,
		 HOST_WIDE_INT n, tree *cache)
{
  tree op0, op1, ssa_target;
  unsigned HOST_WIDE_INT digit;
  gimple mult_stmt;

  if (n < POWI_TABLE_SIZE && cache[n])
    return cache[n];

  ssa_target = make_temp_ssa_name (type, NULL, "powmult");

  if (n < POWI_TABLE_SIZE)
    {
      cache[n] = ssa_target;
      op0 = powi_as_mults_1 (gsi, loc, type, n - powi_table[n], cache);
      op1 = powi_as_mults_1 (gsi, loc, type, powi_table[n], cache);
    }
  else if (n & 1)
    {
      digit = n & ((1 << POWI_WINDOW_SIZE) - 1);
      op0 = powi_as_mults_1 (gsi, loc, type, n - digit, cache);
      op1 = powi_as_mults_1 (gsi, loc, type, digit, cache);
    }
  else
    {
      op0 = powi_as_mults_1 (gsi, loc, type, n >> 1, cache);
      op1 = op0;
    }

  mult_stmt = gimple_build_assign_with_ops (MULT_EXPR, ssa_target, op0, op1);
  gimple_set_location (mult_stmt, loc);
  gsi_insert_before (gsi, mult_stmt, GSI_SAME_STMT);

  return ssa_target;
}
/* Convert ARG0**N to a tree of multiplications of ARG0 with itself.
   This function needs to be kept in sync with powi_cost above.  */

static tree
powi_as_mults (gimple_stmt_iterator *gsi, location_t loc,
	       tree arg0, HOST_WIDE_INT n)
{
  tree cache[POWI_TABLE_SIZE], result, type = TREE_TYPE (arg0);
  gimple div_stmt;
  tree target;

  if (n == 0)
    return build_real (type, dconst1);

  memset (cache, 0, sizeof (cache));
  cache[1] = arg0;

  result = powi_as_mults_1 (gsi, loc, type, (n < 0) ? -n : n, cache);
  if (n >= 0)
    return result;

  /* If the original exponent was negative, reciprocate the result.  */
  target = make_temp_ssa_name (type, NULL, "powmult");
  div_stmt = gimple_build_assign_with_ops (RDIV_EXPR, target,
					   build_real (type, dconst1),
					   result);
  gimple_set_location (div_stmt, loc);
  gsi_insert_before (gsi, div_stmt, GSI_SAME_STMT);

  return target;
}
/* ARG0 and N are the two arguments to a powi builtin in GSI with
   location info LOC.  If the arguments are appropriate, create an
   equivalent sequence of statements prior to GSI using an optimal
   number of multiplications, and return an expression holding the
   result.  */

static tree
gimple_expand_builtin_powi (gimple_stmt_iterator *gsi, location_t loc,
			    tree arg0, HOST_WIDE_INT n)
{
  /* Avoid largest negative number.  */
  if (n != -n
      && ((n >= -1 && n <= 2)
	  || (optimize_function_for_speed_p (cfun)
	      && powi_cost (n) <= POWI_MAX_MULTS)))
    return powi_as_mults (gsi, loc, arg0, n);

  return NULL_TREE;
}
/* Build a gimple call statement that calls FN with argument ARG.
   Set the lhs of the call statement to a fresh SSA name.  Insert the
   statement prior to GSI's current position, and return the fresh
   SSA name.  */

static tree
build_and_insert_call (gimple_stmt_iterator *gsi, location_t loc,
		       tree fn, tree arg)
{
  gimple call_stmt;
  tree ssa_target;

  call_stmt = gimple_build_call (fn, 1, arg);
  ssa_target = make_temp_ssa_name (TREE_TYPE (arg), NULL, "powroot");
  gimple_set_lhs (call_stmt, ssa_target);
  gimple_set_location (call_stmt, loc);
  gsi_insert_before (gsi, call_stmt, GSI_SAME_STMT);

  return ssa_target;
}
/* Build a gimple binary operation with the given CODE and arguments
   ARG0, ARG1, assigning the result to a new SSA name built from NAME.
   Insert the statement prior to GSI's current position, and
   return the fresh SSA name.  */

static tree
build_and_insert_binop (gimple_stmt_iterator *gsi, location_t loc,
			const char *name, enum tree_code code,
			tree arg0, tree arg1)
{
  tree result = make_temp_ssa_name (TREE_TYPE (arg0), NULL, name);
  gimple stmt = gimple_build_assign_with_ops (code, result, arg0, arg1);
  gimple_set_location (stmt, loc);
  gsi_insert_before (gsi, stmt, GSI_SAME_STMT);

  return result;
}
/* Build a gimple reference operation with the given CODE and argument
   ARG, assigning the result to a new SSA name of TYPE with NAME.
   Insert the statement prior to GSI's current position, and return
   the fresh SSA name.  */

static inline tree
build_and_insert_ref (gimple_stmt_iterator *gsi, location_t loc, tree type,
		      const char *name, enum tree_code code, tree arg0)
{
  tree result = make_temp_ssa_name (type, NULL, name);
  gimple stmt = gimple_build_assign (result, build1 (code, type, arg0));
  gimple_set_location (stmt, loc);
  gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
  return result;
}
/* Build a gimple assignment to cast VAL to TYPE.  Insert the statement
   prior to GSI's current position, and return the fresh SSA name.  */

static tree
build_and_insert_cast (gimple_stmt_iterator *gsi, location_t loc,
		       tree type, tree val)
{
  tree result = make_ssa_name (type, NULL);
  gimple stmt = gimple_build_assign_with_ops (NOP_EXPR, result, val, NULL_TREE);
  gimple_set_location (stmt, loc);
  gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
  return result;
}
/* ARG0 and ARG1 are the two arguments to a pow builtin call in GSI
   with location info LOC.  If possible, create an equivalent and
   less expensive sequence of statements prior to GSI, and return an
   expression holding the result.  */

static tree
gimple_expand_builtin_pow (gimple_stmt_iterator *gsi, location_t loc,
			   tree arg0, tree arg1)
{
  REAL_VALUE_TYPE c, cint, dconst1_4, dconst3_4, dconst1_3, dconst1_6;
  REAL_VALUE_TYPE c2, dconst3;
  HOST_WIDE_INT n;
  tree type, sqrtfn, cbrtfn, sqrt_arg0, sqrt_sqrt, result, cbrt_x, powi_cbrt_x;
  enum machine_mode mode;
  bool hw_sqrt_exists;

  /* If the exponent isn't a constant, there's nothing of interest
     to be done.  */
  if (TREE_CODE (arg1) != REAL_CST)
    return NULL_TREE;

  /* If the exponent is equivalent to an integer, expand to an optimal
     multiplication sequence when profitable.  */
  c = TREE_REAL_CST (arg1);
  n = real_to_integer (&c);
  real_from_integer (&cint, VOIDmode, n, n < 0 ? -1 : 0, 0);

  if (real_identical (&c, &cint)
      && ((n >= -1 && n <= 2)
	  || (flag_unsafe_math_optimizations
	      && optimize_insn_for_speed_p ()
	      && powi_cost (n) <= POWI_MAX_MULTS)))
    return gimple_expand_builtin_powi (gsi, loc, arg0, n);

  /* Attempt various optimizations using sqrt and cbrt.  */
  type = TREE_TYPE (arg0);
  mode = TYPE_MODE (type);
  sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT);

  /* Optimize pow(x,0.5) = sqrt(x).  This replacement is always safe
     unless signed zeros must be maintained.  pow(-0,0.5) = +0, while
     sqrt(-0) = -0.  */
  if (sqrtfn
      && REAL_VALUES_EQUAL (c, dconsthalf)
      && !HONOR_SIGNED_ZEROS (mode))
    return build_and_insert_call (gsi, loc, sqrtfn, arg0);

  /* Optimize pow(x,0.25) = sqrt(sqrt(x)).  Assume on most machines that
     a builtin sqrt instruction is smaller than a call to pow with 0.25,
     so do this optimization even if -Os.  Don't do this optimization
     if we don't have a hardware sqrt insn.  */
  dconst1_4 = dconst1;
  SET_REAL_EXP (&dconst1_4, REAL_EXP (&dconst1_4) - 2);
  hw_sqrt_exists = optab_handler (sqrt_optab, mode) != CODE_FOR_nothing;

  if (flag_unsafe_math_optimizations
      && sqrtfn
      && REAL_VALUES_EQUAL (c, dconst1_4)
      && hw_sqrt_exists)
    {
      /* sqrt(x)  */
      sqrt_arg0 = build_and_insert_call (gsi, loc, sqrtfn, arg0);

      /* sqrt(sqrt(x))  */
      return build_and_insert_call (gsi, loc, sqrtfn, sqrt_arg0);
    }

  /* Optimize pow(x,0.75) = sqrt(x) * sqrt(sqrt(x)) unless we are
     optimizing for space.  Don't do this optimization if we don't have
     a hardware sqrt insn.  */
  real_from_integer (&dconst3_4, VOIDmode, 3, 0, 0);
  SET_REAL_EXP (&dconst3_4, REAL_EXP (&dconst3_4) - 2);

  if (flag_unsafe_math_optimizations
      && sqrtfn
      && optimize_function_for_speed_p (cfun)
      && REAL_VALUES_EQUAL (c, dconst3_4)
      && hw_sqrt_exists)
    {
      /* sqrt(x)  */
      sqrt_arg0 = build_and_insert_call (gsi, loc, sqrtfn, arg0);

      /* sqrt(sqrt(x))  */
      sqrt_sqrt = build_and_insert_call (gsi, loc, sqrtfn, sqrt_arg0);

      /* sqrt(x) * sqrt(sqrt(x))  */
      return build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
				     sqrt_arg0, sqrt_sqrt);
    }

  /* Optimize pow(x,1./3.) = cbrt(x).  This requires unsafe math
     optimizations since 1./3. is not exactly representable.  If x
     is negative and finite, the correct value of pow(x,1./3.) is
     a NaN with the "invalid" exception raised, because the value
     of 1./3. actually has an even denominator.  The correct value
     of cbrt(x) is a negative real value.  */
  cbrtfn = mathfn_built_in (type, BUILT_IN_CBRT);
  dconst1_3 = real_value_truncate (mode, dconst_third ());

  if (flag_unsafe_math_optimizations
      && cbrtfn
      && (gimple_val_nonnegative_real_p (arg0) || !HONOR_NANS (mode))
      && REAL_VALUES_EQUAL (c, dconst1_3))
    return build_and_insert_call (gsi, loc, cbrtfn, arg0);

  /* Optimize pow(x,1./6.) = cbrt(sqrt(x)).  Don't do this optimization
     if we don't have a hardware sqrt insn.  */
  dconst1_6 = dconst1_3;
  SET_REAL_EXP (&dconst1_6, REAL_EXP (&dconst1_6) - 1);

  if (flag_unsafe_math_optimizations
      && sqrtfn
      && cbrtfn
      && (gimple_val_nonnegative_real_p (arg0) || !HONOR_NANS (mode))
      && optimize_function_for_speed_p (cfun)
      && hw_sqrt_exists
      && REAL_VALUES_EQUAL (c, dconst1_6))
    {
      /* sqrt(x)  */
      sqrt_arg0 = build_and_insert_call (gsi, loc, sqrtfn, arg0);

      /* cbrt(sqrt(x))  */
      return build_and_insert_call (gsi, loc, cbrtfn, sqrt_arg0);
    }

  /* Optimize pow(x,c), where n = 2c for some nonzero integer n, into

       sqrt(x) * powi(x, n/2),                n > 0;
       1.0 / (sqrt(x) * powi(x, abs(n/2))),   n < 0.

     Do not calculate the powi factor when n/2 = 0.  */
  real_arithmetic (&c2, MULT_EXPR, &c, &dconst2);
  n = real_to_integer (&c2);
  real_from_integer (&cint, VOIDmode, n, n < 0 ? -1 : 0, 0);

  if (flag_unsafe_math_optimizations
      && sqrtfn
      && real_identical (&c2, &cint))
    {
      tree powi_x_ndiv2 = NULL_TREE;

      /* Attempt to fold powi(arg0, abs(n/2)) into multiplies.  If not
	 possible or profitable, give up.  Skip the degenerate case when
	 n is 1 or -1, where the result is always 1.  */
      if (absu_hwi (n) != 1)
	{
	  powi_x_ndiv2
	    = gimple_expand_builtin_powi (gsi, loc, arg0,
					  abs_hwi (n / 2));
	  if (!powi_x_ndiv2)
	    return NULL_TREE;
	}

      /* Calculate sqrt(x).  When n is not 1 or -1, multiply it by the
	 result of the optimal multiply sequence just calculated.  */
      sqrt_arg0 = build_and_insert_call (gsi, loc, sqrtfn, arg0);

      if (absu_hwi (n) == 1)
	result = sqrt_arg0;
      else
	result = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
					 sqrt_arg0, powi_x_ndiv2);

      /* If n is negative, reciprocate the result.  */
      if (n < 0)
	result = build_and_insert_binop (gsi, loc, "powroot", RDIV_EXPR,
					 build_real (type, dconst1), result);

      return result;
    }

  /* Optimize pow(x,c), where 3c = n for some nonzero integer n, into

     powi(x, n/3) * powi(cbrt(x), n%3),                    n > 0;
     1.0 / (powi(x, abs(n)/3) * powi(cbrt(x), abs(n)%3)),  n < 0.

     Do not calculate the first factor when n/3 = 0.  As cbrt(x) is
     different from pow(x, 1./3.) due to rounding and behavior with
     negative x, we need to constrain this transformation to unsafe
     math and positive x or finite math.  */
  real_from_integer (&dconst3, VOIDmode, 3, 0, 0);
  real_arithmetic (&c2, MULT_EXPR, &c, &dconst3);
  real_round (&c2, mode, &c2);
  n = real_to_integer (&c2);
  real_from_integer (&cint, VOIDmode, n, n < 0 ? -1 : 0, 0);
  real_arithmetic (&c2, RDIV_EXPR, &cint, &dconst3);
  real_convert (&c2, mode, &c2);

  if (flag_unsafe_math_optimizations
      && cbrtfn
      && (gimple_val_nonnegative_real_p (arg0) || !HONOR_NANS (mode))
      && real_identical (&c2, &c)
      && optimize_function_for_speed_p (cfun)
      && powi_cost (n / 3) <= POWI_MAX_MULTS)
    {
      tree powi_x_ndiv3 = NULL_TREE;

      /* Attempt to fold powi(arg0, abs(n/3)) into multiplies.  If not
	 possible or profitable, give up.  Skip the degenerate case when
	 abs(n) < 3, where the result is always 1.  */
      if (absu_hwi (n) >= 3)
	{
	  powi_x_ndiv3
	    = gimple_expand_builtin_powi (gsi, loc, arg0,
					  abs_hwi (n / 3));
	  if (!powi_x_ndiv3)
	    return NULL_TREE;
	}

      /* Calculate powi(cbrt(x), n%3).  Don't use gimple_expand_builtin_powi
	 as that creates an unnecessary variable.  Instead, just produce
	 either cbrt(x) or cbrt(x) * cbrt(x).  */
      cbrt_x = build_and_insert_call (gsi, loc, cbrtfn, arg0);

      if (absu_hwi (n) % 3 == 1)
	powi_cbrt_x = cbrt_x;
      else
	powi_cbrt_x = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
					      cbrt_x, cbrt_x);

      /* Multiply the two subexpressions, unless powi(x,abs(n)/3) = 1.  */
      if (absu_hwi (n) < 3)
	result = powi_cbrt_x;
      else
	result = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
					 powi_x_ndiv3, powi_cbrt_x);

      /* If n is negative, reciprocate the result.  */
      if (n < 0)
	result = build_and_insert_binop (gsi, loc, "powroot", RDIV_EXPR,
					 build_real (type, dconst1), result);

      return result;
    }

  /* No optimizations succeeded.  */
  return NULL_TREE;
}
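
/* For example (an illustrative sketch): with -funsafe-math-optimizations,
   pow (x, 1.5) falls into the 2c-integer case above with n = 3 and
   becomes sqrt (x) * x, while pow (x, -0.5) takes the degenerate
   abs(n) == 1 path and becomes 1.0 / sqrt (x).  */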
/* ARG is the argument to a cabs builtin call in GSI with location info
   LOC.  Create a sequence of statements prior to GSI that calculates
   sqrt(R*R + I*I), where R and I are the real and imaginary components
   of ARG, respectively.  Return an expression holding the result.  */

static tree
gimple_expand_builtin_cabs (gimple_stmt_iterator *gsi, location_t loc, tree arg)
{
  tree real_part, imag_part, addend1, addend2, sum, result;
  tree type = TREE_TYPE (TREE_TYPE (arg));
  tree sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT);
  enum machine_mode mode = TYPE_MODE (type);

  if (!flag_unsafe_math_optimizations
      || !optimize_bb_for_speed_p (gimple_bb (gsi_stmt (*gsi)))
      || !sqrtfn
      || optab_handler (sqrt_optab, mode) == CODE_FOR_nothing)
    return NULL_TREE;

  real_part = build_and_insert_ref (gsi, loc, type, "cabs",
				    REALPART_EXPR, arg);
  addend1 = build_and_insert_binop (gsi, loc, "cabs", MULT_EXPR,
				    real_part, real_part);
  imag_part = build_and_insert_ref (gsi, loc, type, "cabs",
				    IMAGPART_EXPR, arg);
  addend2 = build_and_insert_binop (gsi, loc, "cabs", MULT_EXPR,
				    imag_part, imag_part);
  sum = build_and_insert_binop (gsi, loc, "cabs", PLUS_EXPR, addend1, addend2);
  result = build_and_insert_call (gsi, loc, sqrtfn, sum);

  return result;
}
/* Go through all calls to sin, cos and cexpi and call execute_cse_sincos_1
   on the SSA_NAME argument of each of them.  Also expand powi(x,n) into
   an optimal number of multiplies, when n is a constant.  */

static unsigned int
execute_cse_sincos (void)
{
  basic_block bb;
  bool cfg_changed = false;

  calculate_dominance_info (CDI_DOMINATORS);
  memset (&sincos_stats, 0, sizeof (sincos_stats));

  FOR_EACH_BB (bb)
    {
      gimple_stmt_iterator gsi;
      bool cleanup_eh = false;

      for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple stmt = gsi_stmt (gsi);
	  tree fndecl;

	  /* Only the last stmt in a bb could throw, no need to call
	     gimple_purge_dead_eh_edges if we change something in the middle
	     of a basic block.  */
	  cleanup_eh = false;

	  if (is_gimple_call (stmt)
	      && gimple_call_lhs (stmt)
	      && (fndecl = gimple_call_fndecl (stmt))
	      && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
	    {
	      tree arg, arg0, arg1, result;
	      HOST_WIDE_INT n;
	      location_t loc;

	      switch (DECL_FUNCTION_CODE (fndecl))
		{
		CASE_FLT_FN (BUILT_IN_COS):
		CASE_FLT_FN (BUILT_IN_SIN):
		CASE_FLT_FN (BUILT_IN_CEXPI):
		  /* Make sure we have either sincos or cexp.  */
		  if (!TARGET_HAS_SINCOS && !TARGET_C99_FUNCTIONS)
		    break;

		  arg = gimple_call_arg (stmt, 0);
		  if (TREE_CODE (arg) == SSA_NAME)
		    cfg_changed |= execute_cse_sincos_1 (arg);
		  break;

		CASE_FLT_FN (BUILT_IN_POW):
		  arg0 = gimple_call_arg (stmt, 0);
		  arg1 = gimple_call_arg (stmt, 1);

		  loc = gimple_location (stmt);
		  result = gimple_expand_builtin_pow (&gsi, loc, arg0, arg1);

		  if (result)
		    {
		      tree lhs = gimple_get_lhs (stmt);
		      gimple new_stmt = gimple_build_assign (lhs, result);
		      gimple_set_location (new_stmt, loc);
		      unlink_stmt_vdef (stmt);
		      gsi_replace (&gsi, new_stmt, true);
		      cleanup_eh = true;
		      if (gimple_vdef (stmt))
			release_ssa_name (gimple_vdef (stmt));
		    }
		  break;

		CASE_FLT_FN (BUILT_IN_POWI):
		  arg0 = gimple_call_arg (stmt, 0);
		  arg1 = gimple_call_arg (stmt, 1);
		  if (!host_integerp (arg1, 0))
		    break;

		  n = TREE_INT_CST_LOW (arg1);
		  loc = gimple_location (stmt);
		  result = gimple_expand_builtin_powi (&gsi, loc, arg0, n);

		  if (result)
		    {
		      tree lhs = gimple_get_lhs (stmt);
		      gimple new_stmt = gimple_build_assign (lhs, result);
		      gimple_set_location (new_stmt, loc);
		      unlink_stmt_vdef (stmt);
		      gsi_replace (&gsi, new_stmt, true);
		      cleanup_eh = true;
		      if (gimple_vdef (stmt))
			release_ssa_name (gimple_vdef (stmt));
		    }
		  break;

		CASE_FLT_FN (BUILT_IN_CABS):
		  arg0 = gimple_call_arg (stmt, 0);
		  loc = gimple_location (stmt);
		  result = gimple_expand_builtin_cabs (&gsi, loc, arg0);

		  if (result)
		    {
		      tree lhs = gimple_get_lhs (stmt);
		      gimple new_stmt = gimple_build_assign (lhs, result);
		      gimple_set_location (new_stmt, loc);
		      unlink_stmt_vdef (stmt);
		      gsi_replace (&gsi, new_stmt, true);
		      cleanup_eh = true;
		      if (gimple_vdef (stmt))
			release_ssa_name (gimple_vdef (stmt));
		    }
		  break;

		default:;
		}
	    }
	}
      if (cleanup_eh)
	cfg_changed |= gimple_purge_dead_eh_edges (bb);
    }

  statistics_counter_event (cfun, "sincos statements inserted",
			    sincos_stats.inserted);

  free_dominance_info (CDI_DOMINATORS);
  return cfg_changed ? TODO_cleanup_cfg : 0;
}
static bool
gate_cse_sincos (void)
{
  /* We no longer require either sincos or cexp, since powi expansion
     piggybacks on this pass.  */
  return optimize;
}

struct gimple_opt_pass pass_cse_sincos =
{
 {
  GIMPLE_PASS,
  "sincos",				/* name */
  gate_cse_sincos,			/* gate */
  execute_cse_sincos,			/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_NONE,				/* tv_id */
  PROP_ssa,				/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  TODO_update_ssa | TODO_verify_ssa
    | TODO_verify_stmts			/* todo_flags_finish */
 }
};
/* A symbolic number is used to detect byte permutation and selection
   patterns.  Therefore the field N contains an artificial number
   consisting of byte size markers:

   0    - byte has the value 0
   1..size - byte contains the content of the byte
   number indexed with that value minus one  */

struct symbolic_number {
  unsigned HOST_WIDEST_INT n;
  int size;
};
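
/* For instance (an illustrative sketch): for a 32-bit value,
   find_bswap_1 below seeds N with 0x04030201, i.e. byte 1 (lowest) up
   to byte 4 (highest).  A shift such as "x >> 8" then turns N into
   0x00040302: the top byte is now known to be zero and each remaining
   marker names the source byte that ends up in that position.  A full
   byte swap is recognized when N becomes 0x01020304.  */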
/* Perform a SHIFT or ROTATE operation by COUNT bits on symbolic
   number N.  Return false if the requested operation is not permitted
   on a symbolic number.  */

static bool
do_shift_rotate (enum tree_code code,
		 struct symbolic_number *n,
		 int count)
{
  if (count % 8 != 0)
    return false;

  /* Zero out the extra bits of N in order to avoid them being shifted
     into the significant bits.  */
  if (n->size < (int)sizeof (HOST_WIDEST_INT))
    n->n &= ((unsigned HOST_WIDEST_INT)1 << (n->size * BITS_PER_UNIT)) - 1;

  switch (code)
    {
    case LSHIFT_EXPR:
      n->n <<= count;
      break;
    case RSHIFT_EXPR:
      n->n >>= count;
      break;
    case LROTATE_EXPR:
      n->n = (n->n << count) | (n->n >> ((n->size * BITS_PER_UNIT) - count));
      break;
    case RROTATE_EXPR:
      n->n = (n->n >> count) | (n->n << ((n->size * BITS_PER_UNIT) - count));
      break;
    default:
      return false;
    }
  /* Zero unused bits for size.  */
  if (n->size < (int)sizeof (HOST_WIDEST_INT))
    n->n &= ((unsigned HOST_WIDEST_INT)1 << (n->size * BITS_PER_UNIT)) - 1;
  return true;
}
/* Perform sanity checking for the symbolic number N and the gimple
   statement STMT.  */

static bool
verify_symbolic_number_p (struct symbolic_number *n, gimple stmt)
{
  tree lhs_type;

  lhs_type = gimple_expr_type (stmt);

  if (TREE_CODE (lhs_type) != INTEGER_TYPE)
    return false;

  if (TYPE_PRECISION (lhs_type) != n->size * BITS_PER_UNIT)
    return false;

  return true;
}
/* find_bswap_1 invokes itself recursively with N and tries to perform
   the operation given by the rhs of STMT on the result.  If the
   operation could successfully be executed the function returns the
   tree expression of the source operand and NULL otherwise.  */

static tree
find_bswap_1 (gimple stmt, struct symbolic_number *n, int limit)
{
  enum tree_code code;
  tree rhs1, rhs2 = NULL;
  gimple rhs1_stmt, rhs2_stmt;
  tree source_expr1;
  enum gimple_rhs_class rhs_class;

  if (!limit || !is_gimple_assign (stmt))
    return NULL_TREE;

  rhs1 = gimple_assign_rhs1 (stmt);

  if (TREE_CODE (rhs1) != SSA_NAME)
    return NULL_TREE;

  code = gimple_assign_rhs_code (stmt);
  rhs_class = gimple_assign_rhs_class (stmt);
  rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);

  if (rhs_class == GIMPLE_BINARY_RHS)
    rhs2 = gimple_assign_rhs2 (stmt);

  /* Handle unary rhs and binary rhs with integer constants as second
     operand.  */

  if (rhs_class == GIMPLE_UNARY_RHS
      || (rhs_class == GIMPLE_BINARY_RHS
	  && TREE_CODE (rhs2) == INTEGER_CST))
    {
      if (code != BIT_AND_EXPR
	  && code != LSHIFT_EXPR
	  && code != RSHIFT_EXPR
	  && code != LROTATE_EXPR
	  && code != RROTATE_EXPR
	  && code != NOP_EXPR
	  && code != CONVERT_EXPR)
	return NULL_TREE;

      source_expr1 = find_bswap_1 (rhs1_stmt, n, limit - 1);

      /* If find_bswap_1 returned NULL STMT is a leaf node and we have
	 to initialize the symbolic number.  */
      if (!source_expr1)
	{
	  /* Set up the symbolic number N by setting each byte to a
	     value between 1 and the byte size of rhs1.  The highest
	     order byte is set to n->size and the lowest order
	     byte to 1.  */
	  n->size = TYPE_PRECISION (TREE_TYPE (rhs1));
	  if (n->size % BITS_PER_UNIT != 0)
	    return NULL_TREE;
	  n->size /= BITS_PER_UNIT;
	  n->n = (sizeof (HOST_WIDEST_INT) < 8 ? 0 :
		  (unsigned HOST_WIDEST_INT)0x08070605 << 32 | 0x04030201);

	  if (n->size < (int)sizeof (HOST_WIDEST_INT))
	    n->n &= ((unsigned HOST_WIDEST_INT)1 <<
		     (n->size * BITS_PER_UNIT)) - 1;

	  source_expr1 = rhs1;
	}

      switch (code)
	{
	case BIT_AND_EXPR:
	  {
	    int i;
	    unsigned HOST_WIDEST_INT val = widest_int_cst_value (rhs2);
	    unsigned HOST_WIDEST_INT tmp = val;

	    /* Only constants masking full bytes are allowed.  */
	    for (i = 0; i < n->size; i++, tmp >>= BITS_PER_UNIT)
	      if ((tmp & 0xff) != 0 && (tmp & 0xff) != 0xff)
		return NULL_TREE;

	    n->n &= val;
	  }
	  break;
	case LSHIFT_EXPR:
	case RSHIFT_EXPR:
	case LROTATE_EXPR:
	case RROTATE_EXPR:
	  if (!do_shift_rotate (code, n, (int)TREE_INT_CST_LOW (rhs2)))
	    return NULL_TREE;
	  break;
	CASE_CONVERT:
	  {
	    int type_size;

	    type_size = TYPE_PRECISION (gimple_expr_type (stmt));
	    if (type_size % BITS_PER_UNIT != 0)
	      return NULL_TREE;

	    if (type_size / BITS_PER_UNIT < (int)(sizeof (HOST_WIDEST_INT)))
	      {
		/* If STMT casts to a smaller type mask out the bits not
		   belonging to the target type.  */
		n->n &= ((unsigned HOST_WIDEST_INT)1 << type_size) - 1;
	      }
	    n->size = type_size / BITS_PER_UNIT;
	  }
	  break;
	default:
	  return NULL_TREE;
	};
      return verify_symbolic_number_p (n, stmt) ? source_expr1 : NULL;
    }

  /* Handle binary rhs.  */

  if (rhs_class == GIMPLE_BINARY_RHS)
    {
      struct symbolic_number n1, n2;
      tree source_expr2;

      if (code != BIT_IOR_EXPR)
	return NULL_TREE;

      if (TREE_CODE (rhs2) != SSA_NAME)
	return NULL_TREE;

      rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);

      switch (code)
	{
	case BIT_IOR_EXPR:
	  source_expr1 = find_bswap_1 (rhs1_stmt, &n1, limit - 1);

	  if (!source_expr1)
	    return NULL_TREE;

	  source_expr2 = find_bswap_1 (rhs2_stmt, &n2, limit - 1);

	  if (source_expr1 != source_expr2
	      || n1.size != n2.size)
	    return NULL_TREE;

	  n->size = n1.size;
	  n->n = n1.n | n2.n;

	  if (!verify_symbolic_number_p (n, stmt))
	    return NULL_TREE;
	  break;
	default:
	  return NULL_TREE;
	}
      return source_expr1;
    }
  return NULL_TREE;
}
/* Check if STMT completes a bswap implementation consisting of ORs,
   SHIFTs and ANDs.  Return the source tree expression on which the
   byte swap is performed and NULL if no bswap was found.  */

static tree
find_bswap (gimple stmt)
{
  /* The number which the find_bswap result should match in order to
     have a full byte swap.  The number is shifted to the left according
     to the size of the symbolic number before using it.  */
  unsigned HOST_WIDEST_INT cmp =
    sizeof (HOST_WIDEST_INT) < 8 ? 0 :
    (unsigned HOST_WIDEST_INT)0x01020304 << 32 | 0x05060708;

  struct symbolic_number n;
  tree source_expr;
  int limit;

  /* The last parameter determines the depth search limit.  It usually
     correlates directly to the number of bytes to be touched.  We
     increase that number by three here in order to also
     cover signed -> unsigned conversions of the src operand as can be seen
     in libgcc, and for initial shift/and operation of the src operand.  */
  limit = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (gimple_expr_type (stmt)));
  limit += 1 + (int) ceil_log2 ((unsigned HOST_WIDE_INT) limit);
  source_expr = find_bswap_1 (stmt, &n, limit);

  if (!source_expr)
    return NULL_TREE;

  /* Zero out the extra bits of N and CMP.  */
  if (n.size < (int)sizeof (HOST_WIDEST_INT))
    {
      unsigned HOST_WIDEST_INT mask =
	((unsigned HOST_WIDEST_INT)1 << (n.size * BITS_PER_UNIT)) - 1;

      n.n &= mask;
      cmp >>= (sizeof (HOST_WIDEST_INT) - n.size) * BITS_PER_UNIT;
    }

  /* A complete byte swap should make the symbolic number to start
     with the largest digit in the highest order byte.  */
  if (cmp != n.n)
    return NULL_TREE;

  return source_expr;
}
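
/* A typical hand-written 32-bit byte swap that the walk above matches
   (an illustrative sketch; the shifts, ANDs and ORs may appear in any
   association):

     ((x >> 24) & 0x000000ff) | ((x >>  8) & 0x0000ff00)
   | ((x <<  8) & 0x00ff0000) | ((x << 24) & 0xff000000)

   Each subterm evaluates to a symbolic number with a single nonzero
   byte marker; the BIT_IOR_EXPRs merge them until the result equals
   CMP.  */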
/* Find manual byte swap implementations and turn them into a bswap
   builtin invocation.  */

static unsigned int
execute_optimize_bswap (void)
{
  basic_block bb;
  bool bswap16_p, bswap32_p, bswap64_p;
  bool changed = false;
  tree bswap16_type = NULL_TREE, bswap32_type = NULL_TREE, bswap64_type = NULL_TREE;

  if (BITS_PER_UNIT != 8)
    return 0;

  if (sizeof (HOST_WIDEST_INT) < 8)
    return 0;

  bswap16_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP16)
	       && optab_handler (bswap_optab, HImode) != CODE_FOR_nothing);
  bswap32_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP32)
	       && optab_handler (bswap_optab, SImode) != CODE_FOR_nothing);
  bswap64_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP64)
	       && (optab_handler (bswap_optab, DImode) != CODE_FOR_nothing
		   || (bswap32_p && word_mode == SImode)));

  if (!bswap16_p && !bswap32_p && !bswap64_p)
    return 0;

  /* Determine the argument type of the builtins.  The code later on
     assumes that the return and argument type are the same.  */
  if (bswap16_p)
    {
      tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP16);
      bswap16_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
    }

  if (bswap32_p)
    {
      tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
      bswap32_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
    }

  if (bswap64_p)
    {
      tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
      bswap64_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
    }

  memset (&bswap_stats, 0, sizeof (bswap_stats));

  FOR_EACH_BB (bb)
    {
      gimple_stmt_iterator gsi;

      /* We do a reverse scan for bswap patterns to make sure we get the
	 widest match. As bswap pattern matching doesn't handle
	 previously inserted smaller bswap replacements as sub-
	 patterns, the wider variant wouldn't be detected.  */
      for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
	{
	  gimple stmt = gsi_stmt (gsi);
	  tree bswap_src, bswap_type;
	  tree bswap_tmp;
	  tree fndecl = NULL_TREE;
	  int type_size;
	  gimple call;

	  if (!is_gimple_assign (stmt)
	      || gimple_assign_rhs_code (stmt) != BIT_IOR_EXPR)
	    continue;

	  type_size = TYPE_PRECISION (gimple_expr_type (stmt));

	  switch (type_size)
	    {
	    case 16:
	      if (bswap16_p)
		{
		  fndecl = builtin_decl_explicit (BUILT_IN_BSWAP16);
		  bswap_type = bswap16_type;
		}
	      break;
	    case 32:
	      if (bswap32_p)
		{
		  fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
		  bswap_type = bswap32_type;
		}
	      break;
	    case 64:
	      if (bswap64_p)
		{
		  fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
		  bswap_type = bswap64_type;
		}
	      break;
	    default:
	      continue;
	    }

	  if (!fndecl)
	    continue;

	  bswap_src = find_bswap (stmt);

	  if (!bswap_src)
	    continue;

	  changed = true;
	  if (type_size == 16)
	    bswap_stats.found_16bit++;
	  else if (type_size == 32)
	    bswap_stats.found_32bit++;
	  else
	    bswap_stats.found_64bit++;

	  bswap_tmp = bswap_src;

	  /* Convert the src expression if necessary.  */
	  if (!useless_type_conversion_p (TREE_TYPE (bswap_tmp), bswap_type))
	    {
	      gimple convert_stmt;
	      bswap_tmp = make_temp_ssa_name (bswap_type, NULL, "bswapsrc");
	      convert_stmt = gimple_build_assign_with_ops
			       (NOP_EXPR, bswap_tmp, bswap_src, NULL);
	      gsi_insert_before (&gsi, convert_stmt, GSI_SAME_STMT);
	    }

	  call = gimple_build_call (fndecl, 1, bswap_tmp);

	  bswap_tmp = gimple_assign_lhs (stmt);

	  /* Convert the result if necessary.  */
	  if (!useless_type_conversion_p (TREE_TYPE (bswap_tmp), bswap_type))
	    {
	      gimple convert_stmt;
	      bswap_tmp = make_temp_ssa_name (bswap_type, NULL, "bswapdst");
	      convert_stmt = gimple_build_assign_with_ops
			       (NOP_EXPR, gimple_assign_lhs (stmt), bswap_tmp, NULL);
	      gsi_insert_after (&gsi, convert_stmt, GSI_SAME_STMT);
	    }

	  gimple_call_set_lhs (call, bswap_tmp);

	  if (dump_file)
	    {
	      fprintf (dump_file, "%d bit bswap implementation found at: ",
		       (int)type_size);
	      print_gimple_stmt (dump_file, stmt, 0, 0);
	    }

	  gsi_insert_after (&gsi, call, GSI_SAME_STMT);
	  gsi_remove (&gsi, true);
	}
    }

  statistics_counter_event (cfun, "16-bit bswap implementations found",
			    bswap_stats.found_16bit);
  statistics_counter_event (cfun, "32-bit bswap implementations found",
			    bswap_stats.found_32bit);
  statistics_counter_event (cfun, "64-bit bswap implementations found",
			    bswap_stats.found_64bit);

  return (changed ? TODO_update_ssa | TODO_verify_ssa
	  | TODO_verify_stmts : 0);
}
static bool
gate_optimize_bswap (void)
{
  return flag_expensive_optimizations && optimize;
}

struct gimple_opt_pass pass_optimize_bswap =
{
 {
  GIMPLE_PASS,
  "bswap",				/* name */
  gate_optimize_bswap,			/* gate */
  execute_optimize_bswap,		/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_NONE,				/* tv_id */
  PROP_ssa,				/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  0					/* todo_flags_finish */
 }
};
/* Return true if STMT is a type conversion operation that can be stripped
   when used in a widening multiply operation.  */
static bool
widening_mult_conversion_strippable_p (tree result_type, gimple stmt)
{
  enum tree_code rhs_code = gimple_assign_rhs_code (stmt);

  if (TREE_CODE (result_type) == INTEGER_TYPE)
    {
      tree op_type;
      tree inner_op_type;

      if (!CONVERT_EXPR_CODE_P (rhs_code))
	return false;

      op_type = TREE_TYPE (gimple_assign_lhs (stmt));

      /* If the type of OP has the same precision as the result, then
	 we can strip this conversion.  The multiply operation will be
	 selected to create the correct extension as a by-product.  */
      if (TYPE_PRECISION (result_type) == TYPE_PRECISION (op_type))
	return true;

      /* We can also strip a conversion if it preserves the signed-ness of
	 the operation and doesn't narrow the range.  */
      inner_op_type = TREE_TYPE (gimple_assign_rhs1 (stmt));

      /* If the inner-most type is unsigned, then we can strip any
	 intermediate widening operation.  If it's signed, then the
	 intermediate widening operation must also be signed.  */
      if ((TYPE_UNSIGNED (inner_op_type)
	   || TYPE_UNSIGNED (op_type) == TYPE_UNSIGNED (inner_op_type))
	  && TYPE_PRECISION (op_type) > TYPE_PRECISION (inner_op_type))
	return true;

      return false;
    }

  return rhs_code == FIXED_CONVERT_EXPR;
}
/* Return true if RHS is a suitable operand for a widening multiplication,
   assuming a target type of TYPE.
   There are two cases:

     - RHS makes some value at least twice as wide.  Store that value
       in *NEW_RHS_OUT if so, and store its type in *TYPE_OUT.

     - RHS is an integer constant.  Store that value in *NEW_RHS_OUT if so,
       but leave *TYPE_OUT untouched.  */

static bool
is_widening_mult_rhs_p (tree type, tree rhs, tree *type_out,
			tree *new_rhs_out)
{
  gimple stmt;
  tree type1, rhs1;

  if (TREE_CODE (rhs) == SSA_NAME)
    {
      stmt = SSA_NAME_DEF_STMT (rhs);
      if (is_gimple_assign (stmt))
	{
	  if (! widening_mult_conversion_strippable_p (type, stmt))
	    rhs1 = rhs;
	  else
	    {
	      rhs1 = gimple_assign_rhs1 (stmt);

	      if (TREE_CODE (rhs1) == INTEGER_CST)
		{
		  *new_rhs_out = rhs1;
		  *type_out = NULL;
		  return true;
		}
	    }
	}
      else
	rhs1 = rhs;

      type1 = TREE_TYPE (rhs1);

      if (TREE_CODE (type1) != TREE_CODE (type)
	  || TYPE_PRECISION (type1) * 2 > TYPE_PRECISION (type))
	return false;

      *new_rhs_out = rhs1;
      *type_out = type1;
      return true;
    }

  if (TREE_CODE (rhs) == INTEGER_CST)
    {
      *new_rhs_out = rhs;
      *type_out = NULL;
      return true;
    }

  return false;
}
/* Return true if STMT performs a widening multiplication, assuming the
   output type is TYPE.  If so, store the unwidened types of the operands
   in *TYPE1_OUT and *TYPE2_OUT respectively.  Also fill *RHS1_OUT and
   *RHS2_OUT such that converting those operands to types *TYPE1_OUT
   and *TYPE2_OUT would give the operands of the multiplication.  */

static bool
is_widening_mult_p (gimple stmt,
		    tree *type1_out, tree *rhs1_out,
		    tree *type2_out, tree *rhs2_out)
{
  tree type = TREE_TYPE (gimple_assign_lhs (stmt));

  if (TREE_CODE (type) != INTEGER_TYPE
      && TREE_CODE (type) != FIXED_POINT_TYPE)
    return false;

  if (!is_widening_mult_rhs_p (type, gimple_assign_rhs1 (stmt), type1_out,
			       rhs1_out))
    return false;

  if (!is_widening_mult_rhs_p (type, gimple_assign_rhs2 (stmt), type2_out,
			       rhs2_out))
    return false;

  if (*type1_out == NULL)
    {
      if (*type2_out == NULL || !int_fits_type_p (*rhs1_out, *type2_out))
	return false;
      *type1_out = *type2_out;
    }

  if (*type2_out == NULL)
    {
      if (!int_fits_type_p (*rhs2_out, *type1_out))
	return false;
      *type2_out = *type1_out;
    }

  /* Ensure that the larger of the two operands comes first. */
  if (TYPE_PRECISION (*type1_out) < TYPE_PRECISION (*type2_out))
    {
      tree tmp;
      tmp = *type1_out;
      *type1_out = *type2_out;
      *type2_out = tmp;
      tmp = *rhs1_out;
      *rhs1_out = *rhs2_out;
      *rhs2_out = tmp;
    }

  return true;
}
/* Process a single gimple statement STMT, which has a MULT_EXPR as
   its rhs, and try to convert it into a WIDEN_MULT_EXPR.  The return
   value is true iff we converted the statement.  */
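/* For example (illustrative, not from the original sources), given
   16-bit shorts A and B and the gimple

     _1 = (int) a;
     _2 = (int) b;
     c = _1 * _2;

   the multiply is rewritten in place as

     c = a w* b;

   i.e. a WIDEN_MULT_EXPR, provided the target has a suitable 16x16->32
   widening multiply optab.  */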
static bool
convert_mult_to_widen (gimple stmt, gimple_stmt_iterator *gsi)
{
  tree lhs, rhs1, rhs2, type, type1, type2;
  enum insn_code handler;
  enum machine_mode to_mode, from_mode, actual_mode;
  optab op;
  int actual_precision;
  location_t loc = gimple_location (stmt);
  bool from_unsigned1, from_unsigned2;

  lhs = gimple_assign_lhs (stmt);
  type = TREE_TYPE (lhs);
  if (TREE_CODE (type) != INTEGER_TYPE)
    return false;

  if (!is_widening_mult_p (stmt, &type1, &rhs1, &type2, &rhs2))
    return false;
  to_mode = TYPE_MODE (type);
  from_mode = TYPE_MODE (type1);
  from_unsigned1 = TYPE_UNSIGNED (type1);
  from_unsigned2 = TYPE_UNSIGNED (type2);

  if (from_unsigned1 && from_unsigned2)
    op = umul_widen_optab;
  else if (!from_unsigned1 && !from_unsigned2)
    op = smul_widen_optab;
  else
    op = usmul_widen_optab;
  handler = find_widening_optab_handler_and_mode (op, to_mode, from_mode,
                                                  0, &actual_mode);

  if (handler == CODE_FOR_nothing)
    {
      if (op != smul_widen_optab)
        {
          /* We can use a signed multiply with unsigned types as long as
             there is a wider mode to use, or it is the smaller of the two
             types that is unsigned.  Note that type1 >= type2, always.  */
          if ((TYPE_UNSIGNED (type1)
               && TYPE_PRECISION (type1) == GET_MODE_PRECISION (from_mode))
              || (TYPE_UNSIGNED (type2)
                  && TYPE_PRECISION (type2) == GET_MODE_PRECISION (from_mode)))
            {
              from_mode = GET_MODE_WIDER_MODE (from_mode);
              if (GET_MODE_SIZE (to_mode) <= GET_MODE_SIZE (from_mode))
                return false;
            }

          op = smul_widen_optab;
          handler = find_widening_optab_handler_and_mode (op, to_mode,
                                                          from_mode, 0,
                                                          &actual_mode);

          if (handler == CODE_FOR_nothing)
            return false;

          from_unsigned1 = from_unsigned2 = false;
        }
      else
        return false;
    }
  /* Ensure that the inputs to the handler are in the correct precision
     for the opcode.  This will be the full mode size.  */
  actual_precision = GET_MODE_PRECISION (actual_mode);
  if (2 * actual_precision > TYPE_PRECISION (type))
    return false;
  if (actual_precision != TYPE_PRECISION (type1)
      || from_unsigned1 != TYPE_UNSIGNED (type1))
    rhs1 = build_and_insert_cast (gsi, loc,
                                  build_nonstandard_integer_type
                                    (actual_precision, from_unsigned1), rhs1);
  if (actual_precision != TYPE_PRECISION (type2)
      || from_unsigned2 != TYPE_UNSIGNED (type2))
    rhs2 = build_and_insert_cast (gsi, loc,
                                  build_nonstandard_integer_type
                                    (actual_precision, from_unsigned2), rhs2);
  /* Handle constants.  */
  if (TREE_CODE (rhs1) == INTEGER_CST)
    rhs1 = fold_convert (type1, rhs1);
  if (TREE_CODE (rhs2) == INTEGER_CST)
    rhs2 = fold_convert (type2, rhs2);

  gimple_assign_set_rhs1 (stmt, rhs1);
  gimple_assign_set_rhs2 (stmt, rhs2);
  gimple_assign_set_rhs_code (stmt, WIDEN_MULT_EXPR);
  update_stmt (stmt);
  widen_mul_stats.widen_mults_inserted++;
  return true;
}
/* Process a single gimple statement STMT, which is found at the
   iterator GSI and has either a PLUS_EXPR or a MINUS_EXPR as its
   rhs (given by CODE), and try to convert it into a
   WIDEN_MULT_PLUS_EXPR or a WIDEN_MULT_MINUS_EXPR.  The return value
   is true iff we converted the statement.  */
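/* For example (illustrative, not from the original sources), given the
   gimple

     _1 = a w* b;
     r = _1 + c;

   the addition is rewritten as

     r = WIDEN_MULT_PLUS_EXPR <a, b, c>;

   provided the target has a matching multiply-and-accumulate optab.  */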
static bool
convert_plusminus_to_widen (gimple_stmt_iterator *gsi, gimple stmt,
                            enum tree_code code)
{
  gimple rhs1_stmt = NULL, rhs2_stmt = NULL;
  gimple conv1_stmt = NULL, conv2_stmt = NULL, conv_stmt;
  tree type, type1, type2, optype;
  tree lhs, rhs1, rhs2, mult_rhs1, mult_rhs2, add_rhs;
  enum tree_code rhs1_code = ERROR_MARK, rhs2_code = ERROR_MARK;
  optab this_optab;
  enum tree_code wmult_code;
  enum insn_code handler;
  enum machine_mode to_mode, from_mode, actual_mode;
  location_t loc = gimple_location (stmt);
  int actual_precision;
  bool from_unsigned1, from_unsigned2;
  lhs = gimple_assign_lhs (stmt);
  type = TREE_TYPE (lhs);
  if (TREE_CODE (type) != INTEGER_TYPE
      && TREE_CODE (type) != FIXED_POINT_TYPE)
    return false;

  if (code == MINUS_EXPR)
    wmult_code = WIDEN_MULT_MINUS_EXPR;
  else
    wmult_code = WIDEN_MULT_PLUS_EXPR;
  rhs1 = gimple_assign_rhs1 (stmt);
  rhs2 = gimple_assign_rhs2 (stmt);

  if (TREE_CODE (rhs1) == SSA_NAME)
    {
      rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
      if (is_gimple_assign (rhs1_stmt))
        rhs1_code = gimple_assign_rhs_code (rhs1_stmt);
    }

  if (TREE_CODE (rhs2) == SSA_NAME)
    {
      rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
      if (is_gimple_assign (rhs2_stmt))
        rhs2_code = gimple_assign_rhs_code (rhs2_stmt);
    }
  /* Allow for one conversion statement between the multiply
     and addition/subtraction statement.  If there is more than
     one conversion then we assume they would invalidate this
     transformation.  If that's not the case then they should have
     been folded before now.  */
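  /* For instance (illustrative), the sequence

       _1 = a w* b;
       _2 = (int) _1;
       r = _2 + c;

     is still considered; the single conversion is checked further below
     against the precision the multiply-and-accumulate would produce.  */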
  if (CONVERT_EXPR_CODE_P (rhs1_code))
    {
      conv1_stmt = rhs1_stmt;
      rhs1 = gimple_assign_rhs1 (rhs1_stmt);
      if (TREE_CODE (rhs1) == SSA_NAME)
        {
          rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
          if (is_gimple_assign (rhs1_stmt))
            rhs1_code = gimple_assign_rhs_code (rhs1_stmt);
        }
      else
        return false;
    }
  if (CONVERT_EXPR_CODE_P (rhs2_code))
    {
      conv2_stmt = rhs2_stmt;
      rhs2 = gimple_assign_rhs1 (rhs2_stmt);
      if (TREE_CODE (rhs2) == SSA_NAME)
        {
          rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
          if (is_gimple_assign (rhs2_stmt))
            rhs2_code = gimple_assign_rhs_code (rhs2_stmt);
        }
      else
        return false;
    }
  /* If code is WIDEN_MULT_EXPR then it would seem unnecessary to call
     is_widening_mult_p, but we still need the operands and types it
     returns.

     It might also appear that it would be sufficient to use the existing
     operands of the widening multiply, but that would limit the choice of
     multiply-and-accumulate instructions.  */
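  /* E.g. (illustrative) a widening multiply selected as 8x8->16 whose
     result feeds a 32-bit accumulation: re-deriving the unwidened
     operands lets us cast them to whatever input precision the target's
     multiply-and-accumulate pattern wants, such as 16x16+32.  */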
  if (code == PLUS_EXPR
      && (rhs1_code == MULT_EXPR || rhs1_code == WIDEN_MULT_EXPR))
    {
      if (!is_widening_mult_p (rhs1_stmt, &type1, &mult_rhs1,
                               &type2, &mult_rhs2))
        return false;
      add_rhs = rhs2;
      conv_stmt = conv1_stmt;
    }
  else if (rhs2_code == MULT_EXPR || rhs2_code == WIDEN_MULT_EXPR)
    {
      if (!is_widening_mult_p (rhs2_stmt, &type1, &mult_rhs1,
                               &type2, &mult_rhs2))
        return false;
      add_rhs = rhs1;
      conv_stmt = conv2_stmt;
    }
  else
    return false;
  to_mode = TYPE_MODE (type);
  from_mode = TYPE_MODE (type1);
  from_unsigned1 = TYPE_UNSIGNED (type1);
  from_unsigned2 = TYPE_UNSIGNED (type2);
  optype = type1;
  /* There's no such thing as a mixed sign madd yet, so use a wider mode.  */
  if (from_unsigned1 != from_unsigned2)
    {
      if (!INTEGRAL_TYPE_P (type))
        return false;

      /* We can use a signed multiply with unsigned types as long as
         there is a wider mode to use, or it is the smaller of the two
         types that is unsigned.  Note that type1 >= type2, always.  */
      if ((from_unsigned1
           && TYPE_PRECISION (type1) == GET_MODE_PRECISION (from_mode))
          || (from_unsigned2
              && TYPE_PRECISION (type2) == GET_MODE_PRECISION (from_mode)))
        {
          from_mode = GET_MODE_WIDER_MODE (from_mode);
          if (GET_MODE_SIZE (from_mode) >= GET_MODE_SIZE (to_mode))
            return false;
        }

      from_unsigned1 = from_unsigned2 = false;
      optype = build_nonstandard_integer_type (GET_MODE_PRECISION (from_mode),
                                               false);
    }
  /* If there was a conversion between the multiply and addition
     then we need to make sure it fits a multiply-and-accumulate.
     There should be a single mode change which does not change the
     value.  */
  if (conv_stmt)
    {
      /* We use the original, unmodified data types for this.  */
      tree from_type = TREE_TYPE (gimple_assign_rhs1 (conv_stmt));
      tree to_type = TREE_TYPE (gimple_assign_lhs (conv_stmt));
      int data_size = TYPE_PRECISION (type1) + TYPE_PRECISION (type2);
      bool is_unsigned = TYPE_UNSIGNED (type1) && TYPE_UNSIGNED (type2);

      if (TYPE_PRECISION (from_type) > TYPE_PRECISION (to_type))
        {
          /* Conversion is a truncate.  */
          if (TYPE_PRECISION (to_type) < data_size)
            return false;
        }
      else if (TYPE_PRECISION (from_type) < TYPE_PRECISION (to_type))
        {
          /* Conversion is an extend.  Check it's the right sort.  */
          if (TYPE_UNSIGNED (from_type) != is_unsigned
              && !(is_unsigned && TYPE_PRECISION (from_type) > data_size))
            return false;
        }
      /* else convert is a no-op for our purposes.  */
    }
  /* Verify that the machine can perform a widening multiply
     accumulate in this mode/signedness combination, otherwise
     this transformation is likely to pessimize code.  */
  this_optab = optab_for_tree_code (wmult_code, optype, optab_default);
  handler = find_widening_optab_handler_and_mode (this_optab, to_mode,
                                                  from_mode, 0, &actual_mode);

  if (handler == CODE_FOR_nothing)
    return false;
  /* Ensure that the inputs to the handler are in the correct precision
     for the opcode.  This will be the full mode size.  */
  actual_precision = GET_MODE_PRECISION (actual_mode);
  if (actual_precision != TYPE_PRECISION (type1)
      || from_unsigned1 != TYPE_UNSIGNED (type1))
    mult_rhs1 = build_and_insert_cast (gsi, loc,
                                       build_nonstandard_integer_type
                                         (actual_precision, from_unsigned1),
                                       mult_rhs1);
  if (actual_precision != TYPE_PRECISION (type2)
      || from_unsigned2 != TYPE_UNSIGNED (type2))
    mult_rhs2 = build_and_insert_cast (gsi, loc,
                                       build_nonstandard_integer_type
                                         (actual_precision, from_unsigned2),
                                       mult_rhs2);
  if (!useless_type_conversion_p (type, TREE_TYPE (add_rhs)))
    add_rhs = build_and_insert_cast (gsi, loc, type, add_rhs);
  /* Handle constants.  */
  if (TREE_CODE (mult_rhs1) == INTEGER_CST)
    mult_rhs1 = fold_convert (type1, mult_rhs1);
  if (TREE_CODE (mult_rhs2) == INTEGER_CST)
    mult_rhs2 = fold_convert (type2, mult_rhs2);

  gimple_assign_set_rhs_with_ops_1 (gsi, wmult_code, mult_rhs1, mult_rhs2,
                                    add_rhs);
  update_stmt (gsi_stmt (*gsi));
  widen_mul_stats.maccs_inserted++;
  return true;
}
/* Combine the multiplication at MUL_STMT, with operands OP1 and OP2,
   with uses in additions and subtractions to form fused multiply-add
   operations.  Returns true if successful and MUL_STMT should be removed.  */
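/* For example (illustrative, not from the original sources), with
   -ffp-contract=fast and doubles A, B and C, the gimple

     _1 = a * b;
     r = _1 + c;

   becomes

     r = FMA_EXPR <a, b, c>;

   on targets whose fma optab is available.  */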
static bool
convert_mult_to_fma (gimple mul_stmt, tree op1, tree op2)
{
  tree mul_result = gimple_get_lhs (mul_stmt);
  tree type = TREE_TYPE (mul_result);
  gimple use_stmt, neguse_stmt, fma_stmt;
  use_operand_p use_p;
  imm_use_iterator imm_iter;
  if (FLOAT_TYPE_P (type)
      && flag_fp_contract_mode == FP_CONTRACT_OFF)
    return false;

  /* We don't want to do bitfield reduction ops.  */
  if (INTEGRAL_TYPE_P (type)
      && (TYPE_PRECISION (type)
          != GET_MODE_PRECISION (TYPE_MODE (type))))
    return false;

  /* If the target doesn't support it, don't generate it.  We assume that
     if fma isn't available then fms, fnma or fnms are not either.  */
  if (optab_handler (fma_optab, TYPE_MODE (type)) == CODE_FOR_nothing)
    return false;

  /* If the multiplication has zero uses, it is kept around probably because
     of -fnon-call-exceptions.  Don't optimize it away in that case,
     that is DCE's job.  */
  if (has_zero_uses (mul_result))
    return false;
  /* Make sure that the multiplication statement becomes dead after
     the transformation, so that all uses are transformed to FMAs.
     This means we assume that an FMA operation has the same cost
     as an addition.  */
  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, mul_result)
    {
      enum tree_code use_code;
      tree result = mul_result;
      bool negate_p = false;
      use_stmt = USE_STMT (use_p);

      if (is_gimple_debug (use_stmt))
        continue;

      /* For now restrict this operation to single basic blocks.  In theory
         we would want to support sinking the multiplication in
         a conditional, e.g.

           _1 = a * b;
           if (cond)
             _2 = _1 + c;

         to form a fma in the then block and sink the multiplication to the
         then block as well.  */
      if (gimple_bb (use_stmt) != gimple_bb (mul_stmt))
        return false;

      if (!is_gimple_assign (use_stmt))
        return false;

      use_code = gimple_assign_rhs_code (use_stmt);
      /* A negate on the multiplication leads to FNMA.  */
      if (use_code == NEGATE_EXPR)
        {
          ssa_op_iter iter;
          use_operand_p usep;

          result = gimple_assign_lhs (use_stmt);

          /* Make sure the negate statement becomes dead with this
             single transformation.  */
          if (!single_imm_use (gimple_assign_lhs (use_stmt),
                               &use_p, &neguse_stmt))
            return false;

          /* Make sure the multiplication isn't also used on that stmt.  */
          FOR_EACH_PHI_OR_STMT_USE (usep, neguse_stmt, iter, SSA_OP_USE)
            if (USE_FROM_PTR (usep) == mul_result)
              return false;

          use_stmt = neguse_stmt;
          if (gimple_bb (use_stmt) != gimple_bb (mul_stmt))
            return false;
          if (!is_gimple_assign (use_stmt))
            return false;

          use_code = gimple_assign_rhs_code (use_stmt);
          negate_p = true;
        }

      switch (use_code)
        {
        case MINUS_EXPR:
          if (gimple_assign_rhs2 (use_stmt) == result)
            negate_p = !negate_p;
          break;
        case PLUS_EXPR:
          break;
        default:
          /* FMA can only be formed from PLUS and MINUS.  */
          return false;
        }
      /* We can't handle a * b + a * b.  */
      if (gimple_assign_rhs1 (use_stmt) == gimple_assign_rhs2 (use_stmt))
        return false;
      /* While it is possible to validate whether or not the exact form
         that we've recognized is available in the backend, the assumption
         is that the transformation is never a loss.  For instance, suppose
         the target only has the plain FMA pattern available.  Consider
         a*b-c -> fma(a,b,-c): we've exchanged MUL+SUB for FMA+NEG, which
         is still two operations.  Consider -(a*b)-c -> fma(-a,b,-c): we
         still have 3 operations, but in the FMA form the two NEGs are
         independent and could be run in parallel.  */
    }
  FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, mul_result)
    {
      gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
      enum tree_code use_code;
      tree addop, mulop1 = op1, result = mul_result;
      bool negate_p = false;
      if (is_gimple_debug (use_stmt))
        continue;

      use_code = gimple_assign_rhs_code (use_stmt);
      if (use_code == NEGATE_EXPR)
        {
          result = gimple_assign_lhs (use_stmt);
          single_imm_use (gimple_assign_lhs (use_stmt), &use_p, &neguse_stmt);
          gsi_remove (&gsi, true);
          release_defs (use_stmt);

          use_stmt = neguse_stmt;
          gsi = gsi_for_stmt (use_stmt);
          use_code = gimple_assign_rhs_code (use_stmt);
          negate_p = true;
        }
      if (gimple_assign_rhs1 (use_stmt) == result)
        {
          addop = gimple_assign_rhs2 (use_stmt);
          /* a * b - c -> a * b + (-c)  */
          if (gimple_assign_rhs_code (use_stmt) == MINUS_EXPR)
            addop = force_gimple_operand_gsi (&gsi,
                                              build1 (NEGATE_EXPR,
                                                      type, addop),
                                              true, NULL_TREE, true,
                                              GSI_SAME_STMT);
        }
      else
        {
          addop = gimple_assign_rhs1 (use_stmt);
          /* a - b * c -> (-b) * c + a  */
          if (gimple_assign_rhs_code (use_stmt) == MINUS_EXPR)
            negate_p = !negate_p;
        }
      if (negate_p)
        mulop1 = force_gimple_operand_gsi (&gsi,
                                           build1 (NEGATE_EXPR,
                                                   type, mulop1),
                                           true, NULL_TREE, true,
                                           GSI_SAME_STMT);
      fma_stmt = gimple_build_assign_with_ops (FMA_EXPR,
                                               gimple_assign_lhs (use_stmt),
                                               mulop1, op2, addop);
      gsi_replace (&gsi, fma_stmt, true);
      widen_mul_stats.fmas_inserted++;
    }

  return true;
}
/* Find integer multiplications where the operands are extended from
   smaller types, and replace the MULT_EXPR with a WIDEN_MULT_EXPR
   where appropriate.  */
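/* For example (illustrative, not from the original sources), on a
   suitable target this pass turns

     int c = (int) sa * (int) sb;   (sa, sb of type short)

   into a WIDEN_MULT_EXPR, turns a following accumulation into a
   WIDEN_MULT_PLUS_EXPR, and contracts floating-point multiply-add
   chains into FMA_EXPR where the fma optab exists.  */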
static unsigned int
execute_optimize_widening_mul (void)
{
  basic_block bb;
  bool cfg_changed = false;

  memset (&widen_mul_stats, 0, sizeof (widen_mul_stats));

  FOR_EACH_BB (bb)
    {
      gimple_stmt_iterator gsi;
      for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi);)
        {
          gimple stmt = gsi_stmt (gsi);
          enum tree_code code;

          if (is_gimple_assign (stmt))
            {
              code = gimple_assign_rhs_code (stmt);
              switch (code)
                {
                case MULT_EXPR:
                  if (!convert_mult_to_widen (stmt, &gsi)
                      && convert_mult_to_fma (stmt,
                                              gimple_assign_rhs1 (stmt),
                                              gimple_assign_rhs2 (stmt)))
                    {
                      gsi_remove (&gsi, true);
                      release_defs (stmt);
                      continue;
                    }
                  break;

                case PLUS_EXPR:
                case MINUS_EXPR:
                  convert_plusminus_to_widen (&gsi, stmt, code);
                  break;

                default:;
                }
            }
          else if (is_gimple_call (stmt)
                   && gimple_call_lhs (stmt))
            {
              tree fndecl = gimple_call_fndecl (stmt);
              if (fndecl
                  && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
                {
                  switch (DECL_FUNCTION_CODE (fndecl))
                    {
                    case BUILT_IN_POWF:
                    case BUILT_IN_POW:
                    case BUILT_IN_POWL:
                      if (TREE_CODE (gimple_call_arg (stmt, 1)) == REAL_CST
                          && REAL_VALUES_EQUAL
                               (TREE_REAL_CST (gimple_call_arg (stmt, 1)),
                                dconst2)
                          && convert_mult_to_fma (stmt,
                                                  gimple_call_arg (stmt, 0),
                                                  gimple_call_arg (stmt, 0)))
                        {
                          unlink_stmt_vdef (stmt);
                          if (gsi_remove (&gsi, true)
                              && gimple_purge_dead_eh_edges (bb))
                            cfg_changed = true;
                          release_defs (stmt);
                          continue;
                        }
                      break;

                    default:;
                    }
                }
            }
          gsi_next (&gsi);
        }
    }
  statistics_counter_event (cfun, "widening multiplications inserted",
                            widen_mul_stats.widen_mults_inserted);
  statistics_counter_event (cfun, "widening maccs inserted",
                            widen_mul_stats.maccs_inserted);
  statistics_counter_event (cfun, "fused multiply-adds inserted",
                            widen_mul_stats.fmas_inserted);

  return cfg_changed ? TODO_cleanup_cfg : 0;
}
static bool
gate_optimize_widening_mul (void)
{
  return flag_expensive_optimizations && optimize;
}
struct gimple_opt_pass pass_optimize_widening_mul =
{
 {
  GIMPLE_PASS,
  "widening_mul",                       /* name */
  gate_optimize_widening_mul,           /* gate */
  execute_optimize_widening_mul,        /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_NONE,                              /* tv_id */
  PROP_ssa,                             /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_verify_ssa
  | TODO_verify_stmts
  | TODO_update_ssa                     /* todo_flags_finish */
 }
};