/* Global, SSA-based optimizations using mathematical identities.
   Copyright (C) 2005-2013 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* Currently, the only mini-pass in this file tries to CSE reciprocal
   operations.  These are common in sequences such as this one:

	modulus = sqrt(x*x + y*y + z*z);
	x = x / modulus;
	y = y / modulus;
	z = z / modulus;

   that can be optimized to

	modulus = sqrt(x*x + y*y + z*z);
	rmodulus = 1.0 / modulus;
	x = x * rmodulus;
	y = y * rmodulus;
	z = z * rmodulus;

   We do this for loop invariant divisors, and with this pass whenever
   we notice that a division has the same divisor multiple times.

   Of course, like in PRE, we don't insert a division if a dominator
   already has one.  However, this cannot be done as an extension of
   PRE for several reasons.

   First of all, with some experiments it was found that the
   transformation is not always useful if there are only two divisions
   by the same divisor.  This is probably because modern processors
   can pipeline the divisions; on older, in-order processors it should
   still be effective to optimize two divisions by the same number.
   We make this a param, and it shall be called N in the remainder of
   this comment.

   Second, if trapping math is active, we have less freedom on where
   to insert divisions: we can only do so in basic blocks that already
   contain one.  (If divisions don't trap, instead, we can insert
   divisions elsewhere, which will be in blocks that are common dominators
   of those that have the division).

   We really don't want to compute the reciprocal unless a division will
   be found.  To do this, we won't insert the division in a basic block
   that has less than N divisions *post-dominating* it.

   The algorithm constructs a subset of the dominator tree, holding the
   blocks containing the divisions and the common dominators of them,
   and walks it twice.  The first walk is in post-order, and it annotates
   each block with the number of divisions that post-dominate it: this
   gives information on where divisions can be inserted profitably.
   The second walk is in pre-order, and it inserts divisions as explained
   above, and replaces divisions by multiplications.

   In the best case, the cost of the pass is O(n_statements).  In the
   worst-case, the cost is due to creating the dominator tree subset,
   with a cost of O(n_basic_blocks ^ 2); however this can only happen
   for n_statements / n_basic_blocks statements.  So, the amortized cost
   of creating the dominator tree subset is O(n_basic_blocks) and the
   worst-case cost of the pass is O(n_statements * n_basic_blocks).

   More practically, the cost will be small because there are few
   divisions, and they tend to be in the same basic block, so insert_bb
   is called very few times.

   If we did this using domwalk.c, an efficient implementation would have
   to work on all the variables in a single pass, because we could not
   work on just a subset of the dominator tree, as we do now, and the
   cost would also be something like O(n_statements * n_basic_blocks).
   The data structures would be more complex in order to work on all the
   variables in a single pass.  */
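/* Illustrative example (not part of the pass itself): with
   -freciprocal-math, a sequence that divides several times by the same
   SSA name, such as

	x = a / d;
	y = b / d;
	z = c / d;

   is rewritten by this mini-pass (assuming the target's
   min_divisions_for_recip_mul threshold is met) into

	t = 1.0 / d;
	x = a * t;
	y = b * t;
	z = c * t;

   trading N divisions for one division plus N multiplications.  */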
#include "coretypes.h"
#include "tree-pass.h"
#include "alloc-pool.h"
#include "basic-block.h"
#include "gimple-pretty-print.h"

/* FIXME: RTL headers have to be included here for optabs.  */
#include "rtl.h"		/* Because optabs.h wants enum rtx_code.  */
#include "expr.h"		/* Because optabs.h wants sepops.  */
/* This structure represents one basic block that either computes a
   division, or is a common dominator for basic blocks that compute a
   division.  */
struct occurrence {
  /* The basic block represented by this structure.  */
  basic_block bb;

  /* If non-NULL, the SSA_NAME holding the definition for a reciprocal
     inserted in BB.  */
  tree recip_def;

  /* If non-NULL, the GIMPLE_ASSIGN for a reciprocal computation that
     was inserted in BB.  */
  gimple recip_def_stmt;

  /* Pointer to a list of "struct occurrence"s for blocks dominated
     by BB.  */
  struct occurrence *children;

  /* Pointer to the next "struct occurrence"s in the list of blocks
     sharing a common dominator.  */
  struct occurrence *next;

  /* The number of divisions that are in BB before compute_merit.  The
     number of divisions that are in BB or post-dominate it after
     compute_merit.  */
  int num_divisions;

  /* True if the basic block has a division, false if it is a common
     dominator for basic blocks that do.  If it is false and trapping
     math is active, BB is not a candidate for inserting a reciprocal.  */
  bool bb_has_division;
};
static struct
{
  int rdivs_inserted;   /* Number of 1.0/X ops inserted.  */
  int rfuncs_inserted;  /* Number of 1.0/FUNC ops inserted.  */
} reciprocal_stats;

static struct
{
  int inserted;         /* Number of cexpi calls inserted.  */
} sincos_stats;

static struct
{
  int found_16bit;      /* Number of hand-written 16-bit bswaps found.  */
  int found_32bit;      /* Number of hand-written 32-bit bswaps found.  */
  int found_64bit;      /* Number of hand-written 64-bit bswaps found.  */
} bswap_stats;

static struct
{
  int widen_mults_inserted;  /* Number of widening multiplication ops inserted.  */
  int maccs_inserted;        /* Number of integer multiply-and-accumulate ops inserted.  */
  int fmas_inserted;         /* Number of fp fused multiply-add ops inserted.  */
} widen_mul_stats;
/* The instance of "struct occurrence" representing the highest
   interesting block in the dominator tree.  */
static struct occurrence *occ_head;

/* Allocation pool for getting instances of "struct occurrence".  */
static alloc_pool occ_pool;
/* Allocate and return a new struct occurrence for basic block BB, and
   whose children list is headed by CHILDREN.  */
static struct occurrence *
occ_new (basic_block bb, struct occurrence *children)
{
  struct occurrence *occ;

  bb->aux = occ = (struct occurrence *) pool_alloc (occ_pool);
  memset (occ, 0, sizeof (struct occurrence));

  occ->bb = bb;
  occ->children = children;
  return occ;
}
/* Insert NEW_OCC into our subset of the dominator tree.  P_HEAD points to a
   list of "struct occurrence"s, one per basic block, having IDOM as
   their common dominator.

   We try to insert NEW_OCC as deep as possible in the tree, and we also
   insert any other block that is a common dominator for BB and one
   block already in the tree.  */

static void
insert_bb (struct occurrence *new_occ, basic_block idom,
	   struct occurrence **p_head)
{
  struct occurrence *occ, **p_occ;

  for (p_occ = p_head; (occ = *p_occ) != NULL; )
    {
      basic_block bb = new_occ->bb, occ_bb = occ->bb;
      basic_block dom = nearest_common_dominator (CDI_DOMINATORS, occ_bb, bb);
      if (dom == bb)
	{
	  /* BB dominates OCC_BB.  OCC becomes NEW_OCC's child: remove OCC
	     from its list.  */
	  *p_occ = occ->next;
	  occ->next = new_occ->children;
	  new_occ->children = occ;

	  /* Try the next block (it may as well be dominated by BB).  */
	}

      else if (dom == occ_bb)
	{
	  /* OCC_BB dominates BB.  Tail recurse to look deeper.  */
	  insert_bb (new_occ, dom, &occ->children);
	  return;
	}

      else if (dom != idom)
	{
	  gcc_assert (!dom->aux);

	  /* There is a dominator between IDOM and BB, add it and make
	     two children out of NEW_OCC and OCC.  First, remove OCC from
	     its list.  */
	  *p_occ = occ->next;
	  new_occ->next = occ;
	  occ->next = NULL;

	  /* None of the previous blocks has DOM as a dominator: if we tail
	     recursed, we would reexamine them uselessly.  Just switch BB with
	     DOM, and go on looking for blocks dominated by DOM.  */
	  new_occ = occ_new (dom, new_occ);
	}

      else
	{
	  /* Nothing special, go on with the next element.  */
	  p_occ = &occ->next;
	}
    }

  /* No place was found as a child of IDOM.  Make BB a sibling of IDOM.  */
  new_occ->next = *p_head;
  *p_head = new_occ;
}
/* Register that we found a division in BB.  */

static inline void
register_division_in (basic_block bb)
{
  struct occurrence *occ;

  occ = (struct occurrence *) bb->aux;
  if (!occ)
    {
      occ = occ_new (bb, NULL);
      insert_bb (occ, ENTRY_BLOCK_PTR, &occ_head);
    }

  occ->bb_has_division = true;
  occ->num_divisions++;
}
/* Compute the number of divisions that postdominate each block in OCC and
   its children.  */

static void
compute_merit (struct occurrence *occ)
{
  struct occurrence *occ_child;
  basic_block dom = occ->bb;

  for (occ_child = occ->children; occ_child; occ_child = occ_child->next)
    {
      basic_block bb;
      if (occ_child->children)
	compute_merit (occ_child);

      bb = single_noncomplex_succ (dom);
      if (bb && dominated_by_p (CDI_POST_DOMINATORS, bb, occ_child->bb))
	occ->num_divisions += occ_child->num_divisions;
    }
}
/* Return whether USE_STMT is a floating-point division by DEF.  */
static inline bool
is_division_by (gimple use_stmt, tree def)
{
  return is_gimple_assign (use_stmt)
	 && gimple_assign_rhs_code (use_stmt) == RDIV_EXPR
	 && gimple_assign_rhs2 (use_stmt) == def
	 /* Do not recognize x / x as valid division, as we are getting
	    confused later by replacing all immediate uses x in such
	    a division.  */
	 && gimple_assign_rhs1 (use_stmt) != def;
}
/* Walk the subset of the dominator tree rooted at OCC, setting the
   RECIP_DEF field to a definition of 1.0 / DEF that can be used in
   the given basic block.  The field may be left NULL, of course,
   if it is not possible or profitable to do the optimization.

   DEF_GSI is an iterator pointing at the statement defining DEF.
   If RECIP_DEF is set, a dominator already has a computation that can
   be used.  */

static void
insert_reciprocals (gimple_stmt_iterator *def_gsi, struct occurrence *occ,
		    tree def, tree recip_def, int threshold)
{
  tree type;
  gimple new_stmt;
  gimple_stmt_iterator gsi;
  struct occurrence *occ_child;

  if (!recip_def
      && (occ->bb_has_division || !flag_trapping_math)
      && occ->num_divisions >= threshold)
    {
      /* Make a variable with the replacement and substitute it.  */
      type = TREE_TYPE (def);
      recip_def = create_tmp_reg (type, "reciptmp");
      new_stmt = gimple_build_assign_with_ops (RDIV_EXPR, recip_def,
					       build_one_cst (type), def);

      if (occ->bb_has_division)
	{
	  /* Case 1: insert before an existing division.  */
	  gsi = gsi_after_labels (occ->bb);
	  while (!gsi_end_p (gsi) && !is_division_by (gsi_stmt (gsi), def))
	    gsi_next (&gsi);

	  gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
	}
      else if (def_gsi && occ->bb == def_gsi->bb)
	{
	  /* Case 2: insert right after the definition.  Note that this will
	     never happen if the definition statement can throw, because in
	     that case the sole successor of the statement's basic block will
	     dominate all the uses as well.  */
	  gsi_insert_after (def_gsi, new_stmt, GSI_NEW_STMT);
	}
      else
	{
	  /* Case 3: insert in a basic block not containing defs/uses.  */
	  gsi = gsi_after_labels (occ->bb);
	  gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
	}

      reciprocal_stats.rdivs_inserted++;

      occ->recip_def_stmt = new_stmt;
    }

  occ->recip_def = recip_def;
  for (occ_child = occ->children; occ_child; occ_child = occ_child->next)
    insert_reciprocals (def_gsi, occ_child, def, recip_def, threshold);
}
/* Replace the division at USE_P with a multiplication by the reciprocal, if
   possible.  */

static inline void
replace_reciprocal (use_operand_p use_p)
{
  gimple use_stmt = USE_STMT (use_p);
  basic_block bb = gimple_bb (use_stmt);
  struct occurrence *occ = (struct occurrence *) bb->aux;

  if (optimize_bb_for_speed_p (bb)
      && occ->recip_def && use_stmt != occ->recip_def_stmt)
    {
      gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
      gimple_assign_set_rhs_code (use_stmt, MULT_EXPR);
      SET_USE (use_p, occ->recip_def);
      fold_stmt_inplace (&gsi);
      update_stmt (use_stmt);
    }
}
/* Free OCC and return one more "struct occurrence" to be freed.  */

static struct occurrence *
free_bb (struct occurrence *occ)
{
  struct occurrence *child, *next;

  /* First get the two pointers hanging off OCC.  */
  next = occ->next;
  child = occ->children;
  occ->bb->aux = NULL;
  pool_free (occ_pool, occ);

  /* Now ensure that we don't recurse unless it is necessary.  */
  if (!child)
    return next;
  else
    {
      while (next)
	next = free_bb (next);

      return child;
    }
}
/* Look for floating-point divisions among DEF's uses, and try to
   replace them by multiplications with the reciprocal.  Add
   as many statements computing the reciprocal as needed.

   DEF must be a GIMPLE register of a floating-point type.  */

static void
execute_cse_reciprocals_1 (gimple_stmt_iterator *def_gsi, tree def)
{
  use_operand_p use_p;
  imm_use_iterator use_iter;
  struct occurrence *occ;
  int count = 0, threshold;

  gcc_assert (FLOAT_TYPE_P (TREE_TYPE (def)) && is_gimple_reg (def));

  FOR_EACH_IMM_USE_FAST (use_p, use_iter, def)
    {
      gimple use_stmt = USE_STMT (use_p);
      if (is_division_by (use_stmt, def))
	{
	  register_division_in (gimple_bb (use_stmt));
	  count++;
	}
    }

  /* Do the expensive part only if we can hope to optimize something.  */
  threshold = targetm.min_divisions_for_recip_mul (TYPE_MODE (TREE_TYPE (def)));
  if (count >= threshold)
    {
      gimple use_stmt;
      for (occ = occ_head; occ; occ = occ->next)
	{
	  compute_merit (occ);
	  insert_reciprocals (def_gsi, occ, def, NULL, threshold);
	}

      FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, def)
	{
	  if (is_division_by (use_stmt, def))
	    {
	      FOR_EACH_IMM_USE_ON_STMT (use_p, use_iter)
		replace_reciprocal (use_p);
	    }
	}
    }

  for (occ = occ_head; occ; )
    occ = free_bb (occ);

  occ_head = NULL;
}
static bool
gate_cse_reciprocals (void)
{
  return optimize && flag_reciprocal_math;
}
/* Go through all the floating-point SSA_NAMEs, and call
   execute_cse_reciprocals_1 on each of them.  */
499 execute_cse_reciprocals (void)
504 occ_pool
= create_alloc_pool ("dominators for recip",
505 sizeof (struct occurrence
),
506 n_basic_blocks
/ 3 + 1);
508 memset (&reciprocal_stats
, 0, sizeof (reciprocal_stats
));
509 calculate_dominance_info (CDI_DOMINATORS
);
510 calculate_dominance_info (CDI_POST_DOMINATORS
);
512 #ifdef ENABLE_CHECKING
514 gcc_assert (!bb
->aux
);
517 for (arg
= DECL_ARGUMENTS (cfun
->decl
); arg
; arg
= DECL_CHAIN (arg
))
518 if (FLOAT_TYPE_P (TREE_TYPE (arg
))
519 && is_gimple_reg (arg
))
521 tree name
= ssa_default_def (cfun
, arg
);
523 execute_cse_reciprocals_1 (NULL
, name
);
528 gimple_stmt_iterator gsi
;
532 for (gsi
= gsi_start_phis (bb
); !gsi_end_p (gsi
); gsi_next (&gsi
))
534 phi
= gsi_stmt (gsi
);
535 def
= PHI_RESULT (phi
);
536 if (! virtual_operand_p (def
)
537 && FLOAT_TYPE_P (TREE_TYPE (def
)))
538 execute_cse_reciprocals_1 (NULL
, def
);
541 for (gsi
= gsi_after_labels (bb
); !gsi_end_p (gsi
); gsi_next (&gsi
))
543 gimple stmt
= gsi_stmt (gsi
);
545 if (gimple_has_lhs (stmt
)
546 && (def
= SINGLE_SSA_TREE_OPERAND (stmt
, SSA_OP_DEF
)) != NULL
547 && FLOAT_TYPE_P (TREE_TYPE (def
))
548 && TREE_CODE (def
) == SSA_NAME
)
549 execute_cse_reciprocals_1 (&gsi
, def
);
552 if (optimize_bb_for_size_p (bb
))
555 /* Scan for a/func(b) and convert it to reciprocal a*rfunc(b). */
556 for (gsi
= gsi_after_labels (bb
); !gsi_end_p (gsi
); gsi_next (&gsi
))
558 gimple stmt
= gsi_stmt (gsi
);
561 if (is_gimple_assign (stmt
)
562 && gimple_assign_rhs_code (stmt
) == RDIV_EXPR
)
564 tree arg1
= gimple_assign_rhs2 (stmt
);
567 if (TREE_CODE (arg1
) != SSA_NAME
)
570 stmt1
= SSA_NAME_DEF_STMT (arg1
);
572 if (is_gimple_call (stmt1
)
573 && gimple_call_lhs (stmt1
)
574 && (fndecl
= gimple_call_fndecl (stmt1
))
575 && (DECL_BUILT_IN_CLASS (fndecl
) == BUILT_IN_NORMAL
576 || DECL_BUILT_IN_CLASS (fndecl
) == BUILT_IN_MD
))
578 enum built_in_function code
;
583 code
= DECL_FUNCTION_CODE (fndecl
);
584 md_code
= DECL_BUILT_IN_CLASS (fndecl
) == BUILT_IN_MD
;
586 fndecl
= targetm
.builtin_reciprocal (code
, md_code
, false);
590 /* Check that all uses of the SSA name are divisions,
591 otherwise replacing the defining statement will do
594 FOR_EACH_IMM_USE_FAST (use_p
, ui
, arg1
)
596 gimple stmt2
= USE_STMT (use_p
);
597 if (is_gimple_debug (stmt2
))
599 if (!is_gimple_assign (stmt2
)
600 || gimple_assign_rhs_code (stmt2
) != RDIV_EXPR
601 || gimple_assign_rhs1 (stmt2
) == arg1
602 || gimple_assign_rhs2 (stmt2
) != arg1
)
611 gimple_replace_ssa_lhs (stmt1
, arg1
);
612 gimple_call_set_fndecl (stmt1
, fndecl
);
614 reciprocal_stats
.rfuncs_inserted
++;
616 FOR_EACH_IMM_USE_STMT (stmt
, ui
, arg1
)
618 gimple_stmt_iterator gsi
= gsi_for_stmt (stmt
);
619 gimple_assign_set_rhs_code (stmt
, MULT_EXPR
);
620 fold_stmt_inplace (&gsi
);
628 statistics_counter_event (cfun
, "reciprocal divs inserted",
629 reciprocal_stats
.rdivs_inserted
);
630 statistics_counter_event (cfun
, "reciprocal functions inserted",
631 reciprocal_stats
.rfuncs_inserted
);
633 free_dominance_info (CDI_DOMINATORS
);
634 free_dominance_info (CDI_POST_DOMINATORS
);
635 free_alloc_pool (occ_pool
);
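/* Illustrative example (an assumption about a target that provides a
   reciprocal builtin, e.g. a hardware reciprocal square root): the
   targetm.builtin_reciprocal hook lets the scan above turn

	t = sqrt (b);
	x = a / t;

   into

	t = rsqrt (b);      <-- target-provided reciprocal builtin
	x = a * t;

   provided every non-debug use of T is a division with T as the
   divisor.  */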
const pass_data pass_data_cse_reciprocals =
{
  GIMPLE_PASS, /* type */
  "recip", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  true, /* has_gate */
  true, /* has_execute */
  TV_NONE, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_update_ssa | TODO_verify_ssa
    | TODO_verify_stmts ), /* todo_flags_finish */
};
class pass_cse_reciprocals : public gimple_opt_pass
{
public:
  pass_cse_reciprocals (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_cse_reciprocals, ctxt)
  {}

  /* opt_pass methods: */
  bool gate () { return gate_cse_reciprocals (); }
  unsigned int execute () { return execute_cse_reciprocals (); }

}; // class pass_cse_reciprocals

gimple_opt_pass *
make_pass_cse_reciprocals (gcc::context *ctxt)
{
  return new pass_cse_reciprocals (ctxt);
}
/* Records an occurrence at statement USE_STMT in the vector of trees
   STMTS if it is dominated by *TOP_BB or dominates it or this basic block
   is not yet initialized.  Returns true if the occurrence was pushed on
   the vector.  Adjusts *TOP_BB to be the basic block dominating all
   statements in the vector.  */

static bool
maybe_record_sincos (vec<gimple> *stmts,
		     basic_block *top_bb, gimple use_stmt)
{
  basic_block use_bb = gimple_bb (use_stmt);
  if (*top_bb
      && (*top_bb == use_bb
	  || dominated_by_p (CDI_DOMINATORS, use_bb, *top_bb)))
    stmts->safe_push (use_stmt);
  else if (!*top_bb
	   || dominated_by_p (CDI_DOMINATORS, *top_bb, use_bb))
    {
      stmts->safe_push (use_stmt);
      *top_bb = use_bb;
    }
  else
    return false;

  return true;
}
/* Look for sin, cos and cexpi calls with the same argument NAME and
   create a single call to cexpi CSEing the result in this case.
   We first walk over all immediate uses of the argument collecting
   statements that we can CSE in a vector and in a second pass replace
   the statement rhs with a REALPART or IMAGPART expression on the
   result of the cexpi call we insert before the use statement that
   dominates all other candidates.  */
714 execute_cse_sincos_1 (tree name
)
716 gimple_stmt_iterator gsi
;
717 imm_use_iterator use_iter
;
718 tree fndecl
, res
, type
;
719 gimple def_stmt
, use_stmt
, stmt
;
720 int seen_cos
= 0, seen_sin
= 0, seen_cexpi
= 0;
721 vec
<gimple
> stmts
= vNULL
;
722 basic_block top_bb
= NULL
;
724 bool cfg_changed
= false;
726 type
= TREE_TYPE (name
);
727 FOR_EACH_IMM_USE_STMT (use_stmt
, use_iter
, name
)
729 if (gimple_code (use_stmt
) != GIMPLE_CALL
730 || !gimple_call_lhs (use_stmt
)
731 || !(fndecl
= gimple_call_fndecl (use_stmt
))
732 || DECL_BUILT_IN_CLASS (fndecl
) != BUILT_IN_NORMAL
)
735 switch (DECL_FUNCTION_CODE (fndecl
))
737 CASE_FLT_FN (BUILT_IN_COS
):
738 seen_cos
|= maybe_record_sincos (&stmts
, &top_bb
, use_stmt
) ? 1 : 0;
741 CASE_FLT_FN (BUILT_IN_SIN
):
742 seen_sin
|= maybe_record_sincos (&stmts
, &top_bb
, use_stmt
) ? 1 : 0;
745 CASE_FLT_FN (BUILT_IN_CEXPI
):
746 seen_cexpi
|= maybe_record_sincos (&stmts
, &top_bb
, use_stmt
) ? 1 : 0;
753 if (seen_cos
+ seen_sin
+ seen_cexpi
<= 1)
759 /* Simply insert cexpi at the beginning of top_bb but not earlier than
760 the name def statement. */
761 fndecl
= mathfn_built_in (type
, BUILT_IN_CEXPI
);
764 stmt
= gimple_build_call (fndecl
, 1, name
);
765 res
= make_temp_ssa_name (TREE_TYPE (TREE_TYPE (fndecl
)), stmt
, "sincostmp");
766 gimple_call_set_lhs (stmt
, res
);
768 def_stmt
= SSA_NAME_DEF_STMT (name
);
769 if (!SSA_NAME_IS_DEFAULT_DEF (name
)
770 && gimple_code (def_stmt
) != GIMPLE_PHI
771 && gimple_bb (def_stmt
) == top_bb
)
773 gsi
= gsi_for_stmt (def_stmt
);
774 gsi_insert_after (&gsi
, stmt
, GSI_SAME_STMT
);
778 gsi
= gsi_after_labels (top_bb
);
779 gsi_insert_before (&gsi
, stmt
, GSI_SAME_STMT
);
781 sincos_stats
.inserted
++;
783 /* And adjust the recorded old call sites. */
784 for (i
= 0; stmts
.iterate (i
, &use_stmt
); ++i
)
787 fndecl
= gimple_call_fndecl (use_stmt
);
789 switch (DECL_FUNCTION_CODE (fndecl
))
791 CASE_FLT_FN (BUILT_IN_COS
):
792 rhs
= fold_build1 (REALPART_EXPR
, type
, res
);
795 CASE_FLT_FN (BUILT_IN_SIN
):
796 rhs
= fold_build1 (IMAGPART_EXPR
, type
, res
);
799 CASE_FLT_FN (BUILT_IN_CEXPI
):
807 /* Replace call with a copy. */
808 stmt
= gimple_build_assign (gimple_call_lhs (use_stmt
), rhs
);
810 gsi
= gsi_for_stmt (use_stmt
);
811 gsi_replace (&gsi
, stmt
, true);
812 if (gimple_purge_dead_eh_edges (gimple_bb (stmt
)))
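/* Illustrative example (not part of the implementation): for a call
   sequence like

	s = sinf (x);
	c = cosf (x);

   execute_cse_sincos_1 above emits a single dominating cexpi call and
   rewrites the original calls as component extractions:

	tmp = __builtin_cexpif (x);
	s = __imag__ tmp;
	c = __real__ tmp;

   The cexpi call is later expanded to sincos or cexp, depending on
   what the target's C library provides.  */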
/* To evaluate powi(x,n), the floating point value x raised to the
   constant integer exponent n, we use a hybrid algorithm that
   combines the "window method" with look-up tables.  For an
   introduction to exponentiation algorithms and "addition chains",
   see section 4.6.3, "Evaluation of Powers" of Donald E. Knuth,
   "Seminumerical Algorithms", Vol. 2, "The Art of Computer Programming",
   3rd Edition, 1998, and Daniel M. Gordon, "A Survey of Fast Exponentiation
   Methods", Journal of Algorithms, Vol. 27, pp. 129-146, 1998.  */

/* Provide a default value for POWI_MAX_MULTS, the maximum number of
   multiplications to inline before calling the system library's pow
   function.  powi(x,n) requires at worst 2*bits(n)-2 multiplications,
   so this default never requires calling pow, powf or powl.  */

#ifndef POWI_MAX_MULTS
#define POWI_MAX_MULTS  (2*HOST_BITS_PER_WIDE_INT-2)
#endif
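/* A minimal, self-contained sketch (an illustration only, not the code
   used below) of the window method that powi_cost and powi_as_mults_1
   implement on GIMPLE, using the POWI_TABLE_SIZE and POWI_WINDOW_SIZE
   constants defined just below:

	long double
	powi_sketch (long double x, unsigned long n)
	{
	  long double result = 1.0L;
	  while (n >= POWI_TABLE_SIZE)
	    {
	      if (n & 1)
		{
		  // Peel off the low POWI_WINDOW_SIZE-bit digit; the
		  // real code evaluates x**digit via the lookup table
		  // instead of this naive loop.
		  unsigned long digit = n & ((1 << POWI_WINDOW_SIZE) - 1);
		  for (unsigned long i = 0; i < digit; i++)
		    result *= x;
		  n -= digit;
		}
	      x *= x;    // square ...
	      n >>= 1;   // ... and halve the (now even) exponent
	    }
	  // The remaining small exponent is handled naively here; the
	  // real code consults powi_table for an optimal chain.
	  for (unsigned long i = 0; i < n; i++)
	    result *= x;
	  return result;
	}
*/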
/* The size of the "optimal power tree" lookup table.  All
   exponents less than this value are simply looked up in the
   powi_table below.  This threshold is also used to size the
   cache of pseudo registers that hold intermediate results.  */
#define POWI_TABLE_SIZE 256

/* The size, in bits of the window, used in the "window method"
   exponentiation algorithm.  This is equivalent to a radix of
   (1<<POWI_WINDOW_SIZE) in the corresponding "m-ary method".  */
#define POWI_WINDOW_SIZE 3

/* The following table is an efficient representation of an
   "optimal power tree".  For each value, i, the corresponding
   value, j, in the table states that an optimal evaluation
   sequence for calculating pow(x,i) can be found by evaluating
   pow(x,j)*pow(x,i-j).  An optimal power tree for the first
   100 integers is given in Knuth's "Seminumerical algorithms".  */
857 static const unsigned char powi_table
[POWI_TABLE_SIZE
] =
859 0, 1, 1, 2, 2, 3, 3, 4, /* 0 - 7 */
860 4, 6, 5, 6, 6, 10, 7, 9, /* 8 - 15 */
861 8, 16, 9, 16, 10, 12, 11, 13, /* 16 - 23 */
862 12, 17, 13, 18, 14, 24, 15, 26, /* 24 - 31 */
863 16, 17, 17, 19, 18, 33, 19, 26, /* 32 - 39 */
864 20, 25, 21, 40, 22, 27, 23, 44, /* 40 - 47 */
865 24, 32, 25, 34, 26, 29, 27, 44, /* 48 - 55 */
866 28, 31, 29, 34, 30, 60, 31, 36, /* 56 - 63 */
867 32, 64, 33, 34, 34, 46, 35, 37, /* 64 - 71 */
868 36, 65, 37, 50, 38, 48, 39, 69, /* 72 - 79 */
869 40, 49, 41, 43, 42, 51, 43, 58, /* 80 - 87 */
870 44, 64, 45, 47, 46, 59, 47, 76, /* 88 - 95 */
871 48, 65, 49, 66, 50, 67, 51, 66, /* 96 - 103 */
872 52, 70, 53, 74, 54, 104, 55, 74, /* 104 - 111 */
873 56, 64, 57, 69, 58, 78, 59, 68, /* 112 - 119 */
874 60, 61, 61, 80, 62, 75, 63, 68, /* 120 - 127 */
875 64, 65, 65, 128, 66, 129, 67, 90, /* 128 - 135 */
876 68, 73, 69, 131, 70, 94, 71, 88, /* 136 - 143 */
877 72, 128, 73, 98, 74, 132, 75, 121, /* 144 - 151 */
878 76, 102, 77, 124, 78, 132, 79, 106, /* 152 - 159 */
879 80, 97, 81, 160, 82, 99, 83, 134, /* 160 - 167 */
880 84, 86, 85, 95, 86, 160, 87, 100, /* 168 - 175 */
881 88, 113, 89, 98, 90, 107, 91, 122, /* 176 - 183 */
882 92, 111, 93, 102, 94, 126, 95, 150, /* 184 - 191 */
883 96, 128, 97, 130, 98, 133, 99, 195, /* 192 - 199 */
884 100, 128, 101, 123, 102, 164, 103, 138, /* 200 - 207 */
885 104, 145, 105, 146, 106, 109, 107, 149, /* 208 - 215 */
886 108, 200, 109, 146, 110, 170, 111, 157, /* 216 - 223 */
887 112, 128, 113, 130, 114, 182, 115, 132, /* 224 - 231 */
888 116, 200, 117, 132, 118, 158, 119, 206, /* 232 - 239 */
889 120, 240, 121, 162, 122, 147, 123, 152, /* 240 - 247 */
890 124, 166, 125, 214, 126, 138, 127, 153, /* 248 - 255 */
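/* Worked example (illustration only): to evaluate x**7, powi_table[7]
   is 4, so pow(x,7) = pow(x,4) * pow(x,3); powi_table[4] and
   powi_table[3] are both 2, so the full chain becomes

	t1 = x * x;	   x**2
	t2 = t1 * x;	   x**3
	t3 = t1 * t1;	   x**4
	t4 = t3 * t2;	   x**7

   i.e. four multiplications, with the x**2 subresult reused through
   the CACHE array in powi_as_mults_1 below.  */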
/* Return the number of multiplications required to calculate
   powi(x,n) where n is less than POWI_TABLE_SIZE.  This is a
   subroutine of powi_cost.  CACHE is an array indicating
   which exponents have already been calculated.  */

static int
powi_lookup_cost (unsigned HOST_WIDE_INT n, bool *cache)
{
  /* If we've already calculated this exponent, then this evaluation
     doesn't require any additional multiplications.  */
  if (cache[n])
    return 0;

  cache[n] = true;
  return powi_lookup_cost (n - powi_table[n], cache)
	 + powi_lookup_cost (powi_table[n], cache) + 1;
}
/* Return the number of multiplications required to calculate
   powi(x,n) for an arbitrary x, given the exponent N.  This
   function needs to be kept in sync with powi_as_mults below.  */

static int
powi_cost (HOST_WIDE_INT n)
{
  bool cache[POWI_TABLE_SIZE];
  unsigned HOST_WIDE_INT digit;
  unsigned HOST_WIDE_INT val;
  int result;

  if (n == 0)
    return 0;

  /* Ignore the reciprocal when calculating the cost.  */
  val = (n < 0) ? -n : n;

  /* Initialize the exponent cache.  */
  memset (cache, 0, POWI_TABLE_SIZE * sizeof (bool));
  cache[1] = true;

  result = 0;

  while (val >= POWI_TABLE_SIZE)
    {
      if (val & 1)
	{
	  digit = val & ((1 << POWI_WINDOW_SIZE) - 1);
	  result += powi_lookup_cost (digit, cache)
		    + POWI_WINDOW_SIZE + 1;
	  val >>= POWI_WINDOW_SIZE;
	}
      else
	{
	  val >>= 1;
	  result++;
	}
    }

  return result + powi_lookup_cost (val, cache);
}
/* Recursive subroutine of powi_as_mults.  This function takes the
   array, CACHE, of already calculated exponents and an exponent N and
   returns a tree that corresponds to CACHE[1]**N, with type TYPE.  */

static tree
powi_as_mults_1 (gimple_stmt_iterator *gsi, location_t loc, tree type,
		 HOST_WIDE_INT n, tree *cache)
{
  tree op0, op1, ssa_target;
  unsigned HOST_WIDE_INT digit;
  gimple mult_stmt;

  if (n < POWI_TABLE_SIZE && cache[n])
    return cache[n];

  ssa_target = make_temp_ssa_name (type, NULL, "powmult");

  if (n < POWI_TABLE_SIZE)
    {
      cache[n] = ssa_target;
      op0 = powi_as_mults_1 (gsi, loc, type, n - powi_table[n], cache);
      op1 = powi_as_mults_1 (gsi, loc, type, powi_table[n], cache);
    }
  else if (n & 1)
    {
      digit = n & ((1 << POWI_WINDOW_SIZE) - 1);
      op0 = powi_as_mults_1 (gsi, loc, type, n - digit, cache);
      op1 = powi_as_mults_1 (gsi, loc, type, digit, cache);
    }
  else
    {
      op0 = powi_as_mults_1 (gsi, loc, type, n >> 1, cache);
      op1 = op0;
    }

  mult_stmt = gimple_build_assign_with_ops (MULT_EXPR, ssa_target, op0, op1);
  gimple_set_location (mult_stmt, loc);
  gsi_insert_before (gsi, mult_stmt, GSI_SAME_STMT);

  return ssa_target;
}
/* Convert ARG0**N to a tree of multiplications of ARG0 with itself.
   This function needs to be kept in sync with powi_cost above.  */

static tree
powi_as_mults (gimple_stmt_iterator *gsi, location_t loc,
	       tree arg0, HOST_WIDE_INT n)
{
  tree cache[POWI_TABLE_SIZE], result, type = TREE_TYPE (arg0);
  gimple div_stmt;
  tree target;

  if (n == 0)
    return build_real (type, dconst1);

  memset (cache, 0, sizeof (cache));
  cache[1] = arg0;

  result = powi_as_mults_1 (gsi, loc, type, (n < 0) ? -n : n, cache);
  if (n >= 0)
    return result;

  /* If the original exponent was negative, reciprocate the result.  */
  target = make_temp_ssa_name (type, NULL, "powmult");
  div_stmt = gimple_build_assign_with_ops (RDIV_EXPR, target,
					   build_real (type, dconst1),
					   result);
  gimple_set_location (div_stmt, loc);
  gsi_insert_before (gsi, div_stmt, GSI_SAME_STMT);

  return target;
}
/* ARG0 and N are the two arguments to a powi builtin in GSI with
   location info LOC.  If the arguments are appropriate, create an
   equivalent sequence of statements prior to GSI using an optimal
   number of multiplications, and return an expression holding the
   result.  */

static tree
gimple_expand_builtin_powi (gimple_stmt_iterator *gsi, location_t loc,
			    tree arg0, HOST_WIDE_INT n)
{
  /* Avoid largest negative number.  */
  if (n != -n
      && ((n >= -1 && n <= 2)
	  || (optimize_function_for_speed_p (cfun)
	      && powi_cost (n) <= POWI_MAX_MULTS)))
    return powi_as_mults (gsi, loc, arg0, n);

  return NULL_TREE;
}
1049 /* Build a gimple call statement that calls FN with argument ARG.
1050 Set the lhs of the call statement to a fresh SSA name. Insert the
1051 statement prior to GSI's current position, and return the fresh
1055 build_and_insert_call (gimple_stmt_iterator
*gsi
, location_t loc
,
1061 call_stmt
= gimple_build_call (fn
, 1, arg
);
1062 ssa_target
= make_temp_ssa_name (TREE_TYPE (arg
), NULL
, "powroot");
1063 gimple_set_lhs (call_stmt
, ssa_target
);
1064 gimple_set_location (call_stmt
, loc
);
1065 gsi_insert_before (gsi
, call_stmt
, GSI_SAME_STMT
);
/* Build a gimple binary operation with the given CODE and arguments
   ARG0, ARG1, assigning the result to a new SSA name for variable
   TARGET.  Insert the statement prior to GSI's current position, and
   return the fresh SSA name.  */
1076 build_and_insert_binop (gimple_stmt_iterator
*gsi
, location_t loc
,
1077 const char *name
, enum tree_code code
,
1078 tree arg0
, tree arg1
)
1080 tree result
= make_temp_ssa_name (TREE_TYPE (arg0
), NULL
, name
);
1081 gimple stmt
= gimple_build_assign_with_ops (code
, result
, arg0
, arg1
);
1082 gimple_set_location (stmt
, loc
);
1083 gsi_insert_before (gsi
, stmt
, GSI_SAME_STMT
);
1087 /* Build a gimple reference operation with the given CODE and argument
1088 ARG, assigning the result to a new SSA name of TYPE with NAME.
1089 Insert the statement prior to GSI's current position, and return
1090 the fresh SSA name. */
1093 build_and_insert_ref (gimple_stmt_iterator
*gsi
, location_t loc
, tree type
,
1094 const char *name
, enum tree_code code
, tree arg0
)
1096 tree result
= make_temp_ssa_name (type
, NULL
, name
);
1097 gimple stmt
= gimple_build_assign (result
, build1 (code
, type
, arg0
));
1098 gimple_set_location (stmt
, loc
);
1099 gsi_insert_before (gsi
, stmt
, GSI_SAME_STMT
);
1103 /* Build a gimple assignment to cast VAL to TYPE. Insert the statement
1104 prior to GSI's current position, and return the fresh SSA name. */
1107 build_and_insert_cast (gimple_stmt_iterator
*gsi
, location_t loc
,
1108 tree type
, tree val
)
1110 tree result
= make_ssa_name (type
, NULL
);
1111 gimple stmt
= gimple_build_assign_with_ops (NOP_EXPR
, result
, val
, NULL_TREE
);
1112 gimple_set_location (stmt
, loc
);
1113 gsi_insert_before (gsi
, stmt
, GSI_SAME_STMT
);
/* ARG0 and ARG1 are the two arguments to a pow builtin call in GSI
   with location info LOC.  If possible, create an equivalent and
   less expensive sequence of statements prior to GSI, and return an
   expression holding the result.  */
1123 gimple_expand_builtin_pow (gimple_stmt_iterator
*gsi
, location_t loc
,
1124 tree arg0
, tree arg1
)
1126 REAL_VALUE_TYPE c
, cint
, dconst1_4
, dconst3_4
, dconst1_3
, dconst1_6
;
1127 REAL_VALUE_TYPE c2
, dconst3
;
1129 tree type
, sqrtfn
, cbrtfn
, sqrt_arg0
, sqrt_sqrt
, result
, cbrt_x
, powi_cbrt_x
;
1130 enum machine_mode mode
;
1131 bool hw_sqrt_exists
, c_is_int
, c2_is_int
;
1133 /* If the exponent isn't a constant, there's nothing of interest
1135 if (TREE_CODE (arg1
) != REAL_CST
)
1138 /* If the exponent is equivalent to an integer, expand to an optimal
1139 multiplication sequence when profitable. */
1140 c
= TREE_REAL_CST (arg1
);
1141 n
= real_to_integer (&c
);
1142 real_from_integer (&cint
, VOIDmode
, n
, n
< 0 ? -1 : 0, 0);
1143 c_is_int
= real_identical (&c
, &cint
);
1146 && ((n
>= -1 && n
<= 2)
1147 || (flag_unsafe_math_optimizations
1148 && optimize_insn_for_speed_p ()
1149 && powi_cost (n
) <= POWI_MAX_MULTS
)))
1150 return gimple_expand_builtin_powi (gsi
, loc
, arg0
, n
);
1152 /* Attempt various optimizations using sqrt and cbrt. */
1153 type
= TREE_TYPE (arg0
);
1154 mode
= TYPE_MODE (type
);
1155 sqrtfn
= mathfn_built_in (type
, BUILT_IN_SQRT
);
1157 /* Optimize pow(x,0.5) = sqrt(x). This replacement is always safe
1158 unless signed zeros must be maintained. pow(-0,0.5) = +0, while
1161 && REAL_VALUES_EQUAL (c
, dconsthalf
)
1162 && !HONOR_SIGNED_ZEROS (mode
))
1163 return build_and_insert_call (gsi
, loc
, sqrtfn
, arg0
);
1165 /* Optimize pow(x,0.25) = sqrt(sqrt(x)). Assume on most machines that
1166 a builtin sqrt instruction is smaller than a call to pow with 0.25,
1167 so do this optimization even if -Os. Don't do this optimization
1168 if we don't have a hardware sqrt insn. */
1169 dconst1_4
= dconst1
;
1170 SET_REAL_EXP (&dconst1_4
, REAL_EXP (&dconst1_4
) - 2);
1171 hw_sqrt_exists
= optab_handler (sqrt_optab
, mode
) != CODE_FOR_nothing
;
1173 if (flag_unsafe_math_optimizations
1175 && REAL_VALUES_EQUAL (c
, dconst1_4
)
1179 sqrt_arg0
= build_and_insert_call (gsi
, loc
, sqrtfn
, arg0
);
1182 return build_and_insert_call (gsi
, loc
, sqrtfn
, sqrt_arg0
);
1185 /* Optimize pow(x,0.75) = sqrt(x) * sqrt(sqrt(x)) unless we are
1186 optimizing for space. Don't do this optimization if we don't have
1187 a hardware sqrt insn. */
1188 real_from_integer (&dconst3_4
, VOIDmode
, 3, 0, 0);
1189 SET_REAL_EXP (&dconst3_4
, REAL_EXP (&dconst3_4
) - 2);
1191 if (flag_unsafe_math_optimizations
1193 && optimize_function_for_speed_p (cfun
)
1194 && REAL_VALUES_EQUAL (c
, dconst3_4
)
1198 sqrt_arg0
= build_and_insert_call (gsi
, loc
, sqrtfn
, arg0
);
1201 sqrt_sqrt
= build_and_insert_call (gsi
, loc
, sqrtfn
, sqrt_arg0
);
1203 /* sqrt(x) * sqrt(sqrt(x)) */
1204 return build_and_insert_binop (gsi
, loc
, "powroot", MULT_EXPR
,
1205 sqrt_arg0
, sqrt_sqrt
);
1208 /* Optimize pow(x,1./3.) = cbrt(x). This requires unsafe math
1209 optimizations since 1./3. is not exactly representable. If x
1210 is negative and finite, the correct value of pow(x,1./3.) is
1211 a NaN with the "invalid" exception raised, because the value
1212 of 1./3. actually has an even denominator. The correct value
1213 of cbrt(x) is a negative real value. */
1214 cbrtfn
= mathfn_built_in (type
, BUILT_IN_CBRT
);
1215 dconst1_3
= real_value_truncate (mode
, dconst_third ());
1217 if (flag_unsafe_math_optimizations
1219 && (gimple_val_nonnegative_real_p (arg0
) || !HONOR_NANS (mode
))
1220 && REAL_VALUES_EQUAL (c
, dconst1_3
))
1221 return build_and_insert_call (gsi
, loc
, cbrtfn
, arg0
);
1223 /* Optimize pow(x,1./6.) = cbrt(sqrt(x)). Don't do this optimization
1224 if we don't have a hardware sqrt insn. */
1225 dconst1_6
= dconst1_3
;
1226 SET_REAL_EXP (&dconst1_6
, REAL_EXP (&dconst1_6
) - 1);
1228 if (flag_unsafe_math_optimizations
1231 && (gimple_val_nonnegative_real_p (arg0
) || !HONOR_NANS (mode
))
1232 && optimize_function_for_speed_p (cfun
)
1234 && REAL_VALUES_EQUAL (c
, dconst1_6
))
1237 sqrt_arg0
= build_and_insert_call (gsi
, loc
, sqrtfn
, arg0
);
1240 return build_and_insert_call (gsi
, loc
, cbrtfn
, sqrt_arg0
);
1243 /* Optimize pow(x,c), where n = 2c for some nonzero integer n
1244 and c not an integer, into
1246 sqrt(x) * powi(x, n/2), n > 0;
1247 1.0 / (sqrt(x) * powi(x, abs(n/2))), n < 0.
1249 Do not calculate the powi factor when n/2 = 0. */
1250 real_arithmetic (&c2
, MULT_EXPR
, &c
, &dconst2
);
1251 n
= real_to_integer (&c2
);
1252 real_from_integer (&cint
, VOIDmode
, n
, n
< 0 ? -1 : 0, 0);
1253 c2_is_int
= real_identical (&c2
, &cint
);
1255 if (flag_unsafe_math_optimizations
1259 && optimize_function_for_speed_p (cfun
))
1261 tree powi_x_ndiv2
= NULL_TREE
;
1263 /* Attempt to fold powi(arg0, abs(n/2)) into multiplies. If not
1264 possible or profitable, give up. Skip the degenerate case when
1265 n is 1 or -1, where the result is always 1. */
1266 if (absu_hwi (n
) != 1)
1268 powi_x_ndiv2
= gimple_expand_builtin_powi (gsi
, loc
, arg0
,
1274 /* Calculate sqrt(x). When n is not 1 or -1, multiply it by the
1275 result of the optimal multiply sequence just calculated. */
1276 sqrt_arg0
= build_and_insert_call (gsi
, loc
, sqrtfn
, arg0
);
1278 if (absu_hwi (n
) == 1)
1281 result
= build_and_insert_binop (gsi
, loc
, "powroot", MULT_EXPR
,
1282 sqrt_arg0
, powi_x_ndiv2
);
1284 /* If n is negative, reciprocate the result. */
1286 result
= build_and_insert_binop (gsi
, loc
, "powroot", RDIV_EXPR
,
1287 build_real (type
, dconst1
), result
);
1291 /* Optimize pow(x,c), where 3c = n for some nonzero integer n, into
1293 powi(x, n/3) * powi(cbrt(x), n%3), n > 0;
1294 1.0 / (powi(x, abs(n)/3) * powi(cbrt(x), abs(n)%3)), n < 0.
1296 Do not calculate the first factor when n/3 = 0. As cbrt(x) is
1297 different from pow(x, 1./3.) due to rounding and behavior with
1298 negative x, we need to constrain this transformation to unsafe
1299 math and positive x or finite math. */
1300 real_from_integer (&dconst3
, VOIDmode
, 3, 0, 0);
1301 real_arithmetic (&c2
, MULT_EXPR
, &c
, &dconst3
);
1302 real_round (&c2
, mode
, &c2
);
1303 n
= real_to_integer (&c2
);
1304 real_from_integer (&cint
, VOIDmode
, n
, n
< 0 ? -1 : 0, 0);
1305 real_arithmetic (&c2
, RDIV_EXPR
, &cint
, &dconst3
);
1306 real_convert (&c2
, mode
, &c2
);
1308 if (flag_unsafe_math_optimizations
1310 && (gimple_val_nonnegative_real_p (arg0
) || !HONOR_NANS (mode
))
1311 && real_identical (&c2
, &c
)
1313 && optimize_function_for_speed_p (cfun
)
1314 && powi_cost (n
/ 3) <= POWI_MAX_MULTS
)
1316 tree powi_x_ndiv3
= NULL_TREE
;
1318 /* Attempt to fold powi(arg0, abs(n/3)) into multiplies. If not
1319 possible or profitable, give up. Skip the degenerate case when
1320 abs(n) < 3, where the result is always 1. */
1321 if (absu_hwi (n
) >= 3)
1323 powi_x_ndiv3
= gimple_expand_builtin_powi (gsi
, loc
, arg0
,
1329 /* Calculate powi(cbrt(x), n%3). Don't use gimple_expand_builtin_powi
1330 as that creates an unnecessary variable. Instead, just produce
1331 either cbrt(x) or cbrt(x) * cbrt(x). */
1332 cbrt_x
= build_and_insert_call (gsi
, loc
, cbrtfn
, arg0
);
1334 if (absu_hwi (n
) % 3 == 1)
1335 powi_cbrt_x
= cbrt_x
;
1337 powi_cbrt_x
= build_and_insert_binop (gsi
, loc
, "powroot", MULT_EXPR
,
1340 /* Multiply the two subexpressions, unless powi(x,abs(n)/3) = 1. */
1341 if (absu_hwi (n
) < 3)
1342 result
= powi_cbrt_x
;
1344 result
= build_and_insert_binop (gsi
, loc
, "powroot", MULT_EXPR
,
1345 powi_x_ndiv3
, powi_cbrt_x
);
1347 /* If n is negative, reciprocate the result. */
1349 result
= build_and_insert_binop (gsi
, loc
, "powroot", RDIV_EXPR
,
1350 build_real (type
, dconst1
), result
);
1355 /* No optimizations succeeded. */
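/* Illustrative summary (not exhaustive) of what the pow expansion above
   produces when the exponent is a suitable constant:

	pow (x, 2.0)   ->  x * x
	pow (x, 0.5)   ->  sqrt (x)
	pow (x, 0.25)  ->  sqrt (sqrt (x))
	pow (x, 0.75)  ->  sqrt (x) * sqrt (sqrt (x))
	pow (x, 1./3.) ->  cbrt (x)
	pow (x, 1.5)   ->  sqrt (x) * x

   The pow(x,0.5) case only needs signed zeros to be ignorable; the
   remaining forms are guarded by -funsafe-math-optimizations plus the
   sqrt/cbrt availability and speed checks spelled out above.  */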
1359 /* ARG is the argument to a cabs builtin call in GSI with location info
1360 LOC. Create a sequence of statements prior to GSI that calculates
1361 sqrt(R*R + I*I), where R and I are the real and imaginary components
1362 of ARG, respectively. Return an expression holding the result. */
1365 gimple_expand_builtin_cabs (gimple_stmt_iterator
*gsi
, location_t loc
, tree arg
)
1367 tree real_part
, imag_part
, addend1
, addend2
, sum
, result
;
1368 tree type
= TREE_TYPE (TREE_TYPE (arg
));
1369 tree sqrtfn
= mathfn_built_in (type
, BUILT_IN_SQRT
);
1370 enum machine_mode mode
= TYPE_MODE (type
);
1372 if (!flag_unsafe_math_optimizations
1373 || !optimize_bb_for_speed_p (gimple_bb (gsi_stmt (*gsi
)))
1375 || optab_handler (sqrt_optab
, mode
) == CODE_FOR_nothing
)
1378 real_part
= build_and_insert_ref (gsi
, loc
, type
, "cabs",
1379 REALPART_EXPR
, arg
);
1380 addend1
= build_and_insert_binop (gsi
, loc
, "cabs", MULT_EXPR
,
1381 real_part
, real_part
);
1382 imag_part
= build_and_insert_ref (gsi
, loc
, type
, "cabs",
1383 IMAGPART_EXPR
, arg
);
1384 addend2
= build_and_insert_binop (gsi
, loc
, "cabs", MULT_EXPR
,
1385 imag_part
, imag_part
);
1386 sum
= build_and_insert_binop (gsi
, loc
, "cabs", PLUS_EXPR
, addend1
, addend2
);
1387 result
= build_and_insert_call (gsi
, loc
, sqrtfn
, sum
);
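/* Illustrative example (not part of the implementation): under
   -funsafe-math-optimizations, when optimizing for speed and a hardware
   sqrt exists, a call

	d = cabs (z);

   is expanded by the code above into straight-line arithmetic:

	r = __real__ z;
	i = __imag__ z;
	d = sqrt (r*r + i*i);

   which avoids the library call at the cost of the usual overflow and
   underflow caveats of the naive formula (hence the unsafe-math guard).  */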
1392 /* Go through all calls to sin, cos and cexpi and call execute_cse_sincos_1
1393 on the SSA_NAME argument of each of them. Also expand powi(x,n) into
1394 an optimal number of multiplies, when n is a constant. */
1397 execute_cse_sincos (void)
1400 bool cfg_changed
= false;
1402 calculate_dominance_info (CDI_DOMINATORS
);
1403 memset (&sincos_stats
, 0, sizeof (sincos_stats
));
1407 gimple_stmt_iterator gsi
;
1408 bool cleanup_eh
= false;
1410 for (gsi
= gsi_after_labels (bb
); !gsi_end_p (gsi
); gsi_next (&gsi
))
1412 gimple stmt
= gsi_stmt (gsi
);
1415 /* Only the last stmt in a bb could throw, no need to call
1416 gimple_purge_dead_eh_edges if we change something in the middle
1417 of a basic block. */
1420 if (is_gimple_call (stmt
)
1421 && gimple_call_lhs (stmt
)
1422 && (fndecl
= gimple_call_fndecl (stmt
))
1423 && DECL_BUILT_IN_CLASS (fndecl
) == BUILT_IN_NORMAL
)
1425 tree arg
, arg0
, arg1
, result
;
1429 switch (DECL_FUNCTION_CODE (fndecl
))
1431 CASE_FLT_FN (BUILT_IN_COS
):
1432 CASE_FLT_FN (BUILT_IN_SIN
):
1433 CASE_FLT_FN (BUILT_IN_CEXPI
):
1434 /* Make sure we have either sincos or cexp. */
1435 if (!targetm
.libc_has_function (function_c99_math_complex
)
1436 && !targetm
.libc_has_function (function_sincos
))
1439 arg
= gimple_call_arg (stmt
, 0);
1440 if (TREE_CODE (arg
) == SSA_NAME
)
1441 cfg_changed
|= execute_cse_sincos_1 (arg
);
1444 CASE_FLT_FN (BUILT_IN_POW
):
1445 arg0
= gimple_call_arg (stmt
, 0);
1446 arg1
= gimple_call_arg (stmt
, 1);
1448 loc
= gimple_location (stmt
);
1449 result
= gimple_expand_builtin_pow (&gsi
, loc
, arg0
, arg1
);
1453 tree lhs
= gimple_get_lhs (stmt
);
1454 gimple new_stmt
= gimple_build_assign (lhs
, result
);
1455 gimple_set_location (new_stmt
, loc
);
1456 unlink_stmt_vdef (stmt
);
1457 gsi_replace (&gsi
, new_stmt
, true);
1459 if (gimple_vdef (stmt
))
1460 release_ssa_name (gimple_vdef (stmt
));
1464 CASE_FLT_FN (BUILT_IN_POWI
):
1465 arg0
= gimple_call_arg (stmt
, 0);
1466 arg1
= gimple_call_arg (stmt
, 1);
1467 loc
= gimple_location (stmt
);
1469 if (real_minus_onep (arg0
))
1471 tree t0
, t1
, cond
, one
, minus_one
;
1474 t0
= TREE_TYPE (arg0
);
1475 t1
= TREE_TYPE (arg1
);
1476 one
= build_real (t0
, dconst1
);
1477 minus_one
= build_real (t0
, dconstm1
);
1479 cond
= make_temp_ssa_name (t1
, NULL
, "powi_cond");
1480 stmt
= gimple_build_assign_with_ops (BIT_AND_EXPR
, cond
,
1484 gimple_set_location (stmt
, loc
);
1485 gsi_insert_before (&gsi
, stmt
, GSI_SAME_STMT
);
1487 result
= make_temp_ssa_name (t0
, NULL
, "powi");
1488 stmt
= gimple_build_assign_with_ops (COND_EXPR
, result
,
1491 gimple_set_location (stmt
, loc
);
1492 gsi_insert_before (&gsi
, stmt
, GSI_SAME_STMT
);
1496 if (!host_integerp (arg1
, 0))
1499 n
= TREE_INT_CST_LOW (arg1
);
1500 result
= gimple_expand_builtin_powi (&gsi
, loc
, arg0
, n
);
1505 tree lhs
= gimple_get_lhs (stmt
);
1506 gimple new_stmt
= gimple_build_assign (lhs
, result
);
1507 gimple_set_location (new_stmt
, loc
);
1508 unlink_stmt_vdef (stmt
);
1509 gsi_replace (&gsi
, new_stmt
, true);
1511 if (gimple_vdef (stmt
))
1512 release_ssa_name (gimple_vdef (stmt
));
1516 CASE_FLT_FN (BUILT_IN_CABS
):
1517 arg0
= gimple_call_arg (stmt
, 0);
1518 loc
= gimple_location (stmt
);
1519 result
= gimple_expand_builtin_cabs (&gsi
, loc
, arg0
);
1523 tree lhs
= gimple_get_lhs (stmt
);
1524 gimple new_stmt
= gimple_build_assign (lhs
, result
);
1525 gimple_set_location (new_stmt
, loc
);
1526 unlink_stmt_vdef (stmt
);
1527 gsi_replace (&gsi
, new_stmt
, true);
1529 if (gimple_vdef (stmt
))
1530 release_ssa_name (gimple_vdef (stmt
));
1539 cfg_changed
|= gimple_purge_dead_eh_edges (bb
);
1542 statistics_counter_event (cfun
, "sincos statements inserted",
1543 sincos_stats
.inserted
);
1545 free_dominance_info (CDI_DOMINATORS
);
1546 return cfg_changed
? TODO_cleanup_cfg
: 0;
static bool
gate_cse_sincos (void)
{
  /* We no longer require either sincos or cexp, since powi expansion
     piggybacks on this pass.  */
  return optimize;
}
const pass_data pass_data_cse_sincos =
{
  GIMPLE_PASS, /* type */
  "sincos", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  true, /* has_gate */
  true, /* has_execute */
  TV_NONE, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_update_ssa | TODO_verify_ssa
    | TODO_verify_stmts ), /* todo_flags_finish */
};
class pass_cse_sincos : public gimple_opt_pass
{
public:
  pass_cse_sincos (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_cse_sincos, ctxt)
  {}

  /* opt_pass methods: */
  bool gate () { return gate_cse_sincos (); }
  unsigned int execute () { return execute_cse_sincos (); }

}; // class pass_cse_sincos

gimple_opt_pass *
make_pass_cse_sincos (gcc::context *ctxt)
{
  return new pass_cse_sincos (ctxt);
}
/* A symbolic number is used to detect byte permutation and selection
   patterns.  Therefore the field N contains an artificial number
   consisting of byte size markers:

   0       - byte has the value 0
   1..size - byte contains the content of the byte
	     number indexed with that value minus one  */

struct symbolic_number {
  unsigned HOST_WIDEST_INT n;
  int size;
};
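/* Illustrative walk-through of the encoding (not part of the
   implementation): for a 32-bit value the symbolic number starts out as
   0x04030201, i.e. byte k of the value is marked with k+1.  Shifts,
   rotates and masks are then applied to the marker bytes instead of
   real data; a full byte swap has been found once the markers read
   0x01020304, the constant find_bswap compares against below.  For
   instance, tracking

	((x & 0x00ff) << 8) | ((x >> 8) & 0x00ff)

   on a 16-bit X turns the initial marker 0x0201 into 0x0102.  */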
1609 /* Perform a SHIFT or ROTATE operation by COUNT bits on symbolic
1610 number N. Return false if the requested operation is not permitted
1611 on a symbolic number. */
1614 do_shift_rotate (enum tree_code code
,
1615 struct symbolic_number
*n
,
1621 /* Zero out the extra bits of N in order to avoid them being shifted
1622 into the significant bits. */
1623 if (n
->size
< (int)sizeof (HOST_WIDEST_INT
))
1624 n
->n
&= ((unsigned HOST_WIDEST_INT
)1 << (n
->size
* BITS_PER_UNIT
)) - 1;
1635 n
->n
= (n
->n
<< count
) | (n
->n
>> ((n
->size
* BITS_PER_UNIT
) - count
));
1638 n
->n
= (n
->n
>> count
) | (n
->n
<< ((n
->size
* BITS_PER_UNIT
) - count
));
1643 /* Zero unused bits for size. */
1644 if (n
->size
< (int)sizeof (HOST_WIDEST_INT
))
1645 n
->n
&= ((unsigned HOST_WIDEST_INT
)1 << (n
->size
* BITS_PER_UNIT
)) - 1;
1649 /* Perform sanity checking for the symbolic number N and the gimple
1653 verify_symbolic_number_p (struct symbolic_number
*n
, gimple stmt
)
1657 lhs_type
= gimple_expr_type (stmt
);
1659 if (TREE_CODE (lhs_type
) != INTEGER_TYPE
)
1662 if (TYPE_PRECISION (lhs_type
) != n
->size
* BITS_PER_UNIT
)
/* find_bswap_1 invokes itself recursively with N and tries to perform
   the operation given by the rhs of STMT on the result.  If the
   operation could successfully be executed the function returns the
   tree expression of the source operand and NULL otherwise.  */
1674 find_bswap_1 (gimple stmt
, struct symbolic_number
*n
, int limit
)
1676 enum tree_code code
;
1677 tree rhs1
, rhs2
= NULL
;
1678 gimple rhs1_stmt
, rhs2_stmt
;
1680 enum gimple_rhs_class rhs_class
;
1682 if (!limit
|| !is_gimple_assign (stmt
))
1685 rhs1
= gimple_assign_rhs1 (stmt
);
1687 if (TREE_CODE (rhs1
) != SSA_NAME
)
1690 code
= gimple_assign_rhs_code (stmt
);
1691 rhs_class
= gimple_assign_rhs_class (stmt
);
1692 rhs1_stmt
= SSA_NAME_DEF_STMT (rhs1
);
1694 if (rhs_class
== GIMPLE_BINARY_RHS
)
1695 rhs2
= gimple_assign_rhs2 (stmt
);
1697 /* Handle unary rhs and binary rhs with integer constants as second
1700 if (rhs_class
== GIMPLE_UNARY_RHS
1701 || (rhs_class
== GIMPLE_BINARY_RHS
1702 && TREE_CODE (rhs2
) == INTEGER_CST
))
1704 if (code
!= BIT_AND_EXPR
1705 && code
!= LSHIFT_EXPR
1706 && code
!= RSHIFT_EXPR
1707 && code
!= LROTATE_EXPR
1708 && code
!= RROTATE_EXPR
1710 && code
!= CONVERT_EXPR
)
1713 source_expr1
= find_bswap_1 (rhs1_stmt
, n
, limit
- 1);
      /* If find_bswap_1 returned NULL, STMT is a leaf node and we have
	 to initialize the symbolic number.  */

	  /* Set up the symbolic number N by setting each byte to a
	     value between 1 and the byte size of rhs1.  The highest
	     order byte is set to n->size and the lowest order
	     byte to 1.  */
1723 n
->size
= TYPE_PRECISION (TREE_TYPE (rhs1
));
1724 if (n
->size
% BITS_PER_UNIT
!= 0)
1726 n
->size
/= BITS_PER_UNIT
;
1727 n
->n
= (sizeof (HOST_WIDEST_INT
) < 8 ? 0 :
1728 (unsigned HOST_WIDEST_INT
)0x08070605 << 32 | 0x04030201);
1730 if (n
->size
< (int)sizeof (HOST_WIDEST_INT
))
1731 n
->n
&= ((unsigned HOST_WIDEST_INT
)1 <<
1732 (n
->size
* BITS_PER_UNIT
)) - 1;
1734 source_expr1
= rhs1
;
1742 unsigned HOST_WIDEST_INT val
= widest_int_cst_value (rhs2
);
1743 unsigned HOST_WIDEST_INT tmp
= val
;
1745 /* Only constants masking full bytes are allowed. */
1746 for (i
= 0; i
< n
->size
; i
++, tmp
>>= BITS_PER_UNIT
)
1747 if ((tmp
& 0xff) != 0 && (tmp
& 0xff) != 0xff)
1757 if (!do_shift_rotate (code
, n
, (int)TREE_INT_CST_LOW (rhs2
)))
1764 type_size
= TYPE_PRECISION (gimple_expr_type (stmt
));
1765 if (type_size
% BITS_PER_UNIT
!= 0)
1768 if (type_size
/ BITS_PER_UNIT
< (int)(sizeof (HOST_WIDEST_INT
)))
1770 /* If STMT casts to a smaller type mask out the bits not
1771 belonging to the target type. */
1772 n
->n
&= ((unsigned HOST_WIDEST_INT
)1 << type_size
) - 1;
1774 n
->size
= type_size
/ BITS_PER_UNIT
;
1780 return verify_symbolic_number_p (n
, stmt
) ? source_expr1
: NULL
;
1783 /* Handle binary rhs. */
1785 if (rhs_class
== GIMPLE_BINARY_RHS
)
1787 struct symbolic_number n1
, n2
;
1790 if (code
!= BIT_IOR_EXPR
)
1793 if (TREE_CODE (rhs2
) != SSA_NAME
)
1796 rhs2_stmt
= SSA_NAME_DEF_STMT (rhs2
);
1801 source_expr1
= find_bswap_1 (rhs1_stmt
, &n1
, limit
- 1);
1806 source_expr2
= find_bswap_1 (rhs2_stmt
, &n2
, limit
- 1);
1808 if (source_expr1
!= source_expr2
1809 || n1
.size
!= n2
.size
)
1815 if (!verify_symbolic_number_p (n
, stmt
))
1822 return source_expr1
;
/* Check if STMT completes a bswap implementation consisting of ORs,
   SHIFTs and ANDs.  Return the source tree expression on which the
   byte swap is performed and NULL if no bswap was found.  */
1832 find_bswap (gimple stmt
)
1834 /* The number which the find_bswap result should match in order to
1835 have a full byte swap. The number is shifted to the left according
1836 to the size of the symbolic number before using it. */
1837 unsigned HOST_WIDEST_INT cmp
=
1838 sizeof (HOST_WIDEST_INT
) < 8 ? 0 :
1839 (unsigned HOST_WIDEST_INT
)0x01020304 << 32 | 0x05060708;
1841 struct symbolic_number n
;
  /* The last parameter determines the depth search limit.  It usually
     correlates directly to the number of bytes to be touched.  We
     increase that number by three here in order to also
     cover signed -> unsigned conversions of the src operand as can be seen
     in libgcc, and for initial shift/and operation of the src operand.  */
1850 limit
= TREE_INT_CST_LOW (TYPE_SIZE_UNIT (gimple_expr_type (stmt
)));
1851 limit
+= 1 + (int) ceil_log2 ((unsigned HOST_WIDE_INT
) limit
);
1852 source_expr
= find_bswap_1 (stmt
, &n
, limit
);
1857 /* Zero out the extra bits of N and CMP. */
1858 if (n
.size
< (int)sizeof (HOST_WIDEST_INT
))
1860 unsigned HOST_WIDEST_INT mask
=
1861 ((unsigned HOST_WIDEST_INT
)1 << (n
.size
* BITS_PER_UNIT
)) - 1;
1864 cmp
>>= (sizeof (HOST_WIDEST_INT
) - n
.size
) * BITS_PER_UNIT
;
  /* A complete byte swap should make the symbolic number start
     with the largest digit in the highest order byte.  */
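/* Illustrative example (one of many equivalent forms): find_bswap
   recognizes a hand-written 32-bit swap such as

	uint32_t
	swap32 (uint32_t x)
	{
	  return ((x & 0x000000ffU) << 24)
	       | ((x & 0x0000ff00U) << 8)
	       | ((x & 0x00ff0000U) >> 8)
	       | ((x & 0xff000000U) >> 24);
	}

   and execute_optimize_bswap below replaces the whole OR tree with a
   single call to __builtin_bswap32, provided the target has a bswap
   instruction pattern for SImode.  */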
/* Find manual byte swap implementations and turn them into a bswap
   builtin invocation.  */
1879 execute_optimize_bswap (void)
1882 bool bswap16_p
, bswap32_p
, bswap64_p
;
1883 bool changed
= false;
1884 tree bswap16_type
= NULL_TREE
, bswap32_type
= NULL_TREE
, bswap64_type
= NULL_TREE
;
1886 if (BITS_PER_UNIT
!= 8)
1889 if (sizeof (HOST_WIDEST_INT
) < 8)
1892 bswap16_p
= (builtin_decl_explicit_p (BUILT_IN_BSWAP16
)
1893 && optab_handler (bswap_optab
, HImode
) != CODE_FOR_nothing
);
1894 bswap32_p
= (builtin_decl_explicit_p (BUILT_IN_BSWAP32
)
1895 && optab_handler (bswap_optab
, SImode
) != CODE_FOR_nothing
);
1896 bswap64_p
= (builtin_decl_explicit_p (BUILT_IN_BSWAP64
)
1897 && (optab_handler (bswap_optab
, DImode
) != CODE_FOR_nothing
1898 || (bswap32_p
&& word_mode
== SImode
)));
1900 if (!bswap16_p
&& !bswap32_p
&& !bswap64_p
)
1903 /* Determine the argument type of the builtins. The code later on
1904 assumes that the return and argument type are the same. */
1907 tree fndecl
= builtin_decl_explicit (BUILT_IN_BSWAP16
);
1908 bswap16_type
= TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl
)));
1913 tree fndecl
= builtin_decl_explicit (BUILT_IN_BSWAP32
);
1914 bswap32_type
= TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl
)));
1919 tree fndecl
= builtin_decl_explicit (BUILT_IN_BSWAP64
);
1920 bswap64_type
= TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl
)));
1923 memset (&bswap_stats
, 0, sizeof (bswap_stats
));
1927 gimple_stmt_iterator gsi
;
1929 /* We do a reverse scan for bswap patterns to make sure we get the
1930 widest match. As bswap pattern matching doesn't handle
1931 previously inserted smaller bswap replacements as sub-
1932 patterns, the wider variant wouldn't be detected. */
1933 for (gsi
= gsi_last_bb (bb
); !gsi_end_p (gsi
); gsi_prev (&gsi
))
1935 gimple stmt
= gsi_stmt (gsi
);
1936 tree bswap_src
, bswap_type
;
1938 tree fndecl
= NULL_TREE
;
1942 if (!is_gimple_assign (stmt
)
1943 || gimple_assign_rhs_code (stmt
) != BIT_IOR_EXPR
)
1946 type_size
= TYPE_PRECISION (gimple_expr_type (stmt
));
1953 fndecl
= builtin_decl_explicit (BUILT_IN_BSWAP16
);
1954 bswap_type
= bswap16_type
;
1960 fndecl
= builtin_decl_explicit (BUILT_IN_BSWAP32
);
1961 bswap_type
= bswap32_type
;
1967 fndecl
= builtin_decl_explicit (BUILT_IN_BSWAP64
);
1968 bswap_type
= bswap64_type
;
1978 bswap_src
= find_bswap (stmt
);
1984 if (type_size
== 16)
1985 bswap_stats
.found_16bit
++;
1986 else if (type_size
== 32)
1987 bswap_stats
.found_32bit
++;
1989 bswap_stats
.found_64bit
++;
1991 bswap_tmp
= bswap_src
;
1993 /* Convert the src expression if necessary. */
1994 if (!useless_type_conversion_p (TREE_TYPE (bswap_tmp
), bswap_type
))
1996 gimple convert_stmt
;
1997 bswap_tmp
= make_temp_ssa_name (bswap_type
, NULL
, "bswapsrc");
1998 convert_stmt
= gimple_build_assign_with_ops
1999 (NOP_EXPR
, bswap_tmp
, bswap_src
, NULL
);
2000 gsi_insert_before (&gsi
, convert_stmt
, GSI_SAME_STMT
);
2003 call
= gimple_build_call (fndecl
, 1, bswap_tmp
);
2005 bswap_tmp
= gimple_assign_lhs (stmt
);
2007 /* Convert the result if necessary. */
2008 if (!useless_type_conversion_p (TREE_TYPE (bswap_tmp
), bswap_type
))
2010 gimple convert_stmt
;
2011 bswap_tmp
= make_temp_ssa_name (bswap_type
, NULL
, "bswapdst");
2012 convert_stmt
= gimple_build_assign_with_ops
2013 (NOP_EXPR
, gimple_assign_lhs (stmt
), bswap_tmp
, NULL
);
2014 gsi_insert_after (&gsi
, convert_stmt
, GSI_SAME_STMT
);
2017 gimple_call_set_lhs (call
, bswap_tmp
);
2021 fprintf (dump_file
, "%d bit bswap implementation found at: ",
2023 print_gimple_stmt (dump_file
, stmt
, 0, 0);
2026 gsi_insert_after (&gsi
, call
, GSI_SAME_STMT
);
2027 gsi_remove (&gsi
, true);
2031 statistics_counter_event (cfun
, "16-bit bswap implementations found",
2032 bswap_stats
.found_16bit
);
2033 statistics_counter_event (cfun
, "32-bit bswap implementations found",
2034 bswap_stats
.found_32bit
);
2035 statistics_counter_event (cfun
, "64-bit bswap implementations found",
2036 bswap_stats
.found_64bit
);
2038 return (changed
? TODO_update_ssa
| TODO_verify_ssa
2039 | TODO_verify_stmts
: 0);
static bool
gate_optimize_bswap (void)
{
  return flag_expensive_optimizations && optimize;
}
const pass_data pass_data_optimize_bswap =
{
  GIMPLE_PASS, /* type */
  "bswap", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  true, /* has_gate */
  true, /* has_execute */
  TV_NONE, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};
class pass_optimize_bswap : public gimple_opt_pass
{
public:
  pass_optimize_bswap (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_optimize_bswap, ctxt)
  {}

  /* opt_pass methods: */
  bool gate () { return gate_optimize_bswap (); }
  unsigned int execute () { return execute_optimize_bswap (); }

}; // class pass_optimize_bswap

gimple_opt_pass *
make_pass_optimize_bswap (gcc::context *ctxt)
{
  return new pass_optimize_bswap (ctxt);
}

/* Return true if stmt is a type conversion operation that can be stripped
   when used in a widening multiply operation.  */
static bool
widening_mult_conversion_strippable_p (tree result_type, gimple stmt)
{
  enum tree_code rhs_code = gimple_assign_rhs_code (stmt);

  if (TREE_CODE (result_type) == INTEGER_TYPE)
    {
      tree op_type;
      tree inner_op_type;

      if (!CONVERT_EXPR_CODE_P (rhs_code))
	return false;

      op_type = TREE_TYPE (gimple_assign_lhs (stmt));

      /* If the type of OP has the same precision as the result, then
	 we can strip this conversion.  The multiply operation will be
	 selected to create the correct extension as a by-product.  */
      if (TYPE_PRECISION (result_type) == TYPE_PRECISION (op_type))
	return true;

      /* We can also strip a conversion if it preserves the signed-ness of
	 the operation and doesn't narrow the range.  */
      inner_op_type = TREE_TYPE (gimple_assign_rhs1 (stmt));

      /* If the inner-most type is unsigned, then we can strip any
	 intermediate widening operation.  If it's signed, then the
	 intermediate widening operation must also be signed.  */
      if ((TYPE_UNSIGNED (inner_op_type)
	   || TYPE_UNSIGNED (op_type) == TYPE_UNSIGNED (inner_op_type))
	  && TYPE_PRECISION (op_type) > TYPE_PRECISION (inner_op_type))
	return true;

      return false;
    }

  return rhs_code == FIXED_CONVERT_EXPR;
}
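
/* Illustrative sketch, not part of the pass itself: given

     short a, b;
     long long r = (long long) a * (long long) b;

   each multiply operand is defined by a conversion from SHORT to
   LONG LONG.  The converted type has the same precision as the
   multiplication's result type, so the conversion is strippable: the
   widening multiply recreates the extension itself, and the SHORT
   values become the operands seen by is_widening_mult_rhs_p below.  */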

/* Return true if RHS is a suitable operand for a widening multiplication,
   assuming a target type of TYPE.
   There are two cases:

     - RHS makes some value at least twice as wide.  Store that value
       in *NEW_RHS_OUT if so, and store its type in *TYPE_OUT.

     - RHS is an integer constant.  Store that value in *NEW_RHS_OUT if so,
       but leave *TYPE_OUT untouched.  */

static bool
is_widening_mult_rhs_p (tree type, tree rhs, tree *type_out,
			tree *new_rhs_out)
{
  gimple stmt;
  tree type1, rhs1;

  if (TREE_CODE (rhs) == SSA_NAME)
    {
      stmt = SSA_NAME_DEF_STMT (rhs);
      if (is_gimple_assign (stmt))
	{
	  if (! widening_mult_conversion_strippable_p (type, stmt))
	    rhs1 = rhs;
	  else
	    {
	      rhs1 = gimple_assign_rhs1 (stmt);

	      if (TREE_CODE (rhs1) == INTEGER_CST)
		{
		  *new_rhs_out = rhs1;
		  *type_out = NULL;
		  return true;
		}
	    }
	}
      else
	rhs1 = rhs;

      type1 = TREE_TYPE (rhs1);

      if (TREE_CODE (type1) != TREE_CODE (type)
	  || TYPE_PRECISION (type1) * 2 > TYPE_PRECISION (type))
	return false;

      *new_rhs_out = rhs1;
      *type_out = type1;
      return true;
    }

  if (TREE_CODE (rhs) == INTEGER_CST)
    {
      *new_rhs_out = rhs;
      *type_out = NULL;
      return true;
    }

  return false;
}

/* Return true if STMT performs a widening multiplication, assuming the
   output type is TYPE.  If so, store the unwidened types of the operands
   in *TYPE1_OUT and *TYPE2_OUT respectively.  Also fill *RHS1_OUT and
   *RHS2_OUT such that converting those operands to types *TYPE1_OUT
   and *TYPE2_OUT would give the operands of the multiplication.  */

static bool
is_widening_mult_p (gimple stmt,
		    tree *type1_out, tree *rhs1_out,
		    tree *type2_out, tree *rhs2_out)
{
  tree type = TREE_TYPE (gimple_assign_lhs (stmt));

  if (TREE_CODE (type) != INTEGER_TYPE
      && TREE_CODE (type) != FIXED_POINT_TYPE)
    return false;

  if (!is_widening_mult_rhs_p (type, gimple_assign_rhs1 (stmt), type1_out,
			       rhs1_out))
    return false;

  if (!is_widening_mult_rhs_p (type, gimple_assign_rhs2 (stmt), type2_out,
			       rhs2_out))
    return false;

  if (*type1_out == NULL)
    {
      if (*type2_out == NULL || !int_fits_type_p (*rhs1_out, *type2_out))
	return false;
      *type1_out = *type2_out;
    }

  if (*type2_out == NULL)
    {
      if (!int_fits_type_p (*rhs2_out, *type1_out))
	return false;
      *type2_out = *type1_out;
    }

  /* Ensure that the larger of the two operands comes first.  */
  if (TYPE_PRECISION (*type1_out) < TYPE_PRECISION (*type2_out))
    {
      tree tmp;
      tmp = *type1_out;
      *type1_out = *type2_out;
      *type2_out = tmp;
      tmp = *rhs1_out;
      *rhs1_out = *rhs2_out;
      *rhs2_out = tmp;
    }

  return true;
}
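
/* A hedged example of the constant-operand case above (variable names
   are illustrative only):

     int a;
     long long r = (long long) a * 1000;

   is_widening_mult_rhs_p reports the constant 1000 with *TYPE_OUT left
   NULL; the code above then checks that the constant fits the type
   recovered from the other operand (INT here) and reuses that type for
   both operands.  */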

/* Process a single gimple statement STMT, which has a MULT_EXPR as
   its rhs, and try to convert it into a WIDEN_MULT_EXPR.  The return
   value is true iff we converted the statement.  */

static bool
convert_mult_to_widen (gimple stmt, gimple_stmt_iterator *gsi)
{
  tree lhs, rhs1, rhs2, type, type1, type2;
  enum insn_code handler;
  enum machine_mode to_mode, from_mode, actual_mode;
  optab op;
  int actual_precision;
  location_t loc = gimple_location (stmt);
  bool from_unsigned1, from_unsigned2;

  lhs = gimple_assign_lhs (stmt);
  type = TREE_TYPE (lhs);
  if (TREE_CODE (type) != INTEGER_TYPE)
    return false;

  if (!is_widening_mult_p (stmt, &type1, &rhs1, &type2, &rhs2))
    return false;

  to_mode = TYPE_MODE (type);
  from_mode = TYPE_MODE (type1);
  from_unsigned1 = TYPE_UNSIGNED (type1);
  from_unsigned2 = TYPE_UNSIGNED (type2);

  if (from_unsigned1 && from_unsigned2)
    op = umul_widen_optab;
  else if (!from_unsigned1 && !from_unsigned2)
    op = smul_widen_optab;
  else
    op = usmul_widen_optab;

  handler = find_widening_optab_handler_and_mode (op, to_mode, from_mode,
						  0, &actual_mode);

  if (handler == CODE_FOR_nothing)
    {
      if (op != smul_widen_optab)
	{
	  /* We can use a signed multiply with unsigned types as long as
	     there is a wider mode to use, or it is the smaller of the two
	     types that is unsigned.  Note that type1 >= type2, always.  */
	  if ((TYPE_UNSIGNED (type1)
	       && TYPE_PRECISION (type1) == GET_MODE_PRECISION (from_mode))
	      || (TYPE_UNSIGNED (type2)
		  && TYPE_PRECISION (type2) == GET_MODE_PRECISION (from_mode)))
	    {
	      from_mode = GET_MODE_WIDER_MODE (from_mode);
	      if (GET_MODE_SIZE (to_mode) <= GET_MODE_SIZE (from_mode))
		return false;
	    }

	  op = smul_widen_optab;
	  handler = find_widening_optab_handler_and_mode (op, to_mode,
							  from_mode, 0,
							  &actual_mode);

	  if (handler == CODE_FOR_nothing)
	    return false;

	  from_unsigned1 = from_unsigned2 = false;
	}
      else
	return false;
    }

  /* Ensure that the inputs to the handler are in the correct precision
     for the opcode.  This will be the full mode size.  */
  actual_precision = GET_MODE_PRECISION (actual_mode);
  if (2 * actual_precision > TYPE_PRECISION (type))
    return false;
  if (actual_precision != TYPE_PRECISION (type1)
      || from_unsigned1 != TYPE_UNSIGNED (type1))
    rhs1 = build_and_insert_cast (gsi, loc,
				  build_nonstandard_integer_type
				    (actual_precision, from_unsigned1), rhs1);
  if (actual_precision != TYPE_PRECISION (type2)
      || from_unsigned2 != TYPE_UNSIGNED (type2))
    rhs2 = build_and_insert_cast (gsi, loc,
				  build_nonstandard_integer_type
				    (actual_precision, from_unsigned2), rhs2);

  /* Handle constants.  */
  if (TREE_CODE (rhs1) == INTEGER_CST)
    rhs1 = fold_convert (type1, rhs1);
  if (TREE_CODE (rhs2) == INTEGER_CST)
    rhs2 = fold_convert (type2, rhs2);

  gimple_assign_set_rhs1 (stmt, rhs1);
  gimple_assign_set_rhs2 (stmt, rhs2);
  gimple_assign_set_rhs_code (stmt, WIDEN_MULT_EXPR);
  update_stmt (stmt);
  widen_mul_stats.widen_mults_inserted++;
  return true;
}
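
/* Sketch of the overall effect, for illustration only: on a target with
   a 32x32->64 multiply pattern such as mulsidi3, the statement

     int a, b;
     long long r = (long long) a * (long long) b;

   is rewritten from a MULT_EXPR on extended operands into
   r = a w* b (WIDEN_MULT_EXPR), so that expansion can emit one widening
   multiply instruction instead of two extensions and a full 64-bit
   multiply.  */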

/* Process a single gimple statement STMT, which is found at the
   iterator GSI and has either a PLUS_EXPR or a MINUS_EXPR as its
   rhs (given by CODE), and try to convert it into a
   WIDEN_MULT_PLUS_EXPR or a WIDEN_MULT_MINUS_EXPR.  The return value
   is true iff we converted the statement.  */

static bool
convert_plusminus_to_widen (gimple_stmt_iterator *gsi, gimple stmt,
			    enum tree_code code)
{
  gimple rhs1_stmt = NULL, rhs2_stmt = NULL;
  gimple conv1_stmt = NULL, conv2_stmt = NULL, conv_stmt;
  tree type, type1, type2, optype;
  tree lhs, rhs1, rhs2, mult_rhs1, mult_rhs2, add_rhs;
  enum tree_code rhs1_code = ERROR_MARK, rhs2_code = ERROR_MARK;
  optab this_optab;
  enum tree_code wmult_code;
  enum insn_code handler;
  enum machine_mode to_mode, from_mode, actual_mode;
  location_t loc = gimple_location (stmt);
  int actual_precision;
  bool from_unsigned1, from_unsigned2;

  lhs = gimple_assign_lhs (stmt);
  type = TREE_TYPE (lhs);
  if (TREE_CODE (type) != INTEGER_TYPE
      && TREE_CODE (type) != FIXED_POINT_TYPE)
    return false;

  if (code == MINUS_EXPR)
    wmult_code = WIDEN_MULT_MINUS_EXPR;
  else
    wmult_code = WIDEN_MULT_PLUS_EXPR;

  rhs1 = gimple_assign_rhs1 (stmt);
  rhs2 = gimple_assign_rhs2 (stmt);

  if (TREE_CODE (rhs1) == SSA_NAME)
    {
      rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
      if (is_gimple_assign (rhs1_stmt))
	rhs1_code = gimple_assign_rhs_code (rhs1_stmt);
    }

  if (TREE_CODE (rhs2) == SSA_NAME)
    {
      rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
      if (is_gimple_assign (rhs2_stmt))
	rhs2_code = gimple_assign_rhs_code (rhs2_stmt);
    }

  /* Allow for one conversion statement between the multiply
     and addition/subtraction statement.  If there are more than
     one conversion then we assume they would invalidate this
     transformation.  If that's not the case then they should have
     been folded before now.  */
  if (CONVERT_EXPR_CODE_P (rhs1_code))
    {
      conv1_stmt = rhs1_stmt;
      rhs1 = gimple_assign_rhs1 (rhs1_stmt);
      if (TREE_CODE (rhs1) == SSA_NAME)
	{
	  rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
	  if (is_gimple_assign (rhs1_stmt))
	    rhs1_code = gimple_assign_rhs_code (rhs1_stmt);
	}
      else
	return false;
    }
  if (CONVERT_EXPR_CODE_P (rhs2_code))
    {
      conv2_stmt = rhs2_stmt;
      rhs2 = gimple_assign_rhs1 (rhs2_stmt);
      if (TREE_CODE (rhs2) == SSA_NAME)
	{
	  rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
	  if (is_gimple_assign (rhs2_stmt))
	    rhs2_code = gimple_assign_rhs_code (rhs2_stmt);
	}
      else
	return false;
    }

  /* If code is WIDEN_MULT_EXPR then it would seem unnecessary to call
     is_widening_mult_p, but we still need the rhs returns.

     It might also appear that it would be sufficient to use the existing
     operands of the widening multiply, but that would limit the choice of
     multiply-and-accumulate instructions.  */
  if (code == PLUS_EXPR
      && (rhs1_code == MULT_EXPR || rhs1_code == WIDEN_MULT_EXPR))
    {
      if (!is_widening_mult_p (rhs1_stmt, &type1, &mult_rhs1,
			       &type2, &mult_rhs2))
	return false;
      add_rhs = rhs2;
      conv_stmt = conv1_stmt;
    }
  else if (rhs2_code == MULT_EXPR || rhs2_code == WIDEN_MULT_EXPR)
    {
      if (!is_widening_mult_p (rhs2_stmt, &type1, &mult_rhs1,
			       &type2, &mult_rhs2))
	return false;
      add_rhs = rhs1;
      conv_stmt = conv2_stmt;
    }
  else
    return false;

  to_mode = TYPE_MODE (type);
  from_mode = TYPE_MODE (type1);
  from_unsigned1 = TYPE_UNSIGNED (type1);
  from_unsigned2 = TYPE_UNSIGNED (type2);
  optype = type1;

  /* There's no such thing as a mixed sign madd yet, so use a wider mode.  */
  if (from_unsigned1 != from_unsigned2)
    {
      if (!INTEGRAL_TYPE_P (type))
	return false;
      /* We can use a signed multiply with unsigned types as long as
	 there is a wider mode to use, or it is the smaller of the two
	 types that is unsigned.  Note that type1 >= type2, always.  */
      if ((TYPE_UNSIGNED (type1)
	   && TYPE_PRECISION (type1) == GET_MODE_PRECISION (from_mode))
	  || (TYPE_UNSIGNED (type2)
	      && TYPE_PRECISION (type2) == GET_MODE_PRECISION (from_mode)))
	{
	  from_mode = GET_MODE_WIDER_MODE (from_mode);
	  if (GET_MODE_SIZE (from_mode) >= GET_MODE_SIZE (to_mode))
	    return false;
	}

      from_unsigned1 = from_unsigned2 = false;
      optype = build_nonstandard_integer_type (GET_MODE_PRECISION (from_mode),
					       false);
    }

  /* If there was a conversion between the multiply and addition
     then we need to make sure it fits a multiply-and-accumulate.
     There should be a single mode change which does not change the
     value.  */
  if (conv_stmt)
    {
      /* We use the original, unmodified data types for this.  */
      tree from_type = TREE_TYPE (gimple_assign_rhs1 (conv_stmt));
      tree to_type = TREE_TYPE (gimple_assign_lhs (conv_stmt));
      int data_size = TYPE_PRECISION (type1) + TYPE_PRECISION (type2);
      bool is_unsigned = TYPE_UNSIGNED (type1) && TYPE_UNSIGNED (type2);

      if (TYPE_PRECISION (from_type) > TYPE_PRECISION (to_type))
	{
	  /* Conversion is a truncate.  */
	  if (TYPE_PRECISION (to_type) < data_size)
	    return false;
	}
      else if (TYPE_PRECISION (from_type) < TYPE_PRECISION (to_type))
	{
	  /* Conversion is an extend.  Check it's the right sort.  */
	  if (TYPE_UNSIGNED (from_type) != is_unsigned
	      && !(is_unsigned && TYPE_PRECISION (from_type) > data_size))
	    return false;
	}
      /* else convert is a no-op for our purposes.  */
    }

  /* Verify that the machine can perform a widening multiply
     accumulate in this mode/signedness combination, otherwise
     this transformation is likely to pessimize code.  */
  this_optab = optab_for_tree_code (wmult_code, optype, optab_default);
  handler = find_widening_optab_handler_and_mode (this_optab, to_mode,
						  from_mode, 0, &actual_mode);

  if (handler == CODE_FOR_nothing)
    return false;

  /* Ensure that the inputs to the handler are in the correct precision
     for the opcode.  This will be the full mode size.  */
  actual_precision = GET_MODE_PRECISION (actual_mode);
  if (actual_precision != TYPE_PRECISION (type1)
      || from_unsigned1 != TYPE_UNSIGNED (type1))
    mult_rhs1 = build_and_insert_cast (gsi, loc,
				       build_nonstandard_integer_type
					 (actual_precision, from_unsigned1),
				       mult_rhs1);
  if (actual_precision != TYPE_PRECISION (type2)
      || from_unsigned2 != TYPE_UNSIGNED (type2))
    mult_rhs2 = build_and_insert_cast (gsi, loc,
				       build_nonstandard_integer_type
					 (actual_precision, from_unsigned2),
				       mult_rhs2);

  if (!useless_type_conversion_p (type, TREE_TYPE (add_rhs)))
    add_rhs = build_and_insert_cast (gsi, loc, type, add_rhs);

  /* Handle constants.  */
  if (TREE_CODE (mult_rhs1) == INTEGER_CST)
    mult_rhs1 = fold_convert (type1, mult_rhs1);
  if (TREE_CODE (mult_rhs2) == INTEGER_CST)
    mult_rhs2 = fold_convert (type2, mult_rhs2);

  gimple_assign_set_rhs_with_ops_1 (gsi, wmult_code, mult_rhs1, mult_rhs2,
				    add_rhs);
  update_stmt (gsi_stmt (*gsi));
  widen_mul_stats.maccs_inserted++;
  return true;
}
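
/* Illustration only: the accumulate form targeted above is

     long long acc;
     int a, b;
     acc = acc + (long long) a * (long long) b;

   which becomes acc = WIDEN_MULT_PLUS_EXPR <a, b, acc>, so that targets
   providing a multiply-and-accumulate pattern such as maddsidi4 can use
   it directly; the MINUS_EXPR case maps to WIDEN_MULT_MINUS_EXPR in the
   same way.  */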

/* Combine the multiplication at MUL_STMT with operands MULOP1 and MULOP2
   with uses in additions and subtractions to form fused multiply-add
   operations.  Returns true if successful and MUL_STMT should be removed.  */

static bool
convert_mult_to_fma (gimple mul_stmt, tree op1, tree op2)
{
  tree mul_result = gimple_get_lhs (mul_stmt);
  tree type = TREE_TYPE (mul_result);
  gimple use_stmt, neguse_stmt, fma_stmt;
  use_operand_p use_p;
  imm_use_iterator imm_iter;

  if (FLOAT_TYPE_P (type)
      && flag_fp_contract_mode == FP_CONTRACT_OFF)
    return false;

  /* We don't want to do bitfield reduction ops.  */
  if (INTEGRAL_TYPE_P (type)
      && (TYPE_PRECISION (type)
	  != GET_MODE_PRECISION (TYPE_MODE (type))))
    return false;

  /* If the target doesn't support it, don't generate it.  We assume that
     if fma isn't available then fms, fnma or fnms are not either.  */
  if (optab_handler (fma_optab, TYPE_MODE (type)) == CODE_FOR_nothing)
    return false;

  /* If the multiplication has zero uses, it is kept around probably because
     of -fnon-call-exceptions.  Don't optimize it away in that case,
     that is DCE's job.  */
  if (has_zero_uses (mul_result))
    return false;

  /* Make sure that the multiplication statement becomes dead after
     the transformation, thus that all uses are transformed to FMAs.
     This means we assume that an FMA operation has the same cost
     as an addition.  */
  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, mul_result)
    {
      enum tree_code use_code;
      tree result = mul_result;
      bool negate_p = false;

      use_stmt = USE_STMT (use_p);

      if (is_gimple_debug (use_stmt))
	continue;

      /* For now restrict these operations to single basic blocks.  In theory
	 we would want to support sinking the multiplication in
	 m = a*b;
	 if ()
	   ma = m + c;
	 else
	   d = m;
	 to form a fma in the then block and sink the multiplication to the
	 else block.  */
      if (gimple_bb (use_stmt) != gimple_bb (mul_stmt))
	return false;

      if (!is_gimple_assign (use_stmt))
	return false;

      use_code = gimple_assign_rhs_code (use_stmt);

      /* A negate on the multiplication leads to FNMA.  */
      if (use_code == NEGATE_EXPR)
	{
	  ssa_op_iter iter;
	  use_operand_p usep;

	  result = gimple_assign_lhs (use_stmt);

	  /* Make sure the negate statement becomes dead with this
	     single transformation.  */
	  if (!single_imm_use (gimple_assign_lhs (use_stmt),
			       &use_p, &neguse_stmt))
	    return false;

	  /* Make sure the multiplication isn't also used on that stmt.  */
	  FOR_EACH_PHI_OR_STMT_USE (usep, neguse_stmt, iter, SSA_OP_USE)
	    if (USE_FROM_PTR (usep) == mul_result)
	      return false;

	  use_stmt = neguse_stmt;
	  if (gimple_bb (use_stmt) != gimple_bb (mul_stmt))
	    return false;
	  if (!is_gimple_assign (use_stmt))
	    return false;

	  use_code = gimple_assign_rhs_code (use_stmt);
	  negate_p = true;
	}

      switch (use_code)
	{
	case MINUS_EXPR:
	  if (gimple_assign_rhs2 (use_stmt) == result)
	    negate_p = !negate_p;
	  break;
	case PLUS_EXPR:
	  break;
	default:
	  /* FMA can only be formed from PLUS and MINUS.  */
	  return false;
	}

      /* If the subtrahend (gimple_assign_rhs2 (use_stmt)) is computed
	 by a MULT_EXPR that we'll visit later, we might be able to
	 get a more profitable match with fnma.
	 OTOH, if we don't, a negate / fma pair has likely lower latency
	 than a mult / subtract pair.  */
      if (use_code == MINUS_EXPR && !negate_p
	  && gimple_assign_rhs1 (use_stmt) == result
	  && optab_handler (fms_optab, TYPE_MODE (type)) == CODE_FOR_nothing
	  && optab_handler (fnma_optab, TYPE_MODE (type)) != CODE_FOR_nothing)
	{
	  tree rhs2 = gimple_assign_rhs2 (use_stmt);

	  if (TREE_CODE (rhs2) == SSA_NAME)
	    {
	      gimple stmt2 = SSA_NAME_DEF_STMT (rhs2);
	      if (has_single_use (rhs2)
		  && is_gimple_assign (stmt2)
		  && gimple_assign_rhs_code (stmt2) == MULT_EXPR)
		return false;
	    }
	}

      /* We can't handle a * b + a * b.  */
      if (gimple_assign_rhs1 (use_stmt) == gimple_assign_rhs2 (use_stmt))
	return false;

      /* While it is possible to validate whether or not the exact form
	 that we've recognized is available in the backend, the assumption
	 is that the transformation is never a loss.  For instance, suppose
	 the target only has the plain FMA pattern available.  Consider
	 a*b-c -> fma(a,b,-c): we've exchanged MUL+SUB for FMA+NEG, which
	 is still two operations.  Consider -(a*b)-c -> fma(-a,b,-c): we
	 still have 3 operations, but in the FMA form the two NEGs are
	 independent and could be run in parallel.  */
    }

  FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, mul_result)
    {
      gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
      enum tree_code use_code;
      tree addop, mulop1 = op1, result = mul_result;
      bool negate_p = false;

      if (is_gimple_debug (use_stmt))
	continue;

      use_code = gimple_assign_rhs_code (use_stmt);
      if (use_code == NEGATE_EXPR)
	{
	  result = gimple_assign_lhs (use_stmt);
	  single_imm_use (gimple_assign_lhs (use_stmt), &use_p, &neguse_stmt);
	  gsi_remove (&gsi, true);
	  release_defs (use_stmt);

	  use_stmt = neguse_stmt;
	  gsi = gsi_for_stmt (use_stmt);
	  use_code = gimple_assign_rhs_code (use_stmt);
	  negate_p = true;
	}

      if (gimple_assign_rhs1 (use_stmt) == result)
	{
	  addop = gimple_assign_rhs2 (use_stmt);
	  /* a * b - c -> a * b + (-c)  */
	  if (gimple_assign_rhs_code (use_stmt) == MINUS_EXPR)
	    addop = force_gimple_operand_gsi (&gsi,
					      build1 (NEGATE_EXPR,
						      type, addop),
					      true, NULL_TREE, true,
					      GSI_SAME_STMT);
	}
      else
	{
	  addop = gimple_assign_rhs1 (use_stmt);
	  /* a - b * c -> (-b) * c + a */
	  if (gimple_assign_rhs_code (use_stmt) == MINUS_EXPR)
	    negate_p = !negate_p;
	}

      if (negate_p)
	mulop1 = force_gimple_operand_gsi (&gsi,
					   build1 (NEGATE_EXPR,
						   type, mulop1),
					   true, NULL_TREE, true,
					   GSI_SAME_STMT);

      fma_stmt = gimple_build_assign_with_ops (FMA_EXPR,
					       gimple_assign_lhs (use_stmt),
					       mulop1, op2,
					       addop);
      gsi_replace (&gsi, fma_stmt, true);
      widen_mul_stats.fmas_inserted++;
    }

  return true;
}
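
/* Illustrative example only: when contraction is enabled
   (-ffp-contract=fast), the sequence

     double a, b, c, d;
     d = a * b + c;

   becomes d = FMA_EXPR <a, b, c> provided the target has an fma pattern
   for the mode; d = a * b - c is handled as an FMA with a negated addend,
   and a negated multiplication feeds the fnma/fnms forms at expansion
   time.  */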

/* Find integer multiplications where the operands are extended from
   smaller types, and replace the MULT_EXPR with a WIDEN_MULT_EXPR
   where appropriate.  */

static unsigned int
execute_optimize_widening_mul (void)
{
  basic_block bb;
  bool cfg_changed = false;

  memset (&widen_mul_stats, 0, sizeof (widen_mul_stats));

  FOR_EACH_BB (bb)
    {
      gimple_stmt_iterator gsi;

      for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi);)
	{
	  gimple stmt = gsi_stmt (gsi);
	  enum tree_code code;

	  if (is_gimple_assign (stmt))
	    {
	      code = gimple_assign_rhs_code (stmt);
	      switch (code)
		{
		case MULT_EXPR:
		  if (!convert_mult_to_widen (stmt, &gsi)
		      && convert_mult_to_fma (stmt,
					      gimple_assign_rhs1 (stmt),
					      gimple_assign_rhs2 (stmt)))
		    {
		      gsi_remove (&gsi, true);
		      release_defs (stmt);
		      continue;
		    }
		  break;

		case PLUS_EXPR:
		case MINUS_EXPR:
		  convert_plusminus_to_widen (&gsi, stmt, code);
		  break;

		default:;
		}
	    }
	  else if (is_gimple_call (stmt)
		   && gimple_call_lhs (stmt))
	    {
	      tree fndecl = gimple_call_fndecl (stmt);
	      if (fndecl
		  && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
		{
		  switch (DECL_FUNCTION_CODE (fndecl))
		    {
		    case BUILT_IN_POWF:
		    case BUILT_IN_POW:
		    case BUILT_IN_POWL:
		      if (TREE_CODE (gimple_call_arg (stmt, 1)) == REAL_CST
			  && REAL_VALUES_EQUAL
			       (TREE_REAL_CST (gimple_call_arg (stmt, 1)),
				dconst2)
			  && convert_mult_to_fma (stmt,
						  gimple_call_arg (stmt, 0),
						  gimple_call_arg (stmt, 0)))
			{
			  unlink_stmt_vdef (stmt);
			  if (gsi_remove (&gsi, true)
			      && gimple_purge_dead_eh_edges (bb))
			    cfg_changed = true;
			  release_defs (stmt);
			  continue;
			}
		      break;

		    default:;
		    }
		}
	    }
	  gsi_next (&gsi);
	}
    }

  statistics_counter_event (cfun, "widening multiplications inserted",
			    widen_mul_stats.widen_mults_inserted);
  statistics_counter_event (cfun, "widening maccs inserted",
			    widen_mul_stats.maccs_inserted);
  statistics_counter_event (cfun, "fused multiply-adds inserted",
			    widen_mul_stats.fmas_inserted);

  return cfg_changed ? TODO_cleanup_cfg : 0;
}
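
/* Worked example for the builtin case above, illustrative only: for

     double x, y;
     y = __builtin_pow (x, 2.0);

   the constant exponent is matched against dconst2, and
   convert_mult_to_fma is called with X as both multiplication operands,
   so an addition consuming Y can still become a single fma and the pow
   call is removed.  */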

static bool
gate_optimize_widening_mul (void)
{
  return flag_expensive_optimizations && optimize;
}

namespace {

const pass_data pass_data_optimize_widening_mul =
{
  GIMPLE_PASS, /* type */
  "widening_mul", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  true, /* has_gate */
  true, /* has_execute */
  TV_NONE, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_verify_ssa | TODO_verify_stmts
    | TODO_update_ssa ), /* todo_flags_finish */
};

class pass_optimize_widening_mul : public gimple_opt_pass
{
public:
  pass_optimize_widening_mul (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_optimize_widening_mul, ctxt)
  {}

  /* opt_pass methods: */
  bool gate () { return gate_optimize_widening_mul (); }
  unsigned int execute () { return execute_optimize_widening_mul (); }

}; // class pass_optimize_widening_mul

} // anon namespace

gimple_opt_pass *
make_pass_optimize_widening_mul (gcc::context *ctxt)
{
  return new pass_optimize_widening_mul (ctxt);
}