/* Global, SSA-based optimizations using mathematical identities.
   Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* Currently, the only mini-pass in this file tries to CSE reciprocal
   operations.  These are common in sequences such as this one:

	modulus = sqrt(x*x + y*y + z*z);
	a = a / modulus;
	b = b / modulus;
	c = c / modulus;

   that can be optimized to

	modulus = sqrt(x*x + y*y + z*z);
	rmodulus = 1.0 / modulus;
	a = a * rmodulus;
	b = b * rmodulus;
	c = c * rmodulus;

   We do this for loop invariant divisors, and with this pass whenever
   we notice that a division has the same divisor multiple times.
   Of course, like in PRE, we don't insert a division if a dominator
   already has one.  However, this cannot be done as an extension of
   PRE for several reasons.

   First of all, with some experiments it was found out that the
   transformation is not always useful if there are only two divisions
   by the same divisor.  This is probably because modern processors
   can pipeline the divisions; on older, in-order processors it should
   still be effective to optimize two divisions by the same number.
   We make this a param, and it shall be called N in the remainder of
   this comment.

   Second, if trapping math is active, we have less freedom on where
   to insert divisions: we can only do so in basic blocks that already
   contain one.  (If divisions don't trap, instead, we can insert
   divisions elsewhere, which will be in blocks that are common dominators
   of those that have the division).
   We really don't want to compute the reciprocal unless a division will
   be found.  To do this, we won't insert the division in a basic block
   that has less than N divisions *post-dominating* it.

   The algorithm constructs a subset of the dominator tree, holding the
   blocks containing the divisions and the common dominators to them,
   and walks it twice.  The first walk is in post-order, and it annotates
   each block with the number of divisions that post-dominate it: this
   gives information on where divisions can be inserted profitably.
   The second walk is in pre-order, and it inserts divisions as explained
   above, and replaces divisions by multiplications.

   In the best case, the cost of the pass is O(n_statements).  In the
   worst case, the cost is due to creating the dominator tree subset,
   with a cost of O(n_basic_blocks ^ 2); however this can only happen
   for n_statements / n_basic_blocks statements.  So, the amortized cost
   of creating the dominator tree subset is O(n_basic_blocks) and the
   worst-case cost of the pass is O(n_statements * n_basic_blocks).

   More practically, the cost will be small because there are few
   divisions, and they tend to be in the same basic block, so insert_bb
   is called very few times.

   If we did this using domwalk.c, an efficient implementation would have
   to work on all the variables in a single pass, because we could not
   work on just a subset of the dominator tree, as we do now, and the
   cost would also be something like O(n_statements * n_basic_blocks).
   The data structures would be more complex in order to work on all the
   variables in a single pass.  */
#include "coretypes.h"
#include "tree-flow.h"
#include "tree-pass.h"
#include "alloc-pool.h"
#include "basic-block.h"
#include "gimple-pretty-print.h"

/* FIXME: RTL headers have to be included here for optabs.  */
#include "rtl.h"		/* Because optabs.h wants enum rtx_code.  */
#include "expr.h"		/* Because optabs.h wants sepops.  */
/* This structure represents one basic block that either computes a
   division, or is a common dominator for basic blocks that compute a
   division.  */
struct occurrence {
  /* The basic block represented by this structure.  */
  basic_block bb;

  /* If non-NULL, the SSA_NAME holding the definition for a reciprocal
     inserted in BB.  */
  tree recip_def;

  /* If non-NULL, the GIMPLE_ASSIGN for a reciprocal computation that
     was inserted in BB.  */
  gimple recip_def_stmt;

  /* Pointer to a list of "struct occurrence"s for blocks dominated
     by BB.  */
  struct occurrence *children;

  /* Pointer to the next "struct occurrence"s in the list of blocks
     sharing a common dominator.  */
  struct occurrence *next;

  /* The number of divisions that are in BB before compute_merit.  The
     number of divisions that are in BB or post-dominate it after
     compute_merit.  */
  int num_divisions;

  /* True if the basic block has a division, false if it is a common
     dominator for basic blocks that do.  If it is false and trapping
     math is active, BB is not a candidate for inserting a reciprocal.  */
  bool bb_has_division;
};
static struct
{
  /* Number of 1.0/X ops inserted.  */
  int rdivs_inserted;

  /* Number of 1.0/FUNC ops inserted.  */
  int rfuncs_inserted;
} reciprocal_stats;

static struct
{
  /* Number of cexpi calls inserted.  */
  int inserted;
} sincos_stats;

static struct
{
  /* Number of hand-written 32-bit bswaps found.  */
  int found_32bit;

  /* Number of hand-written 64-bit bswaps found.  */
  int found_64bit;
} bswap_stats;

static struct
{
  /* Number of widening multiplication ops inserted.  */
  int widen_mults_inserted;

  /* Number of integer multiply-and-accumulate ops inserted.  */
  int maccs_inserted;

  /* Number of fp fused multiply-add ops inserted.  */
  int fmas_inserted;
} widen_mul_stats;
/* The instance of "struct occurrence" representing the highest
   interesting block in the dominator tree.  */
static struct occurrence *occ_head;

/* Allocation pool for getting instances of "struct occurrence".  */
static alloc_pool occ_pool;
/* Allocate and return a new struct occurrence for basic block BB, and
   whose children list is headed by CHILDREN.  */
static struct occurrence *
occ_new (basic_block bb, struct occurrence *children)
{
  struct occurrence *occ;

  bb->aux = occ = (struct occurrence *) pool_alloc (occ_pool);
  memset (occ, 0, sizeof (struct occurrence));

  occ->bb = bb;
  occ->children = children;
  return occ;
}
/* Insert NEW_OCC into our subset of the dominator tree.  P_HEAD points to a
   list of "struct occurrence"s, one per basic block, having IDOM as
   their common dominator.

   We try to insert NEW_OCC as deep as possible in the tree, and we also
   insert any other block that is a common dominator for BB and one
   block already in the tree.  */
static void
insert_bb (struct occurrence *new_occ, basic_block idom,
	   struct occurrence **p_head)
{
  struct occurrence *occ, **p_occ;

  for (p_occ = p_head; (occ = *p_occ) != NULL; )
    {
      basic_block bb = new_occ->bb, occ_bb = occ->bb;
      basic_block dom = nearest_common_dominator (CDI_DOMINATORS, occ_bb, bb);
      if (dom == bb)
	{
	  /* BB dominates OCC_BB.  OCC becomes NEW_OCC's child: remove OCC
	     from the list.  */
	  *p_occ = occ->next;
	  occ->next = new_occ->children;
	  new_occ->children = occ;

	  /* Try the next block (it may as well be dominated by BB).  */
	}

      else if (dom == occ_bb)
	{
	  /* OCC_BB dominates BB.  Tail recurse to look deeper.  */
	  insert_bb (new_occ, dom, &occ->children);
	  return;
	}

      else if (dom != idom)
	{
	  gcc_assert (!dom->aux);

	  /* There is a dominator between IDOM and BB, add it and make
	     two children out of NEW_OCC and OCC.  First, remove OCC from
	     its list.  */
	  *p_occ = occ->next;
	  new_occ->next = occ;
	  occ->next = NULL;

	  /* None of the previous blocks has DOM as a dominator: if we tail
	     recursed, we would reexamine them uselessly.  Just switch BB with
	     DOM, and go on looking for blocks dominated by DOM.  */
	  new_occ = occ_new (dom, new_occ);
	}

      else
	{
	  /* Nothing special, go on with the next element.  */
	  p_occ = &occ->next;
	}
    }

  /* No place was found as a child of IDOM.  Make BB a sibling of IDOM.  */
  new_occ->next = *p_head;
  *p_head = new_occ;
}
/* Register that we found a division in BB.  */

static inline void
register_division_in (basic_block bb)
{
  struct occurrence *occ;

  occ = (struct occurrence *) bb->aux;
  if (!occ)
    {
      occ = occ_new (bb, NULL);
      insert_bb (occ, ENTRY_BLOCK_PTR, &occ_head);
    }

  occ->bb_has_division = true;
  occ->num_divisions++;
}
/* Compute the number of divisions that postdominate each block in OCC and
   its children.  */

static void
compute_merit (struct occurrence *occ)
{
  struct occurrence *occ_child;
  basic_block dom = occ->bb;

  for (occ_child = occ->children; occ_child; occ_child = occ_child->next)
    {
      basic_block bb;
      if (occ_child->children)
	compute_merit (occ_child);

      if (flag_exceptions)
	bb = single_noncomplex_succ (dom);
      else
	bb = dom;

      if (dominated_by_p (CDI_POST_DOMINATORS, bb, occ_child->bb))
	occ->num_divisions += occ_child->num_divisions;
    }
}
/* Return whether USE_STMT is a floating-point division by DEF.  */
static inline bool
is_division_by (gimple use_stmt, tree def)
{
  return is_gimple_assign (use_stmt)
	 && gimple_assign_rhs_code (use_stmt) == RDIV_EXPR
	 && gimple_assign_rhs2 (use_stmt) == def
	 /* Do not recognize x / x as valid division, as we are getting
	    confused later by replacing all immediate uses x in such
	    a stmt.  */
	 && gimple_assign_rhs1 (use_stmt) != def;
}
/* Walk the subset of the dominator tree rooted at OCC, setting the
   RECIP_DEF field to a definition of 1.0 / DEF that can be used in
   the given basic block.  The field may be left NULL, of course,
   if it is not possible or profitable to do the optimization.

   DEF_BSI is an iterator pointing at the statement defining DEF.
   If RECIP_DEF is set, a dominator already has a computation that can
   be used.  */
static void
insert_reciprocals (gimple_stmt_iterator *def_gsi, struct occurrence *occ,
		    tree def, tree recip_def, int threshold)
{
  tree type;
  gimple new_stmt;
  gimple_stmt_iterator gsi;
  struct occurrence *occ_child;

  if (!recip_def
      && (occ->bb_has_division || !flag_trapping_math)
      && occ->num_divisions >= threshold)
    {
      /* Make a variable with the replacement and substitute it.  */
      type = TREE_TYPE (def);
      recip_def = make_rename_temp (type, "reciptmp");
      new_stmt = gimple_build_assign_with_ops (RDIV_EXPR, recip_def,
					       build_one_cst (type), def);

      if (occ->bb_has_division)
	{
	  /* Case 1: insert before an existing division.  */
	  gsi = gsi_after_labels (occ->bb);
	  while (!gsi_end_p (gsi) && !is_division_by (gsi_stmt (gsi), def))
	    gsi_next (&gsi);

	  gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
	}
      else if (def_gsi && occ->bb == def_gsi->bb)
	{
	  /* Case 2: insert right after the definition.  Note that this will
	     never happen if the definition statement can throw, because in
	     that case the sole successor of the statement's basic block will
	     dominate all the uses as well.  */
	  gsi_insert_after (def_gsi, new_stmt, GSI_NEW_STMT);
	}
      else
	{
	  /* Case 3: insert in a basic block not containing defs/uses.  */
	  gsi = gsi_after_labels (occ->bb);
	  gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
	}

      reciprocal_stats.rdivs_inserted++;

      occ->recip_def_stmt = new_stmt;
    }

  occ->recip_def = recip_def;
  for (occ_child = occ->children; occ_child; occ_child = occ_child->next)
    insert_reciprocals (def_gsi, occ_child, def, recip_def, threshold);
}
/* Replace the division at USE_P with a multiplication by the reciprocal, if
   possible.  */

static inline void
replace_reciprocal (use_operand_p use_p)
{
  gimple use_stmt = USE_STMT (use_p);
  basic_block bb = gimple_bb (use_stmt);
  struct occurrence *occ = (struct occurrence *) bb->aux;

  if (optimize_bb_for_speed_p (bb)
      && occ->recip_def && use_stmt != occ->recip_def_stmt)
    {
      gimple_assign_set_rhs_code (use_stmt, MULT_EXPR);
      SET_USE (use_p, occ->recip_def);
      fold_stmt_inplace (use_stmt);
      update_stmt (use_stmt);
    }
}
/* Free OCC and return one more "struct occurrence" to be freed.  */

static struct occurrence *
free_bb (struct occurrence *occ)
{
  struct occurrence *child, *next;

  /* First get the two pointers hanging off OCC.  */
  next = occ->next;
  child = occ->children;
  occ->bb->aux = NULL;
  pool_free (occ_pool, occ);

  /* Now ensure that we don't recurse unless it is necessary.  */
  if (!child)
    return next;
  else
    {
      while (next)
	next = free_bb (next);

      return child;
    }
}
/* Look for floating-point divisions among DEF's uses, and try to
   replace them by multiplications with the reciprocal.  Add
   as many statements computing the reciprocal as needed.

   DEF must be a GIMPLE register of a floating-point type.  */

static void
execute_cse_reciprocals_1 (gimple_stmt_iterator *def_gsi, tree def)
{
  use_operand_p use_p;
  imm_use_iterator use_iter;
  struct occurrence *occ;
  int count = 0, threshold;

  gcc_assert (FLOAT_TYPE_P (TREE_TYPE (def)) && is_gimple_reg (def));

  FOR_EACH_IMM_USE_FAST (use_p, use_iter, def)
    {
      gimple use_stmt = USE_STMT (use_p);
      if (is_division_by (use_stmt, def))
	{
	  register_division_in (gimple_bb (use_stmt));
	  count++;
	}
    }

  /* Do the expensive part only if we can hope to optimize something.  */
  threshold = targetm.min_divisions_for_recip_mul (TYPE_MODE (TREE_TYPE (def)));
  if (count >= threshold)
    {
      gimple use_stmt;
      for (occ = occ_head; occ; occ = occ->next)
	{
	  compute_merit (occ);
	  insert_reciprocals (def_gsi, occ, def, NULL, threshold);
	}

      FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, def)
	{
	  if (is_division_by (use_stmt, def))
	    {
	      FOR_EACH_IMM_USE_ON_STMT (use_p, use_iter)
		replace_reciprocal (use_p);
	    }
	}
    }

  for (occ = occ_head; occ; )
    occ = free_bb (occ);

  occ_head = NULL;
}
static bool
gate_cse_reciprocals (void)
{
  return optimize && flag_reciprocal_math;
}
/* Go through all the floating-point SSA_NAMEs, and call
   execute_cse_reciprocals_1 on each of them.  */
static unsigned int
execute_cse_reciprocals (void)
{
  basic_block bb;
  tree arg;

  occ_pool = create_alloc_pool ("dominators for recip",
				sizeof (struct occurrence),
				n_basic_blocks / 3 + 1);

  memset (&reciprocal_stats, 0, sizeof (reciprocal_stats));
  calculate_dominance_info (CDI_DOMINATORS);
  calculate_dominance_info (CDI_POST_DOMINATORS);

#ifdef ENABLE_CHECKING
  FOR_EACH_BB (bb)
    gcc_assert (!bb->aux);
#endif

  for (arg = DECL_ARGUMENTS (cfun->decl); arg; arg = DECL_CHAIN (arg))
    if (gimple_default_def (cfun, arg)
	&& FLOAT_TYPE_P (TREE_TYPE (arg))
	&& is_gimple_reg (arg))
      execute_cse_reciprocals_1 (NULL, gimple_default_def (cfun, arg));

  FOR_EACH_BB (bb)
    {
      gimple_stmt_iterator gsi;
      gimple phi;
      tree def;

      for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  phi = gsi_stmt (gsi);
	  def = PHI_RESULT (phi);
	  if (FLOAT_TYPE_P (TREE_TYPE (def))
	      && is_gimple_reg (def))
	    execute_cse_reciprocals_1 (NULL, def);
	}

      for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple stmt = gsi_stmt (gsi);

	  if (gimple_has_lhs (stmt)
	      && (def = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_DEF)) != NULL
	      && FLOAT_TYPE_P (TREE_TYPE (def))
	      && TREE_CODE (def) == SSA_NAME)
	    execute_cse_reciprocals_1 (&gsi, def);
	}

      if (optimize_bb_for_size_p (bb))
	continue;

      /* Scan for a/func(b) and convert it to reciprocal a*rfunc(b).  */
      for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple stmt = gsi_stmt (gsi);
	  tree fndecl;

	  if (is_gimple_assign (stmt)
	      && gimple_assign_rhs_code (stmt) == RDIV_EXPR)
	    {
	      tree arg1 = gimple_assign_rhs2 (stmt);
	      gimple stmt1;

	      if (TREE_CODE (arg1) != SSA_NAME)
		continue;

	      stmt1 = SSA_NAME_DEF_STMT (arg1);

	      if (is_gimple_call (stmt1)
		  && gimple_call_lhs (stmt1)
		  && (fndecl = gimple_call_fndecl (stmt1))
		  && (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
		      || DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD))
		{
		  enum built_in_function code;
		  bool md_code, fail;
		  imm_use_iterator ui;
		  use_operand_p use_p;

		  code = DECL_FUNCTION_CODE (fndecl);
		  md_code = DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD;

		  fndecl = targetm.builtin_reciprocal (code, md_code, false);
		  if (!fndecl)
		    continue;

		  /* Check that all uses of the SSA name are divisions,
		     otherwise replacing the defining statement will do
		     the wrong thing.  */
		  fail = false;
		  FOR_EACH_IMM_USE_FAST (use_p, ui, arg1)
		    {
		      gimple stmt2 = USE_STMT (use_p);
		      if (is_gimple_debug (stmt2))
			continue;
		      if (!is_gimple_assign (stmt2)
			  || gimple_assign_rhs_code (stmt2) != RDIV_EXPR
			  || gimple_assign_rhs1 (stmt2) == arg1
			  || gimple_assign_rhs2 (stmt2) != arg1)
			{
			  fail = true;
			  break;
			}
		    }
		  if (fail)
		    continue;

		  gimple_replace_lhs (stmt1, arg1);
		  gimple_call_set_fndecl (stmt1, fndecl);
		  update_stmt (stmt1);
		  reciprocal_stats.rfuncs_inserted++;

		  FOR_EACH_IMM_USE_STMT (stmt, ui, arg1)
		    {
		      gimple_assign_set_rhs_code (stmt, MULT_EXPR);
		      fold_stmt_inplace (stmt);
		      update_stmt (stmt);
		    }
		}
	    }
	}
    }

  statistics_counter_event (cfun, "reciprocal divs inserted",
			    reciprocal_stats.rdivs_inserted);
  statistics_counter_event (cfun, "reciprocal functions inserted",
			    reciprocal_stats.rfuncs_inserted);

  free_dominance_info (CDI_DOMINATORS);
  free_dominance_info (CDI_POST_DOMINATORS);
  free_alloc_pool (occ_pool);
  return 0;
}
struct gimple_opt_pass pass_cse_reciprocals =
{
 {
  GIMPLE_PASS,
  "recip",				/* name */
  gate_cse_reciprocals,			/* gate */
  execute_cse_reciprocals,		/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_NONE,				/* tv_id */
  PROP_ssa,				/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  TODO_dump_func | TODO_update_ssa | TODO_verify_ssa
    | TODO_verify_stmts			/* todo_flags_finish */
 }
};
/* Records an occurrence at statement USE_STMT in the vector of trees
   STMTS if it is dominated by *TOP_BB or dominates it or this basic block
   is not yet initialized.  Returns true if the occurrence was pushed on
   the vector.  Adjusts *TOP_BB to be the basic block dominating all
   statements in the vector.  */

static bool
maybe_record_sincos (VEC(gimple, heap) **stmts,
		     basic_block *top_bb, gimple use_stmt)
{
  basic_block use_bb = gimple_bb (use_stmt);
  if (*top_bb
      && (*top_bb == use_bb
	  || dominated_by_p (CDI_DOMINATORS, use_bb, *top_bb)))
    VEC_safe_push (gimple, heap, *stmts, use_stmt);
  else if (!*top_bb
	   || dominated_by_p (CDI_DOMINATORS, *top_bb, use_bb))
    {
      VEC_safe_push (gimple, heap, *stmts, use_stmt);
      *top_bb = use_bb;
    }
  else
    return false;

  return true;
}
/* Look for sin, cos and cexpi calls with the same argument NAME and
   create a single call to cexpi CSEing the result in this case.
   We first walk over all immediate uses of the argument collecting
   statements that we can CSE in a vector and in a second pass replace
   the statement rhs with a REALPART or IMAGPART expression on the
   result of the cexpi call we insert before the use statement that
   dominates all other candidates.  */
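
/* For illustration (a sketch, not taken from this pass's dumps): given

     s = sinf (x);
     c = cosf (x);

   the pass emits one combined call and rewrites both uses, roughly as

     sincostmp = __builtin_cexpif (x);
     c = REALPART_EXPR <sincostmp>;
     s = IMAGPART_EXPR <sincostmp>;

   since cexpi(x) computes cos(x) + i*sin(x).  */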
static bool
execute_cse_sincos_1 (tree name)
{
  gimple_stmt_iterator gsi;
  imm_use_iterator use_iter;
  tree fndecl, res, type;
  gimple def_stmt, use_stmt, stmt;
  int seen_cos = 0, seen_sin = 0, seen_cexpi = 0;
  VEC(gimple, heap) *stmts = NULL;
  basic_block top_bb = NULL;
  int i;
  bool cfg_changed = false;

  type = TREE_TYPE (name);
  FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, name)
    {
      if (gimple_code (use_stmt) != GIMPLE_CALL
	  || !gimple_call_lhs (use_stmt)
	  || !(fndecl = gimple_call_fndecl (use_stmt))
	  || DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_NORMAL)
	continue;

      switch (DECL_FUNCTION_CODE (fndecl))
	{
	CASE_FLT_FN (BUILT_IN_COS):
	  seen_cos |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
	  break;

	CASE_FLT_FN (BUILT_IN_SIN):
	  seen_sin |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
	  break;

	CASE_FLT_FN (BUILT_IN_CEXPI):
	  seen_cexpi |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
	  break;

	default:;
	}
    }

  if (seen_cos + seen_sin + seen_cexpi <= 1)
    {
      VEC_free(gimple, heap, stmts);
      return false;
    }

  /* Simply insert cexpi at the beginning of top_bb but not earlier than
     the name def statement.  */
  fndecl = mathfn_built_in (type, BUILT_IN_CEXPI);
  if (!fndecl)
    return false;
  res = create_tmp_reg (TREE_TYPE (TREE_TYPE (fndecl)), "sincostmp");
  stmt = gimple_build_call (fndecl, 1, name);
  res = make_ssa_name (res, stmt);
  gimple_call_set_lhs (stmt, res);

  def_stmt = SSA_NAME_DEF_STMT (name);
  if (!SSA_NAME_IS_DEFAULT_DEF (name)
      && gimple_code (def_stmt) != GIMPLE_PHI
      && gimple_bb (def_stmt) == top_bb)
    {
      gsi = gsi_for_stmt (def_stmt);
      gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
    }
  else
    {
      gsi = gsi_after_labels (top_bb);
      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
    }
  sincos_stats.inserted++;

  /* And adjust the recorded old call sites.  */
  for (i = 0; VEC_iterate(gimple, stmts, i, use_stmt); ++i)
    {
      tree rhs = NULL_TREE;
      fndecl = gimple_call_fndecl (use_stmt);

      switch (DECL_FUNCTION_CODE (fndecl))
	{
	CASE_FLT_FN (BUILT_IN_COS):
	  rhs = fold_build1 (REALPART_EXPR, type, res);
	  break;

	CASE_FLT_FN (BUILT_IN_SIN):
	  rhs = fold_build1 (IMAGPART_EXPR, type, res);
	  break;

	CASE_FLT_FN (BUILT_IN_CEXPI):
	  rhs = res;
	  break;

	default:
	  gcc_unreachable ();
	}

      /* Replace call with a copy.  */
      stmt = gimple_build_assign (gimple_call_lhs (use_stmt), rhs);

      gsi = gsi_for_stmt (use_stmt);
      gsi_replace (&gsi, stmt, true);
      if (gimple_purge_dead_eh_edges (gimple_bb (stmt)))
	cfg_changed = true;
    }

  VEC_free(gimple, heap, stmts);

  return cfg_changed;
}
/* Go through all calls to sin, cos and cexpi and call execute_cse_sincos_1
   on the SSA_NAME argument of each of them.  */

static unsigned int
execute_cse_sincos (void)
{
  basic_block bb;
  bool cfg_changed = false;

  calculate_dominance_info (CDI_DOMINATORS);
  memset (&sincos_stats, 0, sizeof (sincos_stats));

  FOR_EACH_BB (bb)
    {
      gimple_stmt_iterator gsi;

      for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple stmt = gsi_stmt (gsi);
	  tree fndecl;

	  if (is_gimple_call (stmt)
	      && gimple_call_lhs (stmt)
	      && (fndecl = gimple_call_fndecl (stmt))
	      && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
	    {
	      tree arg;

	      switch (DECL_FUNCTION_CODE (fndecl))
		{
		CASE_FLT_FN (BUILT_IN_COS):
		CASE_FLT_FN (BUILT_IN_SIN):
		CASE_FLT_FN (BUILT_IN_CEXPI):
		  arg = gimple_call_arg (stmt, 0);
		  if (TREE_CODE (arg) == SSA_NAME)
		    cfg_changed |= execute_cse_sincos_1 (arg);
		  break;

		default:;
		}
	    }
	}
    }

  statistics_counter_event (cfun, "sincos statements inserted",
			    sincos_stats.inserted);

  free_dominance_info (CDI_DOMINATORS);
  return cfg_changed ? TODO_cleanup_cfg : 0;
}
static bool
gate_cse_sincos (void)
{
  /* Make sure we have either sincos or cexp.  */
  return (TARGET_HAS_SINCOS
	  || TARGET_C99_FUNCTIONS)
	 && optimize;
}
struct gimple_opt_pass pass_cse_sincos =
{
 {
  GIMPLE_PASS,
  "sincos",				/* name */
  gate_cse_sincos,			/* gate */
  execute_cse_sincos,			/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_NONE,				/* tv_id */
  PROP_ssa,				/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  TODO_dump_func | TODO_update_ssa | TODO_verify_ssa
    | TODO_verify_stmts			/* todo_flags_finish */
 }
};
/* A symbolic number is used to detect byte permutation and selection
   patterns.  Therefore the field N contains an artificial number
   consisting of byte size markers:

   0       - byte has the value 0
   1..size - byte contains the content of the byte
	     number indexed with that value minus one  */
struct symbolic_number {
  unsigned HOST_WIDEST_INT n;
  int size;
};
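
/* For example (an illustrative sketch): for a 32-bit operand the initial
   symbolic number is 0x04030201, i.e. the lowest order byte carries
   marker 1 and the highest order byte carries marker 4.  A sequence of
   shifts, rotates, ANDs and ORs that ends with the markers reversed to
   0x01020304 therefore implements a full byte swap.  */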
/* Perform a SHIFT or ROTATE operation by COUNT bits on symbolic
   number N.  Return false if the requested operation is not permitted
   on a symbolic number.  */
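
/* As a sketch of the marker arithmetic: an 8-bit right rotation of the
   32-bit symbolic number 0x04030201 yields 0x01040302, mirroring what
   the same rotation does to the underlying bytes.  */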
static bool
do_shift_rotate (enum tree_code code,
		 struct symbolic_number *n,
		 int count)
{
  if (count % 8 != 0)
    return false;

  /* Zero out the extra bits of N in order to avoid them being shifted
     into the significant bits.  */
  if (n->size < (int)sizeof (HOST_WIDEST_INT))
    n->n &= ((unsigned HOST_WIDEST_INT)1 << (n->size * BITS_PER_UNIT)) - 1;

  switch (code)
    {
    case LSHIFT_EXPR:
      n->n <<= count;
      break;
    case RSHIFT_EXPR:
      n->n >>= count;
      break;
    case LROTATE_EXPR:
      n->n = (n->n << count) | (n->n >> ((n->size * BITS_PER_UNIT) - count));
      break;
    case RROTATE_EXPR:
      n->n = (n->n >> count) | (n->n << ((n->size * BITS_PER_UNIT) - count));
      break;
    default:
      return false;
    }
  return true;
}
/* Perform sanity checking for the symbolic number N and the gimple
   statement STMT.  */

static inline bool
verify_symbolic_number_p (struct symbolic_number *n, gimple stmt)
{
  tree lhs_type;

  lhs_type = gimple_expr_type (stmt);

  if (TREE_CODE (lhs_type) != INTEGER_TYPE)
    return false;

  if (TYPE_PRECISION (lhs_type) != n->size * BITS_PER_UNIT)
    return false;

  return true;
}
/* find_bswap_1 invokes itself recursively with N and tries to perform
   the operation given by the rhs of STMT on the result.  If the
   operation could successfully be executed the function returns the
   tree expression of the source operand and NULL otherwise.  */
static tree
find_bswap_1 (gimple stmt, struct symbolic_number *n, int limit)
{
  enum tree_code code;
  tree rhs1, rhs2 = NULL;
  gimple rhs1_stmt, rhs2_stmt;
  tree source_expr1;
  enum gimple_rhs_class rhs_class;

  if (!limit || !is_gimple_assign (stmt))
    return NULL_TREE;

  rhs1 = gimple_assign_rhs1 (stmt);

  if (TREE_CODE (rhs1) != SSA_NAME)
    return NULL_TREE;

  code = gimple_assign_rhs_code (stmt);
  rhs_class = gimple_assign_rhs_class (stmt);
  rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);

  if (rhs_class == GIMPLE_BINARY_RHS)
    rhs2 = gimple_assign_rhs2 (stmt);

  /* Handle unary rhs and binary rhs with integer constants as second
     operand.  */

  if (rhs_class == GIMPLE_UNARY_RHS
      || (rhs_class == GIMPLE_BINARY_RHS
	  && TREE_CODE (rhs2) == INTEGER_CST))
    {
      if (code != BIT_AND_EXPR
	  && code != LSHIFT_EXPR
	  && code != RSHIFT_EXPR
	  && code != LROTATE_EXPR
	  && code != RROTATE_EXPR
	  && code != NOP_EXPR
	  && code != CONVERT_EXPR)
	return NULL_TREE;

      source_expr1 = find_bswap_1 (rhs1_stmt, n, limit - 1);

      /* If find_bswap_1 returned NULL STMT is a leaf node and we have
	 to initialize the symbolic number.  */
      if (!source_expr1)
	{
	  /* Set up the symbolic number N by setting each byte to a
	     value between 1 and the byte size of rhs1.  The highest
	     order byte is set to n->size and the lowest order
	     byte to 1.  */
	  n->size = TYPE_PRECISION (TREE_TYPE (rhs1));
	  if (n->size % BITS_PER_UNIT != 0)
	    return NULL_TREE;
	  n->size /= BITS_PER_UNIT;
	  n->n = (sizeof (HOST_WIDEST_INT) < 8 ? 0 :
		  (unsigned HOST_WIDEST_INT)0x08070605 << 32 | 0x04030201);

	  if (n->size < (int)sizeof (HOST_WIDEST_INT))
	    n->n &= ((unsigned HOST_WIDEST_INT)1 <<
		     (n->size * BITS_PER_UNIT)) - 1;

	  source_expr1 = rhs1;
	}

      switch (code)
	{
	case BIT_AND_EXPR:
	  {
	    int i;
	    unsigned HOST_WIDEST_INT val = widest_int_cst_value (rhs2);
	    unsigned HOST_WIDEST_INT tmp = val;

	    /* Only constants masking full bytes are allowed.  */
	    for (i = 0; i < n->size; i++, tmp >>= BITS_PER_UNIT)
	      if ((tmp & 0xff) != 0 && (tmp & 0xff) != 0xff)
		return NULL_TREE;

	    n->n &= val;
	  }
	  break;
	case LSHIFT_EXPR:
	case RSHIFT_EXPR:
	case LROTATE_EXPR:
	case RROTATE_EXPR:
	  if (!do_shift_rotate (code, n, (int)TREE_INT_CST_LOW (rhs2)))
	    return NULL_TREE;
	  break;
	CASE_CONVERT:
	  {
	    int type_size;

	    type_size = TYPE_PRECISION (gimple_expr_type (stmt));
	    if (type_size % BITS_PER_UNIT != 0)
	      return NULL_TREE;

	    if (type_size / BITS_PER_UNIT < (int)(sizeof (HOST_WIDEST_INT)))
	      {
		/* If STMT casts to a smaller type mask out the bits not
		   belonging to the target type.  */
		n->n &= ((unsigned HOST_WIDEST_INT)1 << type_size) - 1;
	      }
	    n->size = type_size / BITS_PER_UNIT;
	  }
	  break;
	default:
	  return NULL_TREE;
	};
      return verify_symbolic_number_p (n, stmt) ? source_expr1 : NULL;
    }

  /* Handle binary rhs.  */

  if (rhs_class == GIMPLE_BINARY_RHS)
    {
      struct symbolic_number n1, n2;
      tree source_expr2;

      if (code != BIT_IOR_EXPR)
	return NULL_TREE;

      if (TREE_CODE (rhs2) != SSA_NAME)
	return NULL_TREE;

      rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);

      switch (code)
	{
	case BIT_IOR_EXPR:
	  source_expr1 = find_bswap_1 (rhs1_stmt, &n1, limit - 1);

	  if (!source_expr1)
	    return NULL_TREE;

	  source_expr2 = find_bswap_1 (rhs2_stmt, &n2, limit - 1);

	  if (source_expr1 != source_expr2
	      || n1.size != n2.size)
	    return NULL_TREE;

	  n->size = n1.size;
	  n->n = n1.n | n2.n;

	  if (!verify_symbolic_number_p (n, stmt))
	    return NULL_TREE;
	  break;
	default:
	  return NULL_TREE;
	}
      return source_expr1;
    }
  return NULL_TREE;
}
/* Check if STMT completes a bswap implementation consisting of ORs,
   SHIFTs and ANDs.  Return the source tree expression on which the
   byte swap is performed and NULL if no bswap was found.  */
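
/* A typical pattern matched here is the hand-written 32-bit swap below
   (an illustrative sketch); the whole OR tree is replaced by a single
   call to __builtin_bswap32:

       ((x >> 24) & 0x000000ff)
     | ((x >>  8) & 0x0000ff00)
     | ((x <<  8) & 0x00ff0000)
     | ((x << 24) & 0xff000000)  */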
static tree
find_bswap (gimple stmt)
{
  /* The number which the find_bswap result should match in order to
     have a full byte swap.  The number is shifted to the left according
     to the size of the symbolic number before using it.  */
  unsigned HOST_WIDEST_INT cmp =
    sizeof (HOST_WIDEST_INT) < 8 ? 0 :
    (unsigned HOST_WIDEST_INT)0x01020304 << 32 | 0x05060708;

  struct symbolic_number n;
  tree source_expr;

  /* The last parameter determines the depth search limit.  It usually
     correlates directly to the number of bytes to be touched.  We
     increase that number by one here in order to also cover signed ->
     unsigned conversions of the src operand.  */
  source_expr = find_bswap_1 (stmt, &n,
			      TREE_INT_CST_LOW (
				TYPE_SIZE_UNIT (gimple_expr_type (stmt))) + 1);

  if (!source_expr)
    return NULL_TREE;

  /* Zero out the extra bits of N and CMP.  */
  if (n.size < (int)sizeof (HOST_WIDEST_INT))
    {
      unsigned HOST_WIDEST_INT mask =
	((unsigned HOST_WIDEST_INT)1 << (n.size * BITS_PER_UNIT)) - 1;

      n.n &= mask;
      cmp >>= (sizeof (HOST_WIDEST_INT) - n.size) * BITS_PER_UNIT;
    }

  /* A complete byte swap should make the symbolic number to start
     with the largest digit in the highest order byte.  */
  if (cmp != n.n)
    return NULL_TREE;

  return source_expr;
}
/* Find manual byte swap implementations and turn them into a bswap
   builtin invocation.  */
1160 bool bswap32_p
, bswap64_p
;
1161 bool changed
= false;
1162 tree bswap32_type
= NULL_TREE
, bswap64_type
= NULL_TREE
;
1164 if (BITS_PER_UNIT
!= 8)
1167 if (sizeof (HOST_WIDEST_INT
) < 8)
1170 bswap32_p
= (built_in_decls
[BUILT_IN_BSWAP32
]
1171 && optab_handler (bswap_optab
, SImode
) != CODE_FOR_nothing
);
1172 bswap64_p
= (built_in_decls
[BUILT_IN_BSWAP64
]
1173 && (optab_handler (bswap_optab
, DImode
) != CODE_FOR_nothing
1174 || (bswap32_p
&& word_mode
== SImode
)));
1176 if (!bswap32_p
&& !bswap64_p
)
1179 /* Determine the argument type of the builtins. The code later on
1180 assumes that the return and argument type are the same. */
1183 tree fndecl
= built_in_decls
[BUILT_IN_BSWAP32
];
1184 bswap32_type
= TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl
)));
1189 tree fndecl
= built_in_decls
[BUILT_IN_BSWAP64
];
1190 bswap64_type
= TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl
)));
1193 memset (&bswap_stats
, 0, sizeof (bswap_stats
));
1197 gimple_stmt_iterator gsi
;
1199 for (gsi
= gsi_after_labels (bb
); !gsi_end_p (gsi
); gsi_next (&gsi
))
1201 gimple stmt
= gsi_stmt (gsi
);
1202 tree bswap_src
, bswap_type
;
1204 tree fndecl
= NULL_TREE
;
1208 if (!is_gimple_assign (stmt
)
1209 || gimple_assign_rhs_code (stmt
) != BIT_IOR_EXPR
)
1212 type_size
= TYPE_PRECISION (gimple_expr_type (stmt
));
1219 fndecl
= built_in_decls
[BUILT_IN_BSWAP32
];
1220 bswap_type
= bswap32_type
;
1226 fndecl
= built_in_decls
[BUILT_IN_BSWAP64
];
1227 bswap_type
= bswap64_type
;
1237 bswap_src
= find_bswap (stmt
);
1243 if (type_size
== 32)
1244 bswap_stats
.found_32bit
++;
1246 bswap_stats
.found_64bit
++;
1248 bswap_tmp
= bswap_src
;
1250 /* Convert the src expression if necessary. */
1251 if (!useless_type_conversion_p (TREE_TYPE (bswap_tmp
), bswap_type
))
1253 gimple convert_stmt
;
1255 bswap_tmp
= create_tmp_var (bswap_type
, "bswapsrc");
1256 add_referenced_var (bswap_tmp
);
1257 bswap_tmp
= make_ssa_name (bswap_tmp
, NULL
);
1259 convert_stmt
= gimple_build_assign_with_ops (
1260 CONVERT_EXPR
, bswap_tmp
, bswap_src
, NULL
);
1261 gsi_insert_before (&gsi
, convert_stmt
, GSI_SAME_STMT
);
1264 call
= gimple_build_call (fndecl
, 1, bswap_tmp
);
1266 bswap_tmp
= gimple_assign_lhs (stmt
);
1268 /* Convert the result if necessary. */
1269 if (!useless_type_conversion_p (TREE_TYPE (bswap_tmp
), bswap_type
))
1271 gimple convert_stmt
;
1273 bswap_tmp
= create_tmp_var (bswap_type
, "bswapdst");
1274 add_referenced_var (bswap_tmp
);
1275 bswap_tmp
= make_ssa_name (bswap_tmp
, NULL
);
1276 convert_stmt
= gimple_build_assign_with_ops (
1277 CONVERT_EXPR
, gimple_assign_lhs (stmt
), bswap_tmp
, NULL
);
1278 gsi_insert_after (&gsi
, convert_stmt
, GSI_SAME_STMT
);
1281 gimple_call_set_lhs (call
, bswap_tmp
);
1285 fprintf (dump_file
, "%d bit bswap implementation found at: ",
1287 print_gimple_stmt (dump_file
, stmt
, 0, 0);
1290 gsi_insert_after (&gsi
, call
, GSI_SAME_STMT
);
1291 gsi_remove (&gsi
, true);
1295 statistics_counter_event (cfun
, "32-bit bswap implementations found",
1296 bswap_stats
.found_32bit
);
1297 statistics_counter_event (cfun
, "64-bit bswap implementations found",
1298 bswap_stats
.found_64bit
);
1300 return (changed
? TODO_dump_func
| TODO_update_ssa
| TODO_verify_ssa
1301 | TODO_verify_stmts
: 0);
static bool
gate_optimize_bswap (void)
{
  return flag_expensive_optimizations && optimize;
}
struct gimple_opt_pass pass_optimize_bswap =
{
 {
  GIMPLE_PASS,
  "bswap",				/* name */
  gate_optimize_bswap,			/* gate */
  execute_optimize_bswap,		/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_NONE,				/* tv_id */
  PROP_ssa,				/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  0					/* todo_flags_finish */
 }
};
/* Return true if RHS is a suitable operand for a widening multiplication.
   There are two cases:

     - RHS makes some value twice as wide.  Store that value in *NEW_RHS_OUT
       if so, and store its type in *TYPE_OUT.

     - RHS is an integer constant.  Store that value in *NEW_RHS_OUT if so,
       but leave *TYPE_OUT untouched.  */
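
/* As a sketch of the two accepted shapes (assuming 32-bit int and
   64-bit long long): in "(long long) a", the cast makes A twice as
   wide, so A and its 32-bit type go through the out parameters; a
   plain constant such as 1000 is stored with *TYPE_OUT left untouched
   for the caller to reconcile against the other operand.  */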
static bool
is_widening_mult_rhs_p (tree rhs, tree *type_out, tree *new_rhs_out)
{
  gimple stmt;
  tree type, type1, rhs1;
  enum tree_code rhs_code;

  if (TREE_CODE (rhs) == SSA_NAME)
    {
      type = TREE_TYPE (rhs);
      stmt = SSA_NAME_DEF_STMT (rhs);
      if (!is_gimple_assign (stmt))
	return false;

      rhs_code = gimple_assign_rhs_code (stmt);
      if (TREE_CODE (type) == INTEGER_TYPE
	  ? !CONVERT_EXPR_CODE_P (rhs_code)
	  : rhs_code != FIXED_CONVERT_EXPR)
	return false;

      rhs1 = gimple_assign_rhs1 (stmt);
      type1 = TREE_TYPE (rhs1);
      if (TREE_CODE (type1) != TREE_CODE (type)
	  || TYPE_PRECISION (type1) * 2 != TYPE_PRECISION (type))
	return false;

      *new_rhs_out = rhs1;
      *type_out = type1;
      return true;
    }

  if (TREE_CODE (rhs) == INTEGER_CST)
    {
      *new_rhs_out = rhs;
      *type_out = NULL;
      return true;
    }

  return false;
}
/* Return true if STMT performs a widening multiplication.  If so,
   store the unwidened types of the operands in *TYPE1_OUT and *TYPE2_OUT
   respectively.  Also fill *RHS1_OUT and *RHS2_OUT such that converting
   those operands to types *TYPE1_OUT and *TYPE2_OUT would give the
   operands of the multiplication.  */
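
/* Sketch (assuming 32-bit int operands A and B): for

     long long t = (long long) a * (long long) b;

   this returns true with *RHS1_OUT/*RHS2_OUT set to A and B and both
   unwidened types set to 32-bit int, so the MULT_EXPR can be rewritten
   as WIDEN_MULT_EXPR <a, b>.  */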
static bool
is_widening_mult_p (gimple stmt,
		    tree *type1_out, tree *rhs1_out,
		    tree *type2_out, tree *rhs2_out)
{
  tree type;

  type = TREE_TYPE (gimple_assign_lhs (stmt));
  if (TREE_CODE (type) != INTEGER_TYPE
      && TREE_CODE (type) != FIXED_POINT_TYPE)
    return false;

  if (!is_widening_mult_rhs_p (gimple_assign_rhs1 (stmt), type1_out, rhs1_out))
    return false;

  if (!is_widening_mult_rhs_p (gimple_assign_rhs2 (stmt), type2_out, rhs2_out))
    return false;

  if (*type1_out == NULL)
    {
      if (*type2_out == NULL || !int_fits_type_p (*rhs1_out, *type2_out))
	return false;
      *type1_out = *type2_out;
    }

  if (*type2_out == NULL)
    {
      if (!int_fits_type_p (*rhs2_out, *type1_out))
	return false;
      *type2_out = *type1_out;
    }

  return true;
}
/* Process a single gimple statement STMT, which has a MULT_EXPR as
   its rhs, and try to convert it into a WIDEN_MULT_EXPR.  The return
   value is true iff we converted the statement.  */
static bool
convert_mult_to_widen (gimple stmt)
{
  tree lhs, rhs1, rhs2, type, type1, type2;
  enum insn_code handler;

  lhs = gimple_assign_lhs (stmt);
  type = TREE_TYPE (lhs);
  if (TREE_CODE (type) != INTEGER_TYPE)
    return false;

  if (!is_widening_mult_p (stmt, &type1, &rhs1, &type2, &rhs2))
    return false;

  if (TYPE_UNSIGNED (type1) && TYPE_UNSIGNED (type2))
    handler = optab_handler (umul_widen_optab, TYPE_MODE (type));
  else if (!TYPE_UNSIGNED (type1) && !TYPE_UNSIGNED (type2))
    handler = optab_handler (smul_widen_optab, TYPE_MODE (type));
  else
    handler = optab_handler (usmul_widen_optab, TYPE_MODE (type));

  if (handler == CODE_FOR_nothing)
    return false;

  gimple_assign_set_rhs1 (stmt, fold_convert (type1, rhs1));
  gimple_assign_set_rhs2 (stmt, fold_convert (type2, rhs2));
  gimple_assign_set_rhs_code (stmt, WIDEN_MULT_EXPR);
  update_stmt (stmt);
  widen_mul_stats.widen_mults_inserted++;
  return true;
}
/* Process a single gimple statement STMT, which is found at the
   iterator GSI and has a either a PLUS_EXPR or a MINUS_EXPR as its
   rhs (given by CODE), and try to convert it into a
   WIDEN_MULT_PLUS_EXPR or a WIDEN_MULT_MINUS_EXPR.  The return value
   is true iff we converted the statement.  */
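
/* Sketch of the targeted shape (assuming 32-bit int A, B and a 64-bit
   accumulator SUM):

     t = (long long) a * (long long) b;
     sum = sum + t;

   becomes a single WIDEN_MULT_PLUS_EXPR <a, b, sum> when the target
   provides a widening multiply-accumulate pattern for this mode and
   signedness.  */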
static bool
convert_plusminus_to_widen (gimple_stmt_iterator *gsi, gimple stmt,
			    enum tree_code code)
{
  gimple rhs1_stmt = NULL, rhs2_stmt = NULL;
  tree type, type1, type2;
  tree lhs, rhs1, rhs2, mult_rhs1, mult_rhs2, add_rhs;
  enum tree_code rhs1_code = ERROR_MARK, rhs2_code = ERROR_MARK;
  optab this_optab;
  enum tree_code wmult_code;

  lhs = gimple_assign_lhs (stmt);
  type = TREE_TYPE (lhs);
  if (TREE_CODE (type) != INTEGER_TYPE
      && TREE_CODE (type) != FIXED_POINT_TYPE)
    return false;

  if (code == MINUS_EXPR)
    wmult_code = WIDEN_MULT_MINUS_EXPR;
  else
    wmult_code = WIDEN_MULT_PLUS_EXPR;

  rhs1 = gimple_assign_rhs1 (stmt);
  rhs2 = gimple_assign_rhs2 (stmt);

  if (TREE_CODE (rhs1) == SSA_NAME)
    {
      rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
      if (is_gimple_assign (rhs1_stmt))
	rhs1_code = gimple_assign_rhs_code (rhs1_stmt);
    }
  else
    return false;

  if (TREE_CODE (rhs2) == SSA_NAME)
    {
      rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
      if (is_gimple_assign (rhs2_stmt))
	rhs2_code = gimple_assign_rhs_code (rhs2_stmt);
    }
  else
    return false;

  if (code == PLUS_EXPR && rhs1_code == MULT_EXPR)
    {
      if (!is_widening_mult_p (rhs1_stmt, &type1, &mult_rhs1,
			       &type2, &mult_rhs2))
	return false;
      add_rhs = rhs2;
    }
  else if (rhs2_code == MULT_EXPR)
    {
      if (!is_widening_mult_p (rhs2_stmt, &type1, &mult_rhs1,
			       &type2, &mult_rhs2))
	return false;
      add_rhs = rhs1;
    }
  else if (code == PLUS_EXPR && rhs1_code == WIDEN_MULT_EXPR)
    {
      mult_rhs1 = gimple_assign_rhs1 (rhs1_stmt);
      mult_rhs2 = gimple_assign_rhs2 (rhs1_stmt);
      type1 = TREE_TYPE (mult_rhs1);
      type2 = TREE_TYPE (mult_rhs2);
      add_rhs = rhs2;
    }
  else if (rhs2_code == WIDEN_MULT_EXPR)
    {
      mult_rhs1 = gimple_assign_rhs1 (rhs2_stmt);
      mult_rhs2 = gimple_assign_rhs2 (rhs2_stmt);
      type1 = TREE_TYPE (mult_rhs1);
      type2 = TREE_TYPE (mult_rhs2);
      add_rhs = rhs1;
    }
  else
    return false;

  if (TYPE_UNSIGNED (type1) != TYPE_UNSIGNED (type2))
    return false;

  /* Verify that the machine can perform a widening multiply
     accumulate in this mode/signedness combination, otherwise
     this transformation is likely to pessimize code.  */
  this_optab = optab_for_tree_code (wmult_code, type1, optab_default);
  if (optab_handler (this_optab, TYPE_MODE (type)) == CODE_FOR_nothing)
    return false;

  /* ??? May need some type verification here?  */

  gimple_assign_set_rhs_with_ops_1 (gsi, wmult_code,
				    fold_convert (type1, mult_rhs1),
				    fold_convert (type2, mult_rhs2),
				    add_rhs);
  update_stmt (gsi_stmt (*gsi));
  widen_mul_stats.maccs_inserted++;
  return true;
}
/* Combine the multiplication at MUL_STMT with operands MULOP1 and MULOP2
   with uses in additions and subtractions to form fused multiply-add
   operations.  Returns true if successful and MUL_STMT should be removed.  */
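
/* Sketch of the basic case (assuming -ffp-contract=fast): given

     t = a * b;
     r = t + c;

   the addition is rewritten to r = FMA <a, b, c> and the multiplication
   becomes dead; a negate feeding or consuming T yields the FNMA, FMS or
   FNMS variants instead.  */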
static bool
convert_mult_to_fma (gimple mul_stmt, tree op1, tree op2)
{
  tree mul_result = gimple_get_lhs (mul_stmt);
  tree type = TREE_TYPE (mul_result);
  gimple use_stmt, neguse_stmt, fma_stmt;
  use_operand_p use_p;
  imm_use_iterator imm_iter;

  if (FLOAT_TYPE_P (type)
      && flag_fp_contract_mode == FP_CONTRACT_OFF)
    return false;

  /* We don't want to do bitfield reduction ops.  */
  if (INTEGRAL_TYPE_P (type)
      && (TYPE_PRECISION (type)
	  != GET_MODE_PRECISION (TYPE_MODE (type))))
    return false;

  /* If the target doesn't support it, don't generate it.  We assume that
     if fma isn't available then fms, fnma or fnms are not either.  */
  if (optab_handler (fma_optab, TYPE_MODE (type)) == CODE_FOR_nothing)
    return false;

  /* Make sure that the multiplication statement becomes dead after
     the transformation, thus that all uses are transformed to FMAs.
     This means we assume that an FMA operation has the same cost
     as an addition.  */
  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, mul_result)
    {
      enum tree_code use_code;
      tree result = mul_result;
      bool negate_p = false;

      use_stmt = USE_STMT (use_p);

      if (is_gimple_debug (use_stmt))
	continue;

      /* For now restrict these operations to single basic blocks.  In theory
	 we would want to support sinking the multiplication in
	 m = a*b;
	 if ()
	   ma = m + c;
	 else
	   d = m;
	 to form a fma in the then block and sink the multiplication to the
	 else block.  */
      if (gimple_bb (use_stmt) != gimple_bb (mul_stmt))
	return false;

      if (!is_gimple_assign (use_stmt))
	return false;

      use_code = gimple_assign_rhs_code (use_stmt);

      /* A negate on the multiplication leads to FNMA.  */
      if (use_code == NEGATE_EXPR)
	{
	  ssa_op_iter iter;
	  tree use;

	  result = gimple_assign_lhs (use_stmt);

	  /* Make sure the negate statement becomes dead with this
	     single transformation.  */
	  if (!single_imm_use (gimple_assign_lhs (use_stmt),
			       &use_p, &neguse_stmt))
	    return false;

	  /* Make sure the multiplication isn't also used on that stmt.  */
	  FOR_EACH_SSA_TREE_OPERAND (use, neguse_stmt, iter, SSA_OP_USE)
	    if (use == mul_result)
	      return false;

	  /* Re-validate.  */
	  use_stmt = neguse_stmt;
	  if (gimple_bb (use_stmt) != gimple_bb (mul_stmt))
	    return false;
	  if (!is_gimple_assign (use_stmt))
	    return false;

	  use_code = gimple_assign_rhs_code (use_stmt);
	  negate_p = true;
	}

      switch (use_code)
	{
	case MINUS_EXPR:
	  if (gimple_assign_rhs2 (use_stmt) == result)
	    negate_p = !negate_p;
	  break;
	case PLUS_EXPR:
	  break;
	default:
	  /* FMA can only be formed from PLUS and MINUS.  */
	  return false;
	}

      /* We can't handle a * b + a * b.  */
      if (gimple_assign_rhs1 (use_stmt) == gimple_assign_rhs2 (use_stmt))
	return false;

      /* While it is possible to validate whether or not the exact form
	 that we've recognized is available in the backend, the assumption
	 is that the transformation is never a loss.  For instance, suppose
	 the target only has the plain FMA pattern available.  Consider
	 a*b-c -> fma(a,b,-c): we've exchanged MUL+SUB for FMA+NEG, which
	 is still two operations.  Consider -(a*b)-c -> fma(-a,b,-c): we
	 still have 3 operations, but in the FMA form the two NEGs are
	 independent and could be run in parallel.  */
    }

  FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, mul_result)
    {
      gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
      enum tree_code use_code;
      tree addop, mulop1 = op1, result = mul_result;
      bool negate_p = false;

      if (is_gimple_debug (use_stmt))
	continue;

      use_code = gimple_assign_rhs_code (use_stmt);
      if (use_code == NEGATE_EXPR)
	{
	  result = gimple_assign_lhs (use_stmt);
	  single_imm_use (gimple_assign_lhs (use_stmt), &use_p, &neguse_stmt);
	  gsi_remove (&gsi, true);
	  release_defs (use_stmt);

	  use_stmt = neguse_stmt;
	  gsi = gsi_for_stmt (use_stmt);
	  use_code = gimple_assign_rhs_code (use_stmt);
	  negate_p = true;
	}

      if (gimple_assign_rhs1 (use_stmt) == result)
	{
	  addop = gimple_assign_rhs2 (use_stmt);
	  /* a * b - c -> a * b + (-c)  */
	  if (gimple_assign_rhs_code (use_stmt) == MINUS_EXPR)
	    addop = force_gimple_operand_gsi (&gsi,
					      build1 (NEGATE_EXPR,
						      type, addop),
					      true, NULL_TREE, true,
					      GSI_SAME_STMT);
	}
      else
	{
	  addop = gimple_assign_rhs1 (use_stmt);
	  /* a - b * c -> (-b) * c + a */
	  if (gimple_assign_rhs_code (use_stmt) == MINUS_EXPR)
	    negate_p = !negate_p;
	}

      if (negate_p)
	mulop1 = force_gimple_operand_gsi (&gsi,
					   build1 (NEGATE_EXPR,
						   type, mulop1),
					   true, NULL_TREE, true,
					   GSI_SAME_STMT);

      fma_stmt = gimple_build_assign_with_ops3 (FMA_EXPR,
						gimple_assign_lhs (use_stmt),
						mulop1, op2, addop);
      gsi_replace (&gsi, fma_stmt, true);
      widen_mul_stats.fmas_inserted++;
    }

  return true;
}
/* Find integer multiplications where the operands are extended from
   smaller types, and replace the MULT_EXPR with a WIDEN_MULT_EXPR
   where appropriate.  */
static unsigned int
execute_optimize_widening_mul (void)
{
  basic_block bb;
  bool cfg_changed = false;

  memset (&widen_mul_stats, 0, sizeof (widen_mul_stats));

  FOR_EACH_BB (bb)
    {
      gimple_stmt_iterator gsi;

      for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi);)
	{
	  gimple stmt = gsi_stmt (gsi);
	  enum tree_code code;

	  if (is_gimple_assign (stmt))
	    {
	      code = gimple_assign_rhs_code (stmt);
	      switch (code)
		{
		case MULT_EXPR:
		  if (!convert_mult_to_widen (stmt)
		      && convert_mult_to_fma (stmt,
					      gimple_assign_rhs1 (stmt),
					      gimple_assign_rhs2 (stmt)))
		    {
		      gsi_remove (&gsi, true);
		      release_defs (stmt);
		      continue;
		    }
		  break;

		case PLUS_EXPR:
		case MINUS_EXPR:
		  convert_plusminus_to_widen (&gsi, stmt, code);
		  break;

		default:;
		}
	    }
	  else if (is_gimple_call (stmt)
		   && gimple_call_lhs (stmt))
	    {
	      tree fndecl = gimple_call_fndecl (stmt);
	      if (fndecl
		  && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
		{
		  switch (DECL_FUNCTION_CODE (fndecl))
		    {
		    case BUILT_IN_POWF:
		    case BUILT_IN_POW:
		    case BUILT_IN_POWL:
		      if (TREE_CODE (gimple_call_arg (stmt, 1)) == REAL_CST
			  && REAL_VALUES_EQUAL
			       (TREE_REAL_CST (gimple_call_arg (stmt, 1)),
				dconst2)
			  && convert_mult_to_fma (stmt,
						  gimple_call_arg (stmt, 0),
						  gimple_call_arg (stmt, 0)))
			{
			  unlink_stmt_vdef (stmt);
			  gsi_remove (&gsi, true);
			  release_defs (stmt);
			  if (gimple_purge_dead_eh_edges (bb))
			    cfg_changed = true;
			  continue;
			}
		      break;

		    default:;
		    }
		}
	    }
	  gsi_next (&gsi);
	}
    }

  statistics_counter_event (cfun, "widening multiplications inserted",
			    widen_mul_stats.widen_mults_inserted);
  statistics_counter_event (cfun, "widening maccs inserted",
			    widen_mul_stats.maccs_inserted);
  statistics_counter_event (cfun, "fused multiply-adds inserted",
			    widen_mul_stats.fmas_inserted);

  return cfg_changed ? TODO_cleanup_cfg : 0;
}
static bool
gate_optimize_widening_mul (void)
{
  return flag_expensive_optimizations && optimize;
}
struct gimple_opt_pass pass_optimize_widening_mul =
{
 {
  GIMPLE_PASS,
  "widening_mul",			/* name */
  gate_optimize_widening_mul,		/* gate */
  execute_optimize_widening_mul,	/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_NONE,				/* tv_id */
  PROP_ssa,				/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  TODO_verify_ssa
  | TODO_verify_stmts
  | TODO_dump_func
  | TODO_update_ssa			/* todo_flags_finish */
 }
};