/* Global, SSA-based optimizations using mathematical identities.
   Copyright (C) 2005-2014 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* Currently, the only mini-pass in this file tries to CSE reciprocal
   operations.  These are common in sequences such as this one:

	modulus = sqrt(x*x + y*y + z*z);
	x = x / modulus;
	y = y / modulus;
	z = z / modulus;

   that can be optimized to

	modulus = sqrt(x*x + y*y + z*z);
	rmodulus = 1.0 / modulus;
	x = x * rmodulus;
	y = y * rmodulus;
	z = z * rmodulus;

   We do this for loop invariant divisors, and with this pass whenever
   we notice that a division has the same divisor multiple times.
   Of course, like in PRE, we don't insert a division if a dominator
   already has one.  However, this cannot be done as an extension of
   PRE for several reasons.
   First of all, experiments showed that the transformation is not
   always useful if there are only two divisions by the same divisor.
   This is probably because modern processors can pipeline the
   divisions; on older, in-order processors it should still be
   effective to optimize two divisions by the same number.  We make
   this a param, and it shall be called N in the remainder of this
   comment.
   Second, if trapping math is active, we have less freedom on where
   to insert divisions: we can only do so in basic blocks that already
   contain one.  (If divisions don't trap, instead, we can insert
   divisions elsewhere, which will be in blocks that are common dominators
   of those that have the division).
   We really don't want to compute the reciprocal unless a division will
   be found.  To do this, we won't insert the division in a basic block
   that has fewer than N divisions *post-dominating* it.
   The algorithm constructs a subset of the dominator tree, holding the
   blocks containing the divisions and the common dominators to them,
   and walks it twice.  The first walk is in post-order, and it annotates
   each block with the number of divisions that post-dominate it: this
   gives information on where divisions can be inserted profitably.
   The second walk is in pre-order, and it inserts divisions as explained
   above, and replaces divisions by multiplications.
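
   For instance, if a basic block BB ends with two divisions by the same
   SSA name D, the post-order walk annotates BB with two post-dominating
   divisions; if that meets the threshold N, the pre-order walk inserts
   RECIP = 1.0 / D right before the first division in BB, and the
   replacement step then rewrites both divisions into multiplications
   by RECIP.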
   In the best case, the cost of the pass is O(n_statements).  In the
   worst-case, the cost is due to creating the dominator tree subset,
   with a cost of O(n_basic_blocks ^ 2); however this can only happen
   for n_statements / n_basic_blocks statements.  So, the amortized cost
   of creating the dominator tree subset is O(n_basic_blocks) and the
   worst-case cost of the pass is O(n_statements * n_basic_blocks).
   More practically, the cost will be small because there are few
   divisions, and they tend to be in the same basic block, so insert_bb
   is called very few times.
   If we did this using domwalk.c, an efficient implementation would have
   to work on all the variables in a single pass, because we could not
   work on just a subset of the dominator tree, as we do now, and the
   cost would also be something like O(n_statements * n_basic_blocks).
   The data structures would be more complex in order to work on all the
   variables in a single pass.  */
89 #include "coretypes.h"
93 #include "basic-block.h"
94 #include "tree-ssa-alias.h"
95 #include "internal-fn.h"
96 #include "gimple-fold.h"
97 #include "gimple-expr.h"
100 #include "gimple-iterator.h"
101 #include "gimplify.h"
102 #include "gimplify-me.h"
103 #include "stor-layout.h"
104 #include "gimple-ssa.h"
105 #include "tree-cfg.h"
106 #include "tree-phinodes.h"
107 #include "ssa-iterators.h"
108 #include "stringpool.h"
109 #include "tree-ssanames.h"
111 #include "tree-dfa.h"
112 #include "tree-ssa.h"
113 #include "tree-pass.h"
114 #include "alloc-pool.h"
116 #include "gimple-pretty-print.h"
117 #include "builtins.h"
119 /* FIXME: RTL headers have to be included here for optabs. */
120 #include "rtl.h" /* Because optabs.h wants enum rtx_code. */
121 #include "expr.h" /* Because optabs.h wants sepops. */
/* This structure represents one basic block that either computes a
   division, or is a common dominator for basic blocks that compute a
   division.  */
struct occurrence {
  /* The basic block represented by this structure.  */
  basic_block bb;

  /* If non-NULL, the SSA_NAME holding the definition for a reciprocal
     inserted in BB.  */
  tree recip_def;

  /* If non-NULL, the GIMPLE_ASSIGN for a reciprocal computation that
     was inserted in BB.  */
  gimple recip_def_stmt;

  /* Pointer to a list of "struct occurrence"s for blocks dominated
     by BB.  */
  struct occurrence *children;

  /* Pointer to the next "struct occurrence"s in the list of blocks
     sharing a common dominator.  */
  struct occurrence *next;

  /* The number of divisions that are in BB before compute_merit.  The
     number of divisions that are in BB or post-dominate it after
     compute_merit.  */
  int num_divisions;

  /* True if the basic block has a division, false if it is a common
     dominator for basic blocks that do.  If it is false and trapping
     math is active, BB is not a candidate for inserting a reciprocal.  */
  bool bb_has_division;
};
static struct
{
  /* Number of 1.0/X ops inserted.  */
  int rdivs_inserted;

  /* Number of 1.0/FUNC ops inserted.  */
  int rfuncs_inserted;
} reciprocal_stats;

static struct
{
  /* Number of cexpi calls inserted.  */
  int inserted;
} sincos_stats;

static struct
{
  /* Number of hand-written 16-bit nop / bswaps found.  */
  int found_16bit;

  /* Number of hand-written 32-bit nop / bswaps found.  */
  int found_32bit;

  /* Number of hand-written 64-bit nop / bswaps found.  */
  int found_64bit;
} nop_stats, bswap_stats;

static struct
{
  /* Number of widening multiplication ops inserted.  */
  int widen_mults_inserted;

  /* Number of integer multiply-and-accumulate ops inserted.  */
  int maccs_inserted;

  /* Number of fp fused multiply-add ops inserted.  */
  int fmas_inserted;
} widen_mul_stats;
/* The instance of "struct occurrence" representing the highest
   interesting block in the dominator tree.  */
static struct occurrence *occ_head;

/* Allocation pool for getting instances of "struct occurrence".  */
static alloc_pool occ_pool;
/* Allocate and return a new struct occurrence for basic block BB, and
   whose children list is headed by CHILDREN.  */
static struct occurrence *
occ_new (basic_block bb, struct occurrence *children)
{
  struct occurrence *occ;

  bb->aux = occ = (struct occurrence *) pool_alloc (occ_pool);
  memset (occ, 0, sizeof (struct occurrence));

  occ->bb = bb;
  occ->children = children;
  return occ;
}
/* Insert NEW_OCC into our subset of the dominator tree.  P_HEAD points to a
   list of "struct occurrence"s, one per basic block, having IDOM as
   their common dominator.

   We try to insert NEW_OCC as deep as possible in the tree, and we also
   insert any other block that is a common dominator for BB and one
   block already in the tree.  */

static void
insert_bb (struct occurrence *new_occ, basic_block idom,
	   struct occurrence **p_head)
{
  struct occurrence *occ, **p_occ;

  for (p_occ = p_head; (occ = *p_occ) != NULL; )
    {
      basic_block bb = new_occ->bb, occ_bb = occ->bb;
      basic_block dom = nearest_common_dominator (CDI_DOMINATORS,
						  occ_bb, bb);
      if (dom == bb)
	{
	  /* BB dominates OCC_BB.  OCC becomes NEW_OCC's child: remove OCC
	     from its list.  */
	  *p_occ = occ->next;
	  occ->next = new_occ->children;
	  new_occ->children = occ;

	  /* Try the next block (it may as well be dominated by BB).  */
	}

      else if (dom == occ_bb)
	{
	  /* OCC_BB dominates BB.  Tail recurse to look deeper.  */
	  insert_bb (new_occ, dom, &occ->children);
	  return;
	}

      else if (dom != idom)
	{
	  gcc_assert (!dom->aux);

	  /* There is a dominator between IDOM and BB, add it and make
	     two children out of NEW_OCC and OCC.  First, remove OCC from
	     its list.  */
	  *p_occ = occ->next;
	  new_occ->next = occ;
	  occ->next = NULL;

	  /* None of the previous blocks has DOM as a dominator: if we tail
	     recursed, we would reexamine them uselessly.  Just switch BB with
	     DOM, and go on looking for blocks dominated by DOM.  */
	  new_occ = occ_new (dom, new_occ);
	}

      else
	{
	  /* Nothing special, go on with the next element.  */
	  p_occ = &occ->next;
	}
    }

  /* No place was found as a child of IDOM.  Make BB a sibling of IDOM.  */
  new_occ->next = *p_head;
  *p_head = new_occ;
}
/* Register that we found a division in BB.  */

static inline void
register_division_in (basic_block bb)
{
  struct occurrence *occ;

  occ = (struct occurrence *) bb->aux;
  if (!occ)
    {
      occ = occ_new (bb, NULL);
      insert_bb (occ, ENTRY_BLOCK_PTR_FOR_FN (cfun), &occ_head);
    }

  occ->bb_has_division = true;
  occ->num_divisions++;
}
/* Compute the number of divisions that postdominate each block in OCC and
   its children.  */

static void
compute_merit (struct occurrence *occ)
{
  struct occurrence *occ_child;
  basic_block dom = occ->bb;

  for (occ_child = occ->children; occ_child;
       occ_child = occ_child->next)
    {
      basic_block bb;
      if (occ_child->children)
	compute_merit (occ_child);

      if (flag_exceptions)
	/* Don't clobber the EXIT_BLOCK with a dummy edge.  */
	bb = single_noncomplex_succ (dom);
      else
	bb = dom;

      if (dominated_by_p (CDI_POST_DOMINATORS, bb, occ_child->bb))
	occ->num_divisions += occ_child->num_divisions;
    }
}
/* Return whether USE_STMT is a floating-point division by DEF.  */
static inline bool
is_division_by (gimple use_stmt, tree def)
{
  return is_gimple_assign (use_stmt)
	 && gimple_assign_rhs_code (use_stmt) == RDIV_EXPR
	 && gimple_assign_rhs2 (use_stmt) == def
	 /* Do not recognize x / x as valid division, as we are getting
	    confused later by replacing all immediate uses x in such
	    a stmt.  */
	 && gimple_assign_rhs1 (use_stmt) != def;
}
/* Walk the subset of the dominator tree rooted at OCC, setting the
   RECIP_DEF field to a definition of 1.0 / DEF that can be used in
   the given basic block.  The field may be left NULL, of course,
   if it is not possible or profitable to do the optimization.

   DEF_BSI is an iterator pointing at the statement defining DEF.
   If RECIP_DEF is set, a dominator already has a computation that can
   be used.  */

static void
insert_reciprocals (gimple_stmt_iterator *def_gsi, struct occurrence *occ,
		    tree def, tree recip_def, int threshold)
{
  tree type;
  gimple_assign new_stmt;
  gimple_stmt_iterator gsi;
  struct occurrence *occ_child;

  if (!recip_def
      && (occ->bb_has_division || !flag_trapping_math)
      && occ->num_divisions >= threshold)
    {
      /* Make a variable with the replacement and substitute it.  */
      type = TREE_TYPE (def);
      recip_def = create_tmp_reg (type, "reciptmp");
      new_stmt = gimple_build_assign_with_ops (RDIV_EXPR, recip_def,
					       build_one_cst (type), def);

      if (occ->bb_has_division)
	{
	  /* Case 1: insert before an existing division.  */
	  gsi = gsi_after_labels (occ->bb);
	  while (!gsi_end_p (gsi) && !is_division_by (gsi_stmt (gsi), def))
	    gsi_next (&gsi);

	  gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
	}
      else if (def_gsi && occ->bb == def_gsi->bb)
	{
	  /* Case 2: insert right after the definition.  Note that this will
	     never happen if the definition statement can throw, because in
	     that case the sole successor of the statement's basic block will
	     dominate all the uses as well.  */
	  gsi_insert_after (def_gsi, new_stmt, GSI_NEW_STMT);
	}
      else
	{
	  /* Case 3: insert in a basic block not containing defs/uses.  */
	  gsi = gsi_after_labels (occ->bb);
	  gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
	}

      reciprocal_stats.rdivs_inserted++;

      occ->recip_def_stmt = new_stmt;
    }

  occ->recip_def = recip_def;
  for (occ_child = occ->children; occ_child; occ_child = occ_child->next)
    insert_reciprocals (def_gsi, occ_child, def, recip_def, threshold);
}
/* Replace the division at USE_P with a multiplication by the reciprocal, if
   possible.  */

static inline void
replace_reciprocal (use_operand_p use_p)
{
  gimple use_stmt = USE_STMT (use_p);
  basic_block bb = gimple_bb (use_stmt);
  struct occurrence *occ = (struct occurrence *) bb->aux;

  if (optimize_bb_for_speed_p (bb)
      && occ->recip_def && use_stmt != occ->recip_def_stmt)
    {
      gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
      gimple_assign_set_rhs_code (use_stmt, MULT_EXPR);
      SET_USE (use_p, occ->recip_def);
      fold_stmt_inplace (&gsi);
      update_stmt (use_stmt);
    }
}
/* Free OCC and return one more "struct occurrence" to be freed.  */

static struct occurrence *
free_bb (struct occurrence *occ)
{
  struct occurrence *child, *next;

  /* First get the two pointers hanging off OCC.  */
  next = occ->next;
  child = occ->children;
  occ->bb->aux = NULL;
  pool_free (occ_pool, occ);

  /* Now ensure that we don't recurse unless it is necessary.  */
  if (!child)
    return next;
  else
    {
      while (next)
	next = free_bb (next);

      return child;
    }
}
/* Look for floating-point divisions among DEF's uses, and try to
   replace them by multiplications with the reciprocal.  Add
   as many statements computing the reciprocal as needed.

   DEF must be a GIMPLE register of a floating-point type.  */

static void
execute_cse_reciprocals_1 (gimple_stmt_iterator *def_gsi, tree def)
{
  use_operand_p use_p;
  imm_use_iterator use_iter;
  struct occurrence *occ;
  int count = 0, threshold;

  gcc_assert (FLOAT_TYPE_P (TREE_TYPE (def)) && is_gimple_reg (def));

  FOR_EACH_IMM_USE_FAST (use_p, use_iter, def)
    {
      gimple use_stmt = USE_STMT (use_p);
      if (is_division_by (use_stmt, def))
	{
	  register_division_in (gimple_bb (use_stmt));
	  count++;
	}
    }

  /* Do the expensive part only if we can hope to optimize something.  */
  threshold = targetm.min_divisions_for_recip_mul (TYPE_MODE (TREE_TYPE (def)));
  if (count >= threshold)
    {
      gimple use_stmt;
      for (occ = occ_head; occ; occ = occ->next)
	{
	  compute_merit (occ);
	  insert_reciprocals (def_gsi, occ, def, NULL, threshold);
	}

      FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, def)
	{
	  if (is_division_by (use_stmt, def))
	    {
	      FOR_EACH_IMM_USE_ON_STMT (use_p, use_iter)
		replace_reciprocal (use_p);
	    }
	}
    }

  for (occ = occ_head; occ; )
    occ = free_bb (occ);

  occ_head = NULL;
}
/* Go through all the floating-point SSA_NAMEs, and call
   execute_cse_reciprocals_1 on each of them.  */

namespace {

const pass_data pass_data_cse_reciprocals =
{
  GIMPLE_PASS, /* type */
  "recip", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};

class pass_cse_reciprocals : public gimple_opt_pass
{
public:
  pass_cse_reciprocals (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_cse_reciprocals, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return optimize && flag_reciprocal_math; }
  virtual unsigned int execute (function *);

}; // class pass_cse_reciprocals
unsigned int
pass_cse_reciprocals::execute (function *fun)
{
  basic_block bb;
  tree arg;

  occ_pool = create_alloc_pool ("dominators for recip",
				sizeof (struct occurrence),
				n_basic_blocks_for_fn (fun) / 3 + 1);

  memset (&reciprocal_stats, 0, sizeof (reciprocal_stats));
  calculate_dominance_info (CDI_DOMINATORS);
  calculate_dominance_info (CDI_POST_DOMINATORS);

#ifdef ENABLE_CHECKING
  FOR_EACH_BB_FN (bb, fun)
    gcc_assert (!bb->aux);
#endif

  for (arg = DECL_ARGUMENTS (fun->decl); arg; arg = DECL_CHAIN (arg))
    if (FLOAT_TYPE_P (TREE_TYPE (arg))
	&& is_gimple_reg (arg))
      {
	tree name = ssa_default_def (fun, arg);
	if (name)
	  execute_cse_reciprocals_1 (NULL, name);
      }

  FOR_EACH_BB_FN (bb, fun)
    {
      gimple_stmt_iterator gsi;
      gimple phi;
      tree def;

      for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  phi = gsi_stmt (gsi);
	  def = PHI_RESULT (phi);
	  if (! virtual_operand_p (def)
	      && FLOAT_TYPE_P (TREE_TYPE (def)))
	    execute_cse_reciprocals_1 (NULL, def);
	}

      for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple stmt = gsi_stmt (gsi);

	  if (gimple_has_lhs (stmt)
	      && (def = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_DEF)) != NULL
	      && FLOAT_TYPE_P (TREE_TYPE (def))
	      && TREE_CODE (def) == SSA_NAME)
	    execute_cse_reciprocals_1 (&gsi, def);
	}

      if (optimize_bb_for_size_p (bb))
	continue;

      /* Scan for a/func(b) and convert it to reciprocal a*rfunc(b).  */
      for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple stmt = gsi_stmt (gsi);
	  tree fndecl;

	  if (is_gimple_assign (stmt)
	      && gimple_assign_rhs_code (stmt) == RDIV_EXPR)
	    {
	      tree arg1 = gimple_assign_rhs2 (stmt);
	      gimple stmt1;

	      if (TREE_CODE (arg1) != SSA_NAME)
		continue;

	      stmt1 = SSA_NAME_DEF_STMT (arg1);

	      if (is_gimple_call (stmt1)
		  && gimple_call_lhs (stmt1)
		  && (fndecl = gimple_call_fndecl (stmt1))
		  && (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
		      || DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD))
		{
		  enum built_in_function code;
		  bool md_code, fail;
		  imm_use_iterator ui;
		  use_operand_p use_p;

		  code = DECL_FUNCTION_CODE (fndecl);
		  md_code = DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD;

		  fndecl = targetm.builtin_reciprocal (code, md_code, false);
		  if (!fndecl)
		    continue;

		  /* Check that all uses of the SSA name are divisions,
		     otherwise replacing the defining statement will do
		     the wrong thing.  */
		  fail = false;
		  FOR_EACH_IMM_USE_FAST (use_p, ui, arg1)
		    {
		      gimple stmt2 = USE_STMT (use_p);
		      if (is_gimple_debug (stmt2))
			continue;
		      if (!is_gimple_assign (stmt2)
			  || gimple_assign_rhs_code (stmt2) != RDIV_EXPR
			  || gimple_assign_rhs1 (stmt2) == arg1
			  || gimple_assign_rhs2 (stmt2) != arg1)
			{
			  fail = true;
			  break;
			}
		    }
		  if (fail)
		    continue;

		  gimple_replace_ssa_lhs (stmt1, arg1);
		  gimple_call_set_fndecl (stmt1, fndecl);
		  update_stmt (stmt1);
		  reciprocal_stats.rfuncs_inserted++;

		  FOR_EACH_IMM_USE_STMT (stmt, ui, arg1)
		    {
		      gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
		      gimple_assign_set_rhs_code (stmt, MULT_EXPR);
		      fold_stmt_inplace (&gsi);
		      update_stmt (stmt);
		    }
		}
	    }
	}
    }

  statistics_counter_event (fun, "reciprocal divs inserted",
			    reciprocal_stats.rdivs_inserted);
  statistics_counter_event (fun, "reciprocal functions inserted",
			    reciprocal_stats.rfuncs_inserted);

  free_dominance_info (CDI_DOMINATORS);
  free_dominance_info (CDI_POST_DOMINATORS);
  free_alloc_pool (occ_pool);
  return 0;
}
} // anon namespace

gimple_opt_pass *
make_pass_cse_reciprocals (gcc::context *ctxt)
{
  return new pass_cse_reciprocals (ctxt);
}
/* Records an occurrence at statement USE_STMT in the vector of trees
   STMTS if it is dominated by *TOP_BB or dominates it or this basic block
   is not yet initialized.  Returns true if the occurrence was pushed on
   the vector.  Adjusts *TOP_BB to be the basic block dominating all
   statements in the vector.  */

static bool
maybe_record_sincos (vec<gimple> *stmts,
		     basic_block *top_bb, gimple use_stmt)
{
  basic_block use_bb = gimple_bb (use_stmt);
  if (*top_bb
      && (*top_bb == use_bb
	  || dominated_by_p (CDI_DOMINATORS, use_bb, *top_bb)))
    stmts->safe_push (use_stmt);
  else if (!*top_bb
	   || dominated_by_p (CDI_DOMINATORS, *top_bb, use_bb))
    {
      stmts->safe_push (use_stmt);
      *top_bb = use_bb;
    }
  else
    return false;

  return true;
}
/* Look for sin, cos and cexpi calls with the same argument NAME and
   create a single call to cexpi CSEing the result in this case.
   We first walk over all immediate uses of the argument collecting
   statements that we can CSE in a vector and in a second pass replace
   the statement rhs with a REALPART or IMAGPART expression on the
   result of the cexpi call we insert before the use statement that
   dominates all other candidates.  */
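
/* For instance (a sketch of the transformation performed below, using
   the REALPART/IMAGPART mapping of the second pass):

     s = sin (x);
     c = cos (x);

   becomes

     t = cexpi (x);
     s = IMAGPART_EXPR <t>;
     c = REALPART_EXPR <t>;

   so a single libm call computes both values.  */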
static bool
execute_cse_sincos_1 (tree name)
{
  gimple_stmt_iterator gsi;
  imm_use_iterator use_iter;
  tree fndecl, res, type;
  gimple def_stmt, use_stmt, stmt;
  int seen_cos = 0, seen_sin = 0, seen_cexpi = 0;
  vec<gimple> stmts = vNULL;
  basic_block top_bb = NULL;
  int i;
  bool cfg_changed = false;

  type = TREE_TYPE (name);
  FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, name)
    {
      if (gimple_code (use_stmt) != GIMPLE_CALL
	  || !gimple_call_lhs (use_stmt)
	  || !(fndecl = gimple_call_fndecl (use_stmt))
	  || DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_NORMAL)
	continue;

      switch (DECL_FUNCTION_CODE (fndecl))
	{
	CASE_FLT_FN (BUILT_IN_COS):
	  seen_cos |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
	  break;

	CASE_FLT_FN (BUILT_IN_SIN):
	  seen_sin |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
	  break;

	CASE_FLT_FN (BUILT_IN_CEXPI):
	  seen_cexpi |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
	  break;

	default:;
	}
    }

  if (seen_cos + seen_sin + seen_cexpi <= 1)
    {
      stmts.release ();
      return false;
    }

  /* Simply insert cexpi at the beginning of top_bb but not earlier than
     the name def statement.  */
  fndecl = mathfn_built_in (type, BUILT_IN_CEXPI);
  if (!fndecl)
    return false;
  stmt = gimple_build_call (fndecl, 1, name);
  res = make_temp_ssa_name (TREE_TYPE (TREE_TYPE (fndecl)), stmt, "sincostmp");
  gimple_call_set_lhs (stmt, res);

  def_stmt = SSA_NAME_DEF_STMT (name);
  if (!SSA_NAME_IS_DEFAULT_DEF (name)
      && gimple_code (def_stmt) != GIMPLE_PHI
      && gimple_bb (def_stmt) == top_bb)
    {
      gsi = gsi_for_stmt (def_stmt);
      gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
    }
  else
    {
      gsi = gsi_after_labels (top_bb);
      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
    }
  sincos_stats.inserted++;

  /* And adjust the recorded old call sites.  */
  for (i = 0; stmts.iterate (i, &use_stmt); ++i)
    {
      tree rhs = NULL;
      fndecl = gimple_call_fndecl (use_stmt);

      switch (DECL_FUNCTION_CODE (fndecl))
	{
	CASE_FLT_FN (BUILT_IN_COS):
	  rhs = fold_build1 (REALPART_EXPR, type, res);
	  break;

	CASE_FLT_FN (BUILT_IN_SIN):
	  rhs = fold_build1 (IMAGPART_EXPR, type, res);
	  break;

	CASE_FLT_FN (BUILT_IN_CEXPI):
	  rhs = res;
	  break;

	default:
	  gcc_unreachable ();
	}

      /* Replace call with a copy.  */
      stmt = gimple_build_assign (gimple_call_lhs (use_stmt), rhs);

      gsi = gsi_for_stmt (use_stmt);
      gsi_replace (&gsi, stmt, true);
      if (gimple_purge_dead_eh_edges (gimple_bb (stmt)))
	cfg_changed = true;
    }

  stmts.release ();

  return cfg_changed;
}
/* To evaluate powi(x,n), the floating point value x raised to the
   constant integer exponent n, we use a hybrid algorithm that
   combines the "window method" with look-up tables.  For an
   introduction to exponentiation algorithms and "addition chains",
   see section 4.6.3, "Evaluation of Powers" of Donald E. Knuth,
   "Seminumerical Algorithms", Vol. 2, "The Art of Computer Programming",
   3rd Edition, 1998, and Daniel M. Gordon, "A Survey of Fast Exponentiation
   Methods", Journal of Algorithms, Vol. 27, pp. 129-146, 1998.  */
/* Provide a default value for POWI_MAX_MULTS, the maximum number of
   multiplications to inline before calling the system library's pow
   function.  powi(x,n) requires at worst 2*bits(n)-2 multiplications,
   so this default never requires calling pow, powf or powl.  */
#ifndef POWI_MAX_MULTS
#define POWI_MAX_MULTS  (2*HOST_BITS_PER_WIDE_INT-2)
#endif
/* The size of the "optimal power tree" lookup table.  All
   exponents less than this value are simply looked up in the
   powi_table below.  This threshold is also used to size the
   cache of pseudo registers that hold intermediate results.  */
#define POWI_TABLE_SIZE 256

/* The size, in bits of the window, used in the "window method"
   exponentiation algorithm.  This is equivalent to a radix of
   (1<<POWI_WINDOW_SIZE) in the corresponding "m-ary method".  */
#define POWI_WINDOW_SIZE 3
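
/* A worked example of the window method: with POWI_WINDOW_SIZE == 3,
   the loop in powi_cost below consumes an odd exponent three bits at a
   time.  For n = 259 (0b100000011) the low digit 3 costs
   powi_lookup_cost (3) extra multiplies plus POWI_WINDOW_SIZE + 1 for
   the squarings, and the remaining exponent 259 >> 3 == 32 is then
   below POWI_TABLE_SIZE and handled by a final table lookup.  */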
/* The following table is an efficient representation of an
   "optimal power tree".  For each value, i, the corresponding
   value, j, in the table states that an optimal evaluation
   sequence for calculating pow(x,i) can be found by evaluating
   pow(x,j)*pow(x,i-j).  An optimal power tree for the first
   100 integers is given in Knuth's "Seminumerical algorithms".  */
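
/* For instance, powi_table[7] == 4 below, stating that an optimal
   sequence for x**7 is x**4 * x**3, where x**4 and x**3 in turn both
   reuse the shared subresult x**2.  */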
static const unsigned char powi_table[POWI_TABLE_SIZE] =
  {
      0,   1,   1,   2,   2,   3,   3,   4,  /*   0 -   7 */
      4,   6,   5,   6,   6,  10,   7,   9,  /*   8 -  15 */
      8,  16,   9,  16,  10,  12,  11,  13,  /*  16 -  23 */
     12,  17,  13,  18,  14,  24,  15,  26,  /*  24 -  31 */
     16,  17,  17,  19,  18,  33,  19,  26,  /*  32 -  39 */
     20,  25,  21,  40,  22,  27,  23,  44,  /*  40 -  47 */
     24,  32,  25,  34,  26,  29,  27,  44,  /*  48 -  55 */
     28,  31,  29,  34,  30,  60,  31,  36,  /*  56 -  63 */
     32,  64,  33,  34,  34,  46,  35,  37,  /*  64 -  71 */
     36,  65,  37,  50,  38,  48,  39,  69,  /*  72 -  79 */
     40,  49,  41,  43,  42,  51,  43,  58,  /*  80 -  87 */
     44,  64,  45,  47,  46,  59,  47,  76,  /*  88 -  95 */
     48,  65,  49,  66,  50,  67,  51,  66,  /*  96 - 103 */
     52,  70,  53,  74,  54, 104,  55,  74,  /* 104 - 111 */
     56,  64,  57,  69,  58,  78,  59,  68,  /* 112 - 119 */
     60,  61,  61,  80,  62,  75,  63,  68,  /* 120 - 127 */
     64,  65,  65, 128,  66, 129,  67,  90,  /* 128 - 135 */
     68,  73,  69, 131,  70,  94,  71,  88,  /* 136 - 143 */
     72, 128,  73,  98,  74, 132,  75, 121,  /* 144 - 151 */
     76, 102,  77, 124,  78, 132,  79, 106,  /* 152 - 159 */
     80,  97,  81, 160,  82,  99,  83, 134,  /* 160 - 167 */
     84,  86,  85,  95,  86, 160,  87, 100,  /* 168 - 175 */
     88, 113,  89,  98,  90, 107,  91, 122,  /* 176 - 183 */
     92, 111,  93, 102,  94, 126,  95, 150,  /* 184 - 191 */
     96, 128,  97, 130,  98, 133,  99, 195,  /* 192 - 199 */
    100, 128, 101, 123, 102, 164, 103, 138,  /* 200 - 207 */
    104, 145, 105, 146, 106, 109, 107, 149,  /* 208 - 215 */
    108, 200, 109, 146, 110, 170, 111, 157,  /* 216 - 223 */
    112, 128, 113, 130, 114, 182, 115, 132,  /* 224 - 231 */
    116, 200, 117, 132, 118, 158, 119, 206,  /* 232 - 239 */
    120, 240, 121, 162, 122, 147, 123, 152,  /* 240 - 247 */
    124, 166, 125, 214, 126, 138, 127, 153,  /* 248 - 255 */
  };
/* Return the number of multiplications required to calculate
   powi(x,n) where n is less than POWI_TABLE_SIZE.  This is a
   subroutine of powi_cost.  CACHE is an array indicating
   which exponents have already been calculated.  */
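
/* As a worked example: with a fresh cache (only cache[1] set by
   powi_cost), powi_lookup_cost (7, cache) returns 4, matching the
   sequence x2 = x*x; x3 = x2*x; x4 = x2*x2; x7 = x4*x3.  */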
static int
powi_lookup_cost (unsigned HOST_WIDE_INT n, bool *cache)
{
  /* If we've already calculated this exponent, then this evaluation
     doesn't require any additional multiplications.  */
  if (cache[n])
    return 0;

  cache[n] = true;
  return powi_lookup_cost (n - powi_table[n], cache)
	 + powi_lookup_cost (powi_table[n], cache) + 1;
}
/* Return the number of multiplications required to calculate
   powi(x,n) for an arbitrary x, given the exponent N.  This
   function needs to be kept in sync with powi_as_mults below.  */

static int
powi_cost (HOST_WIDE_INT n)
{
  bool cache[POWI_TABLE_SIZE];
  unsigned HOST_WIDE_INT digit;
  unsigned HOST_WIDE_INT val;
  int result;

  if (n == 0)
    return 0;

  /* Ignore the reciprocal when calculating the cost.  */
  val = (n < 0) ? -n : n;

  /* Initialize the exponent cache.  */
  memset (cache, 0, POWI_TABLE_SIZE * sizeof (bool));
  cache[1] = true;

  result = 0;

  while (val >= POWI_TABLE_SIZE)
    {
      if (val & 1)
	{
	  digit = val & ((1 << POWI_WINDOW_SIZE) - 1);
	  result += powi_lookup_cost (digit, cache)
		    + POWI_WINDOW_SIZE + 1;
	  val >>= POWI_WINDOW_SIZE;
	}
      else
	{
	  val >>= 1;
	  result++;
	}
    }

  return result + powi_lookup_cost (val, cache);
}
/* Recursive subroutine of powi_as_mults.  This function takes the
   array, CACHE, of already calculated exponents and an exponent N and
   returns a tree that corresponds to CACHE[1]**N, with type TYPE.  */

static tree
powi_as_mults_1 (gimple_stmt_iterator *gsi, location_t loc, tree type,
		 HOST_WIDE_INT n, tree *cache)
{
  tree op0, op1, ssa_target;
  unsigned HOST_WIDE_INT digit;
  gimple_assign mult_stmt;

  if (n < POWI_TABLE_SIZE && cache[n])
    return cache[n];

  ssa_target = make_temp_ssa_name (type, NULL, "powmult");

  if (n < POWI_TABLE_SIZE)
    {
      cache[n] = ssa_target;
      op0 = powi_as_mults_1 (gsi, loc, type, n - powi_table[n], cache);
      op1 = powi_as_mults_1 (gsi, loc, type, powi_table[n], cache);
    }
  else if (n & 1)
    {
      digit = n & ((1 << POWI_WINDOW_SIZE) - 1);
      op0 = powi_as_mults_1 (gsi, loc, type, n - digit, cache);
      op1 = powi_as_mults_1 (gsi, loc, type, digit, cache);
    }
  else
    {
      op0 = powi_as_mults_1 (gsi, loc, type, n >> 1, cache);
      op1 = op0;
    }

  mult_stmt = gimple_build_assign_with_ops (MULT_EXPR, ssa_target, op0, op1);
  gimple_set_location (mult_stmt, loc);
  gsi_insert_before (gsi, mult_stmt, GSI_SAME_STMT);

  return ssa_target;
}
/* Convert ARG0**N to a tree of multiplications of ARG0 with itself.
   This function needs to be kept in sync with powi_cost above.  */

static tree
powi_as_mults (gimple_stmt_iterator *gsi, location_t loc,
	       tree arg0, HOST_WIDE_INT n)
{
  tree cache[POWI_TABLE_SIZE], result, type = TREE_TYPE (arg0);
  gimple_assign div_stmt;
  tree target;

  if (n == 0)
    return build_real (type, dconst1);

  memset (cache, 0, sizeof (cache));
  cache[1] = arg0;

  result = powi_as_mults_1 (gsi, loc, type, (n < 0) ? -n : n, cache);
  if (n >= 0)
    return result;

  /* If the original exponent was negative, reciprocate the result.  */
  target = make_temp_ssa_name (type, NULL, "powmult");
  div_stmt = gimple_build_assign_with_ops (RDIV_EXPR, target,
					   build_real (type, dconst1),
					   result);
  gimple_set_location (div_stmt, loc);
  gsi_insert_before (gsi, div_stmt, GSI_SAME_STMT);

  return target;
}
/* ARG0 and N are the two arguments to a powi builtin in GSI with
   location info LOC.  If the arguments are appropriate, create an
   equivalent sequence of statements prior to GSI using an optimal
   number of multiplications, and return an expression holding the
   result.  */

static tree
gimple_expand_builtin_powi (gimple_stmt_iterator *gsi, location_t loc,
			    tree arg0, HOST_WIDE_INT n)
{
  /* Avoid largest negative number.  */
  if (n != -n
      && ((n >= -1 && n <= 2)
	  || (optimize_function_for_speed_p (cfun)
	      && powi_cost (n) <= POWI_MAX_MULTS)))
    return powi_as_mults (gsi, loc, arg0, n);

  return NULL_TREE;
}
/* Build a gimple call statement that calls FN with argument ARG.
   Set the lhs of the call statement to a fresh SSA name.  Insert the
   statement prior to GSI's current position, and return the fresh
   SSA name.  */

static tree
build_and_insert_call (gimple_stmt_iterator *gsi, location_t loc,
		       tree fn, tree arg)
{
  gimple_call call_stmt;
  tree ssa_target;

  call_stmt = gimple_build_call (fn, 1, arg);
  ssa_target = make_temp_ssa_name (TREE_TYPE (arg), NULL, "powroot");
  gimple_set_lhs (call_stmt, ssa_target);
  gimple_set_location (call_stmt, loc);
  gsi_insert_before (gsi, call_stmt, GSI_SAME_STMT);

  return ssa_target;
}
/* Build a gimple binary operation with the given CODE and arguments
   ARG0, ARG1, assigning the result to a new SSA name for variable
   TARGET.  Insert the statement prior to GSI's current position, and
   return the fresh SSA name.  */

static tree
build_and_insert_binop (gimple_stmt_iterator *gsi, location_t loc,
			const char *name, enum tree_code code,
			tree arg0, tree arg1)
{
  tree result = make_temp_ssa_name (TREE_TYPE (arg0), NULL, name);
  gimple_assign stmt = gimple_build_assign_with_ops (code, result, arg0, arg1);
  gimple_set_location (stmt, loc);
  gsi_insert_before (gsi, stmt, GSI_SAME_STMT);

  return result;
}
/* Build a gimple reference operation with the given CODE and argument
   ARG, assigning the result to a new SSA name of TYPE with NAME.
   Insert the statement prior to GSI's current position, and return
   the fresh SSA name.  */

static inline tree
build_and_insert_ref (gimple_stmt_iterator *gsi, location_t loc, tree type,
		      const char *name, enum tree_code code, tree arg0)
{
  tree result = make_temp_ssa_name (type, NULL, name);
  gimple stmt = gimple_build_assign (result, build1 (code, type, arg0));
  gimple_set_location (stmt, loc);
  gsi_insert_before (gsi, stmt, GSI_SAME_STMT);

  return result;
}
/* Build a gimple assignment to cast VAL to TYPE.  Insert the statement
   prior to GSI's current position, and return the fresh SSA name.  */

static tree
build_and_insert_cast (gimple_stmt_iterator *gsi, location_t loc,
		       tree type, tree val)
{
  tree result = make_ssa_name (type, NULL);
  gimple_assign stmt =
    gimple_build_assign_with_ops (NOP_EXPR, result, val, NULL_TREE);
  gimple_set_location (stmt, loc);
  gsi_insert_before (gsi, stmt, GSI_SAME_STMT);

  return result;
}
/* ARG0 and ARG1 are the two arguments to a pow builtin call in GSI
   with location info LOC.  If possible, create an equivalent and
   less expensive sequence of statements prior to GSI, and return an
   expression holding the result.  */
static tree
gimple_expand_builtin_pow (gimple_stmt_iterator *gsi, location_t loc,
			   tree arg0, tree arg1)
{
  REAL_VALUE_TYPE c, cint, dconst1_4, dconst3_4, dconst1_3, dconst1_6;
  REAL_VALUE_TYPE c2, dconst3;
  HOST_WIDE_INT n;
  tree type, sqrtfn, cbrtfn, sqrt_arg0, sqrt_sqrt, result, cbrt_x, powi_cbrt_x;
  enum machine_mode mode;
  bool hw_sqrt_exists, c_is_int, c2_is_int;

  /* If the exponent isn't a constant, there's nothing of interest
     to be done.  */
  if (TREE_CODE (arg1) != REAL_CST)
    return NULL_TREE;

  /* If the exponent is equivalent to an integer, expand to an optimal
     multiplication sequence when profitable.  */
  c = TREE_REAL_CST (arg1);
  n = real_to_integer (&c);
  real_from_integer (&cint, VOIDmode, n, SIGNED);
  c_is_int = real_identical (&c, &cint);

  if (c_is_int
      && ((n >= -1 && n <= 2)
	  || (flag_unsafe_math_optimizations
	      && optimize_bb_for_speed_p (gsi_bb (*gsi))
	      && powi_cost (n) <= POWI_MAX_MULTS)))
    return gimple_expand_builtin_powi (gsi, loc, arg0, n);
  /* Attempt various optimizations using sqrt and cbrt.  */
  type = TREE_TYPE (arg0);
  mode = TYPE_MODE (type);
  sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT);

  /* Optimize pow(x,0.5) = sqrt(x).  This replacement is always safe
     unless signed zeros must be maintained.  pow(-0,0.5) = +0, while
     sqrt(-0) = -0.  */
  if (sqrtfn
      && REAL_VALUES_EQUAL (c, dconsthalf)
      && !HONOR_SIGNED_ZEROS (mode))
    return build_and_insert_call (gsi, loc, sqrtfn, arg0);

  /* Optimize pow(x,0.25) = sqrt(sqrt(x)).  Assume on most machines that
     a builtin sqrt instruction is smaller than a call to pow with 0.25,
     so do this optimization even if -Os.  Don't do this optimization
     if we don't have a hardware sqrt insn.  */
  dconst1_4 = dconst1;
  SET_REAL_EXP (&dconst1_4, REAL_EXP (&dconst1_4) - 2);
  hw_sqrt_exists = optab_handler (sqrt_optab, mode) != CODE_FOR_nothing;

  if (flag_unsafe_math_optimizations
      && sqrtfn
      && REAL_VALUES_EQUAL (c, dconst1_4)
      && hw_sqrt_exists)
    {
      /* sqrt(x)  */
      sqrt_arg0 = build_and_insert_call (gsi, loc, sqrtfn, arg0);

      /* sqrt(sqrt(x))  */
      return build_and_insert_call (gsi, loc, sqrtfn, sqrt_arg0);
    }

  /* Optimize pow(x,0.75) = sqrt(x) * sqrt(sqrt(x)) unless we are
     optimizing for space.  Don't do this optimization if we don't have
     a hardware sqrt insn.  */
  real_from_integer (&dconst3_4, VOIDmode, 3, SIGNED);
  SET_REAL_EXP (&dconst3_4, REAL_EXP (&dconst3_4) - 2);

  if (flag_unsafe_math_optimizations
      && sqrtfn
      && optimize_function_for_speed_p (cfun)
      && REAL_VALUES_EQUAL (c, dconst3_4)
      && hw_sqrt_exists)
    {
      /* sqrt(x)  */
      sqrt_arg0 = build_and_insert_call (gsi, loc, sqrtfn, arg0);

      /* sqrt(sqrt(x))  */
      sqrt_sqrt = build_and_insert_call (gsi, loc, sqrtfn, sqrt_arg0);

      /* sqrt(x) * sqrt(sqrt(x))  */
      return build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
				     sqrt_arg0, sqrt_sqrt);
    }

  /* Optimize pow(x,1./3.) = cbrt(x).  This requires unsafe math
     optimizations since 1./3. is not exactly representable.  If x
     is negative and finite, the correct value of pow(x,1./3.) is
     a NaN with the "invalid" exception raised, because the value
     of 1./3. actually has an even denominator.  The correct value
     of cbrt(x) is a negative real value.  */
  cbrtfn = mathfn_built_in (type, BUILT_IN_CBRT);
  dconst1_3 = real_value_truncate (mode, dconst_third ());

  if (flag_unsafe_math_optimizations
      && cbrtfn
      && (gimple_val_nonnegative_real_p (arg0) || !HONOR_NANS (mode))
      && REAL_VALUES_EQUAL (c, dconst1_3))
    return build_and_insert_call (gsi, loc, cbrtfn, arg0);

  /* Optimize pow(x,1./6.) = cbrt(sqrt(x)).  Don't do this optimization
     if we don't have a hardware sqrt insn.  */
  dconst1_6 = dconst1_3;
  SET_REAL_EXP (&dconst1_6, REAL_EXP (&dconst1_6) - 1);

  if (flag_unsafe_math_optimizations
      && sqrtfn
      && cbrtfn
      && (gimple_val_nonnegative_real_p (arg0) || !HONOR_NANS (mode))
      && optimize_function_for_speed_p (cfun)
      && hw_sqrt_exists
      && REAL_VALUES_EQUAL (c, dconst1_6))
    {
      /* sqrt(x)  */
      sqrt_arg0 = build_and_insert_call (gsi, loc, sqrtfn, arg0);

      /* cbrt(sqrt(x))  */
      return build_and_insert_call (gsi, loc, cbrtfn, sqrt_arg0);
    }

  /* Optimize pow(x,c), where n = 2c for some nonzero integer n
     and c not an integer, into

       sqrt(x) * powi(x, n/2),                n > 0;
       1.0 / (sqrt(x) * powi(x, abs(n/2))),   n < 0.

     Do not calculate the powi factor when n/2 = 0.  */
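
  /* For instance, pow(x, 3.5) has c = 3.5 and n = 7, and is expanded
     to sqrt(x) * powi(x, 3), since x**3.5 == x**0.5 * x**3.  */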
  real_arithmetic (&c2, MULT_EXPR, &c, &dconst2);
  n = real_to_integer (&c2);
  real_from_integer (&cint, VOIDmode, n, SIGNED);
  c2_is_int = real_identical (&c2, &cint);

  if (flag_unsafe_math_optimizations
      && sqrtfn
      && c2_is_int
      && !c_is_int
      && optimize_function_for_speed_p (cfun))
    {
      tree powi_x_ndiv2 = NULL_TREE;

      /* Attempt to fold powi(arg0, abs(n/2)) into multiplies.  If not
	 possible or profitable, give up.  Skip the degenerate case when
	 n is 1 or -1, where the result is always 1.  */
      if (absu_hwi (n) != 1)
	{
	  powi_x_ndiv2
	    = gimple_expand_builtin_powi (gsi, loc, arg0,
					  abs_hwi (n / 2));
	  if (!powi_x_ndiv2)
	    return NULL_TREE;
	}

      /* Calculate sqrt(x).  When n is not 1 or -1, multiply it by the
	 result of the optimal multiply sequence just calculated.  */
      sqrt_arg0 = build_and_insert_call (gsi, loc, sqrtfn, arg0);

      if (absu_hwi (n) == 1)
	result = sqrt_arg0;
      else
	result = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
					 sqrt_arg0, powi_x_ndiv2);

      /* If n is negative, reciprocate the result.  */
      if (n < 0)
	result = build_and_insert_binop (gsi, loc, "powroot", RDIV_EXPR,
					 build_real (type, dconst1), result);

      return result;
    }
  /* Optimize pow(x,c), where 3c = n for some nonzero integer n, into

       powi(x, n/3) * powi(cbrt(x), n%3),                    n > 0;
       1.0 / (powi(x, abs(n)/3) * powi(cbrt(x), abs(n)%3)),  n < 0.

     Do not calculate the first factor when n/3 = 0.  As cbrt(x) is
     different from pow(x, 1./3.) due to rounding and behavior with
     negative x, we need to constrain this transformation to unsafe
     math and positive x or finite math.  */
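
  /* For instance, pow(x, 5./3.) has n = 5 and is expanded to
     powi(x, 1) * powi(cbrt(x), 2), since x**(5/3) == x * cbrt(x)**2.  */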
  real_from_integer (&dconst3, VOIDmode, 3, SIGNED);
  real_arithmetic (&c2, MULT_EXPR, &c, &dconst3);
  real_round (&c2, mode, &c2);
  n = real_to_integer (&c2);
  real_from_integer (&cint, VOIDmode, n, SIGNED);
  real_arithmetic (&c2, RDIV_EXPR, &cint, &dconst3);
  real_convert (&c2, mode, &c2);

  if (flag_unsafe_math_optimizations
      && cbrtfn
      && (gimple_val_nonnegative_real_p (arg0) || !HONOR_NANS (mode))
      && real_identical (&c2, &c)
      && !c2_is_int
      && optimize_function_for_speed_p (cfun)
      && powi_cost (n / 3) <= POWI_MAX_MULTS)
    {
      tree powi_x_ndiv3 = NULL_TREE;

      /* Attempt to fold powi(arg0, abs(n/3)) into multiplies.  If not
	 possible or profitable, give up.  Skip the degenerate case when
	 abs(n) < 3, where the result is always 1.  */
      if (absu_hwi (n) >= 3)
	{
	  powi_x_ndiv3
	    = gimple_expand_builtin_powi (gsi, loc, arg0,
					  abs_hwi (n / 3));
	  if (!powi_x_ndiv3)
	    return NULL_TREE;
	}

      /* Calculate powi(cbrt(x), n%3).  Don't use gimple_expand_builtin_powi
	 as that creates an unnecessary variable.  Instead, just produce
	 either cbrt(x) or cbrt(x) * cbrt(x).  */
      cbrt_x = build_and_insert_call (gsi, loc, cbrtfn, arg0);

      if (absu_hwi (n) % 3 == 1)
	powi_cbrt_x = cbrt_x;
      else
	powi_cbrt_x = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
					      cbrt_x, cbrt_x);

      /* Multiply the two subexpressions, unless powi(x,abs(n)/3) = 1.  */
      if (absu_hwi (n) < 3)
	result = powi_cbrt_x;
      else
	result = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
					 powi_x_ndiv3, powi_cbrt_x);

      /* If n is negative, reciprocate the result.  */
      if (n < 0)
	result = build_and_insert_binop (gsi, loc, "powroot", RDIV_EXPR,
					 build_real (type, dconst1), result);

      return result;
    }

  /* No optimizations succeeded.  */
  return NULL_TREE;
}
/* ARG is the argument to a cabs builtin call in GSI with location info
   LOC.  Create a sequence of statements prior to GSI that calculates
   sqrt(R*R + I*I), where R and I are the real and imaginary components
   of ARG, respectively.  Return an expression holding the result.  */

static tree
gimple_expand_builtin_cabs (gimple_stmt_iterator *gsi, location_t loc, tree arg)
{
  tree real_part, imag_part, addend1, addend2, sum, result;
  tree type = TREE_TYPE (TREE_TYPE (arg));
  tree sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT);
  enum machine_mode mode = TYPE_MODE (type);

  if (!flag_unsafe_math_optimizations
      || !optimize_bb_for_speed_p (gimple_bb (gsi_stmt (*gsi)))
      || !sqrtfn
      || optab_handler (sqrt_optab, mode) == CODE_FOR_nothing)
    return NULL_TREE;

  real_part = build_and_insert_ref (gsi, loc, type, "cabs",
				    REALPART_EXPR, arg);
  addend1 = build_and_insert_binop (gsi, loc, "cabs", MULT_EXPR,
				    real_part, real_part);
  imag_part = build_and_insert_ref (gsi, loc, type, "cabs",
				    IMAGPART_EXPR, arg);
  addend2 = build_and_insert_binop (gsi, loc, "cabs", MULT_EXPR,
				    imag_part, imag_part);
  sum = build_and_insert_binop (gsi, loc, "cabs", PLUS_EXPR, addend1, addend2);
  result = build_and_insert_call (gsi, loc, sqrtfn, sum);

  return result;
}
/* Go through all calls to sin, cos and cexpi and call execute_cse_sincos_1
   on the SSA_NAME argument of each of them.  Also expand powi(x,n) into
   an optimal number of multiplies, when n is a constant.  */

namespace {

const pass_data pass_data_cse_sincos =
{
  GIMPLE_PASS, /* type */
  "sincos", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};

class pass_cse_sincos : public gimple_opt_pass
{
public:
  pass_cse_sincos (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_cse_sincos, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *)
    {
      /* We no longer require either sincos or cexp, since powi expansion
	 piggybacks on this pass.  */
      return optimize;
    }

  virtual unsigned int execute (function *);

}; // class pass_cse_sincos
unsigned int
pass_cse_sincos::execute (function *fun)
{
  basic_block bb;
  bool cfg_changed = false;

  calculate_dominance_info (CDI_DOMINATORS);
  memset (&sincos_stats, 0, sizeof (sincos_stats));

  FOR_EACH_BB_FN (bb, fun)
    {
      gimple_stmt_iterator gsi;
      bool cleanup_eh = false;

      for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple stmt = gsi_stmt (gsi);
	  tree fndecl;

	  /* Only the last stmt in a bb could throw, no need to call
	     gimple_purge_dead_eh_edges if we change something in the middle
	     of a basic block.  */
	  cleanup_eh = false;

	  if (is_gimple_call (stmt)
	      && gimple_call_lhs (stmt)
	      && (fndecl = gimple_call_fndecl (stmt))
	      && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
	    {
	      tree arg, arg0, arg1, result;
	      HOST_WIDE_INT n;
	      location_t loc;

	      switch (DECL_FUNCTION_CODE (fndecl))
		{
		CASE_FLT_FN (BUILT_IN_COS):
		CASE_FLT_FN (BUILT_IN_SIN):
		CASE_FLT_FN (BUILT_IN_CEXPI):
		  /* Make sure we have either sincos or cexp.  */
		  if (!targetm.libc_has_function (function_c99_math_complex)
		      && !targetm.libc_has_function (function_sincos))
		    break;

		  arg = gimple_call_arg (stmt, 0);
		  if (TREE_CODE (arg) == SSA_NAME)
		    cfg_changed |= execute_cse_sincos_1 (arg);
		  break;

		CASE_FLT_FN (BUILT_IN_POW):
		  arg0 = gimple_call_arg (stmt, 0);
		  arg1 = gimple_call_arg (stmt, 1);

		  loc = gimple_location (stmt);
		  result = gimple_expand_builtin_pow (&gsi, loc, arg0, arg1);

		  if (result)
		    {
		      tree lhs = gimple_get_lhs (stmt);
		      gimple_assign new_stmt =
			gimple_build_assign (lhs, result);
		      gimple_set_location (new_stmt, loc);
		      unlink_stmt_vdef (stmt);
		      gsi_replace (&gsi, new_stmt, true);
		      cleanup_eh = true;
		      if (gimple_vdef (stmt))
			release_ssa_name (gimple_vdef (stmt));
		    }
		  break;

		CASE_FLT_FN (BUILT_IN_POWI):
		  arg0 = gimple_call_arg (stmt, 0);
		  arg1 = gimple_call_arg (stmt, 1);
		  loc = gimple_location (stmt);

		  if (real_minus_onep (arg0))
		    {
		      tree t0, t1, cond, one, minus_one;
		      gimple_assign stmt;

		      t0 = TREE_TYPE (arg0);
		      t1 = TREE_TYPE (arg1);
		      one = build_real (t0, dconst1);
		      minus_one = build_real (t0, dconstm1);

		      cond = make_temp_ssa_name (t1, NULL, "powi_cond");
		      stmt = gimple_build_assign_with_ops (BIT_AND_EXPR, cond,
							   arg1,
							   build_int_cst (t1, 1));
		      gimple_set_location (stmt, loc);
		      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);

		      result = make_temp_ssa_name (t0, NULL, "powi");
		      stmt = gimple_build_assign_with_ops (COND_EXPR, result,
							   cond,
							   minus_one, one);
		      gimple_set_location (stmt, loc);
		      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
		    }
		  else
		    {
		      if (!tree_fits_shwi_p (arg1))
			break;

		      n = tree_to_shwi (arg1);
		      result = gimple_expand_builtin_powi (&gsi, loc, arg0, n);
		    }

		  if (result)
		    {
		      tree lhs = gimple_get_lhs (stmt);
		      gimple_assign new_stmt = gimple_build_assign (lhs, result);
		      gimple_set_location (new_stmt, loc);
		      unlink_stmt_vdef (stmt);
		      gsi_replace (&gsi, new_stmt, true);
		      cleanup_eh = true;
		      if (gimple_vdef (stmt))
			release_ssa_name (gimple_vdef (stmt));
		    }
		  break;

		CASE_FLT_FN (BUILT_IN_CABS):
		  arg0 = gimple_call_arg (stmt, 0);
		  loc = gimple_location (stmt);
		  result = gimple_expand_builtin_cabs (&gsi, loc, arg0);

		  if (result)
		    {
		      tree lhs = gimple_get_lhs (stmt);
		      gimple_assign new_stmt = gimple_build_assign (lhs, result);
		      gimple_set_location (new_stmt, loc);
		      unlink_stmt_vdef (stmt);
		      gsi_replace (&gsi, new_stmt, true);
		      cleanup_eh = true;
		      if (gimple_vdef (stmt))
			release_ssa_name (gimple_vdef (stmt));
		    }
		  break;

		default:;
		}
	    }
	}
      if (cleanup_eh)
	cfg_changed |= gimple_purge_dead_eh_edges (bb);
    }

  statistics_counter_event (fun, "sincos statements inserted",
			    sincos_stats.inserted);

  free_dominance_info (CDI_DOMINATORS);
  return cfg_changed ? TODO_cleanup_cfg : 0;
}
} // anon namespace

gimple_opt_pass *
make_pass_cse_sincos (gcc::context *ctxt)
{
  return new pass_cse_sincos (ctxt);
}
/* A symbolic number is used to detect byte permutation and selection
   patterns.  Therefore the field N contains an artificial number
   consisting of octet sized markers:

     0       - target byte has the value 0
     FF      - target byte has an unknown value (eg. due to sign extension)
     1..size - marker value is the target byte index minus one.

   To detect permutations on memory sources (arrays and structures), a
   symbolic number is also associated with a base address (the array or
   structure the load is made from), an offset from the base address and a
   range which gives the difference between the highest and lowest accessed
   memory location to make such a symbolic number.  The range is thus
   different from size, which reflects the size of the type of the current
   expression.  Note that for non-memory sources, range holds the same
   value as size.

   For instance, for an array char a[], (short) a[0] | (short) a[3] would
   have a size of 2 but a range of 4 while (short) a[0] | ((short) a[0] << 1)
   would still have a size of 2 but this time a range of 1.  */
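
/* As a concrete illustration of the marker encoding: for a 4-byte
   expression computing a plain copy of a 4-byte value, N is 0x04030201
   (marker 1 in the lowest order byte through marker 4 in the highest),
   whereas a full byte swap of that value yields N == 0x01020304.  */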
struct symbolic_number {
  uint64_t n;
  tree type;
  tree base_addr;
  tree offset;
  HOST_WIDE_INT bytepos;
  tree alias_set;
  tree vuse;
  unsigned HOST_WIDE_INT range;
};
#define BITS_PER_MARKER 8
#define MARKER_MASK ((1 << BITS_PER_MARKER) - 1)
#define MARKER_BYTE_UNKNOWN MARKER_MASK
#define HEAD_MARKER(n, size) \
  ((n) & ((uint64_t) MARKER_MASK << (((size) - 1) * BITS_PER_MARKER)))
/* The number which the find_bswap_or_nop_1 result should match in
   order to have a nop.  The number is masked according to the size of
   the symbolic number before using it.  */
#define CMPNOP (sizeof (int64_t) < 8 ? 0 : \
  (uint64_t)0x08070605 << 32 | 0x04030201)

/* The number which the find_bswap_or_nop_1 result should match in
   order to have a byte swap.  The number is masked according to the
   size of the symbolic number before using it.  */
#define CMPXCHG (sizeof (int64_t) < 8 ? 0 : \
  (uint64_t)0x01020304 << 32 | 0x05060708)
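
/* For a symbolic number of size 4, for example, CMPNOP is masked down
   to 0x04030201 and CMPXCHG is shifted right to 0x01020304 before the
   comparison in find_bswap_or_nop below.  */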
/* Perform a SHIFT or ROTATE operation by COUNT bits on symbolic
   number N.  Return false if the requested operation is not permitted
   on a symbolic number.  */
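
/* For example, shifting the 4-byte symbolic number 0x04030201 left by
   8 bits yields 0x03020100: every marker moves up one byte and the
   lowest order byte becomes 0, i.e. known to be zero, mirroring the
   concrete operation x << 8.  */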
static inline bool
do_shift_rotate (enum tree_code code,
		 struct symbolic_number *n,
		 int count)
{
  int i, size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
  unsigned head_marker;

  if (count % BITS_PER_UNIT != 0)
    return false;
  count = (count / BITS_PER_UNIT) * BITS_PER_MARKER;

  /* Zero out the extra bits of N in order to avoid them being shifted
     into the significant bits.  */
  if (size < 64 / BITS_PER_MARKER)
    n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;

  switch (code)
    {
    case LSHIFT_EXPR:
      n->n <<= count;
      break;
    case RSHIFT_EXPR:
      head_marker = HEAD_MARKER (n->n, size);
      n->n >>= count;
      /* Arithmetic shift of signed type: result is dependent on the value.  */
      if (!TYPE_UNSIGNED (n->type) && head_marker)
	for (i = 0; i < count / BITS_PER_MARKER; i++)
	  n->n |= (uint64_t) MARKER_BYTE_UNKNOWN
		  << ((size - 1 - i) * BITS_PER_MARKER);
      break;
    case LROTATE_EXPR:
      n->n = (n->n << count) | (n->n >> ((size * BITS_PER_MARKER) - count));
      break;
    case RROTATE_EXPR:
      n->n = (n->n >> count) | (n->n << ((size * BITS_PER_MARKER) - count));
      break;
    default:
      return false;
    }

  /* Zero unused bits for size.  */
  if (size < 64 / BITS_PER_MARKER)
    n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
  return true;
}
/* Perform sanity checking for the symbolic number N and the gimple
   statement STMT.  */

static inline bool
verify_symbolic_number_p (struct symbolic_number *n, gimple stmt)
{
  tree lhs_type;

  lhs_type = gimple_expr_type (stmt);

  if (TREE_CODE (lhs_type) != INTEGER_TYPE)
    return false;

  if (TYPE_PRECISION (lhs_type) != TYPE_PRECISION (n->type))
    return false;

  return true;
}
/* Initialize the symbolic number N for the bswap pass from the base element
   SRC manipulated by the bitwise OR expression.  */

static bool
init_symbolic_number (struct symbolic_number *n, tree src)
{
  int size;

  n->base_addr = n->offset = n->alias_set = n->vuse = NULL_TREE;

  /* Set up the symbolic number N by setting each byte to a value between 1 and
     the byte size of rhs1.  The highest order byte is set to n->size and the
     lowest order byte to 1.  */
  n->type = TREE_TYPE (src);
  size = TYPE_PRECISION (n->type);
  if (size % BITS_PER_UNIT != 0)
    return false;
  size /= BITS_PER_UNIT;
  if (size > 64 / BITS_PER_MARKER)
    return false;
  n->range = size;
  n->n = CMPNOP;

  if (size < 64 / BITS_PER_MARKER)
    n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;

  return true;
}
/* Check if STMT might be a byte swap or a nop from a memory source and returns
   the answer.  If so, REF is that memory source and the base of the memory area
   accessed and the offset of the access from that base are recorded in N.  */

static bool
find_bswap_or_nop_load (gimple stmt, tree ref, struct symbolic_number *n)
{
  /* Leaf node is an array or component ref.  Memorize its base and
     offset from base to compare to other such leaf node.  */
  HOST_WIDE_INT bitsize, bitpos;
  enum machine_mode mode;
  int unsignedp, volatilep;
  tree offset, base_addr;

  if (!gimple_assign_load_p (stmt) || gimple_has_volatile_ops (stmt))
    return false;

  base_addr = get_inner_reference (ref, &bitsize, &bitpos, &offset, &mode,
				   &unsignedp, &volatilep, false);

  if (TREE_CODE (base_addr) == MEM_REF)
    {
      offset_int bit_offset = 0;
      tree off = TREE_OPERAND (base_addr, 1);

      if (!integer_zerop (off))
	{
	  offset_int boff, coff = mem_ref_offset (base_addr);
	  boff = wi::lshift (coff, LOG2_BITS_PER_UNIT);
	  bit_offset += boff;
	}

      base_addr = TREE_OPERAND (base_addr, 0);

      /* Avoid returning a negative bitpos as this may wreak havoc later.  */
      if (wi::neg_p (bit_offset))
	{
	  offset_int mask = wi::mask <offset_int> (LOG2_BITS_PER_UNIT, false);
	  offset_int tem = bit_offset.and_not (mask);
	  /* TEM is the bitpos rounded to BITS_PER_UNIT towards -Inf.
	     Subtract it from BIT_OFFSET and add it (scaled) to OFFSET.  */
	  bit_offset -= tem;
	  tem = wi::arshift (tem, LOG2_BITS_PER_UNIT);
	  if (offset)
	    offset = size_binop (PLUS_EXPR, offset,
				 wide_int_to_tree (sizetype, tem));
	  else
	    offset = wide_int_to_tree (sizetype, tem);
	}

      bitpos += bit_offset.to_shwi ();
    }

  if (bitpos % BITS_PER_UNIT)
    return false;
  if (bitsize % BITS_PER_UNIT)
    return false;

  if (!init_symbolic_number (n, ref))
    return false;
  n->base_addr = base_addr;
  n->offset = offset;
  n->bytepos = bitpos / BITS_PER_UNIT;
  n->alias_set = reference_alias_ptr_type (ref);
  n->vuse = gimple_vuse (stmt);
  return true;
}
/* find_bswap_or_nop_1 invokes itself recursively with N and tries to perform
   the operation given by the rhs of STMT on the result.  If the operation
   could successfully be executed the function returns a gimple stmt whose
   rhs's first tree is the expression of the source operand and NULL
   otherwise.  */

static gimple
find_bswap_or_nop_1 (gimple stmt, struct symbolic_number *n, int limit)
{
  enum tree_code code;
  tree rhs1, rhs2 = NULL;
  gimple rhs1_stmt, rhs2_stmt, source_stmt1;
  enum gimple_rhs_class rhs_class;

  if (!limit || !is_gimple_assign (stmt))
    return NULL;

  rhs1 = gimple_assign_rhs1 (stmt);

  if (find_bswap_or_nop_load (stmt, rhs1, n))
    return stmt;

  if (TREE_CODE (rhs1) != SSA_NAME)
    return NULL;

  code = gimple_assign_rhs_code (stmt);
  rhs_class = gimple_assign_rhs_class (stmt);
  rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);

  if (rhs_class == GIMPLE_BINARY_RHS)
    rhs2 = gimple_assign_rhs2 (stmt);

  /* Handle unary rhs and binary rhs with integer constants as second
     operand.  */

  if (rhs_class == GIMPLE_UNARY_RHS
      || (rhs_class == GIMPLE_BINARY_RHS
	  && TREE_CODE (rhs2) == INTEGER_CST))
    {
      if (code != BIT_AND_EXPR
	  && code != LSHIFT_EXPR
	  && code != RSHIFT_EXPR
	  && code != LROTATE_EXPR
	  && code != RROTATE_EXPR
	  && code != NOP_EXPR
	  && code != CONVERT_EXPR)
	return NULL;

      source_stmt1 = find_bswap_or_nop_1 (rhs1_stmt, n, limit - 1);

      /* If find_bswap_or_nop_1 returned NULL, STMT is a leaf node and
	 we have to initialize the symbolic number.  */
      if (!source_stmt1)
	{
	  if (gimple_assign_load_p (stmt)
	      || !init_symbolic_number (n, rhs1))
	    return NULL;
	  source_stmt1 = stmt;
	}

      switch (code)
	{
	case BIT_AND_EXPR:
	  {
	    int i, size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
	    uint64_t val = int_cst_value (rhs2), mask = 0;
	    uint64_t tmp = (1 << BITS_PER_UNIT) - 1;

	    /* Only constants masking full bytes are allowed.  */
	    for (i = 0; i < size; i++, tmp <<= BITS_PER_UNIT)
	      if ((val & tmp) != 0 && (val & tmp) != tmp)
		return NULL;
	      else if (val & tmp)
		mask |= (uint64_t) MARKER_MASK << (i * BITS_PER_MARKER);

	    n->n &= mask;
	  }
	  break;
	case LSHIFT_EXPR:
	case RSHIFT_EXPR:
	case LROTATE_EXPR:
	case RROTATE_EXPR:
	  if (!do_shift_rotate (code, n, (int)TREE_INT_CST_LOW (rhs2)))
	    return NULL;
	  break;
	CASE_CONVERT:
	  {
	    int i, type_size, old_type_size;
	    tree type;

	    type = gimple_expr_type (stmt);
	    type_size = TYPE_PRECISION (type);
	    if (type_size % BITS_PER_UNIT != 0)
	      return NULL;
	    type_size /= BITS_PER_UNIT;
	    if (type_size > 64 / BITS_PER_MARKER)
	      return NULL;

	    /* Sign extension: result is dependent on the value.  */
	    old_type_size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
	    if (!TYPE_UNSIGNED (n->type) && type_size > old_type_size
		&& HEAD_MARKER (n->n, old_type_size))
	      for (i = 0; i < type_size - old_type_size; i++)
		n->n |= (uint64_t) MARKER_BYTE_UNKNOWN
			<< ((type_size - 1 - i) * BITS_PER_MARKER);

	    if (type_size < 64 / BITS_PER_MARKER)
	      {
		/* If STMT casts to a smaller type mask out the bits not
		   belonging to the target type.  */
		n->n &= ((uint64_t) 1 << (type_size * BITS_PER_MARKER)) - 1;
	      }
	    n->type = type;
	    if (!n->base_addr)
	      n->range = type_size;
	  }
	  break;
	default:
	  return NULL;
	};
      return verify_symbolic_number_p (n, stmt) ? source_stmt1 : NULL;
    }

  /* Handle binary rhs.  */

  if (rhs_class == GIMPLE_BINARY_RHS)
    {
      int i, size;
      struct symbolic_number n1, n2;
      uint64_t mask;
      gimple source_stmt2;

      if (code != BIT_IOR_EXPR)
	return NULL;

      if (TREE_CODE (rhs2) != SSA_NAME)
	return NULL;

      rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);

      switch (code)
	{
	case BIT_IOR_EXPR:
	  source_stmt1 = find_bswap_or_nop_1 (rhs1_stmt, &n1, limit - 1);

	  if (!source_stmt1)
	    return NULL;

	  source_stmt2 = find_bswap_or_nop_1 (rhs2_stmt, &n2, limit - 1);

	  if (!source_stmt2)
	    return NULL;

	  if (TYPE_PRECISION (n1.type) != TYPE_PRECISION (n2.type))
	    return NULL;

	  if (!n1.vuse != !n2.vuse ||
	      (n1.vuse && !operand_equal_p (n1.vuse, n2.vuse, 0)))
	    return NULL;

	  if (gimple_assign_rhs1 (source_stmt1)
	      != gimple_assign_rhs1 (source_stmt2))
	    {
	      int64_t inc;
	      HOST_WIDE_INT off_sub;
	      struct symbolic_number *n_ptr;

	      if (!n1.base_addr || !n2.base_addr
		  || !operand_equal_p (n1.base_addr, n2.base_addr, 0))
		return NULL;
	      if (!n1.offset != !n2.offset ||
		  (n1.offset && !operand_equal_p (n1.offset, n2.offset, 0)))
		return NULL;

	      /* We swap n1 with n2 to have n1 < n2.  */
	      if (n2.bytepos < n1.bytepos)
		{
		  struct symbolic_number tmpn;

		  tmpn = n2;
		  n2 = n1;
		  n1 = tmpn;
		  source_stmt1 = source_stmt2;
		}

	      off_sub = n2.bytepos - n1.bytepos;

	      /* Check that the range of memory covered can be represented by
		 a symbolic number.  */
	      if (off_sub + n2.range > 64 / BITS_PER_MARKER)
		return NULL;
	      n->range = n2.range + off_sub;

	      /* Reinterpret byte marks in symbolic number holding the value of
		 bigger weight according to target endianness.  */
	      inc = BYTES_BIG_ENDIAN ? off_sub + n2.range - n1.range : off_sub;
	      size = TYPE_PRECISION (n1.type) / BITS_PER_UNIT;
	      if (BYTES_BIG_ENDIAN)
		n_ptr = &n1;
	      else
		n_ptr = &n2;
	      for (i = 0; i < size; i++, inc <<= BITS_PER_MARKER)
		{
		  unsigned marker =
		    (n_ptr->n >> (i * BITS_PER_MARKER)) & MARKER_MASK;
		  if (marker && marker != MARKER_BYTE_UNKNOWN)
		    n_ptr->n += inc;
		}
	    }
	  else
	    n->range = n1.range;

	  if (!n1.alias_set
	      || alias_ptr_types_compatible_p (n1.alias_set, n2.alias_set))
	    n->alias_set = n1.alias_set;
	  else
	    n->alias_set = ptr_type_node;
	  n->vuse = n1.vuse;
	  n->base_addr = n1.base_addr;
	  n->offset = n1.offset;
	  n->bytepos = n1.bytepos;
	  n->type = n1.type;
	  size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
	  for (i = 0, mask = MARKER_MASK; i < size;
	       i++, mask <<= BITS_PER_MARKER)
	    {
	      uint64_t masked1, masked2;

	      masked1 = n1.n & mask;
	      masked2 = n2.n & mask;
	      if (masked1 && masked2 && masked1 != masked2)
		return NULL;
	    }
	  n->n = n1.n | n2.n;

	  if (!verify_symbolic_number_p (n, stmt))
	    return NULL;

	  break;
	default:
	  return NULL;
	}
      return source_stmt1;
    }
  return NULL;
}
/* Check if STMT completes a bswap implementation or a read in a given
   endianness consisting of ORs, SHIFTs and ANDs and sets *BSWAP
   accordingly.  It also sets N to represent the kind of operations
   performed: size of the resulting expression and whether it works on
   a memory source, and if so alias-set and vuse.  Finally, the
   function returns a stmt whose rhs's first tree is the source
   expression.  */

static gimple
find_bswap_or_nop (gimple stmt, struct symbolic_number *n, bool *bswap)
{
  /* The number which the find_bswap_or_nop_1 result should match in order
     to have a full byte swap.  The number is shifted to the right
     according to the size of the symbolic number before using it.  */
  uint64_t cmpxchg = CMPXCHG;
  uint64_t cmpnop = CMPNOP;

  gimple source_stmt;
  int limit;

  /* The last parameter determines the depth search limit.  It usually
     correlates directly to the number n of bytes to be touched.  We
     increase that number by log2(n) + 1 here in order to also
     cover signed -> unsigned conversions of the src operand as can be seen
     in libgcc, and for initial shift/and operation of the src operand.  */
  limit = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (gimple_expr_type (stmt)));
  limit += 1 + (int) ceil_log2 ((unsigned HOST_WIDE_INT) limit);
  source_stmt = find_bswap_or_nop_1 (stmt, n, limit);

  if (!source_stmt)
    return NULL;

  /* Find real size of result (highest non-zero byte).  */
  if (n->base_addr)
    {
      int rsize;
      uint64_t tmpn;

      for (tmpn = n->n, rsize = 0; tmpn; tmpn >>= BITS_PER_MARKER, rsize++);
      n->range = rsize;
    }

  /* Zero out the extra bits of N and CMP*.  */
  if (n->range < (int) sizeof (int64_t))
    {
      uint64_t mask;

      mask = ((uint64_t) 1 << (n->range * BITS_PER_MARKER)) - 1;
      cmpxchg >>= (64 / BITS_PER_MARKER - n->range) * BITS_PER_MARKER;
      cmpnop &= mask;
    }

  /* A complete byte swap should make the symbolic number start with
     the largest digit in the highest order byte.  An unchanged symbolic
     number indicates a read with the same endianness as the target
     architecture.  */
  if (n->n == cmpnop)
    *bswap = false;
  else if (n->n == cmpxchg)
    *bswap = true;
  else
    return NULL;

  /* Useless bit manipulation performed by code.  */
  if (!n->base_addr && n->n == cmpnop)
    return NULL;

  n->range *= BITS_PER_UNIT;
  return source_stmt;
}
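/* To illustrate the comparison against CMPNOP and CMPXCHG above (a
   sketch, assuming 8-bit units): for a 4-byte symbolic number the
   shifted constants are 0x04030201 (markers in memory order, a nop)
   and 0x01020304 (markers reversed, a byte swap).  A computation such
   as

     res = (x >> 24) | ((x >> 8) & 0xff00)
	   | ((x << 8) & 0xff0000) | (x << 24);

   matches the byte-swapped pattern and sets *BSWAP to true, while a
   sequence reassembling the bytes in their original order matches
   CMPNOP and sets *BSWAP to false.  */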
namespace {

const pass_data pass_data_optimize_bswap =
{
  GIMPLE_PASS, /* type */
  "bswap", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_optimize_bswap : public gimple_opt_pass
{
public:
  pass_optimize_bswap (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_optimize_bswap, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *)
    {
      return flag_expensive_optimizations && optimize;
    }

  virtual unsigned int execute (function *);

}; // class pass_optimize_bswap
/* Perform the bswap optimization: replace the statement CUR_STMT at
   GSI with a load of type, VUSE and set-alias as described by N if a
   memory source is involved (N->base_addr is non null), followed by
   the builtin bswap invocation in FNDECL if BSWAP is true.  SRC_STMT
   gives where the replacement should be made.  It also gives the
   source on which CUR_STMT is operating via its rhs's first tree and
   N->range gives the size of the expression involved for maintaining
   precision.  */
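/* For instance (simplified GIMPLE, names illustrative), a detected
   32-bit byte-swapped memory source is rewritten as

     load_dst_1 = MEM[(uint32_t *) &src];
     x_2 = __builtin_bswap32 (load_dst_1);

   replacing the OR/SHIFT/AND sequence rooted at CUR_STMT; when BSWAP
   is false only the load remains.  */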
static bool
bswap_replace (gimple cur_stmt, gimple_stmt_iterator gsi, gimple src_stmt,
	       tree fndecl, tree bswap_type, tree load_type,
	       struct symbolic_number *n, bool bswap)
{
  tree src, tmp, tgt;
  gimple call;

  src = gimple_assign_rhs1 (src_stmt);
  tgt = gimple_assign_lhs (cur_stmt);

  /* Need to load the value from memory first.  */
  if (n->base_addr)
    {
      gimple_stmt_iterator gsi_ins = gsi_for_stmt (src_stmt);
      tree addr_expr, addr_tmp, val_expr, val_tmp;
      tree load_offset_ptr, aligned_load_type;
      gimple addr_stmt, load_stmt;
      unsigned align;

      align = get_object_alignment (src);
      if (bswap
	  && align < GET_MODE_ALIGNMENT (TYPE_MODE (load_type))
	  && SLOW_UNALIGNED_ACCESS (TYPE_MODE (load_type), align))
	return false;

      /* Move cur_stmt just before one of the loads of the original
	 sequence to ensure it has the same VUSE.  */
      gsi_move_before (&gsi, &gsi_ins);
      gsi = gsi_for_stmt (cur_stmt);

      /* Compute address to load from and cast according to the size
	 of the load.  */
      addr_expr = build_fold_addr_expr (unshare_expr (src));
      if (is_gimple_min_invariant (addr_expr))
	addr_tmp = addr_expr;
      else
	{
	  addr_tmp = make_temp_ssa_name (TREE_TYPE (addr_expr), NULL,
					 "load_src");
	  addr_stmt = gimple_build_assign (addr_tmp, addr_expr);
	  gsi_insert_before (&gsi, addr_stmt, GSI_SAME_STMT);
	}

      /* Perform the load.  */
      aligned_load_type = load_type;
      if (align < TYPE_ALIGN (load_type))
	aligned_load_type = build_aligned_type (load_type, align);
      load_offset_ptr = build_int_cst (n->alias_set, 0);
      val_expr = fold_build2 (MEM_REF, aligned_load_type, addr_tmp,
			      load_offset_ptr);

      if (!bswap)
	{
	  if (n->range == 16)
	    nop_stats.found_16bit++;
	  else if (n->range == 32)
	    nop_stats.found_32bit++;
	  else
	    {
	      gcc_assert (n->range == 64);
	      nop_stats.found_64bit++;
	    }

	  /* Convert the result of load if necessary.  */
	  if (!useless_type_conversion_p (TREE_TYPE (tgt), load_type))
	    {
	      val_tmp = make_temp_ssa_name (aligned_load_type, NULL,
					    "load_dst");
	      load_stmt = gimple_build_assign (val_tmp, val_expr);
	      gimple_set_vuse (load_stmt, n->vuse);
	      gsi_insert_before (&gsi, load_stmt, GSI_SAME_STMT);
	      gimple_assign_set_rhs_with_ops_1 (&gsi, NOP_EXPR, val_tmp,
						NULL_TREE, NULL_TREE);
	    }
	  else
	    {
	      gimple_assign_set_rhs_with_ops_1 (&gsi, MEM_REF, val_expr,
						NULL_TREE, NULL_TREE);
	      gimple_set_vuse (cur_stmt, n->vuse);
	    }
	  update_stmt (cur_stmt);

	  if (dump_file)
	    {
	      fprintf (dump_file,
		       "%d bit load in target endianness found at: ",
		       (int) n->range);
	      print_gimple_stmt (dump_file, cur_stmt, 0, 0);
	    }
	  return true;
	}
      else
	{
	  val_tmp = make_temp_ssa_name (aligned_load_type, NULL, "load_dst");
	  load_stmt = gimple_build_assign (val_tmp, val_expr);
	  gimple_set_vuse (load_stmt, n->vuse);
	  gsi_insert_before (&gsi, load_stmt, GSI_SAME_STMT);
	  src = val_tmp;
	}
    }

  if (n->range == 16)
    bswap_stats.found_16bit++;
  else if (n->range == 32)
    bswap_stats.found_32bit++;
  else
    {
      gcc_assert (n->range == 64);
      bswap_stats.found_64bit++;
    }

  tmp = src;

  /* Convert the src expression if necessary.  */
  if (!useless_type_conversion_p (TREE_TYPE (tmp), bswap_type))
    {
      gimple convert_stmt;

      tmp = make_temp_ssa_name (bswap_type, NULL, "bswapsrc");
      convert_stmt = gimple_build_assign_with_ops (NOP_EXPR, tmp, src, NULL);
      gsi_insert_before (&gsi, convert_stmt, GSI_SAME_STMT);
    }

  call = gimple_build_call (fndecl, 1, tmp);

  tmp = tgt;

  /* Convert the result if necessary.  */
  if (!useless_type_conversion_p (TREE_TYPE (tgt), bswap_type))
    {
      gimple convert_stmt;

      tmp = make_temp_ssa_name (bswap_type, NULL, "bswapdst");
      convert_stmt = gimple_build_assign_with_ops (NOP_EXPR, tgt, tmp, NULL);
      gsi_insert_after (&gsi, convert_stmt, GSI_SAME_STMT);
    }

  gimple_call_set_lhs (call, tmp);

  if (dump_file)
    {
      fprintf (dump_file, "%d bit bswap implementation found at: ",
	       (int) n->range);
      print_gimple_stmt (dump_file, cur_stmt, 0, 0);
    }

  gsi_insert_after (&gsi, call, GSI_SAME_STMT);
  gsi_remove (&gsi, true);

  return true;
}
/* Find manual byte swap implementations as well as load in a given
   endianness.  Byte swaps are turned into a bswap builtin invocation
   while endian loads are converted to bswap builtin invocation or
   simple load according to the target endianness.  */
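/* For example (illustrative source), on a little-endian target

     uint32_t v = p[3] | (p[2] << 8) | (p[1] << 16) | (p[0] << 24);

   is detected as a big-endian read: it becomes a single 32-bit load
   followed by a __builtin_bswap32 call, while the same bytes
   reassembled in target order collapse to the plain load.  */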
unsigned int
pass_optimize_bswap::execute (function *fun)
{
  basic_block bb;
  bool bswap16_p, bswap32_p, bswap64_p;
  bool changed = false;
  tree bswap16_type = NULL_TREE, bswap32_type = NULL_TREE,
       bswap64_type = NULL_TREE;

  if (BITS_PER_UNIT != 8)
    return 0;

  bswap16_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP16)
	       && optab_handler (bswap_optab, HImode) != CODE_FOR_nothing);
  bswap32_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP32)
	       && optab_handler (bswap_optab, SImode) != CODE_FOR_nothing);
  bswap64_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP64)
	       && (optab_handler (bswap_optab, DImode) != CODE_FOR_nothing
		   || (bswap32_p && word_mode == SImode)));

  /* Determine the argument type of the builtins.  The code later on
     assumes that the return and argument type are the same.  */
  if (bswap16_p)
    {
      tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP16);
      bswap16_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
    }

  if (bswap32_p)
    {
      tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
      bswap32_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
    }

  if (bswap64_p)
    {
      tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
      bswap64_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
    }

  memset (&nop_stats, 0, sizeof (nop_stats));
  memset (&bswap_stats, 0, sizeof (bswap_stats));

  FOR_EACH_BB_FN (bb, fun)
    {
      gimple_stmt_iterator gsi;

      /* We do a reverse scan for bswap patterns to make sure we get the
	 widest match.  As bswap pattern matching doesn't handle
	 previously inserted smaller bswap replacements as sub-
	 patterns, the wider variant wouldn't be detected.  */
      for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
	{
	  gimple src_stmt, cur_stmt = gsi_stmt (gsi);
	  tree fndecl = NULL_TREE, bswap_type = NULL_TREE, load_type;
	  struct symbolic_number n;
	  bool bswap;

	  if (!is_gimple_assign (cur_stmt)
	      || gimple_assign_rhs_code (cur_stmt) != BIT_IOR_EXPR)
	    continue;

	  src_stmt = find_bswap_or_nop (cur_stmt, &n, &bswap);

	  if (!src_stmt)
	    continue;

	  switch (n.range)
	    {
	    case 16:
	      load_type = uint16_type_node;
	      if (bswap16_p)
		{
		  fndecl = builtin_decl_explicit (BUILT_IN_BSWAP16);
		  bswap_type = bswap16_type;
		}
	      break;
	    case 32:
	      load_type = uint32_type_node;
	      if (bswap32_p)
		{
		  fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
		  bswap_type = bswap32_type;
		}
	      break;
	    case 64:
	      load_type = uint64_type_node;
	      if (bswap64_p)
		{
		  fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
		  bswap_type = bswap64_type;
		}
	      break;
	    default:
	      continue;
	    }

	  if (bswap && !fndecl)
	    continue;

	  if (bswap_replace (cur_stmt, gsi, src_stmt, fndecl, bswap_type,
			     load_type, &n, bswap))
	    changed = true;
	}
    }

  statistics_counter_event (fun, "16-bit nop implementations found",
			    nop_stats.found_16bit);
  statistics_counter_event (fun, "32-bit nop implementations found",
			    nop_stats.found_32bit);
  statistics_counter_event (fun, "64-bit nop implementations found",
			    nop_stats.found_64bit);
  statistics_counter_event (fun, "16-bit bswap implementations found",
			    bswap_stats.found_16bit);
  statistics_counter_event (fun, "32-bit bswap implementations found",
			    bswap_stats.found_32bit);
  statistics_counter_event (fun, "64-bit bswap implementations found",
			    bswap_stats.found_64bit);

  return (changed ? TODO_update_ssa : 0);
}
} // anon namespace

gimple_opt_pass *
make_pass_optimize_bswap (gcc::context *ctxt)
{
  return new pass_optimize_bswap (ctxt);
}
/* Return true if stmt is a type conversion operation that can be stripped
   when used in a widening multiply operation.  */
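/* For instance (an illustrative sketch), given

     short a, b;
     long long r = (long long) (int) a * b;

   the intermediate widening of A from short to int preserves
   signedness and does not narrow the range, so that conversion can be
   stripped: the widening multiply itself recreates the extension as a
   by-product.  */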
static bool
widening_mult_conversion_strippable_p (tree result_type, gimple stmt)
{
  enum tree_code rhs_code = gimple_assign_rhs_code (stmt);

  if (TREE_CODE (result_type) == INTEGER_TYPE)
    {
      tree op_type;
      tree inner_op_type;

      if (!CONVERT_EXPR_CODE_P (rhs_code))
	return false;

      op_type = TREE_TYPE (gimple_assign_lhs (stmt));

      /* If the type of OP has the same precision as the result, then
	 we can strip this conversion.  The multiply operation will be
	 selected to create the correct extension as a by-product.  */
      if (TYPE_PRECISION (result_type) == TYPE_PRECISION (op_type))
	return true;

      /* We can also strip a conversion if it preserves the signed-ness of
	 the operation and doesn't narrow the range.  */
      inner_op_type = TREE_TYPE (gimple_assign_rhs1 (stmt));

      /* If the inner-most type is unsigned, then we can strip any
	 intermediate widening operation.  If it's signed, then the
	 intermediate widening operation must also be signed.  */
      if ((TYPE_UNSIGNED (inner_op_type)
	   || TYPE_UNSIGNED (op_type) == TYPE_UNSIGNED (inner_op_type))
	  && TYPE_PRECISION (op_type) > TYPE_PRECISION (inner_op_type))
	return true;

      return false;
    }

  return rhs_code == FIXED_CONVERT_EXPR;
}
/* Return true if RHS is a suitable operand for a widening multiplication,
   assuming a target type of TYPE.
   There are two cases:

     - RHS makes some value at least twice as wide.  Store that value
       in *NEW_RHS_OUT if so, and store its type in *TYPE_OUT.

     - RHS is an integer constant.  Store that value in *NEW_RHS_OUT if so,
       but leave *TYPE_OUT untouched.  */
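/* E.g. (illustrative), with TYPE being long long int: an RHS defined
   by r = (long long int) s_1, where s_1 is a 32-bit int, stores s_1
   in *NEW_RHS_OUT and int in *TYPE_OUT; an RHS that is the constant
   1000 is stored in *NEW_RHS_OUT with *TYPE_OUT cleared, letting the
   caller derive the type from the other operand.  */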
static bool
is_widening_mult_rhs_p (tree type, tree rhs, tree *type_out,
			tree *new_rhs_out)
{
  gimple stmt;
  tree type1, rhs1;

  if (TREE_CODE (rhs) == SSA_NAME)
    {
      stmt = SSA_NAME_DEF_STMT (rhs);
      if (is_gimple_assign (stmt))
	{
	  if (! widening_mult_conversion_strippable_p (type, stmt))
	    rhs1 = rhs;
	  else
	    {
	      rhs1 = gimple_assign_rhs1 (stmt);

	      if (TREE_CODE (rhs1) == INTEGER_CST)
		{
		  *new_rhs_out = rhs1;
		  *type_out = NULL;
		  return true;
		}
	    }
	}
      else
	rhs1 = rhs;

      type1 = TREE_TYPE (rhs1);

      if (TREE_CODE (type1) != TREE_CODE (type)
	  || TYPE_PRECISION (type1) * 2 > TYPE_PRECISION (type))
	return false;

      *new_rhs_out = rhs1;
      *type_out = type1;
      return true;
    }

  if (TREE_CODE (rhs) == INTEGER_CST)
    {
      *new_rhs_out = rhs;
      *type_out = NULL;
      return true;
    }

  return false;
}
/* Return true if STMT performs a widening multiplication, assuming the
   output type is TYPE.  If so, store the unwidened types of the operands
   in *TYPE1_OUT and *TYPE2_OUT respectively.  Also fill *RHS1_OUT and
   *RHS2_OUT such that converting those operands to types *TYPE1_OUT
   and *TYPE2_OUT would give the operands of the multiplication.  */
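/* For example (illustrative): for

     int a, b;
     long long r = (long long) a * (long long) b;

   this returns true with *TYPE1_OUT = *TYPE2_OUT = int and *RHS1_OUT,
   *RHS2_OUT set to the unwidened SSA names of A and B.  */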
static bool
is_widening_mult_p (gimple stmt,
		    tree *type1_out, tree *rhs1_out,
		    tree *type2_out, tree *rhs2_out)
{
  tree type = TREE_TYPE (gimple_assign_lhs (stmt));

  if (TREE_CODE (type) != INTEGER_TYPE
      && TREE_CODE (type) != FIXED_POINT_TYPE)
    return false;

  if (!is_widening_mult_rhs_p (type, gimple_assign_rhs1 (stmt), type1_out,
			       rhs1_out))
    return false;

  if (!is_widening_mult_rhs_p (type, gimple_assign_rhs2 (stmt), type2_out,
			       rhs2_out))
    return false;

  if (*type1_out == NULL)
    {
      if (*type2_out == NULL || !int_fits_type_p (*rhs1_out, *type2_out))
	return false;
      *type1_out = *type2_out;
    }

  if (*type2_out == NULL)
    {
      if (!int_fits_type_p (*rhs2_out, *type1_out))
	return false;
      *type2_out = *type1_out;
    }

  /* Ensure that the larger of the two operands comes first.  */
  if (TYPE_PRECISION (*type1_out) < TYPE_PRECISION (*type2_out))
    {
      tree tmp;

      tmp = *type1_out;
      *type1_out = *type2_out;
      *type2_out = tmp;
      tmp = *rhs1_out;
      *rhs1_out = *rhs2_out;
      *rhs2_out = tmp;
    }

  return true;
}
/* Process a single gimple statement STMT, which has a MULT_EXPR as
   its rhs, and try to convert it into a WIDEN_MULT_EXPR.  The return
   value is true iff we converted the statement.  */
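/* Sketch of the effect (simplified GIMPLE dump syntax):

     _1 = (long long int) a_2;
     _3 = (long long int) b_4;
     r_5 = _1 * _3;

   becomes

     r_5 = a_2 w* b_4;

   i.e. a WIDEN_MULT_EXPR on the narrow operands, provided the target
   implements a matching [us]mul_widen optab.  */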
static bool
convert_mult_to_widen (gimple stmt, gimple_stmt_iterator *gsi)
{
  tree lhs, rhs1, rhs2, type, type1, type2;
  enum insn_code handler;
  enum machine_mode to_mode, from_mode, actual_mode;
  optab op;
  int actual_precision;
  location_t loc = gimple_location (stmt);
  bool from_unsigned1, from_unsigned2;

  lhs = gimple_assign_lhs (stmt);
  type = TREE_TYPE (lhs);
  if (TREE_CODE (type) != INTEGER_TYPE)
    return false;

  if (!is_widening_mult_p (stmt, &type1, &rhs1, &type2, &rhs2))
    return false;

  to_mode = TYPE_MODE (type);
  from_mode = TYPE_MODE (type1);
  from_unsigned1 = TYPE_UNSIGNED (type1);
  from_unsigned2 = TYPE_UNSIGNED (type2);

  if (from_unsigned1 && from_unsigned2)
    op = umul_widen_optab;
  else if (!from_unsigned1 && !from_unsigned2)
    op = smul_widen_optab;
  else
    op = usmul_widen_optab;

  handler = find_widening_optab_handler_and_mode (op, to_mode, from_mode,
						  0, &actual_mode);

  if (handler == CODE_FOR_nothing)
    {
      if (op != smul_widen_optab)
	{
	  /* We can use a signed multiply with unsigned types as long as
	     there is a wider mode to use, or it is the smaller of the two
	     types that is unsigned.  Note that type1 >= type2, always.  */
	  if ((TYPE_UNSIGNED (type1)
	       && TYPE_PRECISION (type1) == GET_MODE_PRECISION (from_mode))
	      || (TYPE_UNSIGNED (type2)
		  && TYPE_PRECISION (type2) == GET_MODE_PRECISION (from_mode)))
	    {
	      from_mode = GET_MODE_WIDER_MODE (from_mode);
	      if (GET_MODE_SIZE (to_mode) <= GET_MODE_SIZE (from_mode))
		return false;
	    }

	  op = smul_widen_optab;
	  handler = find_widening_optab_handler_and_mode (op, to_mode,
							  from_mode, 0,
							  &actual_mode);

	  if (handler == CODE_FOR_nothing)
	    return false;

	  from_unsigned1 = from_unsigned2 = false;
	}
      else
	return false;
    }

  /* Ensure that the inputs to the handler are in the correct precision
     for the opcode.  This will be the full mode size.  */
  actual_precision = GET_MODE_PRECISION (actual_mode);
  if (2 * actual_precision > TYPE_PRECISION (type))
    return false;
  if (actual_precision != TYPE_PRECISION (type1)
      || from_unsigned1 != TYPE_UNSIGNED (type1))
    rhs1 = build_and_insert_cast (gsi, loc,
				  build_nonstandard_integer_type
				    (actual_precision, from_unsigned1), rhs1);
  if (actual_precision != TYPE_PRECISION (type2)
      || from_unsigned2 != TYPE_UNSIGNED (type2))
    rhs2 = build_and_insert_cast (gsi, loc,
				  build_nonstandard_integer_type
				    (actual_precision, from_unsigned2), rhs2);

  /* Handle constants.  */
  if (TREE_CODE (rhs1) == INTEGER_CST)
    rhs1 = fold_convert (type1, rhs1);
  if (TREE_CODE (rhs2) == INTEGER_CST)
    rhs2 = fold_convert (type2, rhs2);

  gimple_assign_set_rhs1 (stmt, rhs1);
  gimple_assign_set_rhs2 (stmt, rhs2);
  gimple_assign_set_rhs_code (stmt, WIDEN_MULT_EXPR);
  update_stmt (stmt);
  widen_mul_stats.widen_mults_inserted++;
  return true;
}
/* Process a single gimple statement STMT, which is found at the
   iterator GSI and has either a PLUS_EXPR or a MINUS_EXPR as its
   rhs (given by CODE), and try to convert it into a
   WIDEN_MULT_PLUS_EXPR or a WIDEN_MULT_MINUS_EXPR.  The return value
   is true iff we converted the statement.  */
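/* Sketch of the effect (simplified GIMPLE dump syntax): a widening
   multiply feeding an accumulation

     _1 = a_2 w* b_3;
     acc_6 = _1 + acc_5;

   is rewritten as

     acc_6 = WIDEN_MULT_PLUS_EXPR <a_2, b_3, acc_5>;

   so that a single multiply-and-accumulate instruction can be
   selected.  */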
static bool
convert_plusminus_to_widen (gimple_stmt_iterator *gsi, gimple stmt,
			    enum tree_code code)
{
  gimple rhs1_stmt = NULL, rhs2_stmt = NULL;
  gimple conv1_stmt = NULL, conv2_stmt = NULL, conv_stmt;
  tree type, type1, type2, optype;
  tree lhs, rhs1, rhs2, mult_rhs1, mult_rhs2, add_rhs;
  enum tree_code rhs1_code = ERROR_MARK, rhs2_code = ERROR_MARK;
  optab this_optab;
  enum tree_code wmult_code;
  enum insn_code handler;
  enum machine_mode to_mode, from_mode, actual_mode;
  location_t loc = gimple_location (stmt);
  int actual_precision;
  bool from_unsigned1, from_unsigned2;

  lhs = gimple_assign_lhs (stmt);
  type = TREE_TYPE (lhs);
  if (TREE_CODE (type) != INTEGER_TYPE
      && TREE_CODE (type) != FIXED_POINT_TYPE)
    return false;

  if (code == MINUS_EXPR)
    wmult_code = WIDEN_MULT_MINUS_EXPR;
  else
    wmult_code = WIDEN_MULT_PLUS_EXPR;

  rhs1 = gimple_assign_rhs1 (stmt);
  rhs2 = gimple_assign_rhs2 (stmt);

  if (TREE_CODE (rhs1) == SSA_NAME)
    {
      rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
      if (is_gimple_assign (rhs1_stmt))
	rhs1_code = gimple_assign_rhs_code (rhs1_stmt);
    }

  if (TREE_CODE (rhs2) == SSA_NAME)
    {
      rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
      if (is_gimple_assign (rhs2_stmt))
	rhs2_code = gimple_assign_rhs_code (rhs2_stmt);
    }

  /* Allow for one conversion statement between the multiply
     and addition/subtraction statement.  If there are more than
     one conversions then we assume they would invalidate this
     transformation.  If that's not the case then they should have
     been folded before now.  */
  if (CONVERT_EXPR_CODE_P (rhs1_code))
    {
      conv1_stmt = rhs1_stmt;
      rhs1 = gimple_assign_rhs1 (rhs1_stmt);
      if (TREE_CODE (rhs1) == SSA_NAME)
	{
	  rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
	  if (is_gimple_assign (rhs1_stmt))
	    rhs1_code = gimple_assign_rhs_code (rhs1_stmt);
	}
      else
	return false;
    }
  if (CONVERT_EXPR_CODE_P (rhs2_code))
    {
      conv2_stmt = rhs2_stmt;
      rhs2 = gimple_assign_rhs1 (rhs2_stmt);
      if (TREE_CODE (rhs2) == SSA_NAME)
	{
	  rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
	  if (is_gimple_assign (rhs2_stmt))
	    rhs2_code = gimple_assign_rhs_code (rhs2_stmt);
	}
      else
	return false;
    }

  /* If code is WIDEN_MULT_EXPR then it would seem unnecessary to call
     is_widening_mult_p, but we still need the rhs returns.

     It might also appear that it would be sufficient to use the existing
     operands of the widening multiply, but that would limit the choice of
     multiply-and-accumulate instructions.

     If the widened-multiplication result has more than one use, it is
     probably wiser not to do the conversion.  */
  if (code == PLUS_EXPR
      && (rhs1_code == MULT_EXPR || rhs1_code == WIDEN_MULT_EXPR))
    {
      if (!has_single_use (rhs1)
	  || !is_widening_mult_p (rhs1_stmt, &type1, &mult_rhs1,
				  &type2, &mult_rhs2))
	return false;
      add_rhs = rhs2;
      conv_stmt = conv1_stmt;
    }
  else if (rhs2_code == MULT_EXPR || rhs2_code == WIDEN_MULT_EXPR)
    {
      if (!has_single_use (rhs2)
	  || !is_widening_mult_p (rhs2_stmt, &type1, &mult_rhs1,
				  &type2, &mult_rhs2))
	return false;
      add_rhs = rhs1;
      conv_stmt = conv2_stmt;
    }
  else
    return false;

  to_mode = TYPE_MODE (type);
  from_mode = TYPE_MODE (type1);
  from_unsigned1 = TYPE_UNSIGNED (type1);
  from_unsigned2 = TYPE_UNSIGNED (type2);
  optype = type1;

  /* There's no such thing as a mixed sign madd yet, so use a wider mode.  */
  if (from_unsigned1 != from_unsigned2)
    {
      if (!INTEGRAL_TYPE_P (type))
	return false;

      /* We can use a signed multiply with unsigned types as long as
	 there is a wider mode to use, or it is the smaller of the two
	 types that is unsigned.  Note that type1 >= type2, always.  */
      if ((from_unsigned1
	   && TYPE_PRECISION (type1) == GET_MODE_PRECISION (from_mode))
	  || (from_unsigned2
	      && TYPE_PRECISION (type2) == GET_MODE_PRECISION (from_mode)))
	{
	  from_mode = GET_MODE_WIDER_MODE (from_mode);
	  if (GET_MODE_SIZE (from_mode) >= GET_MODE_SIZE (to_mode))
	    return false;
	}

      from_unsigned1 = from_unsigned2 = false;
      optype = build_nonstandard_integer_type (GET_MODE_PRECISION (from_mode),
					       false);
    }

  /* If there was a conversion between the multiply and addition
     then we need to make sure it fits a multiply-and-accumulate.
     There should be a single mode change which does not change the
     value.  */
  if (conv_stmt)
    {
      /* We use the original, unmodified data types for this.  */
      tree from_type = TREE_TYPE (gimple_assign_rhs1 (conv_stmt));
      tree to_type = TREE_TYPE (gimple_assign_lhs (conv_stmt));
      int data_size = TYPE_PRECISION (type1) + TYPE_PRECISION (type2);
      bool is_unsigned = TYPE_UNSIGNED (type1) && TYPE_UNSIGNED (type2);

      if (TYPE_PRECISION (from_type) > TYPE_PRECISION (to_type))
	{
	  /* Conversion is a truncate.  */
	  if (TYPE_PRECISION (to_type) < data_size)
	    return false;
	}
      else if (TYPE_PRECISION (from_type) < TYPE_PRECISION (to_type))
	{
	  /* Conversion is an extend.  Check it's the right sort.  */
	  if (TYPE_UNSIGNED (from_type) != is_unsigned
	      && !(is_unsigned && TYPE_PRECISION (from_type) > data_size))
	    return false;
	}
      /* else convert is a no-op for our purposes.  */
    }

  /* Verify that the machine can perform a widening multiply
     accumulate in this mode/signedness combination, otherwise
     this transformation is likely to pessimize code.  */
  this_optab = optab_for_tree_code (wmult_code, optype, optab_default);
  handler = find_widening_optab_handler_and_mode (this_optab, to_mode,
						  from_mode, 0, &actual_mode);

  if (handler == CODE_FOR_nothing)
    return false;

  /* Ensure that the inputs to the handler are in the correct precision
     for the opcode.  This will be the full mode size.  */
  actual_precision = GET_MODE_PRECISION (actual_mode);
  if (actual_precision != TYPE_PRECISION (type1)
      || from_unsigned1 != TYPE_UNSIGNED (type1))
    mult_rhs1 = build_and_insert_cast (gsi, loc,
				       build_nonstandard_integer_type
					 (actual_precision, from_unsigned1),
				       mult_rhs1);
  if (actual_precision != TYPE_PRECISION (type2)
      || from_unsigned2 != TYPE_UNSIGNED (type2))
    mult_rhs2 = build_and_insert_cast (gsi, loc,
				       build_nonstandard_integer_type
					 (actual_precision, from_unsigned2),
				       mult_rhs2);

  if (!useless_type_conversion_p (type, TREE_TYPE (add_rhs)))
    add_rhs = build_and_insert_cast (gsi, loc, type, add_rhs);

  /* Handle constants.  */
  if (TREE_CODE (mult_rhs1) == INTEGER_CST)
    mult_rhs1 = fold_convert (type1, mult_rhs1);
  if (TREE_CODE (mult_rhs2) == INTEGER_CST)
    mult_rhs2 = fold_convert (type2, mult_rhs2);

  gimple_assign_set_rhs_with_ops_1 (gsi, wmult_code, mult_rhs1, mult_rhs2,
				    add_rhs);
  update_stmt (gsi_stmt (*gsi));
  widen_mul_stats.maccs_inserted++;
  return true;
}
/* Combine the multiplication at MUL_STMT with operands MULOP1 and MULOP2
   with uses in additions and subtractions to form fused multiply-add
   operations.  Returns true if successful and MUL_STMT should be removed.  */
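/* For example (illustrative), with FP contraction enabled

     tmp = a * b;
     r1 = tmp + c;
     r2 = tmp - d;

   has both uses rewritten, to FMA_EXPR <a, b, c> and
   FMA_EXPR <a, b, -d>, after which the multiplication statement is
   dead; if any use cannot be turned into an FMA the whole
   transformation is abandoned.  */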
static bool
convert_mult_to_fma (gimple mul_stmt, tree op1, tree op2)
{
  tree mul_result = gimple_get_lhs (mul_stmt);
  tree type = TREE_TYPE (mul_result);
  gimple use_stmt, neguse_stmt;
  gimple fma_stmt;
  use_operand_p use_p;
  imm_use_iterator imm_iter;

  if (FLOAT_TYPE_P (type)
      && flag_fp_contract_mode == FP_CONTRACT_OFF)
    return false;

  /* We don't want to do bitfield reduction ops.  */
  if (INTEGRAL_TYPE_P (type)
      && (TYPE_PRECISION (type)
	  != GET_MODE_PRECISION (TYPE_MODE (type))))
    return false;

  /* If the target doesn't support it, don't generate it.  We assume that
     if fma isn't available then fms, fnma or fnms are not either.  */
  if (optab_handler (fma_optab, TYPE_MODE (type)) == CODE_FOR_nothing)
    return false;

  /* If the multiplication has zero uses, it is kept around probably because
     of -fnon-call-exceptions.  Don't optimize it away in that case,
     it is DCE job.  */
  if (has_zero_uses (mul_result))
    return false;

  /* Make sure that the multiplication statement becomes dead after
     the transformation, thus that all uses are transformed to FMAs.
     This means we assume that an FMA operation has the same cost
     as an addition.  */
  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, mul_result)
    {
      enum tree_code use_code;
      tree result = mul_result;
      bool negate_p = false;

      use_stmt = USE_STMT (use_p);

      if (is_gimple_debug (use_stmt))
	continue;

      /* For now restrict this operation to single basic blocks.  In theory
	 we would want to support sinking the multiplication in
	 m = a*b;
	 if ()
	   ma = m + c;
	 else
	   d = m;
	 to form a fma in the then block and sink the multiplication to the
	 else block.  */
      if (gimple_bb (use_stmt) != gimple_bb (mul_stmt))
	return false;

      if (!is_gimple_assign (use_stmt))
	return false;

      use_code = gimple_assign_rhs_code (use_stmt);

      /* A negate on the multiplication leads to FNMA.  */
      if (use_code == NEGATE_EXPR)
	{
	  ssa_op_iter iter;
	  use_operand_p usep;

	  result = gimple_assign_lhs (use_stmt);

	  /* Make sure the negate statement becomes dead with this
	     single transformation.  */
	  if (!single_imm_use (gimple_assign_lhs (use_stmt),
			       &use_p, &neguse_stmt))
	    return false;

	  /* Make sure the multiplication isn't also used on that stmt.  */
	  FOR_EACH_PHI_OR_STMT_USE (usep, neguse_stmt, iter, SSA_OP_USE)
	    if (USE_FROM_PTR (usep) == mul_result)
	      return false;

	  use_stmt = neguse_stmt;
	  if (gimple_bb (use_stmt) != gimple_bb (mul_stmt))
	    return false;
	  if (!is_gimple_assign (use_stmt))
	    return false;

	  use_code = gimple_assign_rhs_code (use_stmt);
	  negate_p = true;
	}

      switch (use_code)
	{
	case MINUS_EXPR:
	  if (gimple_assign_rhs2 (use_stmt) == result)
	    negate_p = !negate_p;
	  break;
	case PLUS_EXPR:
	  break;
	default:
	  /* FMA can only be formed from PLUS and MINUS.  */
	  return false;
	}

      /* If the subtrahend (gimple_assign_rhs2 (use_stmt)) is computed
	 by a MULT_EXPR that we'll visit later, we might be able to
	 get a more profitable match with fnma.
	 OTOH, if we don't, a negate / fma pair has likely lower latency
	 than a mult / subtract pair.  */
      if (use_code == MINUS_EXPR && !negate_p
	  && gimple_assign_rhs1 (use_stmt) == result
	  && optab_handler (fms_optab, TYPE_MODE (type)) == CODE_FOR_nothing
	  && optab_handler (fnma_optab, TYPE_MODE (type)) != CODE_FOR_nothing)
	{
	  tree rhs2 = gimple_assign_rhs2 (use_stmt);

	  if (TREE_CODE (rhs2) == SSA_NAME)
	    {
	      gimple stmt2 = SSA_NAME_DEF_STMT (rhs2);
	      if (has_single_use (rhs2)
		  && is_gimple_assign (stmt2)
		  && gimple_assign_rhs_code (stmt2) == MULT_EXPR)
		return false;
	    }
	}

      /* We can't handle a * b + a * b.  */
      if (gimple_assign_rhs1 (use_stmt) == gimple_assign_rhs2 (use_stmt))
	return false;

      /* While it is possible to validate whether or not the exact form
	 that we've recognized is available in the backend, the assumption
	 is that the transformation is never a loss.  For instance, suppose
	 the target only has the plain FMA pattern available.  Consider
	 a*b-c -> fma(a,b,-c): we've exchanged MUL+SUB for FMA+NEG, which
	 is still two operations.  Consider -(a*b)-c -> fma(-a,b,-c): we
	 still have 3 operations, but in the FMA form the two NEGs are
	 independent and could be run in parallel.  */
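      /* To make the four shapes concrete (an illustrative mapping; the
	 available patterns depend on the target):
	   a*b + c    -> fma  (a, b, c)
	   a*b - c    -> fms  (a, b, c) or fma (a, b, -c)
	   -(a*b) + c -> fnma (a, b, c)
	   -(a*b) - c -> fnms (a, b, c)  */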
    }

  FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, mul_result)
    {
      gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
      enum tree_code use_code;
      tree addop, mulop1 = op1, result = mul_result;
      bool negate_p = false;

      if (is_gimple_debug (use_stmt))
	continue;

      use_code = gimple_assign_rhs_code (use_stmt);
      if (use_code == NEGATE_EXPR)
	{
	  result = gimple_assign_lhs (use_stmt);
	  single_imm_use (gimple_assign_lhs (use_stmt), &use_p, &neguse_stmt);
	  gsi_remove (&gsi, true);
	  release_defs (use_stmt);

	  use_stmt = neguse_stmt;
	  gsi = gsi_for_stmt (use_stmt);
	  use_code = gimple_assign_rhs_code (use_stmt);
	  negate_p = true;
	}

      if (gimple_assign_rhs1 (use_stmt) == result)
	{
	  addop = gimple_assign_rhs2 (use_stmt);
	  /* a * b - c -> a * b + (-c)  */
	  if (gimple_assign_rhs_code (use_stmt) == MINUS_EXPR)
	    addop = force_gimple_operand_gsi (&gsi,
					      build1 (NEGATE_EXPR,
						      type, addop),
					      true, NULL_TREE, true,
					      GSI_SAME_STMT);
	}
      else
	{
	  addop = gimple_assign_rhs1 (use_stmt);
	  /* a - b * c -> (-b) * c + a */
	  if (gimple_assign_rhs_code (use_stmt) == MINUS_EXPR)
	    negate_p = !negate_p;
	}

      if (negate_p)
	mulop1 = force_gimple_operand_gsi (&gsi,
					   build1 (NEGATE_EXPR,
						   type, mulop1),
					   true, NULL_TREE, true,
					   GSI_SAME_STMT);

      fma_stmt = gimple_build_assign_with_ops (FMA_EXPR,
					       gimple_assign_lhs (use_stmt),
					       mulop1, op2, addop);
      gsi_replace (&gsi, fma_stmt, true);
      widen_mul_stats.fmas_inserted++;
    }

  return true;
}
/* Find integer multiplications where the operands are extended from
   smaller types, and replace the MULT_EXPR with a WIDEN_MULT_EXPR
   where appropriate.  */
namespace {

const pass_data pass_data_optimize_widening_mul =
{
  GIMPLE_PASS, /* type */
  "widening_mul", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};

class pass_optimize_widening_mul : public gimple_opt_pass
{
public:
  pass_optimize_widening_mul (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_optimize_widening_mul, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *)
    {
      return flag_expensive_optimizations && optimize;
    }

  virtual unsigned int execute (function *);

}; // class pass_optimize_widening_mul
unsigned int
pass_optimize_widening_mul::execute (function *fun)
{
  basic_block bb;
  bool cfg_changed = false;

  memset (&widen_mul_stats, 0, sizeof (widen_mul_stats));

  FOR_EACH_BB_FN (bb, fun)
    {
      gimple_stmt_iterator gsi;

      for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi);)
	{
	  gimple stmt = gsi_stmt (gsi);
	  enum tree_code code;

	  if (is_gimple_assign (stmt))
	    {
	      code = gimple_assign_rhs_code (stmt);
	      switch (code)
		{
		case MULT_EXPR:
		  if (!convert_mult_to_widen (stmt, &gsi)
		      && convert_mult_to_fma (stmt,
					      gimple_assign_rhs1 (stmt),
					      gimple_assign_rhs2 (stmt)))
		    {
		      gsi_remove (&gsi, true);
		      release_defs (stmt);
		      continue;
		    }
		  break;

		case PLUS_EXPR:
		case MINUS_EXPR:
		  convert_plusminus_to_widen (&gsi, stmt, code);
		  break;

		default:;
		}
	    }
	  else if (is_gimple_call (stmt)
		   && gimple_call_lhs (stmt))
	    {
	      tree fndecl = gimple_call_fndecl (stmt);
	      if (fndecl
		  && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
		{
		  switch (DECL_FUNCTION_CODE (fndecl))
		    {
		    case BUILT_IN_POWF:
		    case BUILT_IN_POW:
		    case BUILT_IN_POWL:
		      if (TREE_CODE (gimple_call_arg (stmt, 1)) == REAL_CST
			  && REAL_VALUES_EQUAL
			       (TREE_REAL_CST (gimple_call_arg (stmt, 1)),
				dconst2)
			  && convert_mult_to_fma (stmt,
						  gimple_call_arg (stmt, 0),
						  gimple_call_arg (stmt, 0)))
			{
			  unlink_stmt_vdef (stmt);
			  if (gsi_remove (&gsi, true)
			      && gimple_purge_dead_eh_edges (bb))
			    cfg_changed = true;
			  release_defs (stmt);
			  continue;
			}
		      break;

		    default:;
		    }
		}
	    }
	  gsi_next (&gsi);
	}
    }

  statistics_counter_event (fun, "widening multiplications inserted",
			    widen_mul_stats.widen_mults_inserted);
  statistics_counter_event (fun, "widening maccs inserted",
			    widen_mul_stats.maccs_inserted);
  statistics_counter_event (fun, "fused multiply-adds inserted",
			    widen_mul_stats.fmas_inserted);

  return cfg_changed ? TODO_cleanup_cfg : 0;
}
} // anon namespace

gimple_opt_pass *
make_pass_optimize_widening_mul (gcc::context *ctxt)
{
  return new pass_optimize_widening_mul (ctxt);
}