/* Global, SSA-based optimizations using mathematical identities.
   Copyright (C) 2005-2015 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* Currently, the only mini-pass in this file tries to CSE reciprocal
   operations.  These are common in sequences such as this one:

	modulus = sqrt(x*x + y*y + z*z);
	x = x / modulus;
	y = y / modulus;
	z = z / modulus;

   that can be optimized to

	modulus = sqrt(x*x + y*y + z*z);
	rmodulus = 1.0 / modulus;
	x = x * rmodulus;
	y = y * rmodulus;
	z = z * rmodulus;

   We do this for loop invariant divisors, and with this pass whenever
   we notice that a division has the same divisor multiple times.

   Of course, like in PRE, we don't insert a division if a dominator
   already has one.  However, this cannot be done as an extension of
   PRE for several reasons.

   First of all, with some experiments it was found out that the
   transformation is not always useful if there are only two divisions
   by the same divisor.  This is probably because modern processors
   can pipeline the divisions; on older, in-order processors it should
   still be effective to optimize two divisions by the same number.
   We make this a param, and it shall be called N in the remainder of
   this comment.

   Second, if trapping math is active, we have less freedom on where
   to insert divisions: we can only do so in basic blocks that already
   contain one.  (If divisions don't trap, instead, we can insert
   divisions elsewhere, which will be in blocks that are common dominators
   of those that have the division).

   We really don't want to compute the reciprocal unless a division will
   be found.  To do this, we won't insert the division in a basic block
   that has less than N divisions *post-dominating* it.

   The algorithm constructs a subset of the dominator tree, holding the
   blocks containing the divisions and the common dominators to them,
   and walks it twice.  The first walk is in post-order, and it annotates
   each block with the number of divisions that post-dominate it: this
   gives information on where divisions can be inserted profitably.
   The second walk is in pre-order, and it inserts divisions as explained
   above, and replaces divisions by multiplications.

   In the best case, the cost of the pass is O(n_statements).  In the
   worst case, the cost is due to creating the dominator tree subset,
   with a cost of O(n_basic_blocks ^ 2); however this can only happen
   for n_statements / n_basic_blocks statements.  So, the amortized cost
   of creating the dominator tree subset is O(n_basic_blocks) and the
   worst-case cost of the pass is O(n_statements * n_basic_blocks).

   More practically, the cost will be small because there are few
   divisions, and they tend to be in the same basic block, so insert_bb
   is called very few times.

   If we did this using domwalk.c, an efficient implementation would have
   to work on all the variables in a single pass, because we could not
   work on just a subset of the dominator tree, as we do now, and the
   cost would also be something like O(n_statements * n_basic_blocks).
   The data structures would be more complex in order to work on all the
   variables in a single pass.  */
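
/* As a concrete illustration of the threshold N (the values here are
   made up; the real threshold comes from the target hook): with N = 2
   and non-trapping math, a sequence such as

	x = a / d;
	y = b / d;

   becomes

	t = 1.0 / d;
	x = a * t;
	y = b * t;

   with the reciprocal T inserted in the nearest block that has at
   least N divisions by D post-dominating it.  */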
89 #include "coretypes.h"
97 #include "fold-const.h"
98 #include "internal-fn.h"
99 #include "gimple-fold.h"
100 #include "gimple-iterator.h"
101 #include "gimplify.h"
102 #include "gimplify-me.h"
103 #include "stor-layout.h"
104 #include "tree-cfg.h"
105 #include "insn-config.h"
110 #include "emit-rtl.h"
114 #include "tree-dfa.h"
115 #include "tree-ssa.h"
116 #include "tree-pass.h"
117 #include "alloc-pool.h"
119 #include "gimple-pretty-print.h"
120 #include "builtins.h"
123 /* FIXME: RTL headers have to be included here for optabs. */
124 #include "expr.h" /* Because optabs.h wants sepops. */
125 #include "insn-codes.h"
/* This structure represents one basic block that either computes a
   division, or is a common dominator for basic blocks that compute a
   division.  */
struct occurrence {
  /* The basic block represented by this structure.  */
  basic_block bb;

  /* If non-NULL, the SSA_NAME holding the definition for a reciprocal
     computed in BB.  */
  tree recip_def;

  /* If non-NULL, the GIMPLE_ASSIGN for a reciprocal computation that
     was inserted in BB.  */
  gimple recip_def_stmt;

  /* Pointer to a list of "struct occurrence"s for blocks dominated
     by BB.  */
  struct occurrence *children;

  /* Pointer to the next "struct occurrence"s in the list of blocks
     sharing a common dominator.  */
  struct occurrence *next;

  /* The number of divisions that are in BB before compute_merit.  The
     number of divisions that are in BB or post-dominate it after
     compute_merit.  */
  int num_divisions;

  /* True if the basic block has a division, false if it is a common
     dominator for basic blocks that do.  If it is false and trapping
     math is active, BB is not a candidate for inserting a reciprocal.  */
  bool bb_has_division;
};

static struct
{
  /* Number of 1.0/X ops inserted.  */
  int rdivs_inserted;

  /* Number of 1.0/FUNC ops inserted.  */
  int rfuncs_inserted;
} reciprocal_stats;

static struct
{
  /* Number of cexpi calls inserted.  */
  int inserted;
} sincos_stats;

static struct
{
  /* Number of hand-written 16-bit nop / bswaps found.  */
  int found_16bit;

  /* Number of hand-written 32-bit nop / bswaps found.  */
  int found_32bit;

  /* Number of hand-written 64-bit nop / bswaps found.  */
  int found_64bit;
} nop_stats, bswap_stats;

static struct
{
  /* Number of widening multiplication ops inserted.  */
  int widen_mults_inserted;

  /* Number of integer multiply-and-accumulate ops inserted.  */
  int maccs_inserted;

  /* Number of fp fused multiply-add ops inserted.  */
  int fmas_inserted;
} widen_mul_stats;

/* The instance of "struct occurrence" representing the highest
   interesting block in the dominator tree.  */
static struct occurrence *occ_head;

/* Allocation pool for getting instances of "struct occurrence".  */
static pool_allocator<occurrence> *occ_pool;
/* Allocate and return a new struct occurrence for basic block BB, and
   whose children list is headed by CHILDREN.  */

static struct occurrence *
occ_new (basic_block bb, struct occurrence *children)
{
  struct occurrence *occ;

  bb->aux = occ = occ_pool->allocate ();
  memset (occ, 0, sizeof (struct occurrence));

  occ->bb = bb;
  occ->children = children;
  return occ;
}
/* Insert NEW_OCC into our subset of the dominator tree.  P_HEAD points to a
   list of "struct occurrence"s, one per basic block, having IDOM as
   their common dominator.

   We try to insert NEW_OCC as deep as possible in the tree, and we also
   insert any other block that is a common dominator for BB and one
   block already in the tree.  */

static void
insert_bb (struct occurrence *new_occ, basic_block idom,
	   struct occurrence **p_head)
{
  struct occurrence *occ, **p_occ;

  for (p_occ = p_head; (occ = *p_occ) != NULL; )
    {
      basic_block bb = new_occ->bb, occ_bb = occ->bb;
      basic_block dom = nearest_common_dominator (CDI_DOMINATORS, occ_bb, bb);
      if (dom == bb)
	{
	  /* BB dominates OCC_BB.  OCC becomes NEW_OCC's child: remove OCC
	     from the list.  */
	  *p_occ = occ->next;
	  occ->next = new_occ->children;
	  new_occ->children = occ;

	  /* Try the next block (it may as well be dominated by BB).  */
	}

      else if (dom == occ_bb)
	{
	  /* OCC_BB dominates BB.  Tail recurse to look deeper.  */
	  insert_bb (new_occ, dom, &occ->children);
	  return;
	}

      else if (dom != idom)
	{
	  gcc_assert (!dom->aux);

	  /* There is a dominator between IDOM and BB, add it and make
	     two children out of NEW_OCC and OCC.  First, remove OCC from
	     the list.  */
	  *p_occ = occ->next;
	  new_occ->next = occ;
	  occ->next = NULL;

	  /* None of the previous blocks has DOM as a dominator: if we tail
	     recursed, we would reexamine them uselessly.  Just switch BB with
	     DOM, and go on looking for blocks dominated by DOM.  */
	  new_occ = occ_new (dom, new_occ);
	}

      else
	{
	  /* Nothing special, go on with the next element.  */
	  p_occ = &occ->next;
	}
    }

  /* No place was found as a child of IDOM.  Make BB a sibling of IDOM.  */
  new_occ->next = *p_head;
  *p_head = new_occ;
}
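
/* Illustrative example (a made-up CFG): if BB2 and BB3 both contain a
   division by the same SSA name and their nearest common dominator is
   BB1, inserting BB3's occurrence into a list that already holds BB2
   creates a fresh occurrence for BB1 (the "dom != idom" case above)
   and makes BB2 and BB3 its children.  */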
/* Register that we found a division in BB.  */

static inline void
register_division_in (basic_block bb)
{
  struct occurrence *occ;

  occ = (struct occurrence *) bb->aux;
  if (!occ)
    {
      occ = occ_new (bb, NULL);
      insert_bb (occ, ENTRY_BLOCK_PTR_FOR_FN (cfun), &occ_head);
    }

  occ->bb_has_division = true;
  occ->num_divisions++;
}
/* Compute the number of divisions that postdominate each block in OCC and
   its children.  */

static void
compute_merit (struct occurrence *occ)
{
  struct occurrence *occ_child;
  basic_block dom = occ->bb;

  for (occ_child = occ->children; occ_child; occ_child = occ_child->next)
    {
      basic_block bb;
      if (occ_child->children)
	compute_merit (occ_child);

      if (flag_exceptions)
	bb = single_noncomplex_succ (dom);
      else
	bb = dom;

      if (dominated_by_p (CDI_POST_DOMINATORS, bb, occ_child->bb))
	occ->num_divisions += occ_child->num_divisions;
    }
}
/* Return whether USE_STMT is a floating-point division by DEF.  */

static inline bool
is_division_by (gimple use_stmt, tree def)
{
  return is_gimple_assign (use_stmt)
	 && gimple_assign_rhs_code (use_stmt) == RDIV_EXPR
	 && gimple_assign_rhs2 (use_stmt) == def
	 /* Do not recognize x / x as valid division, as we are getting
	    confused later by replacing all immediate uses x in such
	    a stmt.  */
	 && gimple_assign_rhs1 (use_stmt) != def;
}
/* Walk the subset of the dominator tree rooted at OCC, setting the
   RECIP_DEF field to a definition of 1.0 / DEF that can be used in
   the given basic block.  The field may be left NULL, of course,
   if it is not possible or profitable to do the optimization.

   DEF_GSI is an iterator pointing at the statement defining DEF.
   If RECIP_DEF is set, a dominator already has a computation that can
   be used.  */

static void
insert_reciprocals (gimple_stmt_iterator *def_gsi, struct occurrence *occ,
		    tree def, tree recip_def, int threshold)
{
  tree type;
  gassign *new_stmt = NULL;
  gimple_stmt_iterator gsi;
  struct occurrence *occ_child;

  if (recip_def == NULL
      && (occ->bb_has_division || !flag_trapping_math)
      && occ->num_divisions >= threshold)
    {
      /* Make a variable with the replacement and substitute it.  */
      type = TREE_TYPE (def);
      recip_def = create_tmp_reg (type, "reciptmp");
      new_stmt = gimple_build_assign (recip_def, RDIV_EXPR,
				      build_one_cst (type), def);

      if (occ->bb_has_division)
	{
	  /* Case 1: insert before an existing division.  */
	  gsi = gsi_after_labels (occ->bb);
	  while (!gsi_end_p (gsi) && !is_division_by (gsi_stmt (gsi), def))
	    gsi_next (&gsi);

	  gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
	}
      else if (def_gsi && occ->bb == def_gsi->bb)
	{
	  /* Case 2: insert right after the definition.  Note that this will
	     never happen if the definition statement can throw, because in
	     that case the sole successor of the statement's basic block will
	     dominate all the uses as well.  */
	  gsi_insert_after (def_gsi, new_stmt, GSI_NEW_STMT);
	}
      else
	{
	  /* Case 3: insert in a basic block not containing defs/uses.  */
	  gsi = gsi_after_labels (occ->bb);
	  gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
	}

      reciprocal_stats.rdivs_inserted++;

      occ->recip_def_stmt = new_stmt;
    }

  occ->recip_def = recip_def;
  for (occ_child = occ->children; occ_child; occ_child = occ_child->next)
    insert_reciprocals (def_gsi, occ_child, def, recip_def, threshold);
}
/* Replace the division at USE_P with a multiplication by the reciprocal, if
   possible.  */

static inline void
replace_reciprocal (use_operand_p use_p)
{
  gimple use_stmt = USE_STMT (use_p);
  basic_block bb = gimple_bb (use_stmt);
  struct occurrence *occ = (struct occurrence *) bb->aux;

  if (optimize_bb_for_speed_p (bb)
      && occ->recip_def && use_stmt != occ->recip_def_stmt)
    {
      gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
      gimple_assign_set_rhs_code (use_stmt, MULT_EXPR);
      SET_USE (use_p, occ->recip_def);
      fold_stmt_inplace (&gsi);
      update_stmt (use_stmt);
    }
}
/* Free OCC and return one more "struct occurrence" to be freed.  */

static struct occurrence *
free_bb (struct occurrence *occ)
{
  struct occurrence *child, *next;

  /* First get the two pointers hanging off OCC.  */
  next = occ->next;
  child = occ->children;
  occ->bb->aux = NULL;
  occ_pool->remove (occ);

  /* Now ensure that we don't recurse unless it is necessary.  */
  if (!child)
    return next;
  else
    {
      while (next)
	next = free_bb (next);

      return child;
    }
}
/* Look for floating-point divisions among DEF's uses, and try to
   replace them by multiplications with the reciprocal.  Add
   as many statements computing the reciprocal as needed.

   DEF must be a GIMPLE register of a floating-point type.  */

static void
execute_cse_reciprocals_1 (gimple_stmt_iterator *def_gsi, tree def)
{
  use_operand_p use_p;
  imm_use_iterator use_iter;
  struct occurrence *occ;
  int count = 0, threshold;

  gcc_assert (FLOAT_TYPE_P (TREE_TYPE (def)) && is_gimple_reg (def));

  FOR_EACH_IMM_USE_FAST (use_p, use_iter, def)
    {
      gimple use_stmt = USE_STMT (use_p);
      if (is_division_by (use_stmt, def))
	{
	  register_division_in (gimple_bb (use_stmt));
	  count++;
	}
    }

  /* Do the expensive part only if we can hope to optimize something.  */
  threshold = targetm.min_divisions_for_recip_mul (TYPE_MODE (TREE_TYPE (def)));
  if (count >= threshold)
    {
      gimple use_stmt;
      for (occ = occ_head; occ; occ = occ->next)
	{
	  compute_merit (occ);
	  insert_reciprocals (def_gsi, occ, def, NULL, threshold);
	}

      FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, def)
	{
	  if (is_division_by (use_stmt, def))
	    {
	      FOR_EACH_IMM_USE_ON_STMT (use_p, use_iter)
		replace_reciprocal (use_p);
	    }
	}
    }

  for (occ = occ_head; occ; )
    occ = free_bb (occ);

  occ_head = NULL;
}
/* Go through all the floating-point SSA_NAMEs, and call
   execute_cse_reciprocals_1 on each of them.  */

namespace {

const pass_data pass_data_cse_reciprocals =
{
  GIMPLE_PASS, /* type */
  "recip", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};

class pass_cse_reciprocals : public gimple_opt_pass
{
public:
  pass_cse_reciprocals (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_cse_reciprocals, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return optimize && flag_reciprocal_math; }
  virtual unsigned int execute (function *);

}; // class pass_cse_reciprocals
unsigned int
pass_cse_reciprocals::execute (function *fun)
{
  basic_block bb;
  tree arg;

  occ_pool = new pool_allocator<occurrence>
    ("dominators for recip", n_basic_blocks_for_fn (fun) / 3 + 1);

  memset (&reciprocal_stats, 0, sizeof (reciprocal_stats));
  calculate_dominance_info (CDI_DOMINATORS);
  calculate_dominance_info (CDI_POST_DOMINATORS);

#ifdef ENABLE_CHECKING
  FOR_EACH_BB_FN (bb, fun)
    gcc_assert (!bb->aux);
#endif

  for (arg = DECL_ARGUMENTS (fun->decl); arg; arg = DECL_CHAIN (arg))
    if (FLOAT_TYPE_P (TREE_TYPE (arg))
	&& is_gimple_reg (arg))
      {
	tree name = ssa_default_def (fun, arg);
	if (name)
	  execute_cse_reciprocals_1 (NULL, name);
      }

  FOR_EACH_BB_FN (bb, fun)
    {
      tree def;

      for (gphi_iterator gsi = gsi_start_phis (bb); !gsi_end_p (gsi);
	   gsi_next (&gsi))
	{
	  gphi *phi = gsi.phi ();
	  def = PHI_RESULT (phi);
	  if (! virtual_operand_p (def)
	      && FLOAT_TYPE_P (TREE_TYPE (def)))
	    execute_cse_reciprocals_1 (NULL, def);
	}

      for (gimple_stmt_iterator gsi = gsi_after_labels (bb); !gsi_end_p (gsi);
	   gsi_next (&gsi))
	{
	  gimple stmt = gsi_stmt (gsi);

	  if (gimple_has_lhs (stmt)
	      && (def = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_DEF)) != NULL
	      && FLOAT_TYPE_P (TREE_TYPE (def))
	      && TREE_CODE (def) == SSA_NAME)
	    execute_cse_reciprocals_1 (&gsi, def);
	}

      if (optimize_bb_for_size_p (bb))
	continue;

      /* Scan for a/func(b) and convert it to reciprocal a*rfunc(b).  */
      for (gimple_stmt_iterator gsi = gsi_after_labels (bb); !gsi_end_p (gsi);
	   gsi_next (&gsi))
	{
	  gimple stmt = gsi_stmt (gsi);
	  tree fndecl;

	  if (is_gimple_assign (stmt)
	      && gimple_assign_rhs_code (stmt) == RDIV_EXPR)
	    {
	      tree arg1 = gimple_assign_rhs2 (stmt);
	      gimple stmt1;

	      if (TREE_CODE (arg1) != SSA_NAME)
		continue;

	      stmt1 = SSA_NAME_DEF_STMT (arg1);

	      if (is_gimple_call (stmt1)
		  && gimple_call_lhs (stmt1)
		  && (fndecl = gimple_call_fndecl (stmt1))
		  && (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
		      || DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD))
		{
		  enum built_in_function code;
		  bool md_code, fail;
		  imm_use_iterator ui;
		  use_operand_p use_p;

		  code = DECL_FUNCTION_CODE (fndecl);
		  md_code = DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD;

		  fndecl = targetm.builtin_reciprocal (code, md_code, false);
		  if (!fndecl)
		    continue;

		  /* Check that all uses of the SSA name are divisions,
		     otherwise replacing the defining statement will do
		     the wrong thing.  */
		  fail = false;
		  FOR_EACH_IMM_USE_FAST (use_p, ui, arg1)
		    {
		      gimple stmt2 = USE_STMT (use_p);
		      if (is_gimple_debug (stmt2))
			continue;
		      if (!is_gimple_assign (stmt2)
			  || gimple_assign_rhs_code (stmt2) != RDIV_EXPR
			  || gimple_assign_rhs1 (stmt2) == arg1
			  || gimple_assign_rhs2 (stmt2) != arg1)
			{
			  fail = true;
			  break;
			}
		    }
		  if (fail)
		    continue;

		  gimple_replace_ssa_lhs (stmt1, arg1);
		  gimple_call_set_fndecl (stmt1, fndecl);
		  update_stmt (stmt1);
		  reciprocal_stats.rfuncs_inserted++;

		  FOR_EACH_IMM_USE_STMT (stmt, ui, arg1)
		    {
		      gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
		      gimple_assign_set_rhs_code (stmt, MULT_EXPR);
		      fold_stmt_inplace (&gsi);
		      update_stmt (stmt);
		    }
		}
	    }
	}
    }

  statistics_counter_event (fun, "reciprocal divs inserted",
			    reciprocal_stats.rdivs_inserted);
  statistics_counter_event (fun, "reciprocal functions inserted",
			    reciprocal_stats.rfuncs_inserted);

  free_dominance_info (CDI_DOMINATORS);
  free_dominance_info (CDI_POST_DOMINATORS);
  delete occ_pool;
  return 0;
}
} // anon namespace

gimple_opt_pass *
make_pass_cse_reciprocals (gcc::context *ctxt)
{
  return new pass_cse_reciprocals (ctxt);
}
/* Records an occurrence at statement USE_STMT in the vector of trees
   STMTS if it is dominated by *TOP_BB or dominates it or this basic block
   is not yet initialized.  Returns true if the occurrence was pushed on
   the vector.  Adjusts *TOP_BB to be the basic block dominating all
   statements in the vector.  */

static bool
maybe_record_sincos (vec<gimple> *stmts,
		     basic_block *top_bb, gimple use_stmt)
{
  basic_block use_bb = gimple_bb (use_stmt);
  if (*top_bb
      && (*top_bb == use_bb
	  || dominated_by_p (CDI_DOMINATORS, use_bb, *top_bb)))
    stmts->safe_push (use_stmt);
  else if (!*top_bb
	   || dominated_by_p (CDI_DOMINATORS, *top_bb, use_bb))
    {
      stmts->safe_push (use_stmt);
      *top_bb = use_bb;
    }
  else
    return false;

  return true;
}
/* Look for sin, cos and cexpi calls with the same argument NAME and
   create a single call to cexpi CSEing the result in this case.
   We first walk over all immediate uses of the argument collecting
   statements that we can CSE in a vector and in a second pass replace
   the statement rhs with a REALPART or IMAGPART expression on the
   result of the cexpi call we insert before the use statement that
   dominates all other candidates.  */

static bool
execute_cse_sincos_1 (tree name)
{
  gimple_stmt_iterator gsi;
  imm_use_iterator use_iter;
  tree fndecl, res, type;
  gimple def_stmt, use_stmt, stmt;
  int seen_cos = 0, seen_sin = 0, seen_cexpi = 0;
  auto_vec<gimple> stmts;
  basic_block top_bb = NULL;
  int i;
  bool cfg_changed = false;

  type = TREE_TYPE (name);
  FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, name)
    {
      if (gimple_code (use_stmt) != GIMPLE_CALL
	  || !gimple_call_lhs (use_stmt)
	  || !(fndecl = gimple_call_fndecl (use_stmt))
	  || DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_NORMAL)
	continue;

      switch (DECL_FUNCTION_CODE (fndecl))
	{
	CASE_FLT_FN (BUILT_IN_COS):
	  seen_cos |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
	  break;

	CASE_FLT_FN (BUILT_IN_SIN):
	  seen_sin |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
	  break;

	CASE_FLT_FN (BUILT_IN_CEXPI):
	  seen_cexpi |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
	  break;

	default:;
	}
    }

  if (seen_cos + seen_sin + seen_cexpi <= 1)
    return false;

  /* Simply insert cexpi at the beginning of top_bb but not earlier than
     the name def statement.  */
  fndecl = mathfn_built_in (type, BUILT_IN_CEXPI);
  if (!fndecl)
    return false;
  stmt = gimple_build_call (fndecl, 1, name);
  res = make_temp_ssa_name (TREE_TYPE (TREE_TYPE (fndecl)), stmt, "sincostmp");
  gimple_call_set_lhs (stmt, res);

  def_stmt = SSA_NAME_DEF_STMT (name);
  if (!SSA_NAME_IS_DEFAULT_DEF (name)
      && gimple_code (def_stmt) != GIMPLE_PHI
      && gimple_bb (def_stmt) == top_bb)
    {
      gsi = gsi_for_stmt (def_stmt);
      gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
    }
  else
    {
      gsi = gsi_after_labels (top_bb);
      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
    }
  sincos_stats.inserted++;

  /* And adjust the recorded old call sites.  */
  for (i = 0; stmts.iterate (i, &use_stmt); ++i)
    {
      tree rhs = NULL_TREE;
      fndecl = gimple_call_fndecl (use_stmt);

      switch (DECL_FUNCTION_CODE (fndecl))
	{
	CASE_FLT_FN (BUILT_IN_COS):
	  rhs = fold_build1 (REALPART_EXPR, type, res);
	  break;

	CASE_FLT_FN (BUILT_IN_SIN):
	  rhs = fold_build1 (IMAGPART_EXPR, type, res);
	  break;

	CASE_FLT_FN (BUILT_IN_CEXPI):
	  rhs = res;
	  break;

	default:
	  gcc_unreachable ();
	}

      /* Replace call with a copy.  */
      stmt = gimple_build_assign (gimple_call_lhs (use_stmt), rhs);

      gsi = gsi_for_stmt (use_stmt);
      gsi_replace (&gsi, stmt, true);
      if (gimple_purge_dead_eh_edges (gimple_bb (stmt)))
	cfg_changed = true;
    }

  return cfg_changed;
}
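
/* For illustration (the SSA names below are made up): given

     c_1 = cosf (a_2);
     s_3 = sinf (a_2);

   the code above emits a single

     sincostmp_4 = __builtin_cexpif (a_2);

   before the dominating call and rewrites the old call sites into

     c_1 = REALPART_EXPR <sincostmp_4>;
     s_3 = IMAGPART_EXPR <sincostmp_4>;  */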
/* To evaluate powi(x,n), the floating point value x raised to the
   constant integer exponent n, we use a hybrid algorithm that
   combines the "window method" with look-up tables.  For an
   introduction to exponentiation algorithms and "addition chains",
   see section 4.6.3, "Evaluation of Powers" of Donald E. Knuth,
   "Seminumerical Algorithms", Vol. 2, "The Art of Computer Programming",
   3rd Edition, 1998, and Daniel M. Gordon, "A Survey of Fast Exponentiation
   Methods", Journal of Algorithms, Vol. 27, pp. 129-146, 1998.  */

/* Provide a default value for POWI_MAX_MULTS, the maximum number of
   multiplications to inline before calling the system library's pow
   function.  powi(x,n) requires at worst 2*bits(n)-2 multiplications,
   so this default never requires calling pow, powf or powl.  */
#ifndef POWI_MAX_MULTS
#define POWI_MAX_MULTS  (2*HOST_BITS_PER_WIDE_INT-2)
#endif

/* The size of the "optimal power tree" lookup table.  All
   exponents less than this value are simply looked up in the
   powi_table below.  This threshold is also used to size the
   cache of pseudo registers that hold intermediate results.  */
#define POWI_TABLE_SIZE 256

/* The size, in bits of the window, used in the "window method"
   exponentiation algorithm.  This is equivalent to a radix of
   (1<<POWI_WINDOW_SIZE) in the corresponding "m-ary method".  */
#define POWI_WINDOW_SIZE 3

/* The following table is an efficient representation of an
   "optimal power tree".  For each value, i, the corresponding
   value, j, in the table states that an optimal evaluation
   sequence for calculating pow(x,i) can be found by evaluating
   pow(x,j)*pow(x,i-j).  An optimal power tree for the first
   100 integers is given in Knuth's "Seminumerical algorithms".  */

static const unsigned char powi_table[POWI_TABLE_SIZE] =
  {
      0,   1,   1,   2,   2,   3,   3,   4,  /*   0 -   7 */
      4,   6,   5,   6,   6,  10,   7,   9,  /*   8 -  15 */
      8,  16,   9,  16,  10,  12,  11,  13,  /*  16 -  23 */
     12,  17,  13,  18,  14,  24,  15,  26,  /*  24 -  31 */
     16,  17,  17,  19,  18,  33,  19,  26,  /*  32 -  39 */
     20,  25,  21,  40,  22,  27,  23,  44,  /*  40 -  47 */
     24,  32,  25,  34,  26,  29,  27,  44,  /*  48 -  55 */
     28,  31,  29,  34,  30,  60,  31,  36,  /*  56 -  63 */
     32,  64,  33,  34,  34,  46,  35,  37,  /*  64 -  71 */
     36,  65,  37,  50,  38,  48,  39,  69,  /*  72 -  79 */
     40,  49,  41,  43,  42,  51,  43,  58,  /*  80 -  87 */
     44,  64,  45,  47,  46,  59,  47,  76,  /*  88 -  95 */
     48,  65,  49,  66,  50,  67,  51,  66,  /*  96 - 103 */
     52,  70,  53,  74,  54, 104,  55,  74,  /* 104 - 111 */
     56,  64,  57,  69,  58,  78,  59,  68,  /* 112 - 119 */
     60,  61,  61,  80,  62,  75,  63,  68,  /* 120 - 127 */
     64,  65,  65, 128,  66, 129,  67,  90,  /* 128 - 135 */
     68,  73,  69, 131,  70,  94,  71,  88,  /* 136 - 143 */
     72, 128,  73,  98,  74, 132,  75, 121,  /* 144 - 151 */
     76, 102,  77, 124,  78, 132,  79, 106,  /* 152 - 159 */
     80,  97,  81, 160,  82,  99,  83, 134,  /* 160 - 167 */
     84,  86,  85,  95,  86, 160,  87, 100,  /* 168 - 175 */
     88, 113,  89,  98,  90, 107,  91, 122,  /* 176 - 183 */
     92, 111,  93, 102,  94, 126,  95, 150,  /* 184 - 191 */
     96, 128,  97, 130,  98, 133,  99, 195,  /* 192 - 199 */
    100, 128, 101, 123, 102, 164, 103, 138,  /* 200 - 207 */
    104, 145, 105, 146, 106, 109, 107, 149,  /* 208 - 215 */
    108, 200, 109, 146, 110, 170, 111, 157,  /* 216 - 223 */
    112, 128, 113, 130, 114, 182, 115, 132,  /* 224 - 231 */
    116, 200, 117, 132, 118, 158, 119, 206,  /* 232 - 239 */
    120, 240, 121, 162, 122, 147, 123, 152,  /* 240 - 247 */
    124, 166, 125, 214, 126, 138, 127, 153,  /* 248 - 255 */
  };
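
/* Example of reading the table (illustrative): powi_table[7] == 4, so
   x**7 is computed as x**4 * x**3; recursively, powi_table[4] == 2 and
   powi_table[3] == 2, giving the chain

     x**2 = x * x
     x**3 = x**2 * x
     x**4 = x**2 * x**2
     x**7 = x**4 * x**3

   four multiplications in total, which is optimal for an exponent
   of 7.  */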
/* Return the number of multiplications required to calculate
   powi(x,n) where n is less than POWI_TABLE_SIZE.  This is a
   subroutine of powi_cost.  CACHE is an array indicating
   which exponents have already been calculated.  */

static int
powi_lookup_cost (unsigned HOST_WIDE_INT n, bool *cache)
{
  /* If we've already calculated this exponent, then this evaluation
     doesn't require any additional multiplications.  */
  if (cache[n])
    return 0;

  cache[n] = true;
  return powi_lookup_cost (n - powi_table[n], cache)
	 + powi_lookup_cost (powi_table[n], cache) + 1;
}
/* Return the number of multiplications required to calculate
   powi(x,n) for an arbitrary x, given the exponent N.  This
   function needs to be kept in sync with powi_as_mults below.  */

static int
powi_cost (HOST_WIDE_INT n)
{
  bool cache[POWI_TABLE_SIZE];
  unsigned HOST_WIDE_INT digit;
  unsigned HOST_WIDE_INT val;
  int result;

  if (n == 0)
    return 0;

  /* Ignore the reciprocal when calculating the cost.  */
  val = (n < 0) ? -n : n;

  /* Initialize the exponent cache.  */
  memset (cache, 0, POWI_TABLE_SIZE * sizeof (bool));
  cache[1] = true;

  result = 0;

  while (val >= POWI_TABLE_SIZE)
    {
      if (val & 1)
	{
	  digit = val & ((1 << POWI_WINDOW_SIZE) - 1);
	  result += powi_lookup_cost (digit, cache)
		    + POWI_WINDOW_SIZE + 1;
	  val >>= POWI_WINDOW_SIZE;
	}
      else
	{
	  val >>= 1;
	  result++;
	}
    }

  return result + powi_lookup_cost (val, cache);
}
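
/* Worked example (illustrative): powi_cost (259).  259 >= 256 and is
   odd, so the bottom POWI_WINDOW_SIZE bits form the digit 3; looking
   up 3 costs 2 multiplications (x**2 and x**3), plus
   POWI_WINDOW_SIZE + 1 == 4 for the three squarings and the multiply
   by the digit.  The remaining value 259 >> 3 == 32 costs 4 more
   squarings (x**4 through x**32, x**2 being cached already), so
   powi_cost (259) == 10.  */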
/* Recursive subroutine of powi_as_mults.  This function takes the
   array, CACHE, of already calculated exponents and an exponent N and
   returns a tree that corresponds to CACHE[1]**N, with type TYPE.  */

static tree
powi_as_mults_1 (gimple_stmt_iterator *gsi, location_t loc, tree type,
		 HOST_WIDE_INT n, tree *cache)
{
  tree op0, op1, ssa_target;
  unsigned HOST_WIDE_INT digit;
  gassign *mult_stmt;

  if (n < POWI_TABLE_SIZE && cache[n])
    return cache[n];

  ssa_target = make_temp_ssa_name (type, NULL, "powmult");

  if (n < POWI_TABLE_SIZE)
    {
      cache[n] = ssa_target;
      op0 = powi_as_mults_1 (gsi, loc, type, n - powi_table[n], cache);
      op1 = powi_as_mults_1 (gsi, loc, type, powi_table[n], cache);
    }
  else if (n & 1)
    {
      digit = n & ((1 << POWI_WINDOW_SIZE) - 1);
      op0 = powi_as_mults_1 (gsi, loc, type, n - digit, cache);
      op1 = powi_as_mults_1 (gsi, loc, type, digit, cache);
    }
  else
    {
      op0 = powi_as_mults_1 (gsi, loc, type, n >> 1, cache);
      op1 = op0;
    }

  mult_stmt = gimple_build_assign (ssa_target, MULT_EXPR, op0, op1);
  gimple_set_location (mult_stmt, loc);
  gsi_insert_before (gsi, mult_stmt, GSI_SAME_STMT);

  return ssa_target;
}
/* Convert ARG0**N to a tree of multiplications of ARG0 with itself.
   This function needs to be kept in sync with powi_cost above.  */

static tree
powi_as_mults (gimple_stmt_iterator *gsi, location_t loc,
	       tree arg0, HOST_WIDE_INT n)
{
  tree cache[POWI_TABLE_SIZE], result, type = TREE_TYPE (arg0);
  gassign *div_stmt;
  tree target;

  if (n == 0)
    return build_real (type, dconst1);

  memset (cache, 0, sizeof (cache));
  cache[1] = arg0;

  result = powi_as_mults_1 (gsi, loc, type, (n < 0) ? -n : n, cache);
  if (n >= 0)
    return result;

  /* If the original exponent was negative, reciprocate the result.  */
  target = make_temp_ssa_name (type, NULL, "powmult");
  div_stmt = gimple_build_assign (target, RDIV_EXPR,
				  build_real (type, dconst1), result);
  gimple_set_location (div_stmt, loc);
  gsi_insert_before (gsi, div_stmt, GSI_SAME_STMT);

  return target;
}
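
/* For illustration, powi_as_mults for n == -5 emits (SSA names are
   made up):

     powmult_1 = x * x;                    x**2 (powi_table[5] == 3)
     powmult_2 = x * powmult_1;            x**3
     powmult_3 = powmult_1 * powmult_2;    x**5
     powmult_4 = 1.0 / powmult_3;          x**-5  */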
/* ARG0 and N are the two arguments to a powi builtin in GSI with
   location info LOC.  If the arguments are appropriate, create an
   equivalent sequence of statements prior to GSI using an optimal
   number of multiplications, and return an expression holding the
   result.  */

static tree
gimple_expand_builtin_powi (gimple_stmt_iterator *gsi, location_t loc,
			    tree arg0, HOST_WIDE_INT n)
{
  /* Avoid largest negative number.  */
  if (n != -n
      && ((n >= -1 && n <= 2)
	  || (optimize_function_for_speed_p (cfun)
	      && powi_cost (n) <= POWI_MAX_MULTS)))
    return powi_as_mults (gsi, loc, arg0, n);

  return NULL_TREE;
}
/* Build a gimple call statement that calls FN with argument ARG.
   Set the lhs of the call statement to a fresh SSA name.  Insert the
   statement prior to GSI's current position, and return the fresh
   SSA name.  */

static tree
build_and_insert_call (gimple_stmt_iterator *gsi, location_t loc,
		       tree fn, tree arg)
{
  gcall *call_stmt;
  tree ssa_target;

  call_stmt = gimple_build_call (fn, 1, arg);
  ssa_target = make_temp_ssa_name (TREE_TYPE (arg), NULL, "powroot");
  gimple_set_lhs (call_stmt, ssa_target);
  gimple_set_location (call_stmt, loc);
  gsi_insert_before (gsi, call_stmt, GSI_SAME_STMT);

  return ssa_target;
}
/* Build a gimple binary operation with the given CODE and arguments
   ARG0, ARG1, assigning the result to a new SSA name for variable
   TARGET.  Insert the statement prior to GSI's current position, and
   return the fresh SSA name.  */

static tree
build_and_insert_binop (gimple_stmt_iterator *gsi, location_t loc,
			const char *name, enum tree_code code,
			tree arg0, tree arg1)
{
  tree result = make_temp_ssa_name (TREE_TYPE (arg0), NULL, name);
  gassign *stmt = gimple_build_assign (result, code, arg0, arg1);
  gimple_set_location (stmt, loc);
  gsi_insert_before (gsi, stmt, GSI_SAME_STMT);

  return result;
}
/* Build a gimple reference operation with the given CODE and argument
   ARG, assigning the result to a new SSA name of TYPE with NAME.
   Insert the statement prior to GSI's current position, and return
   the fresh SSA name.  */

static tree
build_and_insert_ref (gimple_stmt_iterator *gsi, location_t loc, tree type,
		      const char *name, enum tree_code code, tree arg0)
{
  tree result = make_temp_ssa_name (type, NULL, name);
  gimple stmt = gimple_build_assign (result, build1 (code, type, arg0));
  gimple_set_location (stmt, loc);
  gsi_insert_before (gsi, stmt, GSI_SAME_STMT);

  return result;
}
/* Build a gimple assignment to cast VAL to TYPE.  Insert the statement
   prior to GSI's current position, and return the fresh SSA name.  */

static tree
build_and_insert_cast (gimple_stmt_iterator *gsi, location_t loc,
		       tree type, tree val)
{
  tree result = make_ssa_name (type);
  gassign *stmt = gimple_build_assign (result, NOP_EXPR, val);
  gimple_set_location (stmt, loc);
  gsi_insert_before (gsi, stmt, GSI_SAME_STMT);

  return result;
}
struct pow_synth_sqrt_info
{
  bool *factors;
  unsigned int deepest;
  unsigned int num_mults;
};
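
/* Example (illustrative): for c == 0.625 and n >= 3 the synthesis in
   representable_as_half_series_p below finds 0.625 == 0.5 + 0.125,
   i.e. factors == {1, 0, 1}, deepest == 3 and num_mults == 1 (one
   multiply joining sqrt (x) with sqrt (sqrt (sqrt (x)))).  */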
/* Return true iff the real value C can be represented as a
   sum of powers of 0.5 up to N.  That is:
   C == SUM<i from 1..N> (a[i]*(0.5**i)) where a[i] is either 0 or 1.
   Record in INFO the various parameters of the synthesis algorithm such
   as the factors a[i], the maximum 0.5 power and the number of
   multiplications that will be required.  */

static bool
representable_as_half_series_p (REAL_VALUE_TYPE c, unsigned n,
				struct pow_synth_sqrt_info *info)
{
  REAL_VALUE_TYPE factor = dconsthalf;
  REAL_VALUE_TYPE remainder = c;

  info->deepest = 0;
  info->num_mults = 0;
  memset (info->factors, 0, n * sizeof (bool));

  for (unsigned i = 0; i < n; i++)
    {
      REAL_VALUE_TYPE res;

      /* If something inexact happened bail out now.  */
      if (REAL_ARITHMETIC (res, MINUS_EXPR, remainder, factor))
	return false;

      /* We have hit zero.  The number is representable as a sum
	 of powers of 0.5.  */
      if (REAL_VALUES_EQUAL (res, dconst0))
	{
	  info->factors[i] = true;
	  info->deepest = i + 1;
	  return true;
	}
      else if (!REAL_VALUE_NEGATIVE (res))
	{
	  remainder = res;
	  info->factors[i] = true;
	  info->num_mults++;
	}
      else
	info->factors[i] = false;

      REAL_ARITHMETIC (factor, MULT_EXPR, factor, dconsthalf);
    }
  return false;
}
/* Return the tree corresponding to FN being applied
   to ARG N times at GSI and LOC.
   Look up previous results from CACHE if need be.
   cache[0] should contain just plain ARG i.e. FN applied to ARG 0 times.  */

static tree
get_fn_chain (tree arg, unsigned int n, gimple_stmt_iterator *gsi,
	      tree fn, location_t loc, tree *cache)
{
  tree res = cache[n];
  if (!res)
    {
      tree prev = get_fn_chain (arg, n - 1, gsi, fn, loc, cache);
      res = build_and_insert_call (gsi, loc, fn, prev);
      cache[n] = res;
    }

  return res;
}
/* Print to STREAM the repeated application of function FNAME to ARG
   N times.  So, for FNAME = "foo", ARG = "x", N = 2 it would print:
   "foo (foo (x))".  */

static void
print_nested_fn (FILE* stream, const char *fname, const char* arg,
		 unsigned int n)
{
  if (n == 0)
    fprintf (stream, "%s", arg);
  else
    {
      fprintf (stream, "%s (", fname);
      print_nested_fn (stream, fname, arg, n - 1);
      fprintf (stream, ")");
    }
}
/* Print to STREAM the fractional sequence of sqrt chains
   applied to ARG, described by INFO.  Used for the dump file.  */

static void
dump_fractional_sqrt_sequence (FILE *stream, const char *arg,
			       struct pow_synth_sqrt_info *info)
{
  for (unsigned int i = 0; i < info->deepest; i++)
    {
      bool is_set = info->factors[i];
      if (is_set)
	{
	  print_nested_fn (stream, "sqrt", arg, i + 1);
	  if (i != info->deepest - 1)
	    fprintf (stream, " * ");
	}
    }
}
/* Print to STREAM a representation of raising ARG to an integer
   power N.  Used for the dump file.  */

static void
dump_integer_part (FILE *stream, const char* arg, HOST_WIDE_INT n)
{
  if (n != 1)
    fprintf (stream, "powi (%s, " HOST_WIDE_INT_PRINT_DEC ")", arg, n);
  else
    fprintf (stream, "%s", arg);
}
/* Attempt to synthesize a POW[F] (ARG0, ARG1) call using chains of
   square roots.  Place at GSI and LOC.  Limit the maximum depth
   of the sqrt chains to MAX_DEPTH.  Return the tree holding the
   result of the expanded sequence or NULL_TREE if the expansion failed.

   This routine assumes that ARG1 is a real number with a fractional part
   (the integer exponent case will have been handled earlier in
   gimple_expand_builtin_pow).

   For ARG1 > 0.0:
   * For ARG1 composed of a whole part WHOLE_PART and a fractional part
     FRAC_PART i.e. WHOLE_PART == floor (ARG1) and
     FRAC_PART == ARG1 - WHOLE_PART:
     Produce POWI (ARG0, WHOLE_PART) * POW (ARG0, FRAC_PART) where
     POW (ARG0, FRAC_PART) is expanded as a product of square root chains
     if it can be expressed as such, that is if FRAC_PART satisfies:
     FRAC_PART == <SUM from i = 1 until MAX_DEPTH> (a[i] * (0.5**i))
     where integer a[i] is either 0 or 1.

     Example:
     POW (x, 3.625) == POWI (x, 3) * POW (x, 0.625)
       --> POWI (x, 3) * SQRT (x) * SQRT (SQRT (SQRT (x)))

   For ARG1 < 0.0 there are two approaches:
   * (A) Expand to 1.0 / POW (ARG0, -ARG1) where POW (ARG0, -ARG1)
	 is calculated as above.

     Example:
     POW (x, -5.625) == 1.0 / POW (x, 5.625)
       --> 1.0 / (POWI (x, 5) * SQRT (x) * SQRT (SQRT (SQRT (x))))

   * (B) : WHOLE_PART := - ceil (abs (ARG1))
	   FRAC_PART  := ARG1 - WHOLE_PART
     and expand to POW (x, FRAC_PART) / POWI (x, WHOLE_PART).

     Example:
     POW (x, -5.875) == POW (x, 0.125) / POWI (X, 6)
       --> SQRT (SQRT (SQRT (x))) / (POWI (x, 6))

   For ARG1 < 0.0 we choose between (A) and (B) depending on
   how many multiplications we'd have to do.
   So, for the example in (B): POW (x, -5.875), if we were to
   follow algorithm (A) we would produce:
   1.0 / POWI (X, 5) * SQRT (X) * SQRT (SQRT (X)) * SQRT (SQRT (SQRT (X)))
   which contains more multiplications than approach (B).

   Hopefully, this approach will eliminate potentially expensive POW library
   calls when unsafe floating point math is enabled and allow the compiler to
   further optimise the multiplies, square roots and divides produced by this
   function.  */
static tree
expand_pow_as_sqrts (gimple_stmt_iterator *gsi, location_t loc,
		     tree arg0, tree arg1, HOST_WIDE_INT max_depth)
{
  tree type = TREE_TYPE (arg0);
  machine_mode mode = TYPE_MODE (type);
  tree sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT);
  bool one_over = true;

  if (!sqrtfn)
    return NULL_TREE;

  if (TREE_CODE (arg1) != REAL_CST)
    return NULL_TREE;

  REAL_VALUE_TYPE exp_init = TREE_REAL_CST (arg1);

  gcc_assert (max_depth > 0);
  tree *cache = XALLOCAVEC (tree, max_depth + 1);

  struct pow_synth_sqrt_info synth_info;
  synth_info.factors = XALLOCAVEC (bool, max_depth + 1);
  synth_info.deepest = 0;
  synth_info.num_mults = 0;

  bool neg_exp = REAL_VALUE_NEGATIVE (exp_init);
  REAL_VALUE_TYPE exp = real_value_abs (&exp_init);

  /* The whole and fractional parts of exp.  */
  REAL_VALUE_TYPE whole_part;
  REAL_VALUE_TYPE frac_part;

  real_floor (&whole_part, mode, &exp);
  REAL_ARITHMETIC (frac_part, MINUS_EXPR, exp, whole_part);

  REAL_VALUE_TYPE ceil_whole = dconst0;
  REAL_VALUE_TYPE ceil_fract = dconst0;

  if (neg_exp)
    {
      real_ceil (&ceil_whole, mode, &exp);
      REAL_ARITHMETIC (ceil_fract, MINUS_EXPR, ceil_whole, exp);
    }

  if (!representable_as_half_series_p (frac_part, max_depth, &synth_info))
    return NULL_TREE;

  /* Check whether it's more profitable to not use 1.0 / ...  */
  if (neg_exp)
    {
      struct pow_synth_sqrt_info alt_synth_info;
      alt_synth_info.factors = XALLOCAVEC (bool, max_depth + 1);
      alt_synth_info.deepest = 0;
      alt_synth_info.num_mults = 0;

      if (representable_as_half_series_p (ceil_fract, max_depth,
					  &alt_synth_info)
	  && alt_synth_info.deepest <= synth_info.deepest
	  && alt_synth_info.num_mults < synth_info.num_mults)
	{
	  whole_part = ceil_whole;
	  frac_part = ceil_fract;
	  synth_info.deepest = alt_synth_info.deepest;
	  synth_info.num_mults = alt_synth_info.num_mults;
	  memcpy (synth_info.factors, alt_synth_info.factors,
		  (max_depth + 1) * sizeof (bool));
	  one_over = false;
	}
    }

  HOST_WIDE_INT n = real_to_integer (&whole_part);
  REAL_VALUE_TYPE cint;
  real_from_integer (&cint, VOIDmode, n, SIGNED);

  if (!real_identical (&whole_part, &cint))
    return NULL_TREE;

  if (powi_cost (n) + synth_info.num_mults > POWI_MAX_MULTS)
    return NULL_TREE;

  memset (cache, 0, (max_depth + 1) * sizeof (tree));

  tree integer_res = n == 0 ? build_real (type, dconst1) : arg0;

  /* Calculate the integer part of the exponent.  */
  if (n > 1)
    {
      integer_res = gimple_expand_builtin_powi (gsi, loc, arg0, n);
      if (!integer_res)
	return NULL_TREE;
    }

  if (dump_file)
    {
      char string[64];

      real_to_decimal (string, &exp_init, sizeof (string), 0, 1);
      fprintf (dump_file, "synthesizing pow (x, %s) as:\n", string);

      if (neg_exp)
	{
	  if (one_over)
	    {
	      fprintf (dump_file, "1.0 / (");
	      dump_integer_part (dump_file, "x", n);
	      if (n > 0)
		fprintf (dump_file, " * ");
	      dump_fractional_sqrt_sequence (dump_file, "x", &synth_info);
	      fprintf (dump_file, ")");
	    }
	  else
	    {
	      dump_fractional_sqrt_sequence (dump_file, "x", &synth_info);
	      fprintf (dump_file, " / (");
	      dump_integer_part (dump_file, "x", n);
	      fprintf (dump_file, ")");
	    }
	}
      else
	{
	  dump_fractional_sqrt_sequence (dump_file, "x", &synth_info);
	  if (n > 0)
	    fprintf (dump_file, " * ");
	  dump_integer_part (dump_file, "x", n);
	}

      fprintf (dump_file, "\ndeepest sqrt chain: %d\n", synth_info.deepest);
    }

  tree fract_res = NULL_TREE;
  cache[0] = arg0;

  /* Calculate the fractional part of the exponent.  */
  for (unsigned i = 0; i < synth_info.deepest; i++)
    {
      if (synth_info.factors[i])
	{
	  tree sqrt_chain = get_fn_chain (arg0, i + 1, gsi, sqrtfn, loc, cache);

	  if (!fract_res)
	    fract_res = sqrt_chain;
	  else
	    fract_res = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
						fract_res, sqrt_chain);
	}
    }

  tree res = NULL_TREE;

  if (neg_exp)
    {
      if (one_over)
	{
	  if (n > 0)
	    res = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
					  fract_res, integer_res);
	  else
	    res = fract_res;

	  res = build_and_insert_binop (gsi, loc, "powrootrecip", RDIV_EXPR,
					build_real (type, dconst1), res);
	}
      else
	res = build_and_insert_binop (gsi, loc, "powroot", RDIV_EXPR,
				      fract_res, integer_res);
    }
  else
    res = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
				  fract_res, integer_res);
  return res;
}
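
/* For illustration (the SSA names are made up): pow (x, 1.25) has
   whole_part == 1 and frac_part == 0.25 == 0.5**2, so the expansion is

     powroot_1 = sqrt (x);
     powroot_2 = sqrt (powroot_1);      frac: x**0.25
     powroot_3 = powroot_2 * x;         times the integer part x**1  */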
/* ARG0 and ARG1 are the two arguments to a pow builtin call in GSI
   with location info LOC.  If possible, create an equivalent and
   less expensive sequence of statements prior to GSI, and return an
   expression holding the result.  */

static tree
gimple_expand_builtin_pow (gimple_stmt_iterator *gsi, location_t loc,
			   tree arg0, tree arg1)
{
  REAL_VALUE_TYPE c, cint, dconst1_3, dconst1_4, dconst1_6;
  REAL_VALUE_TYPE c2, dconst3;
  HOST_WIDE_INT n;
  tree type, sqrtfn, cbrtfn, sqrt_arg0, result, cbrt_x, powi_cbrt_x;
  machine_mode mode;
  bool speed_p = optimize_bb_for_speed_p (gsi_bb (*gsi));
  bool hw_sqrt_exists, c_is_int, c2_is_int;

  dconst1_4 = dconst1;
  SET_REAL_EXP (&dconst1_4, REAL_EXP (&dconst1_4) - 2);

  /* If the exponent isn't a constant, there's nothing of interest
     to be done.  */
  if (TREE_CODE (arg1) != REAL_CST)
    return NULL_TREE;

  /* If the exponent is equivalent to an integer, expand to an optimal
     multiplication sequence when profitable.  */
  c = TREE_REAL_CST (arg1);
  n = real_to_integer (&c);
  real_from_integer (&cint, VOIDmode, n, SIGNED);
  c_is_int = real_identical (&c, &cint);

  if (c_is_int
      && ((n >= -1 && n <= 2)
	  || (flag_unsafe_math_optimizations
	      && speed_p
	      && powi_cost (n) <= POWI_MAX_MULTS)))
    return gimple_expand_builtin_powi (gsi, loc, arg0, n);

  /* Attempt various optimizations using sqrt and cbrt.  */
  type = TREE_TYPE (arg0);
  mode = TYPE_MODE (type);
  sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT);

  /* Optimize pow(x,0.5) = sqrt(x).  This replacement is always safe
     unless signed zeros must be maintained.  pow(-0,0.5) = +0, while
     sqrt(-0) = -0.  */
  if (sqrtfn
      && REAL_VALUES_EQUAL (c, dconsthalf)
      && !HONOR_SIGNED_ZEROS (mode))
    return build_and_insert_call (gsi, loc, sqrtfn, arg0);

  hw_sqrt_exists = optab_handler (sqrt_optab, mode) != CODE_FOR_nothing;

  /* Optimize pow(x,1./3.) = cbrt(x).  This requires unsafe math
     optimizations since 1./3. is not exactly representable.  If x
     is negative and finite, the correct value of pow(x,1./3.) is
     a NaN with the "invalid" exception raised, because the value
     of 1./3. actually has an even denominator.  The correct value
     of cbrt(x) is a negative real value.  */
  cbrtfn = mathfn_built_in (type, BUILT_IN_CBRT);
  dconst1_3 = real_value_truncate (mode, dconst_third ());

  if (flag_unsafe_math_optimizations
      && cbrtfn
      && (gimple_val_nonnegative_real_p (arg0) || !HONOR_NANS (mode))
      && REAL_VALUES_EQUAL (c, dconst1_3))
    return build_and_insert_call (gsi, loc, cbrtfn, arg0);

  /* Optimize pow(x,1./6.) = cbrt(sqrt(x)).  Don't do this optimization
     if we don't have a hardware sqrt insn.  */
  dconst1_6 = dconst1_3;
  SET_REAL_EXP (&dconst1_6, REAL_EXP (&dconst1_6) - 1);

  if (flag_unsafe_math_optimizations
      && sqrtfn
      && cbrtfn
      && (gimple_val_nonnegative_real_p (arg0) || !HONOR_NANS (mode))
      && speed_p
      && hw_sqrt_exists
      && REAL_VALUES_EQUAL (c, dconst1_6))
    {
      /* sqrt(x)  */
      sqrt_arg0 = build_and_insert_call (gsi, loc, sqrtfn, arg0);

      /* cbrt(sqrt(x))  */
      return build_and_insert_call (gsi, loc, cbrtfn, sqrt_arg0);
    }

  /* Attempt to expand the POW as a product of square root chains.
     Expand the 0.25 case even when optimising for size.  */
  if (flag_unsafe_math_optimizations
      && sqrtfn
      && hw_sqrt_exists
      && (speed_p || REAL_VALUES_EQUAL (c, dconst1_4))
      && !HONOR_SIGNED_ZEROS (mode))
    {
      unsigned int max_depth = speed_p
				? PARAM_VALUE (PARAM_MAX_POW_SQRT_DEPTH)
				: 2;

      tree expand_with_sqrts
	= expand_pow_as_sqrts (gsi, loc, arg0, arg1, max_depth);

      if (expand_with_sqrts)
	return expand_with_sqrts;
    }

  real_arithmetic (&c2, MULT_EXPR, &c, &dconst2);
  n = real_to_integer (&c2);
  real_from_integer (&cint, VOIDmode, n, SIGNED);
  c2_is_int = real_identical (&c2, &cint);

  /* Optimize pow(x,c), where 3c = n for some nonzero integer n, into

     powi(x, n/3) * powi(cbrt(x), n%3),                    n > 0;
     1.0 / (powi(x, abs(n)/3) * powi(cbrt(x), abs(n)%3)),  n < 0.

     Do not calculate the first factor when n/3 = 0.  As cbrt(x) is
     different from pow(x, 1./3.) due to rounding and behavior with
     negative x, we need to constrain this transformation to unsafe
     math and positive x or finite math.  */
  real_from_integer (&dconst3, VOIDmode, 3, SIGNED);
  real_arithmetic (&c2, MULT_EXPR, &c, &dconst3);
  real_round (&c2, mode, &c2);
  n = real_to_integer (&c2);
  real_from_integer (&cint, VOIDmode, n, SIGNED);
  real_arithmetic (&c2, RDIV_EXPR, &cint, &dconst3);
  real_convert (&c2, mode, &c2);

  if (flag_unsafe_math_optimizations
      && cbrtfn
      && (gimple_val_nonnegative_real_p (arg0) || !HONOR_NANS (mode))
      && real_identical (&c2, &c)
      && !c2_is_int
      && optimize_function_for_speed_p (cfun)
      && powi_cost (n / 3) <= POWI_MAX_MULTS)
    {
      tree powi_x_ndiv3 = NULL_TREE;

      /* Attempt to fold powi(arg0, abs(n/3)) into multiplies.  If not
	 possible or profitable, give up.  Skip the degenerate case when
	 abs(n) < 3, where the result is always 1.  */
      if (absu_hwi (n) >= 3)
	{
	  powi_x_ndiv3 = gimple_expand_builtin_powi (gsi, loc, arg0,
						     abs_hwi (n / 3));
	  if (!powi_x_ndiv3)
	    return NULL_TREE;
	}

      /* Calculate powi(cbrt(x), n%3).  Don't use gimple_expand_builtin_powi
	 as that creates an unnecessary variable.  Instead, just produce
	 either cbrt(x) or cbrt(x) * cbrt(x).  */
      cbrt_x = build_and_insert_call (gsi, loc, cbrtfn, arg0);

      if (absu_hwi (n) % 3 == 1)
	powi_cbrt_x = cbrt_x;
      else
	powi_cbrt_x = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
					      cbrt_x, cbrt_x);

      /* Multiply the two subexpressions, unless powi(x,abs(n)/3) = 1.  */
      if (absu_hwi (n) < 3)
	result = powi_cbrt_x;
      else
	result = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
					 powi_x_ndiv3, powi_cbrt_x);

      /* If n is negative, reciprocate the result.  */
      if (n < 0)
	result = build_and_insert_binop (gsi, loc, "powroot", RDIV_EXPR,
					 build_real (type, dconst1), result);

      return result;
    }

  /* No optimizations succeeded.  */
  return NULL_TREE;
}
/* ARG is the argument to a cabs builtin call in GSI with location info
   LOC.  Create a sequence of statements prior to GSI that calculates
   sqrt(R*R + I*I), where R and I are the real and imaginary components
   of ARG, respectively.  Return an expression holding the result.  */

static tree
gimple_expand_builtin_cabs (gimple_stmt_iterator *gsi, location_t loc, tree arg)
{
  tree real_part, imag_part, addend1, addend2, sum, result;
  tree type = TREE_TYPE (TREE_TYPE (arg));
  tree sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT);
  machine_mode mode = TYPE_MODE (type);

  if (!flag_unsafe_math_optimizations
      || !optimize_bb_for_speed_p (gimple_bb (gsi_stmt (*gsi)))
      || !sqrtfn
      || optab_handler (sqrt_optab, mode) == CODE_FOR_nothing)
    return NULL_TREE;

  real_part = build_and_insert_ref (gsi, loc, type, "cabs",
				    REALPART_EXPR, arg);
  addend1 = build_and_insert_binop (gsi, loc, "cabs", MULT_EXPR,
				    real_part, real_part);
  imag_part = build_and_insert_ref (gsi, loc, type, "cabs",
				    IMAGPART_EXPR, arg);
  addend2 = build_and_insert_binop (gsi, loc, "cabs", MULT_EXPR,
				    imag_part, imag_part);
  sum = build_and_insert_binop (gsi, loc, "cabs", PLUS_EXPR, addend1, addend2);
  result = build_and_insert_call (gsi, loc, sqrtfn, sum);

  return result;
}
/* Go through all calls to sin, cos and cexpi and call execute_cse_sincos_1
   on the SSA_NAME argument of each of them.  Also expand powi(x,n) into
   an optimal number of multiplies, when n is a constant.  */

namespace {

const pass_data pass_data_cse_sincos =
{
  GIMPLE_PASS, /* type */
  "sincos", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};

class pass_cse_sincos : public gimple_opt_pass
{
public:
  pass_cse_sincos (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_cse_sincos, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *)
    {
      /* We no longer require either sincos or cexp, since powi expansion
	 piggybacks on this pass.  */
      return optimize;
    }

  virtual unsigned int execute (function *);

}; // class pass_cse_sincos

unsigned int
pass_cse_sincos::execute (function *fun)
{
  basic_block bb;
  bool cfg_changed = false;

  calculate_dominance_info (CDI_DOMINATORS);
  memset (&sincos_stats, 0, sizeof (sincos_stats));

  FOR_EACH_BB_FN (bb, fun)
    {
      gimple_stmt_iterator gsi;
      bool cleanup_eh = false;

      for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple stmt = gsi_stmt (gsi);
	  tree fndecl;

	  /* Only the last stmt in a bb could throw, no need to call
	     gimple_purge_dead_eh_edges if we change something in the middle
	     of a basic block.  */
	  cleanup_eh = false;

	  if (is_gimple_call (stmt)
	      && gimple_call_lhs (stmt)
	      && (fndecl = gimple_call_fndecl (stmt))
	      && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
	    {
	      tree arg, arg0, arg1, result;
	      HOST_WIDE_INT n;
	      location_t loc;

	      switch (DECL_FUNCTION_CODE (fndecl))
		{
		CASE_FLT_FN (BUILT_IN_COS):
		CASE_FLT_FN (BUILT_IN_SIN):
		CASE_FLT_FN (BUILT_IN_CEXPI):
		  /* Make sure we have either sincos or cexp.  */
		  if (!targetm.libc_has_function (function_c99_math_complex)
		      && !targetm.libc_has_function (function_sincos))
		    break;

		  arg = gimple_call_arg (stmt, 0);
		  if (TREE_CODE (arg) == SSA_NAME)
		    cfg_changed |= execute_cse_sincos_1 (arg);
		  break;

		CASE_FLT_FN (BUILT_IN_POW):
		  arg0 = gimple_call_arg (stmt, 0);
		  arg1 = gimple_call_arg (stmt, 1);

		  loc = gimple_location (stmt);
		  result = gimple_expand_builtin_pow (&gsi, loc, arg0, arg1);

		  if (result)
		    {
		      tree lhs = gimple_get_lhs (stmt);
		      gassign *new_stmt = gimple_build_assign (lhs, result);
		      gimple_set_location (new_stmt, loc);
		      unlink_stmt_vdef (stmt);
		      gsi_replace (&gsi, new_stmt, true);
		      cleanup_eh = true;
		      if (gimple_vdef (stmt))
			release_ssa_name (gimple_vdef (stmt));
		    }
		  break;

		CASE_FLT_FN (BUILT_IN_POWI):
		  arg0 = gimple_call_arg (stmt, 0);
		  arg1 = gimple_call_arg (stmt, 1);
		  loc = gimple_location (stmt);

		  if (real_minus_onep (arg0))
		    {
		      tree t0, t1, cond, one, minus_one;
		      gassign *stmt;

		      t0 = TREE_TYPE (arg0);
		      t1 = TREE_TYPE (arg1);
		      one = build_real (t0, dconst1);
		      minus_one = build_real (t0, dconstm1);

		      cond = make_temp_ssa_name (t1, NULL, "powi_cond");
		      stmt = gimple_build_assign (cond, BIT_AND_EXPR,
						  arg1, build_int_cst (t1, 1));
		      gimple_set_location (stmt, loc);
		      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);

		      result = make_temp_ssa_name (t0, NULL, "powi");
		      stmt = gimple_build_assign (result, COND_EXPR, cond,
						  minus_one, one);
		      gimple_set_location (stmt, loc);
		      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
		    }
		  else
		    {
		      if (!tree_fits_shwi_p (arg1))
			break;

		      n = tree_to_shwi (arg1);
		      result = gimple_expand_builtin_powi (&gsi, loc, arg0, n);
		    }

		  if (result)
		    {
		      tree lhs = gimple_get_lhs (stmt);
		      gassign *new_stmt = gimple_build_assign (lhs, result);
		      gimple_set_location (new_stmt, loc);
		      unlink_stmt_vdef (stmt);
		      gsi_replace (&gsi, new_stmt, true);
		      cleanup_eh = true;
		      if (gimple_vdef (stmt))
			release_ssa_name (gimple_vdef (stmt));
		    }
		  break;

		CASE_FLT_FN (BUILT_IN_CABS):
		  arg0 = gimple_call_arg (stmt, 0);
		  loc = gimple_location (stmt);
		  result = gimple_expand_builtin_cabs (&gsi, loc, arg0);

		  if (result)
		    {
		      tree lhs = gimple_get_lhs (stmt);
		      gassign *new_stmt = gimple_build_assign (lhs, result);
		      gimple_set_location (new_stmt, loc);
		      unlink_stmt_vdef (stmt);
		      gsi_replace (&gsi, new_stmt, true);
		      cleanup_eh = true;
		      if (gimple_vdef (stmt))
			release_ssa_name (gimple_vdef (stmt));
		    }
		  break;

		default:;
		}
	    }
	}
      if (cleanup_eh)
	cfg_changed |= gimple_purge_dead_eh_edges (bb);
    }

  statistics_counter_event (fun, "sincos statements inserted",
			    sincos_stats.inserted);

  free_dominance_info (CDI_DOMINATORS);
  return cfg_changed ? TODO_cleanup_cfg : 0;
}

} // anon namespace

gimple_opt_pass *
make_pass_cse_sincos (gcc::context *ctxt)
{
  return new pass_cse_sincos (ctxt);
}
/* A symbolic number is used to detect byte permutation and selection
   patterns.  Therefore the field N contains an artificial number
   consisting of octet sized markers:

   0	   - target byte has the value 0
   FF	   - target byte has an unknown value (eg. due to sign extension)
   1..size - marker value is the target byte index minus one.

   To detect permutations on memory sources (arrays and structures), a symbolic
   number is also associated with a base address (the array or structure the
   load is made from), an offset from the base address and a range which gives
   the difference between the highest and lowest accessed memory location to
   make such a symbolic number.  The range is thus different from size, which
   reflects the size of the type of the current expression.  Note that for a
   non-memory source, range holds the same value as size.

   For instance, for an array char a[], (short) a[0] | (short) a[3] would have
   a size of 2 but a range of 4 while (short) a[0] | ((short) a[0] << 1) would
   still have a size of 2 but this time a range of 1.  */
struct symbolic_number {
  uint64_t n;
  tree type;
  tree base_addr;
  tree offset;
  HOST_WIDE_INT bytepos;
  tree alias_set;
  tree vuse;
  unsigned HOST_WIDE_INT range;
};

#define BITS_PER_MARKER 8
#define MARKER_MASK ((1 << BITS_PER_MARKER) - 1)
#define MARKER_BYTE_UNKNOWN MARKER_MASK
#define HEAD_MARKER(n, size) \
  ((n) & ((uint64_t) MARKER_MASK << (((size) - 1) * BITS_PER_MARKER)))

/* The number which the find_bswap_or_nop_1 result should match in
   order to have a nop.  The number is masked according to the size of
   the symbolic number before using it.  */
#define CMPNOP (sizeof (int64_t) < 8 ? 0 : \
  (uint64_t)0x08070605 << 32 | 0x04030201)

/* The number which the find_bswap_or_nop_1 result should match in
   order to have a byte swap.  The number is masked according to the
   size of the symbolic number before using it.  */
#define CMPXCHG (sizeof (int64_t) < 8 ? 0 : \
  (uint64_t)0x01020304 << 32 | 0x05060708)
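
/* Illustrative example: a plain 32-bit load yields the symbolic number
   0x04030201 (CMPNOP masked to 4 bytes, the lowest loaded byte marked
   1), while a byte-swapped 32-bit value yields 0x01020304 (CMPXCHG
   masked likewise); the bswap detection code compares its result
   against these two patterns.  */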
/* Perform a SHIFT or ROTATE operation by COUNT bits on symbolic
   number N.  Return false if the requested operation is not permitted
   on a symbolic number.  */

static bool
do_shift_rotate (enum tree_code code,
		 struct symbolic_number *n,
		 int count)
{
  int i, size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
  unsigned head_marker;

  if (count % BITS_PER_UNIT != 0)
    return false;
  count = (count / BITS_PER_UNIT) * BITS_PER_MARKER;

  /* Zero out the extra bits of N in order to avoid them being shifted
     into the significant bits.  */
  if (size < 64 / BITS_PER_MARKER)
    n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;

  switch (code)
    {
    case LSHIFT_EXPR:
      n->n <<= count;
      break;
    case RSHIFT_EXPR:
      head_marker = HEAD_MARKER (n->n, size);
      n->n >>= count;
      /* Arithmetic shift of signed type: result is dependent on the value.  */
      if (!TYPE_UNSIGNED (n->type) && head_marker)
	for (i = 0; i < count / BITS_PER_MARKER; i++)
	  n->n |= (uint64_t) MARKER_BYTE_UNKNOWN
		  << ((size - 1 - i) * BITS_PER_MARKER);
      break;
    case LROTATE_EXPR:
      n->n = (n->n << count) | (n->n >> ((size * BITS_PER_MARKER) - count));
      break;
    case RROTATE_EXPR:
      n->n = (n->n >> count) | (n->n << ((size * BITS_PER_MARKER) - count));
      break;
    default:
      return false;
    }
  /* Zero unused bits for size.  */
  if (size < 64 / BITS_PER_MARKER)
    n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
  return true;
}
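
/* Example (illustrative): for a 4-byte unsigned symbolic number with
   n->n == 0x04030201, do_shift_rotate (RSHIFT_EXPR, n, 8) yields
   0x00040302.  For a signed type the head marker 0x04 forces the
   vacated top byte to MARKER_BYTE_UNKNOWN, giving 0xff040302.  */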
/* Perform sanity checking for the symbolic number N and the gimple
   statement STMT.  */

static inline bool
verify_symbolic_number_p (struct symbolic_number *n, gimple stmt)
{
  tree lhs_type;

  lhs_type = gimple_expr_type (stmt);

  if (TREE_CODE (lhs_type) != INTEGER_TYPE)
    return false;

  if (TYPE_PRECISION (lhs_type) != TYPE_PRECISION (n->type))
    return false;

  return true;
}
/* Initialize the symbolic number N for the bswap pass from the base element
   SRC manipulated by the bitwise OR expression.  */

static bool
init_symbolic_number (struct symbolic_number *n, tree src)
{
  int size;

  n->base_addr = n->offset = n->alias_set = n->vuse = NULL_TREE;

  /* Set up the symbolic number N by setting each byte to a value between 1 and
     the byte size of rhs1.  The highest order byte is set to n->size and the
     lowest order byte to 1.  */
  n->type = TREE_TYPE (src);
  size = TYPE_PRECISION (n->type);
  if (size % BITS_PER_UNIT != 0)
    return false;
  size /= BITS_PER_UNIT;
  if (size > 64 / BITS_PER_MARKER)
    return false;
  n->range = size;
  n->n = CMPNOP;

  if (size < 64 / BITS_PER_MARKER)
    n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;

  return true;
}
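
/* Example (illustrative): for a 16-bit source type, size == 2 and the
   masking above leaves n->n == 0x0201: byte 1 of the target value is
   marked 1 and byte 2 is marked 2.  */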
/* Check if STMT might be a byte swap or a nop from a memory source and
   return the answer.  If so, REF is that memory source and the base of the
   memory area accessed and the offset of the access from that base are
   recorded in N.  */

static bool
find_bswap_or_nop_load (gimple stmt, tree ref, struct symbolic_number *n)
{
  /* Leaf node is an array or component ref.  Memorize its base and
     offset from base to compare to other such leaf node.  */
  HOST_WIDE_INT bitsize, bitpos;
  machine_mode mode;
  int unsignedp, volatilep;
  tree offset, base_addr;

  /* Not prepared to handle PDP endian.  */
  if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
    return false;

  if (!gimple_assign_load_p (stmt) || gimple_has_volatile_ops (stmt))
    return false;

  base_addr = get_inner_reference (ref, &bitsize, &bitpos, &offset, &mode,
				   &unsignedp, &volatilep, false);

  if (TREE_CODE (base_addr) == MEM_REF)
    {
      offset_int bit_offset = 0;
      tree off = TREE_OPERAND (base_addr, 1);

      if (!integer_zerop (off))
	{
	  offset_int boff, coff = mem_ref_offset (base_addr);
	  boff = wi::lshift (coff, LOG2_BITS_PER_UNIT);
	  bit_offset += boff;
	}

      base_addr = TREE_OPERAND (base_addr, 0);

      /* Avoid returning a negative bitpos as this may wreak havoc later.  */
      if (wi::neg_p (bit_offset))
	{
	  offset_int mask = wi::mask <offset_int> (LOG2_BITS_PER_UNIT, false);
	  offset_int tem = bit_offset.and_not (mask);
	  /* TEM is the bitpos rounded to BITS_PER_UNIT towards -Inf.
	     Subtract it from BIT_OFFSET and add it (scaled) to OFFSET.  */
	  bit_offset -= tem;
	  tem = wi::arshift (tem, LOG2_BITS_PER_UNIT);
	  if (offset)
	    offset = size_binop (PLUS_EXPR, offset,
				 wide_int_to_tree (sizetype, tem));
	  else
	    offset = wide_int_to_tree (sizetype, tem);
	}

      bitpos += bit_offset.to_shwi ();
    }

  if (bitpos % BITS_PER_UNIT)
    return false;
  if (bitsize % BITS_PER_UNIT)
    return false;

  if (!init_symbolic_number (n, ref))
    return false;
  n->base_addr = base_addr;
  n->offset = offset;
  n->bytepos = bitpos / BITS_PER_UNIT;
  n->alias_set = reference_alias_ptr_type (ref);
  n->vuse = gimple_vuse (stmt);
  return true;
}
/* Compute the symbolic number N representing the result of a bitwise OR on 2
   symbolic numbers N1 and N2 whose source statements are respectively
   SOURCE_STMT1 and SOURCE_STMT2.  */

static gimple
perform_symbolic_merge (gimple source_stmt1, struct symbolic_number *n1,
			gimple source_stmt2, struct symbolic_number *n2,
			struct symbolic_number *n)
{
  int i, size;
  uint64_t mask;
  gimple source_stmt;
  struct symbolic_number *n_start;

  /* Sources are different, cancel bswap if they are not memory locations with
     the same base (array, structure, ...).  */
  if (gimple_assign_rhs1 (source_stmt1) != gimple_assign_rhs1 (source_stmt2))
    {
      uint64_t inc;
      HOST_WIDE_INT start_sub, end_sub, end1, end2, end;
      struct symbolic_number *toinc_n_ptr, *n_end;

      if (!n1->base_addr || !n2->base_addr
	  || !operand_equal_p (n1->base_addr, n2->base_addr, 0))
	return NULL;

      if (!n1->offset != !n2->offset
	  || (n1->offset && !operand_equal_p (n1->offset, n2->offset, 0)))
	return NULL;

      if (n1->bytepos < n2->bytepos)
	{
	  n_start = n1;
	  start_sub = n2->bytepos - n1->bytepos;
	  source_stmt = source_stmt1;
	}
      else
	{
	  n_start = n2;
	  start_sub = n1->bytepos - n2->bytepos;
	  source_stmt = source_stmt2;
	}

      /* Find the highest address at which a load is performed and
	 compute related info.  */
      end1 = n1->bytepos + (n1->range - 1);
      end2 = n2->bytepos + (n2->range - 1);
      if (end1 < end2)
	{
	  end = end2;
	  end_sub = end2 - end1;
	}
      else
	{
	  end = end1;
	  end_sub = end1 - end2;
	}
      n_end = (end2 > end1) ? n2 : n1;

      /* Find symbolic number whose lsb is the most significant.  */
      if (BYTES_BIG_ENDIAN)
	toinc_n_ptr = (n_end == n1) ? n2 : n1;
      else
	toinc_n_ptr = (n_start == n1) ? n2 : n1;

      n->range = end - n_start->bytepos + 1;

      /* Check that the range of memory covered can be represented by
	 a symbolic number.  */
      if (n->range > 64 / BITS_PER_MARKER)
	return NULL;

      /* Reinterpret byte marks in symbolic number holding the value of
	 bigger weight according to target endianness.  */
      inc = BYTES_BIG_ENDIAN ? end_sub : start_sub;
      size = TYPE_PRECISION (n1->type) / BITS_PER_UNIT;
      for (i = 0; i < size; i++, inc <<= BITS_PER_MARKER)
	{
	  unsigned marker
	    = (toinc_n_ptr->n >> (i * BITS_PER_MARKER)) & MARKER_MASK;
	  if (marker && marker != MARKER_BYTE_UNKNOWN)
	    toinc_n_ptr->n += inc;
	}
    }
  else
    {
      n->range = n1->range;
      n_start = n1;
      source_stmt = source_stmt1;
    }

  if (!n1->alias_set
      || alias_ptr_types_compatible_p (n1->alias_set, n2->alias_set))
    n->alias_set = n1->alias_set;
  else
    n->alias_set = ptr_type_node;
  n->vuse = n_start->vuse;
  n->base_addr = n_start->base_addr;
  n->offset = n_start->offset;
  n->bytepos = n_start->bytepos;
  n->type = n_start->type;
  size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;

  for (i = 0, mask = MARKER_MASK; i < size; i++, mask <<= BITS_PER_MARKER)
    {
      uint64_t masked1, masked2;

      masked1 = n1->n & mask;
      masked2 = n2->n & mask;
      if (masked1 && masked2 && masked1 != masked2)
	return NULL;
    }
  n->n = n1->n | n2->n;

  return source_stmt;
}
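
/* As an illustration, merging the symbolic numbers of

     tmp1 = (uint16_t) s[0];          n1->n == 0x01,   bytepos 0
     tmp2 = (uint16_t) s[1] << 8;     n2->n == 0x0100, bytepos 1

   on a little-endian target yields n->n == 0x0201 with n->range == 2,
   i.e. a native-endian 16-bit load of s[0..1].  This is only a sketch:
   the exact markers depend on the casts and shifts actually present.  */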
/* find_bswap_or_nop_1 invokes itself recursively with N and tries to perform
   the operation given by the rhs of STMT on the result.  If the operation
   could successfully be executed the function returns a gimple stmt whose
   rhs's first tree is the expression of the source operand and NULL
   otherwise.  */

static gimple
find_bswap_or_nop_1 (gimple stmt, struct symbolic_number *n, int limit)
{
  enum tree_code code;
  tree rhs1, rhs2 = NULL;
  gimple rhs1_stmt, rhs2_stmt, source_stmt1;
  enum gimple_rhs_class rhs_class;

  if (!limit || !is_gimple_assign (stmt))
    return NULL;

  rhs1 = gimple_assign_rhs1 (stmt);

  if (find_bswap_or_nop_load (stmt, rhs1, n))
    return stmt;

  if (TREE_CODE (rhs1) != SSA_NAME)
    return NULL;

  code = gimple_assign_rhs_code (stmt);
  rhs_class = gimple_assign_rhs_class (stmt);
  rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);

  if (rhs_class == GIMPLE_BINARY_RHS)
    rhs2 = gimple_assign_rhs2 (stmt);

  /* Handle unary rhs and binary rhs with integer constants as second
     operand.  */

  if (rhs_class == GIMPLE_UNARY_RHS
      || (rhs_class == GIMPLE_BINARY_RHS
	  && TREE_CODE (rhs2) == INTEGER_CST))
    {
      if (code != BIT_AND_EXPR
	  && code != LSHIFT_EXPR
	  && code != RSHIFT_EXPR
	  && code != LROTATE_EXPR
	  && code != RROTATE_EXPR
	  && !CONVERT_EXPR_CODE_P (code))
	return NULL;

      source_stmt1 = find_bswap_or_nop_1 (rhs1_stmt, n, limit - 1);

      /* If find_bswap_or_nop_1 returned NULL, STMT is a leaf node and
	 we have to initialize the symbolic number.  */
      if (!source_stmt1)
	{
	  if (gimple_assign_load_p (stmt)
	      || !init_symbolic_number (n, rhs1))
	    return NULL;
	  source_stmt1 = stmt;
	}

      switch (code)
	{
	case BIT_AND_EXPR:
	  {
	    int i, size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
	    uint64_t val = int_cst_value (rhs2), mask = 0;
	    uint64_t tmp = (1 << BITS_PER_UNIT) - 1;

	    /* Only constants masking full bytes are allowed.  */
	    for (i = 0; i < size; i++, tmp <<= BITS_PER_UNIT)
	      if ((val & tmp) != 0 && (val & tmp) != tmp)
		return NULL;
	      else if (val & tmp)
		mask |= (uint64_t) MARKER_MASK << (i * BITS_PER_MARKER);

	    n->n &= mask;
	  }
	  break;
	case LSHIFT_EXPR:
	case RSHIFT_EXPR:
	case LROTATE_EXPR:
	case RROTATE_EXPR:
	  if (!do_shift_rotate (code, n, (int) TREE_INT_CST_LOW (rhs2)))
	    return NULL;
	  break;
	CASE_CONVERT:
	  {
	    int i, type_size, old_type_size;
	    tree type;

	    type = gimple_expr_type (stmt);
	    type_size = TYPE_PRECISION (type);
	    if (type_size % BITS_PER_UNIT != 0)
	      return NULL;
	    type_size /= BITS_PER_UNIT;
	    if (type_size > 64 / BITS_PER_MARKER)
	      return NULL;

	    /* Sign extension: result is dependent on the value.  */
	    old_type_size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
	    if (!TYPE_UNSIGNED (n->type) && type_size > old_type_size
		&& HEAD_MARKER (n->n, old_type_size))
	      for (i = 0; i < type_size - old_type_size; i++)
		n->n |= (uint64_t) MARKER_BYTE_UNKNOWN
			<< ((type_size - 1 - i) * BITS_PER_MARKER);

	    if (type_size < 64 / BITS_PER_MARKER)
	      {
		/* If STMT casts to a smaller type mask out the bits not
		   belonging to the target type.  */
		n->n &= ((uint64_t) 1 << (type_size * BITS_PER_MARKER)) - 1;
	      }
	    n->type = type;
	    if (!n->base_addr)
	      n->range = type_size;
	  }
	  break;
	default:
	  return NULL;
	};
      return verify_symbolic_number_p (n, stmt) ? source_stmt1 : NULL;
    }

  /* Handle binary rhs.  */

  if (rhs_class == GIMPLE_BINARY_RHS)
    {
      struct symbolic_number n1, n2;
      gimple source_stmt, source_stmt2;

      if (code != BIT_IOR_EXPR)
	return NULL;

      if (TREE_CODE (rhs2) != SSA_NAME)
	return NULL;

      rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);

      switch (code)
	{
	case BIT_IOR_EXPR:
	  source_stmt1 = find_bswap_or_nop_1 (rhs1_stmt, &n1, limit - 1);

	  if (!source_stmt1)
	    return NULL;

	  source_stmt2 = find_bswap_or_nop_1 (rhs2_stmt, &n2, limit - 1);

	  if (!source_stmt2)
	    return NULL;

	  if (TYPE_PRECISION (n1.type) != TYPE_PRECISION (n2.type))
	    return NULL;

	  if (!n1.vuse != !n2.vuse
	      || (n1.vuse && !operand_equal_p (n1.vuse, n2.vuse, 0)))
	    return NULL;

	  source_stmt
	    = perform_symbolic_merge (source_stmt1, &n1, source_stmt2, &n2, n);

	  if (!source_stmt)
	    return NULL;

	  if (!verify_symbolic_number_p (n, stmt))
	    return NULL;

	  break;
	default:
	  return NULL;
	}
      return source_stmt;
    }
  return NULL;
}
/* Check if STMT completes a bswap implementation or a read in a given
   endianness consisting of ORs, SHIFTs and ANDs and sets *BSWAP
   accordingly.  It also sets N to represent the kind of operations
   performed: size of the resulting expression and whether it works on
   a memory source, and if so alias-set and vuse.  At last, the
   function returns a stmt whose rhs's first tree is the source
   expression.  */

static gimple
find_bswap_or_nop (gimple stmt, struct symbolic_number *n, bool *bswap)
{
  /* The number which the find_bswap_or_nop_1 result should match in order
     to have a full byte swap.  The number is shifted to the right
     according to the size of the symbolic number before using it.  */
  uint64_t cmpxchg = CMPXCHG;
  uint64_t cmpnop = CMPNOP;

  gimple source_stmt;
  int limit;

  /* The last parameter determines the depth search limit.  It usually
     correlates directly to the number n of bytes to be touched.  We
     increase that number by log2(n) + 1 here in order to also
     cover signed -> unsigned conversions of the src operand as can be seen
     in libgcc, and for initial shift/and operation of the src operand.  */
  limit = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (gimple_expr_type (stmt)));
  limit += 1 + (int) ceil_log2 ((unsigned HOST_WIDE_INT) limit);
  source_stmt = find_bswap_or_nop_1 (stmt, n, limit);

  if (!source_stmt)
    return NULL;

  /* Find real size of result (highest non-zero byte).  */
  if (n->base_addr)
    {
      int rsize;
      uint64_t tmpn;

      for (tmpn = n->n, rsize = 0; tmpn; tmpn >>= BITS_PER_MARKER, rsize++);
      n->range = rsize;
    }

  /* Zero out the extra bits of N and CMP*.  */
  if (n->range < (int) sizeof (int64_t))
    {
      uint64_t mask;

      mask = ((uint64_t) 1 << (n->range * BITS_PER_MARKER)) - 1;
      cmpxchg >>= (64 / BITS_PER_MARKER - n->range) * BITS_PER_MARKER;
      cmpnop &= mask;
    }

  /* A complete byte swap should make the symbolic number start with
     the largest digit in the highest order byte.  Unchanged symbolic
     number indicates a read with same endianness as target architecture.  */
  if (n->n == cmpnop)
    *bswap = false;
  else if (n->n == cmpxchg)
    *bswap = true;
  else
    return NULL;

  /* Useless bit manipulation performed by code.  */
  if (!n->base_addr && n->n == cmpnop)
    return NULL;

  n->range *= BITS_PER_UNIT;
  return source_stmt;
}
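
/* For instance, on a little-endian target this function recognizes the
   open-coded 32-bit byte swap below (a sketch, assuming a 32-bit
   unsigned IN) and sets *BSWAP to true:

     swapped = (in >> 24)
	       | ((in >> 8) & 0x0000ff00)
	       | ((in << 8) & 0x00ff0000)
	       | (in << 24);

   The symbolic number computed for SWAPPED is 0x01020304, which matches
   CMPXCHG shifted for a 4-byte value.  */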
namespace {

const pass_data pass_data_optimize_bswap =
{
  GIMPLE_PASS, /* type */
  "bswap", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_optimize_bswap : public gimple_opt_pass
{
public:
  pass_optimize_bswap (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_optimize_bswap, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *)
    {
      return flag_expensive_optimizations && optimize;
    }

  virtual unsigned int execute (function *);

}; // class pass_optimize_bswap
/* Perform the bswap optimization: replace the expression computed in the rhs
   of CUR_STMT by an equivalent bswap, load or load + bswap expression.
   Which of these alternatives replaces the rhs is given by N->base_addr (non
   null if a load is needed) and BSWAP.  The type, VUSE and alias-set of the
   load to perform are also given in N while the builtin bswap invocation is
   given in FNDECL.  Finally, if a load is involved, SRC_STMT refers to one of
   the load statements involved to construct the rhs in CUR_STMT and N->range
   gives the size of the rhs expression for maintaining some statistics.

   Note that if the replacement involves a load, CUR_STMT is moved just after
   SRC_STMT to do the load with the same VUSE, which can lead to CUR_STMT
   changing basic block.  */

static bool
bswap_replace (gimple cur_stmt, gimple src_stmt, tree fndecl, tree bswap_type,
	       tree load_type, struct symbolic_number *n, bool bswap)
{
  gimple_stmt_iterator gsi;
  tree src, tmp, tgt;
  gimple bswap_stmt;

  gsi = gsi_for_stmt (cur_stmt);
  src = gimple_assign_rhs1 (src_stmt);
  tgt = gimple_assign_lhs (cur_stmt);

  /* Need to load the value from memory first.  */
  if (n->base_addr)
    {
      gimple_stmt_iterator gsi_ins = gsi_for_stmt (src_stmt);
      tree addr_expr, addr_tmp, val_expr, val_tmp;
      tree load_offset_ptr, aligned_load_type;
      gimple addr_stmt, load_stmt;
      unsigned align;
      HOST_WIDE_INT load_offset = 0;

      align = get_object_alignment (src);
      /* If the new access is smaller than the original one, we need
	 to perform big endian adjustment.  */
      if (BYTES_BIG_ENDIAN)
	{
	  HOST_WIDE_INT bitsize, bitpos;
	  machine_mode mode;
	  int unsignedp, volatilep;
	  tree offset;

	  get_inner_reference (src, &bitsize, &bitpos, &offset, &mode,
			       &unsignedp, &volatilep, false);
	  if (n->range < (unsigned HOST_WIDE_INT) bitsize)
	    {
	      load_offset = (bitsize - n->range) / BITS_PER_UNIT;
	      unsigned HOST_WIDE_INT l
		= (load_offset * BITS_PER_UNIT) & (align - 1);
	      if (l)
		align = l & -l;
	    }
	}

      if (bswap
	  && align < GET_MODE_ALIGNMENT (TYPE_MODE (load_type))
	  && SLOW_UNALIGNED_ACCESS (TYPE_MODE (load_type), align))
	return false;

      /* Move cur_stmt just before one of the loads of the original
	 to ensure it has the same VUSE.  See PR61517 for what could
	 go wrong.  */
      gsi_move_before (&gsi, &gsi_ins);
      gsi = gsi_for_stmt (cur_stmt);

      /* Compute address to load from and cast according to the size
	 of the load.  */
      addr_expr = build_fold_addr_expr (unshare_expr (src));
      if (is_gimple_mem_ref_addr (addr_expr))
	addr_tmp = addr_expr;
      else
	{
	  addr_tmp = make_temp_ssa_name (TREE_TYPE (addr_expr), NULL,
					 "load_src");
	  addr_stmt = gimple_build_assign (addr_tmp, addr_expr);
	  gsi_insert_before (&gsi, addr_stmt, GSI_SAME_STMT);
	}

      /* Perform the load.  */
      aligned_load_type = load_type;
      if (align < TYPE_ALIGN (load_type))
	aligned_load_type = build_aligned_type (load_type, align);
      load_offset_ptr = build_int_cst (n->alias_set, load_offset);
      val_expr = fold_build2 (MEM_REF, aligned_load_type, addr_tmp,
			      load_offset_ptr);

      if (!bswap)
	{
	  if (n->range == 16)
	    nop_stats.found_16bit++;
	  else if (n->range == 32)
	    nop_stats.found_32bit++;
	  else
	    {
	      gcc_assert (n->range == 64);
	      nop_stats.found_64bit++;
	    }

	  /* Convert the result of load if necessary.  */
	  if (!useless_type_conversion_p (TREE_TYPE (tgt), load_type))
	    {
	      val_tmp = make_temp_ssa_name (aligned_load_type, NULL,
					    "load_dst");
	      load_stmt = gimple_build_assign (val_tmp, val_expr);
	      gimple_set_vuse (load_stmt, n->vuse);
	      gsi_insert_before (&gsi, load_stmt, GSI_SAME_STMT);
	      gimple_assign_set_rhs_with_ops (&gsi, NOP_EXPR, val_tmp);
	    }
	  else
	    {
	      gimple_assign_set_rhs_with_ops (&gsi, MEM_REF, val_expr);
	      gimple_set_vuse (cur_stmt, n->vuse);
	    }
	  update_stmt (cur_stmt);

	  if (dump_file)
	    {
	      fprintf (dump_file,
		       "%d bit load in target endianness found at: ",
		       (int) n->range);
	      print_gimple_stmt (dump_file, cur_stmt, 0, 0);
	    }
	  return true;
	}
      else
	{
	  val_tmp = make_temp_ssa_name (aligned_load_type, NULL, "load_dst");
	  load_stmt = gimple_build_assign (val_tmp, val_expr);
	  gimple_set_vuse (load_stmt, n->vuse);
	  gsi_insert_before (&gsi, load_stmt, GSI_SAME_STMT);
	  src = val_tmp;
	}
    }

  if (n->range == 16)
    bswap_stats.found_16bit++;
  else if (n->range == 32)
    bswap_stats.found_32bit++;
  else
    {
      gcc_assert (n->range == 64);
      bswap_stats.found_64bit++;
    }

  tmp = src;

  /* Convert the src expression if necessary.  */
  if (!useless_type_conversion_p (TREE_TYPE (tmp), bswap_type))
    {
      gimple convert_stmt;

      tmp = make_temp_ssa_name (bswap_type, NULL, "bswapsrc");
      convert_stmt = gimple_build_assign (tmp, NOP_EXPR, src);
      gsi_insert_before (&gsi, convert_stmt, GSI_SAME_STMT);
    }

  /* Canonical form for 16 bit bswap is a rotate expression.  Only 16bit values
     are considered as rotation of 2N bit values by N bits is generally not
     equivalent to a bswap.  Consider for instance 0x01020304 r>> 16 which
     gives 0x03040102 while a bswap for that value is 0x04030201.  */
  if (bswap && n->range == 16)
    {
      tree count = build_int_cst (NULL, BITS_PER_UNIT);
      src = fold_build2 (LROTATE_EXPR, bswap_type, tmp, count);
      bswap_stmt = gimple_build_assign (NULL, src);
    }
  else
    bswap_stmt = gimple_build_call (fndecl, 1, tmp);

  tmp = tgt;

  /* Convert the result if necessary.  */
  if (!useless_type_conversion_p (TREE_TYPE (tgt), bswap_type))
    {
      gimple convert_stmt;

      tmp = make_temp_ssa_name (bswap_type, NULL, "bswapdst");
      convert_stmt = gimple_build_assign (tgt, NOP_EXPR, tmp);
      gsi_insert_after (&gsi, convert_stmt, GSI_SAME_STMT);
    }

  gimple_set_lhs (bswap_stmt, tmp);

  if (dump_file)
    {
      fprintf (dump_file, "%d bit bswap implementation found at: ",
	       (int) n->range);
      print_gimple_stmt (dump_file, cur_stmt, 0, 0);
    }

  gsi_insert_after (&gsi, bswap_stmt, GSI_SAME_STMT);
  gsi_remove (&gsi, true);

  return true;
}
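
/* For example (a sketch of the three replacement shapes): a 32-bit
   expression whose symbolic number matched CMPNOP with a memory source
   becomes a plain 32-bit load; one that matched CMPXCHG becomes either
   load + __builtin_bswap32 (memory source) or a single
   __builtin_bswap32 call (register source); and a 16-bit byte swap is
   emitted as the canonical 8-bit rotate instead of a call.  */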
/* Find manual byte swap implementations as well as loads in a given
   endianness.  Byte swaps are turned into a bswap builtin invocation
   while endian loads are converted to a bswap builtin invocation or a
   simple load according to the target endianness.  */

unsigned int
pass_optimize_bswap::execute (function *fun)
{
  basic_block bb;
  bool bswap32_p, bswap64_p;
  bool changed = false;
  tree bswap32_type = NULL_TREE, bswap64_type = NULL_TREE;

  if (BITS_PER_UNIT != 8)
    return 0;

  bswap32_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP32)
	       && optab_handler (bswap_optab, SImode) != CODE_FOR_nothing);
  bswap64_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP64)
	       && (optab_handler (bswap_optab, DImode) != CODE_FOR_nothing
		   || (bswap32_p && word_mode == SImode)));

  /* Determine the argument type of the builtins.  The code later on
     assumes that the return and argument type are the same.  */
  if (bswap32_p)
    {
      tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
      bswap32_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
    }

  if (bswap64_p)
    {
      tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
      bswap64_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
    }

  memset (&nop_stats, 0, sizeof (nop_stats));
  memset (&bswap_stats, 0, sizeof (bswap_stats));

  FOR_EACH_BB_FN (bb, fun)
    {
      gimple_stmt_iterator gsi;

      /* We do a reverse scan for bswap patterns to make sure we get the
	 widest match.  As bswap pattern matching doesn't handle previously
	 inserted smaller bswap replacements as sub-patterns, the wider
	 variant wouldn't be detected.  */
      for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi);)
	{
	  gimple src_stmt, cur_stmt = gsi_stmt (gsi);
	  tree fndecl = NULL_TREE, bswap_type = NULL_TREE, load_type;
	  enum tree_code code;
	  struct symbolic_number n;
	  bool bswap;

	  /* This gsi_prev (&gsi) is not part of the for loop because cur_stmt
	     might be moved to a different basic block by bswap_replace and gsi
	     must not point to it if that's the case.  Moving the gsi_prev
	     here makes sure that gsi points to the statement previous to
	     cur_stmt while still making sure that all statements are
	     considered in this basic block.  */
	  gsi_prev (&gsi);

	  if (!is_gimple_assign (cur_stmt))
	    continue;

	  code = gimple_assign_rhs_code (cur_stmt);
	  switch (code)
	    {
	    case LROTATE_EXPR:
	    case RROTATE_EXPR:
	      if (!tree_fits_uhwi_p (gimple_assign_rhs2 (cur_stmt))
		  || tree_to_uhwi (gimple_assign_rhs2 (cur_stmt))
		     % BITS_PER_UNIT)
		continue;
	      /* Fall through.  */
	    case BIT_IOR_EXPR:
	      break;
	    default:
	      continue;
	    }

	  src_stmt = find_bswap_or_nop (cur_stmt, &n, &bswap);

	  if (!src_stmt)
	    continue;

	  switch (n.range)
	    {
	    case 16:
	      /* Already in canonical form, nothing to do.  */
	      if (code == LROTATE_EXPR || code == RROTATE_EXPR)
		continue;
	      load_type = bswap_type = uint16_type_node;
	      break;
	    case 32:
	      load_type = uint32_type_node;
	      if (bswap32_p)
		{
		  fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
		  bswap_type = bswap32_type;
		}
	      break;
	    case 64:
	      load_type = uint64_type_node;
	      if (bswap64_p)
		{
		  fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
		  bswap_type = bswap64_type;
		}
	      break;
	    default:
	      continue;
	    }

	  if (bswap && !fndecl && n.range != 16)
	    continue;

	  if (bswap_replace (cur_stmt, src_stmt, fndecl, bswap_type, load_type,
			     &n, bswap))
	    changed = true;
	}
    }

  statistics_counter_event (fun, "16-bit nop implementations found",
			    nop_stats.found_16bit);
  statistics_counter_event (fun, "32-bit nop implementations found",
			    nop_stats.found_32bit);
  statistics_counter_event (fun, "64-bit nop implementations found",
			    nop_stats.found_64bit);
  statistics_counter_event (fun, "16-bit bswap implementations found",
			    bswap_stats.found_16bit);
  statistics_counter_event (fun, "32-bit bswap implementations found",
			    bswap_stats.found_32bit);
  statistics_counter_event (fun, "64-bit bswap implementations found",
			    bswap_stats.found_64bit);

  return (changed ? TODO_update_ssa : 0);
}
} // anon namespace

gimple_opt_pass *
make_pass_optimize_bswap (gcc::context *ctxt)
{
  return new pass_optimize_bswap (ctxt);
}
/* Return true if stmt is a type conversion operation that can be stripped
   when used in a widening multiply operation.  */
static bool
widening_mult_conversion_strippable_p (tree result_type, gimple stmt)
{
  enum tree_code rhs_code = gimple_assign_rhs_code (stmt);

  if (TREE_CODE (result_type) == INTEGER_TYPE)
    {
      tree op_type;
      tree inner_op_type;

      if (!CONVERT_EXPR_CODE_P (rhs_code))
	return false;

      op_type = TREE_TYPE (gimple_assign_lhs (stmt));

      /* If the type of OP has the same precision as the result, then
	 we can strip this conversion.  The multiply operation will be
	 selected to create the correct extension as a by-product.  */
      if (TYPE_PRECISION (result_type) == TYPE_PRECISION (op_type))
	return true;

      /* We can also strip a conversion if it preserves the signed-ness of
	 the operation and doesn't narrow the range.  */
      inner_op_type = TREE_TYPE (gimple_assign_rhs1 (stmt));

      /* If the inner-most type is unsigned, then we can strip any
	 intermediate widening operation.  If it's signed, then the
	 intermediate widening operation must also be signed.  */
      if ((TYPE_UNSIGNED (inner_op_type)
	   || TYPE_UNSIGNED (op_type) == TYPE_UNSIGNED (inner_op_type))
	  && TYPE_PRECISION (op_type) > TYPE_PRECISION (inner_op_type))
	return true;

      return false;
    }

  return rhs_code == FIXED_CONVERT_EXPR;
}
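
/* For instance (hypothetical C source), in

     unsigned short us;
     int i = us;
     long long r = (long long) i * ...;

   the conversion I = US can be stripped when widening the multiply to
   64 bits: the inner type is unsigned and the conversion to int only
   widens it, so no information is lost.  */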
/* Return true if RHS is a suitable operand for a widening multiplication,
   assuming a target type of TYPE.
   There are two cases:

     - RHS makes some value at least twice as wide.  Store that value
       in *NEW_RHS_OUT if so, and store its type in *TYPE_OUT.

     - RHS is an integer constant.  Store that value in *NEW_RHS_OUT if so,
       but leave *TYPE_OUT untouched.  */

static bool
is_widening_mult_rhs_p (tree type, tree rhs, tree *type_out,
			tree *new_rhs_out)
{
  gimple stmt;
  tree type1, rhs1;

  if (TREE_CODE (rhs) == SSA_NAME)
    {
      stmt = SSA_NAME_DEF_STMT (rhs);
      if (is_gimple_assign (stmt))
	{
	  if (! widening_mult_conversion_strippable_p (type, stmt))
	    rhs1 = rhs;
	  else
	    {
	      rhs1 = gimple_assign_rhs1 (stmt);

	      if (TREE_CODE (rhs1) == INTEGER_CST)
		{
		  *new_rhs_out = rhs1;
		  *type_out = NULL;
		  return true;
		}
	    }
	}
      else
	rhs1 = rhs;

      type1 = TREE_TYPE (rhs1);

      if (TREE_CODE (type1) != TREE_CODE (type)
	  || TYPE_PRECISION (type1) * 2 > TYPE_PRECISION (type))
	return false;

      *new_rhs_out = rhs1;
      *type_out = type1;
      return true;
    }

  if (TREE_CODE (rhs) == INTEGER_CST)
    {
      *new_rhs_out = rhs;
      *type_out = NULL;
      return true;
    }

  return false;
}
/* Return true if STMT performs a widening multiplication, assuming the
   output type is TYPE.  If so, store the unwidened types of the operands
   in *TYPE1_OUT and *TYPE2_OUT respectively.  Also fill *RHS1_OUT and
   *RHS2_OUT such that converting those operands to types *TYPE1_OUT
   and *TYPE2_OUT would give the operands of the multiplication.  */

static bool
is_widening_mult_p (gimple stmt,
		    tree *type1_out, tree *rhs1_out,
		    tree *type2_out, tree *rhs2_out)
{
  tree type = TREE_TYPE (gimple_assign_lhs (stmt));

  if (TREE_CODE (type) != INTEGER_TYPE
      && TREE_CODE (type) != FIXED_POINT_TYPE)
    return false;

  if (!is_widening_mult_rhs_p (type, gimple_assign_rhs1 (stmt), type1_out,
			       rhs1_out))
    return false;

  if (!is_widening_mult_rhs_p (type, gimple_assign_rhs2 (stmt), type2_out,
			       rhs2_out))
    return false;

  if (*type1_out == NULL)
    {
      if (*type2_out == NULL || !int_fits_type_p (*rhs1_out, *type2_out))
	return false;
      *type1_out = *type2_out;
    }

  if (*type2_out == NULL)
    {
      if (!int_fits_type_p (*rhs2_out, *type1_out))
	return false;
      *type2_out = *type1_out;
    }

  /* Ensure that the larger of the two operands comes first.  */
  if (TYPE_PRECISION (*type1_out) < TYPE_PRECISION (*type2_out))
    {
      std::swap (*type1_out, *type2_out);
      std::swap (*rhs1_out, *rhs2_out);
    }

  return true;
}
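
/* For example, for the gimple generated from

     int a, b;
     long long r = (long long) a * (long long) b;

   this returns true with *TYPE1_OUT == *TYPE2_OUT == int and *RHS1_OUT,
   *RHS2_OUT the uncast values of A and B.  (A sketch; the exact SSA
   names depend on earlier passes.)  */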
/* Process a single gimple statement STMT, which has a MULT_EXPR as
   its rhs, and try to convert it into a WIDEN_MULT_EXPR.  The return
   value is true iff we converted the statement.  */

static bool
convert_mult_to_widen (gimple stmt, gimple_stmt_iterator *gsi)
{
  tree lhs, rhs1, rhs2, type, type1, type2;
  enum insn_code handler;
  machine_mode to_mode, from_mode, actual_mode;
  optab op;
  int actual_precision;
  location_t loc = gimple_location (stmt);
  bool from_unsigned1, from_unsigned2;

  lhs = gimple_assign_lhs (stmt);
  type = TREE_TYPE (lhs);
  if (TREE_CODE (type) != INTEGER_TYPE)
    return false;

  if (!is_widening_mult_p (stmt, &type1, &rhs1, &type2, &rhs2))
    return false;

  to_mode = TYPE_MODE (type);
  from_mode = TYPE_MODE (type1);
  from_unsigned1 = TYPE_UNSIGNED (type1);
  from_unsigned2 = TYPE_UNSIGNED (type2);

  if (from_unsigned1 && from_unsigned2)
    op = umul_widen_optab;
  else if (!from_unsigned1 && !from_unsigned2)
    op = smul_widen_optab;
  else
    op = usmul_widen_optab;

  handler = find_widening_optab_handler_and_mode (op, to_mode, from_mode,
						  0, &actual_mode);

  if (handler == CODE_FOR_nothing)
    {
      if (op != smul_widen_optab)
	{
	  /* We can use a signed multiply with unsigned types as long as
	     there is a wider mode to use, or it is the smaller of the two
	     types that is unsigned.  Note that type1 >= type2, always.  */
	  if ((TYPE_UNSIGNED (type1)
	       && TYPE_PRECISION (type1) == GET_MODE_PRECISION (from_mode))
	      || (TYPE_UNSIGNED (type2)
		  && TYPE_PRECISION (type2) == GET_MODE_PRECISION (from_mode)))
	    {
	      from_mode = GET_MODE_WIDER_MODE (from_mode);
	      if (GET_MODE_SIZE (to_mode) <= GET_MODE_SIZE (from_mode))
		return false;
	    }

	  op = smul_widen_optab;
	  handler = find_widening_optab_handler_and_mode (op, to_mode,
							  from_mode, 0,
							  &actual_mode);

	  if (handler == CODE_FOR_nothing)
	    return false;

	  from_unsigned1 = from_unsigned2 = false;
	}
      else
	return false;
    }

  /* Ensure that the inputs to the handler are in the correct precision
     for the opcode.  This will be the full mode size.  */
  actual_precision = GET_MODE_PRECISION (actual_mode);
  if (2 * actual_precision > TYPE_PRECISION (type))
    return false;
  if (actual_precision != TYPE_PRECISION (type1)
      || from_unsigned1 != TYPE_UNSIGNED (type1))
    rhs1 = build_and_insert_cast (gsi, loc,
				  build_nonstandard_integer_type
				    (actual_precision, from_unsigned1), rhs1);
  if (actual_precision != TYPE_PRECISION (type2)
      || from_unsigned2 != TYPE_UNSIGNED (type2))
    rhs2 = build_and_insert_cast (gsi, loc,
				  build_nonstandard_integer_type
				    (actual_precision, from_unsigned2), rhs2);

  /* Handle constants.  */
  if (TREE_CODE (rhs1) == INTEGER_CST)
    rhs1 = fold_convert (type1, rhs1);
  if (TREE_CODE (rhs2) == INTEGER_CST)
    rhs2 = fold_convert (type2, rhs2);

  gimple_assign_set_rhs1 (stmt, rhs1);
  gimple_assign_set_rhs2 (stmt, rhs2);
  gimple_assign_set_rhs_code (stmt, WIDEN_MULT_EXPR);
  update_stmt (stmt);
  widen_mul_stats.widen_mults_inserted++;
  return true;
}
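
/* On a target whose widening multiply optab is available for
   SImode -> DImode, this rewrites, e.g.,

     _1 = (long long int) a_2;
     _3 = (long long int) b_4;
     r_5 = _1 * _3;

   into the single statement r_5 = a_2 w* b_4; (a WIDEN_MULT_EXPR,
   printed as "w*" in gimple dumps).  A sketch; the SSA names are
   illustrative.  */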
/* Process a single gimple statement STMT, which is found at the
   iterator GSI and has either a PLUS_EXPR or a MINUS_EXPR as its
   rhs (given by CODE), and try to convert it into a
   WIDEN_MULT_PLUS_EXPR or a WIDEN_MULT_MINUS_EXPR.  The return value
   is true iff we converted the statement.  */

static bool
convert_plusminus_to_widen (gimple_stmt_iterator *gsi, gimple stmt,
			    enum tree_code code)
{
  gimple rhs1_stmt = NULL, rhs2_stmt = NULL;
  gimple conv1_stmt = NULL, conv2_stmt = NULL, conv_stmt;
  tree type, type1, type2, optype;
  tree lhs, rhs1, rhs2, mult_rhs1, mult_rhs2, add_rhs;
  enum tree_code rhs1_code = ERROR_MARK, rhs2_code = ERROR_MARK;
  optab this_optab;
  enum tree_code wmult_code;
  enum insn_code handler;
  machine_mode to_mode, from_mode, actual_mode;
  location_t loc = gimple_location (stmt);
  int actual_precision;
  bool from_unsigned1, from_unsigned2;

  lhs = gimple_assign_lhs (stmt);
  type = TREE_TYPE (lhs);
  if (TREE_CODE (type) != INTEGER_TYPE
      && TREE_CODE (type) != FIXED_POINT_TYPE)
    return false;

  if (code == MINUS_EXPR)
    wmult_code = WIDEN_MULT_MINUS_EXPR;
  else
    wmult_code = WIDEN_MULT_PLUS_EXPR;

  rhs1 = gimple_assign_rhs1 (stmt);
  rhs2 = gimple_assign_rhs2 (stmt);

  if (TREE_CODE (rhs1) == SSA_NAME)
    {
      rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
      if (is_gimple_assign (rhs1_stmt))
	rhs1_code = gimple_assign_rhs_code (rhs1_stmt);
    }

  if (TREE_CODE (rhs2) == SSA_NAME)
    {
      rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
      if (is_gimple_assign (rhs2_stmt))
	rhs2_code = gimple_assign_rhs_code (rhs2_stmt);
    }

  /* Allow for one conversion statement between the multiply
     and addition/subtraction statement.  If there are more than
     one conversions then we assume they would invalidate this
     transformation.  If that's not the case then they should have
     been folded before now.  */
  if (CONVERT_EXPR_CODE_P (rhs1_code))
    {
      conv1_stmt = rhs1_stmt;
      rhs1 = gimple_assign_rhs1 (rhs1_stmt);
      if (TREE_CODE (rhs1) == SSA_NAME)
	{
	  rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
	  if (is_gimple_assign (rhs1_stmt))
	    rhs1_code = gimple_assign_rhs_code (rhs1_stmt);
	}
      else
	return false;
    }
  if (CONVERT_EXPR_CODE_P (rhs2_code))
    {
      conv2_stmt = rhs2_stmt;
      rhs2 = gimple_assign_rhs1 (rhs2_stmt);
      if (TREE_CODE (rhs2) == SSA_NAME)
	{
	  rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
	  if (is_gimple_assign (rhs2_stmt))
	    rhs2_code = gimple_assign_rhs_code (rhs2_stmt);
	}
      else
	return false;
    }

  /* If code is WIDEN_MULT_EXPR then it would seem unnecessary to call
     is_widening_mult_p, but we still need the rhs returns.

     It might also appear that it would be sufficient to use the existing
     operands of the widening multiply, but that would limit the choice of
     multiply-and-accumulate instructions.

     If the widened-multiplication result has more than one uses, it is
     probably wiser not to do the conversion.  */
  if (code == PLUS_EXPR
      && (rhs1_code == MULT_EXPR || rhs1_code == WIDEN_MULT_EXPR))
    {
      if (!has_single_use (rhs1)
	  || !is_widening_mult_p (rhs1_stmt, &type1, &mult_rhs1,
				  &type2, &mult_rhs2))
	return false;
      add_rhs = rhs2;
      conv_stmt = conv1_stmt;
    }
  else if (rhs2_code == MULT_EXPR || rhs2_code == WIDEN_MULT_EXPR)
    {
      if (!has_single_use (rhs2)
	  || !is_widening_mult_p (rhs2_stmt, &type1, &mult_rhs1,
				  &type2, &mult_rhs2))
	return false;
      add_rhs = rhs1;
      conv_stmt = conv2_stmt;
    }
  else
    return false;

  to_mode = TYPE_MODE (type);
  from_mode = TYPE_MODE (type1);
  from_unsigned1 = TYPE_UNSIGNED (type1);
  from_unsigned2 = TYPE_UNSIGNED (type2);
  optype = type1;

  /* There's no such thing as a mixed sign madd yet, so use a wider mode.  */
  if (from_unsigned1 != from_unsigned2)
    {
      if (!INTEGRAL_TYPE_P (type))
	return false;
      /* We can use a signed multiply with unsigned types as long as
	 there is a wider mode to use, or it is the smaller of the two
	 types that is unsigned.  Note that type1 >= type2, always.  */
      if ((from_unsigned1
	   && TYPE_PRECISION (type1) == GET_MODE_PRECISION (from_mode))
	  || (from_unsigned2
	      && TYPE_PRECISION (type2) == GET_MODE_PRECISION (from_mode)))
	{
	  from_mode = GET_MODE_WIDER_MODE (from_mode);
	  if (GET_MODE_SIZE (from_mode) >= GET_MODE_SIZE (to_mode))
	    return false;
	}

      from_unsigned1 = from_unsigned2 = false;
      optype = build_nonstandard_integer_type (GET_MODE_PRECISION (from_mode),
					       false);
    }

  /* If there was a conversion between the multiply and addition
     then we need to make sure it fits a multiply-and-accumulate.
     There should be a single mode change which does not change the
     value.  */
  if (conv_stmt)
    {
      /* We use the original, unmodified data types for this.  */
      tree from_type = TREE_TYPE (gimple_assign_rhs1 (conv_stmt));
      tree to_type = TREE_TYPE (gimple_assign_lhs (conv_stmt));
      int data_size = TYPE_PRECISION (type1) + TYPE_PRECISION (type2);
      bool is_unsigned = TYPE_UNSIGNED (type1) && TYPE_UNSIGNED (type2);

      if (TYPE_PRECISION (from_type) > TYPE_PRECISION (to_type))
	{
	  /* Conversion is a truncate.  */
	  if (TYPE_PRECISION (to_type) < data_size)
	    return false;
	}
      else if (TYPE_PRECISION (from_type) < TYPE_PRECISION (to_type))
	{
	  /* Conversion is an extend.  Check it's the right sort.  */
	  if (TYPE_UNSIGNED (from_type) != is_unsigned
	      && !(is_unsigned && TYPE_PRECISION (from_type) > data_size))
	    return false;
	}
      /* else convert is a no-op for our purposes.  */
    }

  /* Verify that the machine can perform a widening multiply
     accumulate in this mode/signedness combination, otherwise
     this transformation is likely to pessimize code.  */
  this_optab = optab_for_tree_code (wmult_code, optype, optab_default);
  handler = find_widening_optab_handler_and_mode (this_optab, to_mode,
						  from_mode, 0, &actual_mode);

  if (handler == CODE_FOR_nothing)
    return false;

  /* Ensure that the inputs to the handler are in the correct precision
     for the opcode.  This will be the full mode size.  */
  actual_precision = GET_MODE_PRECISION (actual_mode);
  if (actual_precision != TYPE_PRECISION (type1)
      || from_unsigned1 != TYPE_UNSIGNED (type1))
    mult_rhs1 = build_and_insert_cast (gsi, loc,
				       build_nonstandard_integer_type
					 (actual_precision, from_unsigned1),
				       mult_rhs1);
  if (actual_precision != TYPE_PRECISION (type2)
      || from_unsigned2 != TYPE_UNSIGNED (type2))
    mult_rhs2 = build_and_insert_cast (gsi, loc,
				       build_nonstandard_integer_type
					 (actual_precision, from_unsigned2),
				       mult_rhs2);

  if (!useless_type_conversion_p (type, TREE_TYPE (add_rhs)))
    add_rhs = build_and_insert_cast (gsi, loc, type, add_rhs);

  /* Handle constants.  */
  if (TREE_CODE (mult_rhs1) == INTEGER_CST)
    mult_rhs1 = fold_convert (type1, mult_rhs1);
  if (TREE_CODE (mult_rhs2) == INTEGER_CST)
    mult_rhs2 = fold_convert (type2, mult_rhs2);

  gimple_assign_set_rhs_with_ops (gsi, wmult_code, mult_rhs1, mult_rhs2,
				  add_rhs);
  update_stmt (gsi_stmt (*gsi));
  widen_mul_stats.maccs_inserted++;
  return true;
}
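
/* For instance, on a target providing a widening multiply-and-accumulate
   pattern, the statement pair

     _1 = a_2 w* b_3;
     acc_4 = _1 + c_5;

   is rewritten as acc_4 = WIDEN_MULT_PLUS_EXPR <a_2, b_3, c_5>.  A
   sketch; the optab check above decides whether this actually fires.  */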
/* Combine the multiplication at MUL_STMT with operands MULOP1 and MULOP2
   with uses in additions and subtractions to form fused multiply-add
   operations.  Returns true if successful and MUL_STMT should be removed.  */

static bool
convert_mult_to_fma (gimple mul_stmt, tree op1, tree op2)
{
  tree mul_result = gimple_get_lhs (mul_stmt);
  tree type = TREE_TYPE (mul_result);
  gimple use_stmt, neguse_stmt;
  gassign *fma_stmt;
  use_operand_p use_p;
  imm_use_iterator imm_iter;

  if (FLOAT_TYPE_P (type)
      && flag_fp_contract_mode == FP_CONTRACT_OFF)
    return false;

  /* We don't want to do bitfield reduction ops.  */
  if (INTEGRAL_TYPE_P (type)
      && (TYPE_PRECISION (type)
	  != GET_MODE_PRECISION (TYPE_MODE (type))))
    return false;

  /* If the target doesn't support it, don't generate it.  We assume that
     if fma isn't available then fms, fnma or fnms are not either.  */
  if (optab_handler (fma_optab, TYPE_MODE (type)) == CODE_FOR_nothing)
    return false;

  /* If the multiplication has zero uses, it is kept around probably because
     of -fnon-call-exceptions.  Don't optimize it away in that case,
     it is DCE job.  */
  if (has_zero_uses (mul_result))
    return false;

  /* Make sure that the multiplication statement becomes dead after
     the transformation, thus that all uses are transformed to FMAs.
     This means we assume that an FMA operation has the same cost
     as an addition.  */
  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, mul_result)
    {
      enum tree_code use_code;
      tree result = mul_result;
      bool negate_p = false;

      use_stmt = USE_STMT (use_p);

      if (is_gimple_debug (use_stmt))
	continue;

      /* For now restrict these operations to single basic blocks.  In theory
	 we would want to support sinking the multiplication in
	 m = a*b;
	 if ()
	   ma = m + c;
	 else
	   d = m;
	 to form a fma in the then block and sink the multiplication to the
	 else block.  */
      if (gimple_bb (use_stmt) != gimple_bb (mul_stmt))
	return false;

      if (!is_gimple_assign (use_stmt))
	return false;

      use_code = gimple_assign_rhs_code (use_stmt);

      /* A negate on the multiplication leads to FNMA.  */
      if (use_code == NEGATE_EXPR)
	{
	  ssa_op_iter iter;
	  use_operand_p usep;

	  result = gimple_assign_lhs (use_stmt);

	  /* Make sure the negate statement becomes dead with this
	     single transformation.  */
	  if (!single_imm_use (gimple_assign_lhs (use_stmt),
			       &use_p, &neguse_stmt))
	    return false;

	  /* Make sure the multiplication isn't also used on that stmt.  */
	  FOR_EACH_PHI_OR_STMT_USE (usep, neguse_stmt, iter, SSA_OP_USE)
	    if (USE_FROM_PTR (usep) == mul_result)
	      return false;

	  /* Re-validate.  */
	  use_stmt = neguse_stmt;
	  if (gimple_bb (use_stmt) != gimple_bb (mul_stmt))
	    return false;
	  if (!is_gimple_assign (use_stmt))
	    return false;

	  use_code = gimple_assign_rhs_code (use_stmt);
	  negate_p = true;
	}

      switch (use_code)
	{
	case MINUS_EXPR:
	  if (gimple_assign_rhs2 (use_stmt) == result)
	    negate_p = !negate_p;
	  break;
	case PLUS_EXPR:
	  break;
	default:
	  /* FMA can only be formed from PLUS and MINUS.  */
	  return false;
	}

      /* If the subtrahend (gimple_assign_rhs2 (use_stmt)) is computed
	 by a MULT_EXPR that we'll visit later, we might be able to
	 get a more profitable match with fnma.
	 OTOH, if we don't, a negate / fma pair has likely lower latency
	 than a mult / subtract pair.  */
      if (use_code == MINUS_EXPR && !negate_p
	  && gimple_assign_rhs1 (use_stmt) == result
	  && optab_handler (fms_optab, TYPE_MODE (type)) == CODE_FOR_nothing
	  && optab_handler (fnma_optab, TYPE_MODE (type)) != CODE_FOR_nothing)
	{
	  tree rhs2 = gimple_assign_rhs2 (use_stmt);

	  if (TREE_CODE (rhs2) == SSA_NAME)
	    {
	      gimple stmt2 = SSA_NAME_DEF_STMT (rhs2);
	      if (has_single_use (rhs2)
		  && is_gimple_assign (stmt2)
		  && gimple_assign_rhs_code (stmt2) == MULT_EXPR)
		return false;
	    }
	}

      /* We can't handle a * b + a * b.  */
      if (gimple_assign_rhs1 (use_stmt) == gimple_assign_rhs2 (use_stmt))
	return false;

      /* While it is possible to validate whether or not the exact form
	 that we've recognized is available in the backend, the assumption
	 is that the transformation is never a loss.  For instance, suppose
	 the target only has the plain FMA pattern available.  Consider
	 a*b-c -> fma(a,b,-c): we've exchanged MUL+SUB for FMA+NEG, which
	 is still two operations.  Consider -(a*b)-c -> fma(-a,b,-c): we
	 still have 3 operations, but in the FMA form the two NEGs are
	 independent and could be run in parallel.  */
    }

  FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, mul_result)
    {
      gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
      enum tree_code use_code;
      tree addop, mulop1 = op1, result = mul_result;
      bool negate_p = false;

      if (is_gimple_debug (use_stmt))
	continue;

      use_code = gimple_assign_rhs_code (use_stmt);
      if (use_code == NEGATE_EXPR)
	{
	  result = gimple_assign_lhs (use_stmt);
	  single_imm_use (gimple_assign_lhs (use_stmt), &use_p, &neguse_stmt);
	  gsi_remove (&gsi, true);
	  release_defs (use_stmt);

	  use_stmt = neguse_stmt;
	  gsi = gsi_for_stmt (use_stmt);
	  use_code = gimple_assign_rhs_code (use_stmt);
	  negate_p = true;
	}

      if (gimple_assign_rhs1 (use_stmt) == result)
	{
	  addop = gimple_assign_rhs2 (use_stmt);
	  /* a * b - c -> a * b + (-c)  */
	  if (gimple_assign_rhs_code (use_stmt) == MINUS_EXPR)
	    addop = force_gimple_operand_gsi (&gsi,
					      build1 (NEGATE_EXPR,
						      type, addop),
					      true, NULL_TREE, true,
					      GSI_SAME_STMT);
	}
      else
	{
	  addop = gimple_assign_rhs1 (use_stmt);
	  /* a - b * c -> (-b) * c + a */
	  if (gimple_assign_rhs_code (use_stmt) == MINUS_EXPR)
	    negate_p = !negate_p;
	}

      if (negate_p)
	mulop1 = force_gimple_operand_gsi (&gsi,
					   build1 (NEGATE_EXPR,
						   type, mulop1),
					   true, NULL_TREE, true,
					   GSI_SAME_STMT);

      fma_stmt = gimple_build_assign (gimple_assign_lhs (use_stmt),
				      FMA_EXPR, mulop1, op2, addop);
      gsi_replace (&gsi, fma_stmt, true);
      widen_mul_stats.fmas_inserted++;
    }

  return true;
}
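
/* As an illustration, with -ffp-contract=fast on a target with an fma
   pattern, the pair

     _1 = a_2 * b_3;
     x_4 = _1 + c_5;

   becomes x_4 = FMA_EXPR <a_2, b_3, c_5>, and variants with negates on
   the product or the addend map to the fnma/fms/fnms forms when
   available.  A sketch; the SSA names are illustrative.  */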
/* Find integer multiplications where the operands are extended from
   smaller types, and replace the MULT_EXPR with a WIDEN_MULT_EXPR
   where appropriate.  */

namespace {

const pass_data pass_data_optimize_widening_mul =
{
  GIMPLE_PASS, /* type */
  "widening_mul", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};

class pass_optimize_widening_mul : public gimple_opt_pass
{
public:
  pass_optimize_widening_mul (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_optimize_widening_mul, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *)
    {
      return flag_expensive_optimizations && optimize;
    }

  virtual unsigned int execute (function *);

}; // class pass_optimize_widening_mul
unsigned int
pass_optimize_widening_mul::execute (function *fun)
{
  basic_block bb;
  bool cfg_changed = false;

  memset (&widen_mul_stats, 0, sizeof (widen_mul_stats));

  FOR_EACH_BB_FN (bb, fun)
    {
      gimple_stmt_iterator gsi;

      for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi);)
	{
	  gimple stmt = gsi_stmt (gsi);
	  enum tree_code code;

	  if (is_gimple_assign (stmt))
	    {
	      code = gimple_assign_rhs_code (stmt);
	      switch (code)
		{
		case MULT_EXPR:
		  if (!convert_mult_to_widen (stmt, &gsi)
		      && convert_mult_to_fma (stmt,
					      gimple_assign_rhs1 (stmt),
					      gimple_assign_rhs2 (stmt)))
		    {
		      gsi_remove (&gsi, true);
		      release_defs (stmt);
		      continue;
		    }
		  break;

		case PLUS_EXPR:
		case MINUS_EXPR:
		  convert_plusminus_to_widen (&gsi, stmt, code);
		  break;

		default:;
		}
	    }
	  else if (is_gimple_call (stmt)
		   && gimple_call_lhs (stmt))
	    {
	      tree fndecl = gimple_call_fndecl (stmt);
	      if (fndecl
		  && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
		{
		  switch (DECL_FUNCTION_CODE (fndecl))
		    {
		    case BUILT_IN_POWF:
		    case BUILT_IN_POW:
		    case BUILT_IN_POWL:
		      if (TREE_CODE (gimple_call_arg (stmt, 1)) == REAL_CST
			  && REAL_VALUES_EQUAL
			       (TREE_REAL_CST (gimple_call_arg (stmt, 1)),
				dconst2)
			  && convert_mult_to_fma (stmt,
						  gimple_call_arg (stmt, 0),
						  gimple_call_arg (stmt, 0)))
			{
			  unlink_stmt_vdef (stmt);
			  if (gsi_remove (&gsi, true)
			      && gimple_purge_dead_eh_edges (bb))
			    cfg_changed = true;
			  release_defs (stmt);
			  continue;
			}
		      break;

		    default:;
		    }
		}
	    }
	  gsi_next (&gsi);
	}
    }

  statistics_counter_event (fun, "widening multiplications inserted",
			    widen_mul_stats.widen_mults_inserted);
  statistics_counter_event (fun, "widening maccs inserted",
			    widen_mul_stats.maccs_inserted);
  statistics_counter_event (fun, "fused multiply-adds inserted",
			    widen_mul_stats.fmas_inserted);

  return cfg_changed ? TODO_cleanup_cfg : 0;
}
} // anon namespace

gimple_opt_pass *
make_pass_optimize_widening_mul (gcc::context *ctxt)
{
  return new pass_optimize_widening_mul (ctxt);
}