/* Global, SSA-based optimizations using mathematical identities.
   Copyright (C) 2005-2017 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* Currently, the only mini-pass in this file tries to CSE reciprocal
   operations.  These are common in sequences such as this one:

	modulus = sqrt(x*x + y*y + z*z);
	x = x / modulus;
	y = y / modulus;
	z = z / modulus;

   that can be optimized to

	modulus = sqrt(x*x + y*y + z*z);
	rmodulus = 1.0 / modulus;
	x = x * rmodulus;
	y = y * rmodulus;
	z = z * rmodulus;

   We do this for loop invariant divisors, and with this pass whenever
   we notice that a division has the same divisor multiple times.

   Of course, like in PRE, we don't insert a division if a dominator
   already has one.  However, this cannot be done as an extension of
   PRE for several reasons.

   First of all, with some experiments it was found out that the
   transformation is not always useful if there are only two divisions
   by the same divisor.  This is probably because modern processors
   can pipeline the divisions; on older, in-order processors it should
   still be effective to optimize two divisions by the same number.
   We make this a param, and it shall be called N in the remainder of
   this comment.

   Second, if trapping math is active, we have less freedom on where
   to insert divisions: we can only do so in basic blocks that already
   contain one.  (If divisions don't trap, instead, we can insert
   divisions elsewhere, which will be in blocks that are common dominators
   of those that have the division).

   We really don't want to compute the reciprocal unless a division will
   be found.  To do this, we won't insert the division in a basic block
   that has less than N divisions *post-dominating* it.

   The algorithm constructs a subset of the dominator tree, holding the
   blocks containing the divisions and the common dominators to them,
   and walks it twice.  The first walk is in post-order, and it annotates
   each block with the number of divisions that post-dominate it: this
   gives information on where divisions can be inserted profitably.
   The second walk is in pre-order, and it inserts divisions as explained
   above, and replaces divisions by multiplications.

   In the best case, the cost of the pass is O(n_statements).  In the
   worst-case, the cost is due to creating the dominator tree subset,
   with a cost of O(n_basic_blocks ^ 2); however this can only happen
   for n_statements / n_basic_blocks statements.  So, the amortized cost
   of creating the dominator tree subset is O(n_basic_blocks) and the
   worst-case cost of the pass is O(n_statements * n_basic_blocks).

   More practically, the cost will be small because there are few
   divisions, and they tend to be in the same basic block, so insert_bb
   is called very few times.

   If we did this using domwalk.c, an efficient implementation would have
   to work on all the variables in a single pass, because we could not
   work on just a subset of the dominator tree, as we do now, and the
   cost would also be something like O(n_statements * n_basic_blocks).
   The data structures would be more complex in order to work on all the
   variables in a single pass.  */
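
/* As a rough illustration of the transformation described above (a sketch
   only, not taken from any particular testcase; the variable names are made
   up), with -freciprocal-math on a target whose N is 2, a fragment such as

	a = x / d;
	b = y / d;
	c = z / d;

   is rewritten into approximately

	t = 1.0 / d;
	a = x * t;
	b = y * t;
	c = z * t;

   with the reciprocal statement placed in a block that has at least N
   divisions post-dominating it, as explained above.  */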
#include "coretypes.h"
#include "alloc-pool.h"
#include "tree-pass.h"
#include "optabs-tree.h"
#include "gimple-pretty-print.h"
#include "fold-const.h"
#include "gimple-fold.h"
#include "gimple-iterator.h"
#include "gimplify.h"
#include "gimplify-me.h"
#include "stor-layout.h"
#include "tree-cfg.h"
#include "tree-dfa.h"
#include "tree-ssa.h"
#include "builtins.h"
#include "internal-fn.h"
#include "case-cfn-macros.h"
#include "optabs-libfuncs.h"
#include "targhooks.h"
/* This structure represents one basic block that either computes a
   division, or is a common dominator for basic blocks that compute a
   division.  */
struct occurrence {
  /* The basic block represented by this structure.  */
  basic_block bb;

  /* If non-NULL, the SSA_NAME holding the definition for a reciprocal
     that was inserted in BB.  */
  tree recip_def;

  /* If non-NULL, the GIMPLE_ASSIGN for a reciprocal computation that
     was inserted in BB.  */
  gimple *recip_def_stmt;

  /* Pointer to a list of "struct occurrence"s for blocks dominated
     by BB.  */
  struct occurrence *children;

  /* Pointer to the next "struct occurrence"s in the list of blocks
     sharing a common dominator.  */
  struct occurrence *next;

  /* The number of divisions that are in BB before compute_merit.  The
     number of divisions that are in BB or post-dominate it after
     compute_merit.  */
  int num_divisions;

  /* True if the basic block has a division, false if it is a common
     dominator for basic blocks that do.  If it is false and trapping
     math is active, BB is not a candidate for inserting a reciprocal.  */
  bool bb_has_division;
};
static struct
{
  /* Number of 1.0/X ops inserted.  */
  int rdivs_inserted;

  /* Number of 1.0/FUNC ops inserted.  */
  int rfuncs_inserted;
} reciprocal_stats;

static struct
{
  /* Number of cexpi calls inserted.  */
  int inserted;
} sincos_stats;

static struct
{
  /* Number of hand-written 16-bit nop / bswaps found.  */
  int found_16bit;

  /* Number of hand-written 32-bit nop / bswaps found.  */
  int found_32bit;

  /* Number of hand-written 64-bit nop / bswaps found.  */
  int found_64bit;
} nop_stats, bswap_stats;

static struct
{
  /* Number of widening multiplication ops inserted.  */
  int widen_mults_inserted;

  /* Number of integer multiply-and-accumulate ops inserted.  */
  int maccs_inserted;

  /* Number of fp fused multiply-add ops inserted.  */
  int fmas_inserted;

  /* Number of divmod calls inserted.  */
  int divmod_calls_inserted;
} widen_mul_stats;

/* The instance of "struct occurrence" representing the highest
   interesting block in the dominator tree.  */
static struct occurrence *occ_head;

/* Allocation pool for getting instances of "struct occurrence".  */
static object_allocator<occurrence> *occ_pool;
/* Allocate and return a new struct occurrence for basic block BB, and
   whose children list is headed by CHILDREN.  */
static struct occurrence *
occ_new (basic_block bb, struct occurrence *children)
{
  struct occurrence *occ;

  bb->aux = occ = occ_pool->allocate ();
  memset (occ, 0, sizeof (struct occurrence));

  occ->bb = bb;
  occ->children = children;
  return occ;
}
/* Insert NEW_OCC into our subset of the dominator tree.  P_HEAD points to a
   list of "struct occurrence"s, one per basic block, having IDOM as
   their common dominator.

   We try to insert NEW_OCC as deep as possible in the tree, and we also
   insert any other block that is a common dominator for BB and one
   block already in the tree.  */

static void
insert_bb (struct occurrence *new_occ, basic_block idom,
	   struct occurrence **p_head)
{
  struct occurrence *occ, **p_occ;

  for (p_occ = p_head; (occ = *p_occ) != NULL; )
    {
      basic_block bb = new_occ->bb, occ_bb = occ->bb;
      basic_block dom = nearest_common_dominator (CDI_DOMINATORS, occ_bb, bb);
      if (dom == bb)
	{
	  /* BB dominates OCC_BB.  OCC becomes NEW_OCC's child: remove OCC
	     from its list.  */
	  *p_occ = occ->next;
	  occ->next = new_occ->children;
	  new_occ->children = occ;

	  /* Try the next block (it may as well be dominated by BB).  */
	}

      else if (dom == occ_bb)
	{
	  /* OCC_BB dominates BB.  Tail recurse to look deeper.  */
	  insert_bb (new_occ, dom, &occ->children);
	  return;
	}

      else if (dom != idom)
	{
	  gcc_assert (!dom->aux);

	  /* There is a dominator between IDOM and BB, add it and make
	     two children out of NEW_OCC and OCC.  First, remove OCC from
	     its list.  */
	  *p_occ = occ->next;
	  new_occ->next = occ;
	  occ->next = NULL;

	  /* None of the previous blocks has DOM as a dominator: if we tail
	     recursed, we would reexamine them uselessly.  Just switch BB with
	     DOM, and go on looking for blocks dominated by DOM.  */
	  new_occ = occ_new (dom, new_occ);
	}

      else
	{
	  /* Nothing special, go on with the next element.  */
	  p_occ = &occ->next;
	}
    }

  /* No place was found as a child of IDOM.  Make BB a sibling of IDOM.  */
  new_occ->next = *p_head;
  *p_head = new_occ;
}
/* Register that we found a division in BB.  */

static inline void
register_division_in (basic_block bb)
{
  struct occurrence *occ;

  occ = (struct occurrence *) bb->aux;
  if (!occ)
    {
      occ = occ_new (bb, NULL);
      insert_bb (occ, ENTRY_BLOCK_PTR_FOR_FN (cfun), &occ_head);
    }

  occ->bb_has_division = true;
  occ->num_divisions++;
}
/* Compute the number of divisions that post-dominate each block in OCC and
   its children.  */

static void
compute_merit (struct occurrence *occ)
{
  struct occurrence *occ_child;
  basic_block dom = occ->bb;

  for (occ_child = occ->children; occ_child; occ_child = occ_child->next)
    {
      if (occ_child->children)
	compute_merit (occ_child);

      basic_block bb = single_noncomplex_succ (dom);

      if (dominated_by_p (CDI_POST_DOMINATORS, bb, occ_child->bb))
	occ->num_divisions += occ_child->num_divisions;
    }
}
/* Return whether USE_STMT is a floating-point division by DEF.  */

static inline bool
is_division_by (gimple *use_stmt, tree def)
{
  return is_gimple_assign (use_stmt)
	 && gimple_assign_rhs_code (use_stmt) == RDIV_EXPR
	 && gimple_assign_rhs2 (use_stmt) == def
	 /* Do not recognize x / x as valid division, as we are getting
	    confused later by replacing all immediate uses x in such
	    a way.  */
	 && gimple_assign_rhs1 (use_stmt) != def;
}
/* Walk the subset of the dominator tree rooted at OCC, setting the
   RECIP_DEF field to a definition of 1.0 / DEF that can be used in
   the given basic block.  The field may be left NULL, of course,
   if it is not possible or profitable to do the optimization.

   DEF_BSI is an iterator pointing at the statement defining DEF.
   If RECIP_DEF is set, a dominator already has a computation that can
   be used.  */

static void
insert_reciprocals (gimple_stmt_iterator *def_gsi, struct occurrence *occ,
		    tree def, tree recip_def, int threshold)
{
  tree type;
  gassign *new_stmt;
  gimple_stmt_iterator gsi;
  struct occurrence *occ_child;

  if (!recip_def
      && (occ->bb_has_division || !flag_trapping_math)
      && occ->num_divisions >= threshold)
    {
      /* Make a variable with the replacement and substitute it.  */
      type = TREE_TYPE (def);
      recip_def = create_tmp_reg (type, "reciptmp");
      new_stmt = gimple_build_assign (recip_def, RDIV_EXPR,
				      build_one_cst (type), def);

      if (occ->bb_has_division)
	{
	  /* Case 1: insert before an existing division.  */
	  gsi = gsi_after_labels (occ->bb);
	  while (!gsi_end_p (gsi) && !is_division_by (gsi_stmt (gsi), def))
	    gsi_next (&gsi);

	  gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
	}
      else if (def_gsi && occ->bb == def_gsi->bb)
	{
	  /* Case 2: insert right after the definition.  Note that this will
	     never happen if the definition statement can throw, because in
	     that case the sole successor of the statement's basic block will
	     dominate all the uses as well.  */
	  gsi_insert_after (def_gsi, new_stmt, GSI_NEW_STMT);
	}
      else
	{
	  /* Case 3: insert in a basic block not containing defs/uses.  */
	  gsi = gsi_after_labels (occ->bb);
	  gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
	}

      reciprocal_stats.rdivs_inserted++;

      occ->recip_def_stmt = new_stmt;
    }

  occ->recip_def = recip_def;
  for (occ_child = occ->children; occ_child; occ_child = occ_child->next)
    insert_reciprocals (def_gsi, occ_child, def, recip_def, threshold);
}
/* Replace the division at USE_P with a multiplication by the reciprocal, if
   possible.  */

static inline void
replace_reciprocal (use_operand_p use_p)
{
  gimple *use_stmt = USE_STMT (use_p);
  basic_block bb = gimple_bb (use_stmt);
  struct occurrence *occ = (struct occurrence *) bb->aux;

  if (optimize_bb_for_speed_p (bb)
      && occ->recip_def && use_stmt != occ->recip_def_stmt)
    {
      gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
      gimple_assign_set_rhs_code (use_stmt, MULT_EXPR);
      SET_USE (use_p, occ->recip_def);
      fold_stmt_inplace (&gsi);
      update_stmt (use_stmt);
    }
}
/* Free OCC and return one more "struct occurrence" to be freed.  */

static struct occurrence *
free_bb (struct occurrence *occ)
{
  struct occurrence *child, *next;

  /* First get the two pointers hanging off OCC.  */
  next = occ->next;
  child = occ->children;
  occ->bb->aux = NULL;
  occ_pool->remove (occ);

  /* Now ensure that we don't recurse unless it is necessary.  */
  if (!child)
    return next;
  else
    {
      while (next)
	next = free_bb (next);

      return child;
    }
}
/* Look for floating-point divisions among DEF's uses, and try to
   replace them by multiplications with the reciprocal.  Add
   as many statements computing the reciprocal as needed.

   DEF must be a GIMPLE register of a floating-point type.  */

static void
execute_cse_reciprocals_1 (gimple_stmt_iterator *def_gsi, tree def)
{
  use_operand_p use_p;
  imm_use_iterator use_iter;
  struct occurrence *occ;
  int count = 0, threshold;

  gcc_assert (FLOAT_TYPE_P (TREE_TYPE (def)) && is_gimple_reg (def));

  FOR_EACH_IMM_USE_FAST (use_p, use_iter, def)
    {
      gimple *use_stmt = USE_STMT (use_p);
      if (is_division_by (use_stmt, def))
	{
	  register_division_in (gimple_bb (use_stmt));
	  count++;
	}
    }

  /* Do the expensive part only if we can hope to optimize something.  */
  threshold = targetm.min_divisions_for_recip_mul (TYPE_MODE (TREE_TYPE (def)));
  if (count >= threshold)
    {
      gimple *use_stmt;
      for (occ = occ_head; occ; occ = occ->next)
	{
	  compute_merit (occ);
	  insert_reciprocals (def_gsi, occ, def, NULL, threshold);
	}

      FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, def)
	{
	  if (is_division_by (use_stmt, def))
	    {
	      FOR_EACH_IMM_USE_ON_STMT (use_p, use_iter)
		replace_reciprocal (use_p);
	    }
	}
    }

  for (occ = occ_head; occ; )
    occ = free_bb (occ);

  occ_head = NULL;
}
507 /* Return an internal function that implements the reciprocal of CALL,
508 or IFN_LAST if there is no such function that the target supports. */
511 internal_fn_reciprocal (gcall
*call
)
515 switch (gimple_call_combined_fn (call
))
525 tree_pair types
= direct_internal_fn_types (ifn
, call
);
526 if (!direct_internal_fn_supported_p (ifn
, types
, OPTIMIZE_FOR_SPEED
))
532 /* Go through all the floating-point SSA_NAMEs, and call
533 execute_cse_reciprocals_1 on each of them. */
536 const pass_data pass_data_cse_reciprocals
=
538 GIMPLE_PASS
, /* type */
540 OPTGROUP_NONE
, /* optinfo_flags */
542 PROP_ssa
, /* properties_required */
543 0, /* properties_provided */
544 0, /* properties_destroyed */
545 0, /* todo_flags_start */
546 TODO_update_ssa
, /* todo_flags_finish */
549 class pass_cse_reciprocals
: public gimple_opt_pass
552 pass_cse_reciprocals (gcc::context
*ctxt
)
553 : gimple_opt_pass (pass_data_cse_reciprocals
, ctxt
)
556 /* opt_pass methods: */
557 virtual bool gate (function
*) { return optimize
&& flag_reciprocal_math
; }
558 virtual unsigned int execute (function
*);
560 }; // class pass_cse_reciprocals
563 pass_cse_reciprocals::execute (function
*fun
)
568 occ_pool
= new object_allocator
<occurrence
> ("dominators for recip");
570 memset (&reciprocal_stats
, 0, sizeof (reciprocal_stats
));
571 calculate_dominance_info (CDI_DOMINATORS
);
572 calculate_dominance_info (CDI_POST_DOMINATORS
);
575 FOR_EACH_BB_FN (bb
, fun
)
576 gcc_assert (!bb
->aux
);
578 for (arg
= DECL_ARGUMENTS (fun
->decl
); arg
; arg
= DECL_CHAIN (arg
))
579 if (FLOAT_TYPE_P (TREE_TYPE (arg
))
580 && is_gimple_reg (arg
))
582 tree name
= ssa_default_def (fun
, arg
);
584 execute_cse_reciprocals_1 (NULL
, name
);
587 FOR_EACH_BB_FN (bb
, fun
)
591 for (gphi_iterator gsi
= gsi_start_phis (bb
); !gsi_end_p (gsi
);
594 gphi
*phi
= gsi
.phi ();
595 def
= PHI_RESULT (phi
);
596 if (! virtual_operand_p (def
)
597 && FLOAT_TYPE_P (TREE_TYPE (def
)))
598 execute_cse_reciprocals_1 (NULL
, def
);
601 for (gimple_stmt_iterator gsi
= gsi_after_labels (bb
); !gsi_end_p (gsi
);
604 gimple
*stmt
= gsi_stmt (gsi
);
606 if (gimple_has_lhs (stmt
)
607 && (def
= SINGLE_SSA_TREE_OPERAND (stmt
, SSA_OP_DEF
)) != NULL
608 && FLOAT_TYPE_P (TREE_TYPE (def
))
609 && TREE_CODE (def
) == SSA_NAME
)
610 execute_cse_reciprocals_1 (&gsi
, def
);
613 if (optimize_bb_for_size_p (bb
))
616 /* Scan for a/func(b) and convert it to reciprocal a*rfunc(b). */
617 for (gimple_stmt_iterator gsi
= gsi_after_labels (bb
); !gsi_end_p (gsi
);
620 gimple
*stmt
= gsi_stmt (gsi
);
622 if (is_gimple_assign (stmt
)
623 && gimple_assign_rhs_code (stmt
) == RDIV_EXPR
)
625 tree arg1
= gimple_assign_rhs2 (stmt
);
628 if (TREE_CODE (arg1
) != SSA_NAME
)
631 stmt1
= SSA_NAME_DEF_STMT (arg1
);
633 if (is_gimple_call (stmt1
)
634 && gimple_call_lhs (stmt1
))
639 tree fndecl
= NULL_TREE
;
641 gcall
*call
= as_a
<gcall
*> (stmt1
);
642 internal_fn ifn
= internal_fn_reciprocal (call
);
645 fndecl
= gimple_call_fndecl (call
);
647 || DECL_BUILT_IN_CLASS (fndecl
) != BUILT_IN_MD
)
649 fndecl
= targetm
.builtin_reciprocal (fndecl
);
654 /* Check that all uses of the SSA name are divisions,
655 otherwise replacing the defining statement will do
658 FOR_EACH_IMM_USE_FAST (use_p
, ui
, arg1
)
660 gimple
*stmt2
= USE_STMT (use_p
);
661 if (is_gimple_debug (stmt2
))
663 if (!is_gimple_assign (stmt2
)
664 || gimple_assign_rhs_code (stmt2
) != RDIV_EXPR
665 || gimple_assign_rhs1 (stmt2
) == arg1
666 || gimple_assign_rhs2 (stmt2
) != arg1
)
675 gimple_replace_ssa_lhs (call
, arg1
);
676 if (gimple_call_internal_p (call
) != (ifn
!= IFN_LAST
))
678 auto_vec
<tree
, 4> args
;
679 for (unsigned int i
= 0;
680 i
< gimple_call_num_args (call
); i
++)
681 args
.safe_push (gimple_call_arg (call
, i
));
684 stmt2
= gimple_build_call_vec (fndecl
, args
);
686 stmt2
= gimple_build_call_internal_vec (ifn
, args
);
687 gimple_call_set_lhs (stmt2
, arg1
);
688 if (gimple_vdef (call
))
690 gimple_set_vdef (stmt2
, gimple_vdef (call
));
691 SSA_NAME_DEF_STMT (gimple_vdef (stmt2
)) = stmt2
;
693 gimple_set_vuse (stmt2
, gimple_vuse (call
));
694 gimple_stmt_iterator gsi2
= gsi_for_stmt (call
);
695 gsi_replace (&gsi2
, stmt2
, true);
700 gimple_call_set_fndecl (call
, fndecl
);
702 gimple_call_set_internal_fn (call
, ifn
);
705 reciprocal_stats
.rfuncs_inserted
++;
707 FOR_EACH_IMM_USE_STMT (stmt
, ui
, arg1
)
709 gimple_stmt_iterator gsi
= gsi_for_stmt (stmt
);
710 gimple_assign_set_rhs_code (stmt
, MULT_EXPR
);
711 fold_stmt_inplace (&gsi
);
719 statistics_counter_event (fun
, "reciprocal divs inserted",
720 reciprocal_stats
.rdivs_inserted
);
721 statistics_counter_event (fun
, "reciprocal functions inserted",
722 reciprocal_stats
.rfuncs_inserted
);
724 free_dominance_info (CDI_DOMINATORS
);
725 free_dominance_info (CDI_POST_DOMINATORS
);
733 make_pass_cse_reciprocals (gcc::context
*ctxt
)
735 return new pass_cse_reciprocals (ctxt
);
/* Records an occurrence at statement USE_STMT in the vector of trees
   STMTS if it is dominated by *TOP_BB, dominates it, or *TOP_BB is not
   yet initialized.  Returns true if the occurrence was pushed on
   the vector.  Adjusts *TOP_BB to be the basic block dominating all
   statements in the vector.  */
745 maybe_record_sincos (vec
<gimple
*> *stmts
,
746 basic_block
*top_bb
, gimple
*use_stmt
)
748 basic_block use_bb
= gimple_bb (use_stmt
);
750 && (*top_bb
== use_bb
751 || dominated_by_p (CDI_DOMINATORS
, use_bb
, *top_bb
)))
752 stmts
->safe_push (use_stmt
);
754 || dominated_by_p (CDI_DOMINATORS
, *top_bb
, use_bb
))
756 stmts
->safe_push (use_stmt
);
/* Look for sin, cos and cexpi calls with the same argument NAME and
   create a single call to cexpi CSEing the result in this case.
   We first walk over all immediate uses of the argument collecting
   statements that we can CSE in a vector and in a second pass replace
   the statement rhs with a REALPART or IMAGPART expression on the
   result of the cexpi call we insert before the use statement that
   dominates all other candidates.  */
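
/* As a small illustration of the rewrite performed below (a sketch with
   made-up SSA names, not taken from any testcase), two calls sharing the
   argument x_0, such as

	s_1 = sin (x_0);
	c_2 = cos (x_0);

   are turned into roughly

	sincostmp_3 = cexpi (x_0);
	s_1 = IMAGPART_EXPR <sincostmp_3>;
	c_2 = REALPART_EXPR <sincostmp_3>;

   with the cexpi call inserted at the dominating use as described above.  */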
774 execute_cse_sincos_1 (tree name
)
776 gimple_stmt_iterator gsi
;
777 imm_use_iterator use_iter
;
778 tree fndecl
, res
, type
;
779 gimple
*def_stmt
, *use_stmt
, *stmt
;
780 int seen_cos
= 0, seen_sin
= 0, seen_cexpi
= 0;
781 auto_vec
<gimple
*> stmts
;
782 basic_block top_bb
= NULL
;
784 bool cfg_changed
= false;
786 type
= TREE_TYPE (name
);
787 FOR_EACH_IMM_USE_STMT (use_stmt
, use_iter
, name
)
789 if (gimple_code (use_stmt
) != GIMPLE_CALL
790 || !gimple_call_lhs (use_stmt
))
793 switch (gimple_call_combined_fn (use_stmt
))
796 seen_cos
|= maybe_record_sincos (&stmts
, &top_bb
, use_stmt
) ? 1 : 0;
800 seen_sin
|= maybe_record_sincos (&stmts
, &top_bb
, use_stmt
) ? 1 : 0;
804 seen_cexpi
|= maybe_record_sincos (&stmts
, &top_bb
, use_stmt
) ? 1 : 0;
811 if (seen_cos
+ seen_sin
+ seen_cexpi
<= 1)
814 /* Simply insert cexpi at the beginning of top_bb but not earlier than
815 the name def statement. */
816 fndecl
= mathfn_built_in (type
, BUILT_IN_CEXPI
);
819 stmt
= gimple_build_call (fndecl
, 1, name
);
820 res
= make_temp_ssa_name (TREE_TYPE (TREE_TYPE (fndecl
)), stmt
, "sincostmp");
821 gimple_call_set_lhs (stmt
, res
);
823 def_stmt
= SSA_NAME_DEF_STMT (name
);
824 if (!SSA_NAME_IS_DEFAULT_DEF (name
)
825 && gimple_code (def_stmt
) != GIMPLE_PHI
826 && gimple_bb (def_stmt
) == top_bb
)
828 gsi
= gsi_for_stmt (def_stmt
);
829 gsi_insert_after (&gsi
, stmt
, GSI_SAME_STMT
);
833 gsi
= gsi_after_labels (top_bb
);
834 gsi_insert_before (&gsi
, stmt
, GSI_SAME_STMT
);
836 sincos_stats
.inserted
++;
838 /* And adjust the recorded old call sites. */
839 for (i
= 0; stmts
.iterate (i
, &use_stmt
); ++i
)
843 switch (gimple_call_combined_fn (use_stmt
))
846 rhs
= fold_build1 (REALPART_EXPR
, type
, res
);
850 rhs
= fold_build1 (IMAGPART_EXPR
, type
, res
);
861 /* Replace call with a copy. */
862 stmt
= gimple_build_assign (gimple_call_lhs (use_stmt
), rhs
);
864 gsi
= gsi_for_stmt (use_stmt
);
865 gsi_replace (&gsi
, stmt
, true);
866 if (gimple_purge_dead_eh_edges (gimple_bb (stmt
)))
/* To evaluate powi(x,n), the floating point value x raised to the
   constant integer exponent n, we use a hybrid algorithm that
   combines the "window method" with look-up tables.  For an
   introduction to exponentiation algorithms and "addition chains",
   see section 4.6.3, "Evaluation of Powers" of Donald E. Knuth,
   "Seminumerical Algorithms", Vol. 2, "The Art of Computer Programming",
   3rd Edition, 1998, and Daniel M. Gordon, "A Survey of Fast Exponentiation
   Methods", Journal of Algorithms, Vol. 27, pp. 129-146, 1998.  */

/* Provide a default value for POWI_MAX_MULTS, the maximum number of
   multiplications to inline before calling the system library's pow
   function.  powi(x,n) requires at worst 2*bits(n)-2 multiplications,
   so this default never requires calling pow, powf or powl.  */

#ifndef POWI_MAX_MULTS
#define POWI_MAX_MULTS  (2*HOST_BITS_PER_WIDE_INT-2)
#endif

/* The size of the "optimal power tree" lookup table.  All
   exponents less than this value are simply looked up in the
   powi_table below.  This threshold is also used to size the
   cache of pseudo registers that hold intermediate results.  */
#define POWI_TABLE_SIZE 256

/* The size, in bits of the window, used in the "window method"
   exponentiation algorithm.  This is equivalent to a radix of
   (1<<POWI_WINDOW_SIZE) in the corresponding "m-ary method".  */
#define POWI_WINDOW_SIZE 3
/* The following table is an efficient representation of an
   "optimal power tree".  For each value, i, the corresponding
   value, j, in the table states that an optimal evaluation
   sequence for calculating pow(x,i) can be found by evaluating
   pow(x,j)*pow(x,i-j).  An optimal power tree for the first
   100 integers is given in Knuth's "Seminumerical algorithms".  */

static const unsigned char powi_table[POWI_TABLE_SIZE] =
  {
      0,   1,   1,   2,   2,   3,   3,   4,  /*   0 -   7 */
      4,   6,   5,   6,   6,  10,   7,   9,  /*   8 -  15 */
      8,  16,   9,  16,  10,  12,  11,  13,  /*  16 -  23 */
     12,  17,  13,  18,  14,  24,  15,  26,  /*  24 -  31 */
     16,  17,  17,  19,  18,  33,  19,  26,  /*  32 -  39 */
     20,  25,  21,  40,  22,  27,  23,  44,  /*  40 -  47 */
     24,  32,  25,  34,  26,  29,  27,  44,  /*  48 -  55 */
     28,  31,  29,  34,  30,  60,  31,  36,  /*  56 -  63 */
     32,  64,  33,  34,  34,  46,  35,  37,  /*  64 -  71 */
     36,  65,  37,  50,  38,  48,  39,  69,  /*  72 -  79 */
     40,  49,  41,  43,  42,  51,  43,  58,  /*  80 -  87 */
     44,  64,  45,  47,  46,  59,  47,  76,  /*  88 -  95 */
     48,  65,  49,  66,  50,  67,  51,  66,  /*  96 - 103 */
     52,  70,  53,  74,  54, 104,  55,  74,  /* 104 - 111 */
     56,  64,  57,  69,  58,  78,  59,  68,  /* 112 - 119 */
     60,  61,  61,  80,  62,  75,  63,  68,  /* 120 - 127 */
     64,  65,  65, 128,  66, 129,  67,  90,  /* 128 - 135 */
     68,  73,  69, 131,  70,  94,  71,  88,  /* 136 - 143 */
     72, 128,  73,  98,  74, 132,  75, 121,  /* 144 - 151 */
     76, 102,  77, 124,  78, 132,  79, 106,  /* 152 - 159 */
     80,  97,  81, 160,  82,  99,  83, 134,  /* 160 - 167 */
     84,  86,  85,  95,  86, 160,  87, 100,  /* 168 - 175 */
     88, 113,  89,  98,  90, 107,  91, 122,  /* 176 - 183 */
     92, 111,  93, 102,  94, 126,  95, 150,  /* 184 - 191 */
     96, 128,  97, 130,  98, 133,  99, 195,  /* 192 - 199 */
    100, 128, 101, 123, 102, 164, 103, 138,  /* 200 - 207 */
    104, 145, 105, 146, 106, 109, 107, 149,  /* 208 - 215 */
    108, 200, 109, 146, 110, 170, 111, 157,  /* 216 - 223 */
    112, 128, 113, 130, 114, 182, 115, 132,  /* 224 - 231 */
    116, 200, 117, 132, 118, 158, 119, 206,  /* 232 - 239 */
    120, 240, 121, 162, 122, 147, 123, 152,  /* 240 - 247 */
    124, 166, 125, 214, 126, 138, 127, 153,  /* 248 - 255 */
  };
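
/* A worked example of how the table above is used (an illustration only):
   powi_table[5] == 3, so x**5 is evaluated as x**3 * x**2; recursively,
   powi_table[3] == 2 gives x**3 == x**2 * x, and x**2 == x * x, for a
   total of three multiplications once the shared x**2 is cached.  For
   exponents of POWI_TABLE_SIZE or more, the window method below peels
   POWI_WINDOW_SIZE bits at a time and combines the pieces by squaring,
   falling back to a pow call once more than POWI_MAX_MULTS
   multiplications would be needed.  */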
/* Return the number of multiplications required to calculate
   powi(x,n) where n is less than POWI_TABLE_SIZE.  This is a
   subroutine of powi_cost.  CACHE is an array indicating
   which exponents have already been calculated.  */

static int
powi_lookup_cost (unsigned HOST_WIDE_INT n, bool *cache)
{
  /* If we've already calculated this exponent, then this evaluation
     doesn't require any additional multiplications.  */
  if (cache[n])
    return 0;

  cache[n] = true;
  return powi_lookup_cost (n - powi_table[n], cache)
	 + powi_lookup_cost (powi_table[n], cache) + 1;
}
964 /* Return the number of multiplications required to calculate
965 powi(x,n) for an arbitrary x, given the exponent N. This
966 function needs to be kept in sync with powi_as_mults below. */
969 powi_cost (HOST_WIDE_INT n
)
971 bool cache
[POWI_TABLE_SIZE
];
972 unsigned HOST_WIDE_INT digit
;
973 unsigned HOST_WIDE_INT val
;
979 /* Ignore the reciprocal when calculating the cost. */
980 val
= (n
< 0) ? -n
: n
;
982 /* Initialize the exponent cache. */
983 memset (cache
, 0, POWI_TABLE_SIZE
* sizeof (bool));
988 while (val
>= POWI_TABLE_SIZE
)
992 digit
= val
& ((1 << POWI_WINDOW_SIZE
) - 1);
993 result
+= powi_lookup_cost (digit
, cache
)
994 + POWI_WINDOW_SIZE
+ 1;
995 val
>>= POWI_WINDOW_SIZE
;
1004 return result
+ powi_lookup_cost (val
, cache
);
1007 /* Recursive subroutine of powi_as_mults. This function takes the
1008 array, CACHE, of already calculated exponents and an exponent N and
1009 returns a tree that corresponds to CACHE[1]**N, with type TYPE. */
1012 powi_as_mults_1 (gimple_stmt_iterator
*gsi
, location_t loc
, tree type
,
1013 HOST_WIDE_INT n
, tree
*cache
)
1015 tree op0
, op1
, ssa_target
;
1016 unsigned HOST_WIDE_INT digit
;
1019 if (n
< POWI_TABLE_SIZE
&& cache
[n
])
1022 ssa_target
= make_temp_ssa_name (type
, NULL
, "powmult");
1024 if (n
< POWI_TABLE_SIZE
)
1026 cache
[n
] = ssa_target
;
1027 op0
= powi_as_mults_1 (gsi
, loc
, type
, n
- powi_table
[n
], cache
);
1028 op1
= powi_as_mults_1 (gsi
, loc
, type
, powi_table
[n
], cache
);
1032 digit
= n
& ((1 << POWI_WINDOW_SIZE
) - 1);
1033 op0
= powi_as_mults_1 (gsi
, loc
, type
, n
- digit
, cache
);
1034 op1
= powi_as_mults_1 (gsi
, loc
, type
, digit
, cache
);
1038 op0
= powi_as_mults_1 (gsi
, loc
, type
, n
>> 1, cache
);
1042 mult_stmt
= gimple_build_assign (ssa_target
, MULT_EXPR
, op0
, op1
);
1043 gimple_set_location (mult_stmt
, loc
);
1044 gsi_insert_before (gsi
, mult_stmt
, GSI_SAME_STMT
);
1049 /* Convert ARG0**N to a tree of multiplications of ARG0 with itself.
1050 This function needs to be kept in sync with powi_cost above. */
1053 powi_as_mults (gimple_stmt_iterator
*gsi
, location_t loc
,
1054 tree arg0
, HOST_WIDE_INT n
)
1056 tree cache
[POWI_TABLE_SIZE
], result
, type
= TREE_TYPE (arg0
);
1061 return build_real (type
, dconst1
);
1063 memset (cache
, 0, sizeof (cache
));
1066 result
= powi_as_mults_1 (gsi
, loc
, type
, (n
< 0) ? -n
: n
, cache
);
1070 /* If the original exponent was negative, reciprocate the result. */
1071 target
= make_temp_ssa_name (type
, NULL
, "powmult");
1072 div_stmt
= gimple_build_assign (target
, RDIV_EXPR
,
1073 build_real (type
, dconst1
), result
);
1074 gimple_set_location (div_stmt
, loc
);
1075 gsi_insert_before (gsi
, div_stmt
, GSI_SAME_STMT
);
1080 /* ARG0 and N are the two arguments to a powi builtin in GSI with
1081 location info LOC. If the arguments are appropriate, create an
1082 equivalent sequence of statements prior to GSI using an optimal
1083 number of multiplications, and return an expression holding the
1087 gimple_expand_builtin_powi (gimple_stmt_iterator
*gsi
, location_t loc
,
1088 tree arg0
, HOST_WIDE_INT n
)
1090 /* Avoid largest negative number. */
1092 && ((n
>= -1 && n
<= 2)
1093 || (optimize_function_for_speed_p (cfun
)
1094 && powi_cost (n
) <= POWI_MAX_MULTS
)))
1095 return powi_as_mults (gsi
, loc
, arg0
, n
);
1100 /* Build a gimple call statement that calls FN with argument ARG.
1101 Set the lhs of the call statement to a fresh SSA name. Insert the
1102 statement prior to GSI's current position, and return the fresh
1106 build_and_insert_call (gimple_stmt_iterator
*gsi
, location_t loc
,
1112 call_stmt
= gimple_build_call (fn
, 1, arg
);
1113 ssa_target
= make_temp_ssa_name (TREE_TYPE (arg
), NULL
, "powroot");
1114 gimple_set_lhs (call_stmt
, ssa_target
);
1115 gimple_set_location (call_stmt
, loc
);
1116 gsi_insert_before (gsi
, call_stmt
, GSI_SAME_STMT
);
1121 /* Build a gimple binary operation with the given CODE and arguments
1122 ARG0, ARG1, assigning the result to a new SSA name for variable
1123 TARGET. Insert the statement prior to GSI's current position, and
1124 return the fresh SSA name.*/
1127 build_and_insert_binop (gimple_stmt_iterator
*gsi
, location_t loc
,
1128 const char *name
, enum tree_code code
,
1129 tree arg0
, tree arg1
)
1131 tree result
= make_temp_ssa_name (TREE_TYPE (arg0
), NULL
, name
);
1132 gassign
*stmt
= gimple_build_assign (result
, code
, arg0
, arg1
);
1133 gimple_set_location (stmt
, loc
);
1134 gsi_insert_before (gsi
, stmt
, GSI_SAME_STMT
);
1138 /* Build a gimple reference operation with the given CODE and argument
1139 ARG, assigning the result to a new SSA name of TYPE with NAME.
1140 Insert the statement prior to GSI's current position, and return
1141 the fresh SSA name. */
1144 build_and_insert_ref (gimple_stmt_iterator
*gsi
, location_t loc
, tree type
,
1145 const char *name
, enum tree_code code
, tree arg0
)
1147 tree result
= make_temp_ssa_name (type
, NULL
, name
);
1148 gimple
*stmt
= gimple_build_assign (result
, build1 (code
, type
, arg0
));
1149 gimple_set_location (stmt
, loc
);
1150 gsi_insert_before (gsi
, stmt
, GSI_SAME_STMT
);
1154 /* Build a gimple assignment to cast VAL to TYPE. Insert the statement
1155 prior to GSI's current position, and return the fresh SSA name. */
1158 build_and_insert_cast (gimple_stmt_iterator
*gsi
, location_t loc
,
1159 tree type
, tree val
)
1161 tree result
= make_ssa_name (type
);
1162 gassign
*stmt
= gimple_build_assign (result
, NOP_EXPR
, val
);
1163 gimple_set_location (stmt
, loc
);
1164 gsi_insert_before (gsi
, stmt
, GSI_SAME_STMT
);
1168 struct pow_synth_sqrt_info
1171 unsigned int deepest
;
1172 unsigned int num_mults
;
1175 /* Return true iff the real value C can be represented as a
1176 sum of powers of 0.5 up to N. That is:
1177 C == SUM<i from 1..N> (a[i]*(0.5**i)) where a[i] is either 0 or 1.
1178 Record in INFO the various parameters of the synthesis algorithm such
1179 as the factors a[i], the maximum 0.5 power and the number of
1180 multiplications that will be required. */
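1181
/* A worked example of the representation checked below (an illustration
   only): C == 0.625 can be written as 0.5**1 + 0.5**3, so for N >= 3 the
   factors are a[1] = 1, a[2] = 0, a[3] = 1, the deepest 0.5 power is 3,
   and one multiplication is needed to combine sqrt(x) with
   sqrt(sqrt(sqrt(x))).  An exponent such as 0.3, on the other hand, never
   hits zero within a small N, so the check below fails for it.  */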
1183 representable_as_half_series_p (REAL_VALUE_TYPE c
, unsigned n
,
1184 struct pow_synth_sqrt_info
*info
)
1186 REAL_VALUE_TYPE factor
= dconsthalf
;
1187 REAL_VALUE_TYPE remainder
= c
;
1190 info
->num_mults
= 0;
1191 memset (info
->factors
, 0, n
* sizeof (bool));
1193 for (unsigned i
= 0; i
< n
; i
++)
1195 REAL_VALUE_TYPE res
;
1197 /* If something inexact happened bail out now. */
1198 if (real_arithmetic (&res
, MINUS_EXPR
, &remainder
, &factor
))
1201 /* We have hit zero. The number is representable as a sum
1202 of powers of 0.5. */
1203 if (real_equal (&res
, &dconst0
))
1205 info
->factors
[i
] = true;
1206 info
->deepest
= i
+ 1;
1209 else if (!REAL_VALUE_NEGATIVE (res
))
1212 info
->factors
[i
] = true;
1216 info
->factors
[i
] = false;
1218 real_arithmetic (&factor
, MULT_EXPR
, &factor
, &dconsthalf
);
1223 /* Return the tree corresponding to FN being applied
1224 to ARG N times at GSI and LOC.
1225 Look up previous results from CACHE if need be.
1226 cache[0] should contain just plain ARG i.e. FN applied to ARG 0 times. */
1229 get_fn_chain (tree arg
, unsigned int n
, gimple_stmt_iterator
*gsi
,
1230 tree fn
, location_t loc
, tree
*cache
)
1232 tree res
= cache
[n
];
1235 tree prev
= get_fn_chain (arg
, n
- 1, gsi
, fn
, loc
, cache
);
1236 res
= build_and_insert_call (gsi
, loc
, fn
, prev
);
1243 /* Print to STREAM the repeated application of function FNAME to ARG
1244 N times. So, for FNAME = "foo", ARG = "x", N = 2 it would print:
1248 print_nested_fn (FILE* stream
, const char *fname
, const char* arg
,
1252 fprintf (stream
, "%s", arg
);
1255 fprintf (stream
, "%s (", fname
);
1256 print_nested_fn (stream
, fname
, arg
, n
- 1);
1257 fprintf (stream
, ")");
1261 /* Print to STREAM the fractional sequence of sqrt chains
1262 applied to ARG, described by INFO. Used for the dump file. */
1265 dump_fractional_sqrt_sequence (FILE *stream
, const char *arg
,
1266 struct pow_synth_sqrt_info
*info
)
1268 for (unsigned int i
= 0; i
< info
->deepest
; i
++)
1270 bool is_set
= info
->factors
[i
];
1273 print_nested_fn (stream
, "sqrt", arg
, i
+ 1);
1274 if (i
!= info
->deepest
- 1)
1275 fprintf (stream
, " * ");
1280 /* Print to STREAM a representation of raising ARG to an integer
1281 power N. Used for the dump file. */
1284 dump_integer_part (FILE *stream
, const char* arg
, HOST_WIDE_INT n
)
1287 fprintf (stream
, "powi (%s, " HOST_WIDE_INT_PRINT_DEC
")", arg
, n
);
1289 fprintf (stream
, "%s", arg
);
1292 /* Attempt to synthesize a POW[F] (ARG0, ARG1) call using chains of
1293 square roots. Place at GSI and LOC. Limit the maximum depth
1294 of the sqrt chains to MAX_DEPTH. Return the tree holding the
1295 result of the expanded sequence or NULL_TREE if the expansion failed.
1297 This routine assumes that ARG1 is a real number with a fractional part
1298 (the integer exponent case will have been handled earlier in
1299 gimple_expand_builtin_pow).
1302 * For ARG1 composed of a whole part WHOLE_PART and a fractional part
1303 FRAC_PART i.e. WHOLE_PART == floor (ARG1) and
1304 FRAC_PART == ARG1 - WHOLE_PART:
1305 Produce POWI (ARG0, WHOLE_PART) * POW (ARG0, FRAC_PART) where
1306 POW (ARG0, FRAC_PART) is expanded as a product of square root chains
1307 if it can be expressed as such, that is if FRAC_PART satisfies:
1308 FRAC_PART == <SUM from i = 1 until MAX_DEPTH> (a[i] * (0.5**i))
1309 where integer a[i] is either 0 or 1.
1312 POW (x, 3.625) == POWI (x, 3) * POW (x, 0.625)
1313 --> POWI (x, 3) * SQRT (x) * SQRT (SQRT (SQRT (x)))
1315 For ARG1 < 0.0 there are two approaches:
1316 * (A) Expand to 1.0 / POW (ARG0, -ARG1) where POW (ARG0, -ARG1)
1317 is calculated as above.
1320 POW (x, -5.625) == 1.0 / POW (x, 5.625)
1321 --> 1.0 / (POWI (x, 5) * SQRT (x) * SQRT (SQRT (SQRT (x))))
1323 * (B) : WHOLE_PART := - ceil (abs (ARG1))
1324 FRAC_PART := ARG1 - WHOLE_PART
1325 and expand to POW (x, FRAC_PART) / POWI (x, WHOLE_PART).
1327 POW (x, -5.875) == POW (x, 0.125) / POWI (X, 6)
1328 --> SQRT (SQRT (SQRT (x))) / (POWI (x, 6))
1330 For ARG1 < 0.0 we choose between (A) and (B) depending on
1331 how many multiplications we'd have to do.
1332 So, for the example in (B): POW (x, -5.875), if we were to
1333 follow algorithm (A) we would produce:
1334 1.0 / POWI (X, 5) * SQRT (X) * SQRT (SQRT (X)) * SQRT (SQRT (SQRT (X)))
1335 which contains more multiplications than approach (B).
1337 Hopefully, this approach will eliminate potentially expensive POW library
1338 calls when unsafe floating point math is enabled and allow the compiler to
1339 further optimise the multiplies, square roots and divides produced by this
1343 expand_pow_as_sqrts (gimple_stmt_iterator
*gsi
, location_t loc
,
1344 tree arg0
, tree arg1
, HOST_WIDE_INT max_depth
)
1346 tree type
= TREE_TYPE (arg0
);
1347 machine_mode mode
= TYPE_MODE (type
);
1348 tree sqrtfn
= mathfn_built_in (type
, BUILT_IN_SQRT
);
1349 bool one_over
= true;
1354 if (TREE_CODE (arg1
) != REAL_CST
)
1357 REAL_VALUE_TYPE exp_init
= TREE_REAL_CST (arg1
);
1359 gcc_assert (max_depth
> 0);
1360 tree
*cache
= XALLOCAVEC (tree
, max_depth
+ 1);
1362 struct pow_synth_sqrt_info synth_info
;
1363 synth_info
.factors
= XALLOCAVEC (bool, max_depth
+ 1);
1364 synth_info
.deepest
= 0;
1365 synth_info
.num_mults
= 0;
1367 bool neg_exp
= REAL_VALUE_NEGATIVE (exp_init
);
1368 REAL_VALUE_TYPE exp
= real_value_abs (&exp_init
);
1370 /* The whole and fractional parts of exp. */
1371 REAL_VALUE_TYPE whole_part
;
1372 REAL_VALUE_TYPE frac_part
;
1374 real_floor (&whole_part
, mode
, &exp
);
1375 real_arithmetic (&frac_part
, MINUS_EXPR
, &exp
, &whole_part
);
1378 REAL_VALUE_TYPE ceil_whole
= dconst0
;
1379 REAL_VALUE_TYPE ceil_fract
= dconst0
;
1383 real_ceil (&ceil_whole
, mode
, &exp
);
1384 real_arithmetic (&ceil_fract
, MINUS_EXPR
, &ceil_whole
, &exp
);
1387 if (!representable_as_half_series_p (frac_part
, max_depth
, &synth_info
))
1390 /* Check whether it's more profitable to not use 1.0 / ... */
1393 struct pow_synth_sqrt_info alt_synth_info
;
1394 alt_synth_info
.factors
= XALLOCAVEC (bool, max_depth
+ 1);
1395 alt_synth_info
.deepest
= 0;
1396 alt_synth_info
.num_mults
= 0;
1398 if (representable_as_half_series_p (ceil_fract
, max_depth
,
1400 && alt_synth_info
.deepest
<= synth_info
.deepest
1401 && alt_synth_info
.num_mults
< synth_info
.num_mults
)
1403 whole_part
= ceil_whole
;
1404 frac_part
= ceil_fract
;
1405 synth_info
.deepest
= alt_synth_info
.deepest
;
1406 synth_info
.num_mults
= alt_synth_info
.num_mults
;
1407 memcpy (synth_info
.factors
, alt_synth_info
.factors
,
1408 (max_depth
+ 1) * sizeof (bool));
1413 HOST_WIDE_INT n
= real_to_integer (&whole_part
);
1414 REAL_VALUE_TYPE cint
;
1415 real_from_integer (&cint
, VOIDmode
, n
, SIGNED
);
1417 if (!real_identical (&whole_part
, &cint
))
1420 if (powi_cost (n
) + synth_info
.num_mults
> POWI_MAX_MULTS
)
1423 memset (cache
, 0, (max_depth
+ 1) * sizeof (tree
));
1425 tree integer_res
= n
== 0 ? build_real (type
, dconst1
) : arg0
;
1427 /* Calculate the integer part of the exponent. */
1430 integer_res
= gimple_expand_builtin_powi (gsi
, loc
, arg0
, n
);
1439 real_to_decimal (string
, &exp_init
, sizeof (string
), 0, 1);
1440 fprintf (dump_file
, "synthesizing pow (x, %s) as:\n", string
);
1446 fprintf (dump_file
, "1.0 / (");
1447 dump_integer_part (dump_file
, "x", n
);
1449 fprintf (dump_file
, " * ");
1450 dump_fractional_sqrt_sequence (dump_file
, "x", &synth_info
);
1451 fprintf (dump_file
, ")");
1455 dump_fractional_sqrt_sequence (dump_file
, "x", &synth_info
);
1456 fprintf (dump_file
, " / (");
1457 dump_integer_part (dump_file
, "x", n
);
1458 fprintf (dump_file
, ")");
1463 dump_fractional_sqrt_sequence (dump_file
, "x", &synth_info
);
1465 fprintf (dump_file
, " * ");
1466 dump_integer_part (dump_file
, "x", n
);
1469 fprintf (dump_file
, "\ndeepest sqrt chain: %d\n", synth_info
.deepest
);
1473 tree fract_res
= NULL_TREE
;
1476 /* Calculate the fractional part of the exponent. */
1477 for (unsigned i
= 0; i
< synth_info
.deepest
; i
++)
1479 if (synth_info
.factors
[i
])
1481 tree sqrt_chain
= get_fn_chain (arg0
, i
+ 1, gsi
, sqrtfn
, loc
, cache
);
1484 fract_res
= sqrt_chain
;
1487 fract_res
= build_and_insert_binop (gsi
, loc
, "powroot", MULT_EXPR
,
1488 fract_res
, sqrt_chain
);
1492 tree res
= NULL_TREE
;
1499 res
= build_and_insert_binop (gsi
, loc
, "powroot", MULT_EXPR
,
1500 fract_res
, integer_res
);
1504 res
= build_and_insert_binop (gsi
, loc
, "powrootrecip", RDIV_EXPR
,
1505 build_real (type
, dconst1
), res
);
1509 res
= build_and_insert_binop (gsi
, loc
, "powroot", RDIV_EXPR
,
1510 fract_res
, integer_res
);
1514 res
= build_and_insert_binop (gsi
, loc
, "powroot", MULT_EXPR
,
1515 fract_res
, integer_res
);
1519 /* ARG0 and ARG1 are the two arguments to a pow builtin call in GSI
1520 with location info LOC. If possible, create an equivalent and
1521 less expensive sequence of statements prior to GSI, and return an
1522 expression holding the result. */
1525 gimple_expand_builtin_pow (gimple_stmt_iterator
*gsi
, location_t loc
,
1526 tree arg0
, tree arg1
)
1528 REAL_VALUE_TYPE c
, cint
, dconst1_3
, dconst1_4
, dconst1_6
;
1529 REAL_VALUE_TYPE c2
, dconst3
;
1531 tree type
, sqrtfn
, cbrtfn
, sqrt_arg0
, result
, cbrt_x
, powi_cbrt_x
;
1533 bool speed_p
= optimize_bb_for_speed_p (gsi_bb (*gsi
));
1534 bool hw_sqrt_exists
, c_is_int
, c2_is_int
;
1536 dconst1_4
= dconst1
;
1537 SET_REAL_EXP (&dconst1_4
, REAL_EXP (&dconst1_4
) - 2);
1539 /* If the exponent isn't a constant, there's nothing of interest
1541 if (TREE_CODE (arg1
) != REAL_CST
)
1544 /* Don't perform the operation if flag_signaling_nans is on
1545 and the operand is a signaling NaN. */
1546 if (HONOR_SNANS (TYPE_MODE (TREE_TYPE (arg1
)))
1547 && ((TREE_CODE (arg0
) == REAL_CST
1548 && REAL_VALUE_ISSIGNALING_NAN (TREE_REAL_CST (arg0
)))
1549 || REAL_VALUE_ISSIGNALING_NAN (TREE_REAL_CST (arg1
))))
1552 /* If the exponent is equivalent to an integer, expand to an optimal
1553 multiplication sequence when profitable. */
1554 c
= TREE_REAL_CST (arg1
);
1555 n
= real_to_integer (&c
);
1556 real_from_integer (&cint
, VOIDmode
, n
, SIGNED
);
1557 c_is_int
= real_identical (&c
, &cint
);
1560 && ((n
>= -1 && n
<= 2)
1561 || (flag_unsafe_math_optimizations
1563 && powi_cost (n
) <= POWI_MAX_MULTS
)))
1564 return gimple_expand_builtin_powi (gsi
, loc
, arg0
, n
);
1566 /* Attempt various optimizations using sqrt and cbrt. */
1567 type
= TREE_TYPE (arg0
);
1568 mode
= TYPE_MODE (type
);
1569 sqrtfn
= mathfn_built_in (type
, BUILT_IN_SQRT
);
1571 /* Optimize pow(x,0.5) = sqrt(x). This replacement is always safe
1572 unless signed zeros must be maintained. pow(-0,0.5) = +0, while
1575 && real_equal (&c
, &dconsthalf
)
1576 && !HONOR_SIGNED_ZEROS (mode
))
1577 return build_and_insert_call (gsi
, loc
, sqrtfn
, arg0
);
1579 hw_sqrt_exists
= optab_handler (sqrt_optab
, mode
) != CODE_FOR_nothing
;
1581 /* Optimize pow(x,1./3.) = cbrt(x). This requires unsafe math
1582 optimizations since 1./3. is not exactly representable. If x
1583 is negative and finite, the correct value of pow(x,1./3.) is
1584 a NaN with the "invalid" exception raised, because the value
1585 of 1./3. actually has an even denominator. The correct value
1586 of cbrt(x) is a negative real value. */
1587 cbrtfn
= mathfn_built_in (type
, BUILT_IN_CBRT
);
1588 dconst1_3
= real_value_truncate (mode
, dconst_third ());
1590 if (flag_unsafe_math_optimizations
1592 && (!HONOR_NANS (mode
) || tree_expr_nonnegative_p (arg0
))
1593 && real_equal (&c
, &dconst1_3
))
1594 return build_and_insert_call (gsi
, loc
, cbrtfn
, arg0
);
1596 /* Optimize pow(x,1./6.) = cbrt(sqrt(x)). Don't do this optimization
1597 if we don't have a hardware sqrt insn. */
1598 dconst1_6
= dconst1_3
;
1599 SET_REAL_EXP (&dconst1_6
, REAL_EXP (&dconst1_6
) - 1);
1601 if (flag_unsafe_math_optimizations
1604 && (!HONOR_NANS (mode
) || tree_expr_nonnegative_p (arg0
))
1607 && real_equal (&c
, &dconst1_6
))
1610 sqrt_arg0
= build_and_insert_call (gsi
, loc
, sqrtfn
, arg0
);
1613 return build_and_insert_call (gsi
, loc
, cbrtfn
, sqrt_arg0
);
1617 /* Attempt to expand the POW as a product of square root chains.
1618 Expand the 0.25 case even when optimising for size. */
1619 if (flag_unsafe_math_optimizations
1622 && (speed_p
|| real_equal (&c
, &dconst1_4
))
1623 && !HONOR_SIGNED_ZEROS (mode
))
1625 unsigned int max_depth
= speed_p
1626 ? PARAM_VALUE (PARAM_MAX_POW_SQRT_DEPTH
)
1629 tree expand_with_sqrts
1630 = expand_pow_as_sqrts (gsi
, loc
, arg0
, arg1
, max_depth
);
1632 if (expand_with_sqrts
)
1633 return expand_with_sqrts
;
1636 real_arithmetic (&c2
, MULT_EXPR
, &c
, &dconst2
);
1637 n
= real_to_integer (&c2
);
1638 real_from_integer (&cint
, VOIDmode
, n
, SIGNED
);
1639 c2_is_int
= real_identical (&c2
, &cint
);
1641 /* Optimize pow(x,c), where 3c = n for some nonzero integer n, into
1643 powi(x, n/3) * powi(cbrt(x), n%3), n > 0;
1644 1.0 / (powi(x, abs(n)/3) * powi(cbrt(x), abs(n)%3)), n < 0.
1646 Do not calculate the first factor when n/3 = 0. As cbrt(x) is
1647 different from pow(x, 1./3.) due to rounding and behavior with
1648 negative x, we need to constrain this transformation to unsafe
1649 math and positive x or finite math. */
1650 real_from_integer (&dconst3
, VOIDmode
, 3, SIGNED
);
1651 real_arithmetic (&c2
, MULT_EXPR
, &c
, &dconst3
);
1652 real_round (&c2
, mode
, &c2
);
1653 n
= real_to_integer (&c2
);
1654 real_from_integer (&cint
, VOIDmode
, n
, SIGNED
);
1655 real_arithmetic (&c2
, RDIV_EXPR
, &cint
, &dconst3
);
1656 real_convert (&c2
, mode
, &c2
);
1658 if (flag_unsafe_math_optimizations
1660 && (!HONOR_NANS (mode
) || tree_expr_nonnegative_p (arg0
))
1661 && real_identical (&c2
, &c
)
1663 && optimize_function_for_speed_p (cfun
)
1664 && powi_cost (n
/ 3) <= POWI_MAX_MULTS
)
1666 tree powi_x_ndiv3
= NULL_TREE
;
1668 /* Attempt to fold powi(arg0, abs(n/3)) into multiplies. If not
1669 possible or profitable, give up. Skip the degenerate case when
1670 abs(n) < 3, where the result is always 1. */
1671 if (absu_hwi (n
) >= 3)
1673 powi_x_ndiv3
= gimple_expand_builtin_powi (gsi
, loc
, arg0
,
1679 /* Calculate powi(cbrt(x), n%3). Don't use gimple_expand_builtin_powi
1680 as that creates an unnecessary variable. Instead, just produce
1681 either cbrt(x) or cbrt(x) * cbrt(x). */
1682 cbrt_x
= build_and_insert_call (gsi
, loc
, cbrtfn
, arg0
);
1684 if (absu_hwi (n
) % 3 == 1)
1685 powi_cbrt_x
= cbrt_x
;
1687 powi_cbrt_x
= build_and_insert_binop (gsi
, loc
, "powroot", MULT_EXPR
,
1690 /* Multiply the two subexpressions, unless powi(x,abs(n)/3) = 1. */
1691 if (absu_hwi (n
) < 3)
1692 result
= powi_cbrt_x
;
1694 result
= build_and_insert_binop (gsi
, loc
, "powroot", MULT_EXPR
,
1695 powi_x_ndiv3
, powi_cbrt_x
);
1697 /* If n is negative, reciprocate the result. */
1699 result
= build_and_insert_binop (gsi
, loc
, "powroot", RDIV_EXPR
,
1700 build_real (type
, dconst1
), result
);
1705 /* No optimizations succeeded. */
1709 /* ARG is the argument to a cabs builtin call in GSI with location info
1710 LOC. Create a sequence of statements prior to GSI that calculates
1711 sqrt(R*R + I*I), where R and I are the real and imaginary components
1712 of ARG, respectively. Return an expression holding the result. */
1715 gimple_expand_builtin_cabs (gimple_stmt_iterator
*gsi
, location_t loc
, tree arg
)
1717 tree real_part
, imag_part
, addend1
, addend2
, sum
, result
;
1718 tree type
= TREE_TYPE (TREE_TYPE (arg
));
1719 tree sqrtfn
= mathfn_built_in (type
, BUILT_IN_SQRT
);
1720 machine_mode mode
= TYPE_MODE (type
);
1722 if (!flag_unsafe_math_optimizations
1723 || !optimize_bb_for_speed_p (gimple_bb (gsi_stmt (*gsi
)))
1725 || optab_handler (sqrt_optab
, mode
) == CODE_FOR_nothing
)
1728 real_part
= build_and_insert_ref (gsi
, loc
, type
, "cabs",
1729 REALPART_EXPR
, arg
);
1730 addend1
= build_and_insert_binop (gsi
, loc
, "cabs", MULT_EXPR
,
1731 real_part
, real_part
);
1732 imag_part
= build_and_insert_ref (gsi
, loc
, type
, "cabs",
1733 IMAGPART_EXPR
, arg
);
1734 addend2
= build_and_insert_binop (gsi
, loc
, "cabs", MULT_EXPR
,
1735 imag_part
, imag_part
);
1736 sum
= build_and_insert_binop (gsi
, loc
, "cabs", PLUS_EXPR
, addend1
, addend2
);
1737 result
= build_and_insert_call (gsi
, loc
, sqrtfn
, sum
);
1742 /* Go through all calls to sin, cos and cexpi and call execute_cse_sincos_1
1743 on the SSA_NAME argument of each of them. Also expand powi(x,n) into
1744 an optimal number of multiplies, when n is a constant. */
1748 const pass_data pass_data_cse_sincos
=
1750 GIMPLE_PASS
, /* type */
1751 "sincos", /* name */
1752 OPTGROUP_NONE
, /* optinfo_flags */
1753 TV_NONE
, /* tv_id */
1754 PROP_ssa
, /* properties_required */
1755 PROP_gimple_opt_math
, /* properties_provided */
1756 0, /* properties_destroyed */
1757 0, /* todo_flags_start */
1758 TODO_update_ssa
, /* todo_flags_finish */
1761 class pass_cse_sincos
: public gimple_opt_pass
1764 pass_cse_sincos (gcc::context
*ctxt
)
1765 : gimple_opt_pass (pass_data_cse_sincos
, ctxt
)
1768 /* opt_pass methods: */
1769 virtual bool gate (function
*)
1771 /* We no longer require either sincos or cexp, since powi expansion
1772 piggybacks on this pass. */
1776 virtual unsigned int execute (function
*);
1778 }; // class pass_cse_sincos
1781 pass_cse_sincos::execute (function
*fun
)
1784 bool cfg_changed
= false;
1786 calculate_dominance_info (CDI_DOMINATORS
);
1787 memset (&sincos_stats
, 0, sizeof (sincos_stats
));
1789 FOR_EACH_BB_FN (bb
, fun
)
1791 gimple_stmt_iterator gsi
;
1792 bool cleanup_eh
= false;
1794 for (gsi
= gsi_after_labels (bb
); !gsi_end_p (gsi
); gsi_next (&gsi
))
1796 gimple
*stmt
= gsi_stmt (gsi
);
1798 /* Only the last stmt in a bb could throw, no need to call
1799 gimple_purge_dead_eh_edges if we change something in the middle
1800 of a basic block. */
1803 if (is_gimple_call (stmt
)
1804 && gimple_call_lhs (stmt
))
1806 tree arg
, arg0
, arg1
, result
;
1810 switch (gimple_call_combined_fn (stmt
))
1815 /* Make sure we have either sincos or cexp. */
1816 if (!targetm
.libc_has_function (function_c99_math_complex
)
1817 && !targetm
.libc_has_function (function_sincos
))
1820 arg
= gimple_call_arg (stmt
, 0);
1821 if (TREE_CODE (arg
) == SSA_NAME
)
1822 cfg_changed
|= execute_cse_sincos_1 (arg
);
1826 arg0
= gimple_call_arg (stmt
, 0);
1827 arg1
= gimple_call_arg (stmt
, 1);
1829 loc
= gimple_location (stmt
);
1830 result
= gimple_expand_builtin_pow (&gsi
, loc
, arg0
, arg1
);
1834 tree lhs
= gimple_get_lhs (stmt
);
1835 gassign
*new_stmt
= gimple_build_assign (lhs
, result
);
1836 gimple_set_location (new_stmt
, loc
);
1837 unlink_stmt_vdef (stmt
);
1838 gsi_replace (&gsi
, new_stmt
, true);
1840 if (gimple_vdef (stmt
))
1841 release_ssa_name (gimple_vdef (stmt
));
1846 arg0
= gimple_call_arg (stmt
, 0);
1847 arg1
= gimple_call_arg (stmt
, 1);
1848 loc
= gimple_location (stmt
);
1850 if (real_minus_onep (arg0
))
1852 tree t0
, t1
, cond
, one
, minus_one
;
1855 t0
= TREE_TYPE (arg0
);
1856 t1
= TREE_TYPE (arg1
);
1857 one
= build_real (t0
, dconst1
);
1858 minus_one
= build_real (t0
, dconstm1
);
1860 cond
= make_temp_ssa_name (t1
, NULL
, "powi_cond");
1861 stmt
= gimple_build_assign (cond
, BIT_AND_EXPR
,
1862 arg1
, build_int_cst (t1
, 1));
1863 gimple_set_location (stmt
, loc
);
1864 gsi_insert_before (&gsi
, stmt
, GSI_SAME_STMT
);
1866 result
= make_temp_ssa_name (t0
, NULL
, "powi");
1867 stmt
= gimple_build_assign (result
, COND_EXPR
, cond
,
1869 gimple_set_location (stmt
, loc
);
1870 gsi_insert_before (&gsi
, stmt
, GSI_SAME_STMT
);
1874 if (!tree_fits_shwi_p (arg1
))
1877 n
= tree_to_shwi (arg1
);
1878 result
= gimple_expand_builtin_powi (&gsi
, loc
, arg0
, n
);
1883 tree lhs
= gimple_get_lhs (stmt
);
1884 gassign
*new_stmt
= gimple_build_assign (lhs
, result
);
1885 gimple_set_location (new_stmt
, loc
);
1886 unlink_stmt_vdef (stmt
);
1887 gsi_replace (&gsi
, new_stmt
, true);
1889 if (gimple_vdef (stmt
))
1890 release_ssa_name (gimple_vdef (stmt
));
1895 arg0
= gimple_call_arg (stmt
, 0);
1896 loc
= gimple_location (stmt
);
1897 result
= gimple_expand_builtin_cabs (&gsi
, loc
, arg0
);
1901 tree lhs
= gimple_get_lhs (stmt
);
1902 gassign
*new_stmt
= gimple_build_assign (lhs
, result
);
1903 gimple_set_location (new_stmt
, loc
);
1904 unlink_stmt_vdef (stmt
);
1905 gsi_replace (&gsi
, new_stmt
, true);
1907 if (gimple_vdef (stmt
))
1908 release_ssa_name (gimple_vdef (stmt
));
1917 cfg_changed
|= gimple_purge_dead_eh_edges (bb
);
1920 statistics_counter_event (fun
, "sincos statements inserted",
1921 sincos_stats
.inserted
);
1923 return cfg_changed
? TODO_cleanup_cfg
: 0;
1929 make_pass_cse_sincos (gcc::context
*ctxt
)
1931 return new pass_cse_sincos (ctxt
);
/* A symbolic number structure is used to detect byte permutation and selection
   patterns of a source.  To achieve that, its field N contains an artificial
   number consisting of BITS_PER_MARKER sized markers tracking where each
   byte comes from in the source:

   0	   - target byte has the value 0
   FF	   - target byte has an unknown value (eg. due to sign extension)
   1..size - marker value is the byte index in the source (0 for lsb).

   To detect permutations on memory sources (arrays and structures), a symbolic
   number is also associated:
   - a base address BASE_ADDR and an OFFSET giving the address of the source;
   - a range which gives the difference between the highest and lowest accessed
     memory location to make such a symbolic number;
   - the address SRC of the source element of lowest address as a convenience
     to easily get BASE_ADDR + offset + lowest bytepos;
   - number of expressions N_OPS bitwise ored together to represent
     approximate cost of the computation.

   Note 1: the range is different from size as size reflects the size of the
   type of the current expression.  For instance, for an array char a[],
   (short) a[0] | (short) a[3] would have a size of 2 but a range of 4 while
   (short) a[0] | ((short) a[0] << 1) would still have a size of 2 but this
   time a range of 1.

   Note 2: for non-memory sources, range holds the same value as size.

   Note 3: SRC points to the SSA_NAME in case of non-memory source.  */
1963 struct symbolic_number
{
1968 HOST_WIDE_INT bytepos
;
1972 unsigned HOST_WIDE_INT range
;
1976 #define BITS_PER_MARKER 8
1977 #define MARKER_MASK ((1 << BITS_PER_MARKER) - 1)
1978 #define MARKER_BYTE_UNKNOWN MARKER_MASK
1979 #define HEAD_MARKER(n, size) \
1980 ((n) & ((uint64_t) MARKER_MASK << (((size) - 1) * BITS_PER_MARKER)))
1982 /* The number which the find_bswap_or_nop_1 result should match in
1983 order to have a nop. The number is masked according to the size of
1984 the symbolic number before using it. */
1985 #define CMPNOP (sizeof (int64_t) < 8 ? 0 : \
1986 (uint64_t)0x08070605 << 32 | 0x04030201)
1988 /* The number which the find_bswap_or_nop_1 result should match in
1989 order to have a byte swap. The number is masked according to the
1990 size of the symbolic number before using it. */
1991 #define CMPXCHG (sizeof (int64_t) < 8 ? 0 : \
1992 (uint64_t)0x01020304 << 32 | 0x05060708)
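
/* A worked example of the encoding above (an illustration only): for a
   32-bit destination assembled from four byte loads of the same word, the
   symbolic number has range 4 and its markers, from least to most
   significant, record which source byte feeds each destination byte.  If
   destination byte 0 (the lsb) takes source byte 1 (the source lsb) and so
   on, N is 0x04030201, which equals CMPNOP masked down to four markers: a
   nop.  If the bytes are assembled in reverse order, N is 0x01020304,
   which equals CMPXCHG shifted down to four markers: a 32-bit byte swap.  */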
1994 /* Perform a SHIFT or ROTATE operation by COUNT bits on symbolic
1995 number N. Return false if the requested operation is not permitted
1996 on a symbolic number. */
1999 do_shift_rotate (enum tree_code code
,
2000 struct symbolic_number
*n
,
2003 int i
, size
= TYPE_PRECISION (n
->type
) / BITS_PER_UNIT
;
2004 unsigned head_marker
;
2006 if (count
% BITS_PER_UNIT
!= 0)
2008 count
= (count
/ BITS_PER_UNIT
) * BITS_PER_MARKER
;
2010 /* Zero out the extra bits of N in order to avoid them being shifted
2011 into the significant bits. */
2012 if (size
< 64 / BITS_PER_MARKER
)
2013 n
->n
&= ((uint64_t) 1 << (size
* BITS_PER_MARKER
)) - 1;
2021 head_marker
= HEAD_MARKER (n
->n
, size
);
2023 /* Arithmetic shift of signed type: result is dependent on the value. */
2024 if (!TYPE_UNSIGNED (n
->type
) && head_marker
)
2025 for (i
= 0; i
< count
/ BITS_PER_MARKER
; i
++)
2026 n
->n
|= (uint64_t) MARKER_BYTE_UNKNOWN
2027 << ((size
- 1 - i
) * BITS_PER_MARKER
);
2030 n
->n
= (n
->n
<< count
) | (n
->n
>> ((size
* BITS_PER_MARKER
) - count
));
2033 n
->n
= (n
->n
>> count
) | (n
->n
<< ((size
* BITS_PER_MARKER
) - count
));
2038 /* Zero unused bits for size. */
2039 if (size
< 64 / BITS_PER_MARKER
)
2040 n
->n
&= ((uint64_t) 1 << (size
* BITS_PER_MARKER
)) - 1;
2044 /* Perform sanity checking for the symbolic number N and the gimple
2048 verify_symbolic_number_p (struct symbolic_number
*n
, gimple
*stmt
)
2052 lhs_type
= gimple_expr_type (stmt
);
2054 if (TREE_CODE (lhs_type
) != INTEGER_TYPE
)
2057 if (TYPE_PRECISION (lhs_type
) != TYPE_PRECISION (n
->type
))
2063 /* Initialize the symbolic number N for the bswap pass from the base element
2064 SRC manipulated by the bitwise OR expression. */
2067 init_symbolic_number (struct symbolic_number
*n
, tree src
)
2071 if (! INTEGRAL_TYPE_P (TREE_TYPE (src
)))
2074 n
->base_addr
= n
->offset
= n
->alias_set
= n
->vuse
= NULL_TREE
;
2077 /* Set up the symbolic number N by setting each byte to a value between 1 and
2078 the byte size of rhs1. The highest order byte is set to n->size and the
2079 lowest order byte to 1. */
2080 n
->type
= TREE_TYPE (src
);
2081 size
= TYPE_PRECISION (n
->type
);
2082 if (size
% BITS_PER_UNIT
!= 0)
2084 size
/= BITS_PER_UNIT
;
2085 if (size
> 64 / BITS_PER_MARKER
)
2091 if (size
< 64 / BITS_PER_MARKER
)
2092 n
->n
&= ((uint64_t) 1 << (size
* BITS_PER_MARKER
)) - 1;
/* Check if STMT might be a byte swap or a nop from a memory source and return
   the answer.  If so, REF is that memory source and the base of the memory
   area accessed and the offset of the access from that base are recorded
   in N.  */
2102 find_bswap_or_nop_load (gimple
*stmt
, tree ref
, struct symbolic_number
*n
)
2104 /* Leaf node is an array or component ref. Memorize its base and
2105 offset from base to compare to other such leaf node. */
2106 HOST_WIDE_INT bitsize
, bitpos
;
2108 int unsignedp
, reversep
, volatilep
;
2109 tree offset
, base_addr
;
2111 /* Not prepared to handle PDP endian. */
2112 if (BYTES_BIG_ENDIAN
!= WORDS_BIG_ENDIAN
)
2115 if (!gimple_assign_load_p (stmt
) || gimple_has_volatile_ops (stmt
))
2118 base_addr
= get_inner_reference (ref
, &bitsize
, &bitpos
, &offset
, &mode
,
2119 &unsignedp
, &reversep
, &volatilep
);
2121 if (TREE_CODE (base_addr
) == MEM_REF
)
2123 offset_int bit_offset
= 0;
2124 tree off
= TREE_OPERAND (base_addr
, 1);
2126 if (!integer_zerop (off
))
2128 offset_int boff
, coff
= mem_ref_offset (base_addr
);
2129 boff
= coff
<< LOG2_BITS_PER_UNIT
;
2133 base_addr
= TREE_OPERAND (base_addr
, 0);
2135 /* Avoid returning a negative bitpos as this may wreak havoc later. */
2136 if (wi::neg_p (bit_offset
))
2138 offset_int mask
= wi::mask
<offset_int
> (LOG2_BITS_PER_UNIT
, false);
2139 offset_int tem
= bit_offset
.and_not (mask
);
          /* TEM is the bitpos rounded to BITS_PER_UNIT towards -Inf.
             Subtract it from BIT_OFFSET and add it (scaled) to OFFSET.  */
2143 tem
>>= LOG2_BITS_PER_UNIT
;
2145 offset
= size_binop (PLUS_EXPR
, offset
,
2146 wide_int_to_tree (sizetype
, tem
));
2148 offset
= wide_int_to_tree (sizetype
, tem
);
2151 bitpos
+= bit_offset
.to_shwi ();
2154 if (bitpos
% BITS_PER_UNIT
)
2156 if (bitsize
% BITS_PER_UNIT
)
2161 if (!init_symbolic_number (n
, ref
))
2163 n
->base_addr
= base_addr
;
2165 n
->bytepos
= bitpos
/ BITS_PER_UNIT
;
2166 n
->alias_set
= reference_alias_ptr_type (ref
);
2167 n
->vuse
= gimple_vuse (stmt
);
/* Compute the symbolic number N representing the result of a bitwise OR on 2
   symbolic numbers N1 and N2 whose source statements are respectively
   SOURCE_STMT1 and SOURCE_STMT2.  */
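
/* Illustrative sketch (added for exposition, not part of the original code):
   when merging the two byte loads of

     uint16_t v = p[0] | (p[1] << 8);   with p of type unsigned char *

   the symbolic number for p[0] is 0x01 at byte position 0 and the one for
   p[1] << 8 is 0x0100 at byte position 1.  On a little-endian target the
   marker of the higher-address load is re-based by the byte-position
   difference (1 here), so the bitwise OR yields 0x0201 with a 2-byte range
   starting at the lower address.  */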
2176 perform_symbolic_merge (gimple
*source_stmt1
, struct symbolic_number
*n1
,
2177 gimple
*source_stmt2
, struct symbolic_number
*n2
,
2178 struct symbolic_number
*n
)
2182 gimple
*source_stmt
;
2183 struct symbolic_number
*n_start
;
2185 tree rhs1
= gimple_assign_rhs1 (source_stmt1
);
2186 if (TREE_CODE (rhs1
) == BIT_FIELD_REF
2187 && TREE_CODE (TREE_OPERAND (rhs1
, 0)) == SSA_NAME
)
2188 rhs1
= TREE_OPERAND (rhs1
, 0);
2189 tree rhs2
= gimple_assign_rhs1 (source_stmt2
);
2190 if (TREE_CODE (rhs2
) == BIT_FIELD_REF
2191 && TREE_CODE (TREE_OPERAND (rhs2
, 0)) == SSA_NAME
)
2192 rhs2
= TREE_OPERAND (rhs2
, 0);
  /* Sources are different, cancel bswap if they are not memory locations with
     the same base (array, structure, ...).  */
2199 HOST_WIDE_INT start_sub
, end_sub
, end1
, end2
, end
;
2200 struct symbolic_number
*toinc_n_ptr
, *n_end
;
2201 basic_block bb1
, bb2
;
2203 if (!n1
->base_addr
|| !n2
->base_addr
2204 || !operand_equal_p (n1
->base_addr
, n2
->base_addr
, 0))
2207 if (!n1
->offset
!= !n2
->offset
2208 || (n1
->offset
&& !operand_equal_p (n1
->offset
, n2
->offset
, 0)))
2211 if (n1
->bytepos
< n2
->bytepos
)
2214 start_sub
= n2
->bytepos
- n1
->bytepos
;
2219 start_sub
= n1
->bytepos
- n2
->bytepos
;
2222 bb1
= gimple_bb (source_stmt1
);
2223 bb2
= gimple_bb (source_stmt2
);
2224 if (dominated_by_p (CDI_DOMINATORS
, bb1
, bb2
))
2225 source_stmt
= source_stmt1
;
2227 source_stmt
= source_stmt2
;
2229 /* Find the highest address at which a load is performed and
2230 compute related info. */
2231 end1
= n1
->bytepos
+ (n1
->range
- 1);
2232 end2
= n2
->bytepos
+ (n2
->range
- 1);
2236 end_sub
= end2
- end1
;
2241 end_sub
= end1
- end2
;
2243 n_end
= (end2
> end1
) ? n2
: n1
;
2245 /* Find symbolic number whose lsb is the most significant. */
2246 if (BYTES_BIG_ENDIAN
)
2247 toinc_n_ptr
= (n_end
== n1
) ? n2
: n1
;
2249 toinc_n_ptr
= (n_start
== n1
) ? n2
: n1
;
2251 n
->range
= end
- n_start
->bytepos
+ 1;
2253 /* Check that the range of memory covered can be represented by
2254 a symbolic number. */
2255 if (n
->range
> 64 / BITS_PER_MARKER
)
2258 /* Reinterpret byte marks in symbolic number holding the value of
2259 bigger weight according to target endianness. */
2260 inc
= BYTES_BIG_ENDIAN
? end_sub
: start_sub
;
2261 size
= TYPE_PRECISION (n1
->type
) / BITS_PER_UNIT
;
2262 for (i
= 0; i
< size
; i
++, inc
<<= BITS_PER_MARKER
)
2265 = (toinc_n_ptr
->n
>> (i
* BITS_PER_MARKER
)) & MARKER_MASK
;
2266 if (marker
&& marker
!= MARKER_BYTE_UNKNOWN
)
2267 toinc_n_ptr
->n
+= inc
;
2272 n
->range
= n1
->range
;
2274 source_stmt
= source_stmt1
;
2278 || alias_ptr_types_compatible_p (n1
->alias_set
, n2
->alias_set
))
2279 n
->alias_set
= n1
->alias_set
;
2281 n
->alias_set
= ptr_type_node
;
2282 n
->vuse
= n_start
->vuse
;
2283 n
->base_addr
= n_start
->base_addr
;
2284 n
->offset
= n_start
->offset
;
2285 n
->src
= n_start
->src
;
2286 n
->bytepos
= n_start
->bytepos
;
2287 n
->type
= n_start
->type
;
2288 size
= TYPE_PRECISION (n
->type
) / BITS_PER_UNIT
;
2290 for (i
= 0, mask
= MARKER_MASK
; i
< size
; i
++, mask
<<= BITS_PER_MARKER
)
2292 uint64_t masked1
, masked2
;
2294 masked1
= n1
->n
& mask
;
2295 masked2
= n2
->n
& mask
;
2296 if (masked1
&& masked2
&& masked1
!= masked2
)
2299 n
->n
= n1
->n
| n2
->n
;
2300 n
->n_ops
= n1
->n_ops
+ n2
->n_ops
;
/* find_bswap_or_nop_1 invokes itself recursively with N and tries to perform
   the operation given by the rhs of STMT on the result.  If the operation
   could successfully be executed the function returns a gimple stmt whose
   rhs's first tree is the expression of the source operand and NULL
   otherwise.  */
2312 find_bswap_or_nop_1 (gimple
*stmt
, struct symbolic_number
*n
, int limit
)
2314 enum tree_code code
;
2315 tree rhs1
, rhs2
= NULL
;
2316 gimple
*rhs1_stmt
, *rhs2_stmt
, *source_stmt1
;
2317 enum gimple_rhs_class rhs_class
;
2319 if (!limit
|| !is_gimple_assign (stmt
))
2322 rhs1
= gimple_assign_rhs1 (stmt
);
2324 if (find_bswap_or_nop_load (stmt
, rhs1
, n
))
2327 /* Handle BIT_FIELD_REF. */
2328 if (TREE_CODE (rhs1
) == BIT_FIELD_REF
2329 && TREE_CODE (TREE_OPERAND (rhs1
, 0)) == SSA_NAME
)
2331 unsigned HOST_WIDE_INT bitsize
= tree_to_uhwi (TREE_OPERAND (rhs1
, 1));
2332 unsigned HOST_WIDE_INT bitpos
= tree_to_uhwi (TREE_OPERAND (rhs1
, 2));
2333 if (bitpos
% BITS_PER_UNIT
== 0
2334 && bitsize
% BITS_PER_UNIT
== 0
2335 && init_symbolic_number (n
, TREE_OPERAND (rhs1
, 0)))
2337 /* Handle big-endian bit numbering in BIT_FIELD_REF. */
2338 if (BYTES_BIG_ENDIAN
)
2339 bitpos
= TYPE_PRECISION (n
->type
) - bitpos
- bitsize
;
2342 if (!do_shift_rotate (RSHIFT_EXPR
, n
, bitpos
))
2347 uint64_t tmp
= (1 << BITS_PER_UNIT
) - 1;
2348 for (unsigned i
= 0; i
< bitsize
/ BITS_PER_UNIT
;
2349 i
++, tmp
<<= BITS_PER_UNIT
)
2350 mask
|= (uint64_t) MARKER_MASK
<< (i
* BITS_PER_MARKER
);
2354 n
->type
= TREE_TYPE (rhs1
);
2356 n
->range
= TYPE_PRECISION (n
->type
) / BITS_PER_UNIT
;
2358 return verify_symbolic_number_p (n
, stmt
) ? stmt
: NULL
;
2364 if (TREE_CODE (rhs1
) != SSA_NAME
)
2367 code
= gimple_assign_rhs_code (stmt
);
2368 rhs_class
= gimple_assign_rhs_class (stmt
);
2369 rhs1_stmt
= SSA_NAME_DEF_STMT (rhs1
);
2371 if (rhs_class
== GIMPLE_BINARY_RHS
)
2372 rhs2
= gimple_assign_rhs2 (stmt
);
  /* Handle unary rhs and binary rhs with integer constants as second
     operand.  */
2377 if (rhs_class
== GIMPLE_UNARY_RHS
2378 || (rhs_class
== GIMPLE_BINARY_RHS
2379 && TREE_CODE (rhs2
) == INTEGER_CST
))
2381 if (code
!= BIT_AND_EXPR
2382 && code
!= LSHIFT_EXPR
2383 && code
!= RSHIFT_EXPR
2384 && code
!= LROTATE_EXPR
2385 && code
!= RROTATE_EXPR
2386 && !CONVERT_EXPR_CODE_P (code
))
2389 source_stmt1
= find_bswap_or_nop_1 (rhs1_stmt
, n
, limit
- 1);
2391 /* If find_bswap_or_nop_1 returned NULL, STMT is a leaf node and
2392 we have to initialize the symbolic number. */
2395 if (gimple_assign_load_p (stmt
)
2396 || !init_symbolic_number (n
, rhs1
))
2398 source_stmt1
= stmt
;
2405 int i
, size
= TYPE_PRECISION (n
->type
) / BITS_PER_UNIT
;
2406 uint64_t val
= int_cst_value (rhs2
), mask
= 0;
2407 uint64_t tmp
= (1 << BITS_PER_UNIT
) - 1;
2409 /* Only constants masking full bytes are allowed. */
2410 for (i
= 0; i
< size
; i
++, tmp
<<= BITS_PER_UNIT
)
2411 if ((val
& tmp
) != 0 && (val
& tmp
) != tmp
)
2414 mask
|= (uint64_t) MARKER_MASK
<< (i
* BITS_PER_MARKER
);
2423 if (!do_shift_rotate (code
, n
, (int) TREE_INT_CST_LOW (rhs2
)))
2428 int i
, type_size
, old_type_size
;
2431 type
= gimple_expr_type (stmt
);
2432 type_size
= TYPE_PRECISION (type
);
2433 if (type_size
% BITS_PER_UNIT
!= 0)
2435 type_size
/= BITS_PER_UNIT
;
2436 if (type_size
> 64 / BITS_PER_MARKER
)
2439 /* Sign extension: result is dependent on the value. */
2440 old_type_size
= TYPE_PRECISION (n
->type
) / BITS_PER_UNIT
;
2441 if (!TYPE_UNSIGNED (n
->type
) && type_size
> old_type_size
2442 && HEAD_MARKER (n
->n
, old_type_size
))
2443 for (i
= 0; i
< type_size
- old_type_size
; i
++)
2444 n
->n
|= (uint64_t) MARKER_BYTE_UNKNOWN
2445 << ((type_size
- 1 - i
) * BITS_PER_MARKER
);
2447 if (type_size
< 64 / BITS_PER_MARKER
)
2449 /* If STMT casts to a smaller type mask out the bits not
2450 belonging to the target type. */
2451 n
->n
&= ((uint64_t) 1 << (type_size
* BITS_PER_MARKER
)) - 1;
2455 n
->range
= type_size
;
2461 return verify_symbolic_number_p (n
, stmt
) ? source_stmt1
: NULL
;
2464 /* Handle binary rhs. */
2466 if (rhs_class
== GIMPLE_BINARY_RHS
)
2468 struct symbolic_number n1
, n2
;
2469 gimple
*source_stmt
, *source_stmt2
;
2471 if (code
!= BIT_IOR_EXPR
)
2474 if (TREE_CODE (rhs2
) != SSA_NAME
)
2477 rhs2_stmt
= SSA_NAME_DEF_STMT (rhs2
);
2482 source_stmt1
= find_bswap_or_nop_1 (rhs1_stmt
, &n1
, limit
- 1);
2487 source_stmt2
= find_bswap_or_nop_1 (rhs2_stmt
, &n2
, limit
- 1);
2492 if (TYPE_PRECISION (n1
.type
) != TYPE_PRECISION (n2
.type
))
2495 if (!n1
.vuse
!= !n2
.vuse
2496 || (n1
.vuse
&& !operand_equal_p (n1
.vuse
, n2
.vuse
, 0)))
2500 = perform_symbolic_merge (source_stmt1
, &n1
, source_stmt2
, &n2
, n
);
2505 if (!verify_symbolic_number_p (n
, stmt
))
/* Check if STMT completes a bswap implementation or a read in a given
   endianness consisting of ORs, SHIFTs and ANDs and sets *BSWAP
   accordingly.  It also sets N to represent the kind of operations
   performed: size of the resulting expression and whether it works on
   a memory source, and if so alias-set and vuse.  At last, the
   function returns a stmt whose rhs's first tree is the source
   expression.  */
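
/* For illustration only (the snippet below is user code that this function
   recognizes, it is not part of GCC): a manual 32-bit byte swap such as

     uint32_t swap32 (uint32_t x)
     {
       return ((x & 0x000000ff) << 24)
              | ((x & 0x0000ff00) << 8)
              | ((x & 0x00ff0000) >> 8)
              | ((x & 0xff000000) >> 24);
     }

   produces a symbolic number matching CMPXCHG (masked to 4 bytes), so *BSWAP
   is set to true and the whole computation can be replaced by
   __builtin_bswap32 (x).  */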
2526 find_bswap_or_nop (gimple
*stmt
, struct symbolic_number
*n
, bool *bswap
)
2529 uint64_t tmpn
, mask
;
2530 /* The number which the find_bswap_or_nop_1 result should match in order
2531 to have a full byte swap. The number is shifted to the right
2532 according to the size of the symbolic number before using it. */
2533 uint64_t cmpxchg
= CMPXCHG
;
2534 uint64_t cmpnop
= CMPNOP
;
2539 /* The last parameter determines the depth search limit. It usually
2540 correlates directly to the number n of bytes to be touched. We
2541 increase that number by log2(n) + 1 here in order to also
2542 cover signed -> unsigned conversions of the src operand as can be seen
2543 in libgcc, and for initial shift/and operation of the src operand. */
2544 limit
= TREE_INT_CST_LOW (TYPE_SIZE_UNIT (gimple_expr_type (stmt
)));
2545 limit
+= 1 + (int) ceil_log2 ((unsigned HOST_WIDE_INT
) limit
);
2546 ins_stmt
= find_bswap_or_nop_1 (stmt
, n
, limit
);
2551 /* Find real size of result (highest non-zero byte). */
2553 for (tmpn
= n
->n
, rsize
= 0; tmpn
; tmpn
>>= BITS_PER_MARKER
, rsize
++);
  /* Zero out the bits corresponding to untouched bytes in original gimple
     expression.  */
2559 if (n
->range
< (int) sizeof (int64_t))
2561 mask
= ((uint64_t) 1 << (n
->range
* BITS_PER_MARKER
)) - 1;
2562 cmpxchg
>>= (64 / BITS_PER_MARKER
- n
->range
) * BITS_PER_MARKER
;
2566 /* Zero out the bits corresponding to unused bytes in the result of the
2567 gimple expression. */
2568 if (rsize
< n
->range
)
2570 if (BYTES_BIG_ENDIAN
)
2572 mask
= ((uint64_t) 1 << (rsize
* BITS_PER_MARKER
)) - 1;
2574 cmpnop
>>= (n
->range
- rsize
) * BITS_PER_MARKER
;
2578 mask
= ((uint64_t) 1 << (rsize
* BITS_PER_MARKER
)) - 1;
2579 cmpxchg
>>= (n
->range
- rsize
) * BITS_PER_MARKER
;
  /* A complete byte swap should make the symbolic number start with
     the largest digit in the highest order byte.  An unchanged symbolic
     number indicates a read with the same endianness as the target
     architecture.  */
2590 else if (n
->n
== cmpxchg
)
2595 /* Useless bit manipulation performed by code. */
2596 if (!n
->base_addr
&& n
->n
== cmpnop
&& n
->n_ops
== 1)
2599 n
->range
*= BITS_PER_UNIT
;
const pass_data pass_data_optimize_bswap =
{
  GIMPLE_PASS, /* type */
  "bswap", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_optimize_bswap : public gimple_opt_pass
{
public:
  pass_optimize_bswap (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_optimize_bswap, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *)
    {
      return flag_expensive_optimizations && optimize;
    }

  virtual unsigned int execute (function *);

}; // class pass_optimize_bswap
/* Perform the bswap optimization: replace the expression computed in the rhs
   of CUR_STMT by an equivalent bswap, load or load + bswap expression.
   Which of these alternatives replaces the rhs is given by N->base_addr (non
   null if a load is needed) and BSWAP.  The type, VUSE and alias set of the
   load to perform are also given in N while the builtin bswap invocation is
   given in FNDECL.  Finally, if a load is involved, INS_STMT refers to one of
   the load statements involved to construct the rhs in CUR_STMT and N->range
   gives the size of the rhs expression for maintaining some statistics.

   Note that if the replacement involves a load, CUR_STMT is moved just after
   INS_STMT to do the load with the same VUSE which can lead to CUR_STMT
   changing basic block.  */
2649 bswap_replace (gimple
*cur_stmt
, gimple
*ins_stmt
, tree fndecl
,
2650 tree bswap_type
, tree load_type
, struct symbolic_number
*n
,
2653 gimple_stmt_iterator gsi
;
2657 gsi
= gsi_for_stmt (cur_stmt
);
2659 tgt
= gimple_assign_lhs (cur_stmt
);
2661 /* Need to load the value from memory first. */
2664 gimple_stmt_iterator gsi_ins
= gsi_for_stmt (ins_stmt
);
2665 tree addr_expr
, addr_tmp
, val_expr
, val_tmp
;
2666 tree load_offset_ptr
, aligned_load_type
;
2667 gimple
*addr_stmt
, *load_stmt
;
2669 HOST_WIDE_INT load_offset
= 0;
2670 basic_block ins_bb
, cur_bb
;
2672 ins_bb
= gimple_bb (ins_stmt
);
2673 cur_bb
= gimple_bb (cur_stmt
);
2674 if (!dominated_by_p (CDI_DOMINATORS
, cur_bb
, ins_bb
))
2677 align
= get_object_alignment (src
);
      /* Move cur_stmt just before one of the loads of the original
         to ensure it has the same VUSE.  See PR61517 for what could
         go wrong.  */
2682 if (gimple_bb (cur_stmt
) != gimple_bb (ins_stmt
))
2683 reset_flow_sensitive_info (gimple_assign_lhs (cur_stmt
));
2684 gsi_move_before (&gsi
, &gsi_ins
);
2685 gsi
= gsi_for_stmt (cur_stmt
);
      /* Compute address to load from and cast according to the size
         of load_type.  */
2689 addr_expr
= build_fold_addr_expr (unshare_expr (src
));
2690 if (is_gimple_mem_ref_addr (addr_expr
))
2691 addr_tmp
= addr_expr
;
2694 addr_tmp
= make_temp_ssa_name (TREE_TYPE (addr_expr
), NULL
,
2696 addr_stmt
= gimple_build_assign (addr_tmp
, addr_expr
);
2697 gsi_insert_before (&gsi
, addr_stmt
, GSI_SAME_STMT
);
2700 /* Perform the load. */
2701 aligned_load_type
= load_type
;
2702 if (align
< TYPE_ALIGN (load_type
))
2703 aligned_load_type
= build_aligned_type (load_type
, align
);
2704 load_offset_ptr
= build_int_cst (n
->alias_set
, load_offset
);
2705 val_expr
= fold_build2 (MEM_REF
, aligned_load_type
, addr_tmp
,
2711 nop_stats
.found_16bit
++;
2712 else if (n
->range
== 32)
2713 nop_stats
.found_32bit
++;
2716 gcc_assert (n
->range
== 64);
2717 nop_stats
.found_64bit
++;
2720 /* Convert the result of load if necessary. */
2721 if (!useless_type_conversion_p (TREE_TYPE (tgt
), load_type
))
2723 val_tmp
= make_temp_ssa_name (aligned_load_type
, NULL
,
2725 load_stmt
= gimple_build_assign (val_tmp
, val_expr
);
2726 gimple_set_vuse (load_stmt
, n
->vuse
);
2727 gsi_insert_before (&gsi
, load_stmt
, GSI_SAME_STMT
);
2728 gimple_assign_set_rhs_with_ops (&gsi
, NOP_EXPR
, val_tmp
);
2732 gimple_assign_set_rhs_with_ops (&gsi
, MEM_REF
, val_expr
);
2733 gimple_set_vuse (cur_stmt
, n
->vuse
);
2735 update_stmt (cur_stmt
);
2740 "%d bit load in target endianness found at: ",
2742 print_gimple_stmt (dump_file
, cur_stmt
, 0);
2748 val_tmp
= make_temp_ssa_name (aligned_load_type
, NULL
, "load_dst");
2749 load_stmt
= gimple_build_assign (val_tmp
, val_expr
);
2750 gimple_set_vuse (load_stmt
, n
->vuse
);
2751 gsi_insert_before (&gsi
, load_stmt
, GSI_SAME_STMT
);
2758 if (!useless_type_conversion_p (TREE_TYPE (tgt
), TREE_TYPE (src
)))
2760 if (!is_gimple_val (src
))
2762 g
= gimple_build_assign (tgt
, NOP_EXPR
, src
);
2765 g
= gimple_build_assign (tgt
, src
);
2767 nop_stats
.found_16bit
++;
2768 else if (n
->range
== 32)
2769 nop_stats
.found_32bit
++;
2772 gcc_assert (n
->range
== 64);
2773 nop_stats
.found_64bit
++;
2778 "%d bit reshuffle in target endianness found at: ",
2780 print_gimple_stmt (dump_file
, cur_stmt
, 0);
2782 gsi_replace (&gsi
, g
, true);
2785 else if (TREE_CODE (src
) == BIT_FIELD_REF
)
2786 src
= TREE_OPERAND (src
, 0);
2789 bswap_stats
.found_16bit
++;
2790 else if (n
->range
== 32)
2791 bswap_stats
.found_32bit
++;
2794 gcc_assert (n
->range
== 64);
2795 bswap_stats
.found_64bit
++;
2800 /* Convert the src expression if necessary. */
2801 if (!useless_type_conversion_p (TREE_TYPE (tmp
), bswap_type
))
2803 gimple
*convert_stmt
;
2805 tmp
= make_temp_ssa_name (bswap_type
, NULL
, "bswapsrc");
2806 convert_stmt
= gimple_build_assign (tmp
, NOP_EXPR
, src
);
2807 gsi_insert_before (&gsi
, convert_stmt
, GSI_SAME_STMT
);
  /* Canonical form for 16 bit bswap is a rotate expression.  Only 16-bit
     values are considered, as a rotation of 2N-bit values by N bits is
     generally not equivalent to a bswap.  Consider for instance
     0x01020304 r>> 16 which gives 0x03040102 while a bswap for that value
     is 0x04030201.  */
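  /* Illustrative note (added for exposition): for 16-bit values a byte swap
     and a rotate by 8 coincide, e.g.

       uint16_t swap16 (uint16_t x) { return (x >> 8) | (x << 8); }

     computes __builtin_bswap16 (x), which is why the canonical replacement
     built below is an LROTATE_EXPR by BITS_PER_UNIT rather than a call.  */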
2814 if (bswap
&& n
->range
== 16)
2816 tree count
= build_int_cst (NULL
, BITS_PER_UNIT
);
2817 src
= fold_build2 (LROTATE_EXPR
, bswap_type
, tmp
, count
);
2818 bswap_stmt
= gimple_build_assign (NULL
, src
);
2821 bswap_stmt
= gimple_build_call (fndecl
, 1, tmp
);
2825 /* Convert the result if necessary. */
2826 if (!useless_type_conversion_p (TREE_TYPE (tgt
), bswap_type
))
2828 gimple
*convert_stmt
;
2830 tmp
= make_temp_ssa_name (bswap_type
, NULL
, "bswapdst");
2831 convert_stmt
= gimple_build_assign (tgt
, NOP_EXPR
, tmp
);
2832 gsi_insert_after (&gsi
, convert_stmt
, GSI_SAME_STMT
);
2835 gimple_set_lhs (bswap_stmt
, tmp
);
2839 fprintf (dump_file
, "%d bit bswap implementation found at: ",
2841 print_gimple_stmt (dump_file
, cur_stmt
, 0);
2844 gsi_insert_after (&gsi
, bswap_stmt
, GSI_SAME_STMT
);
2845 gsi_remove (&gsi
, true);
/* Find manual byte swap implementations as well as load in a given
   endianness.  Byte swaps are turned into a bswap builtin invocation
   while endian loads are converted to bswap builtin invocation or
   simple load according to the target endianness.  */
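
/* For illustration only (user code the pass recognizes, not part of GCC):

     uint32_t read_le32 (const unsigned char *p)
     {
       return (uint32_t) p[0]
              | ((uint32_t) p[1] << 8)
              | ((uint32_t) p[2] << 16)
              | ((uint32_t) p[3] << 24);
     }

   is rewritten into a single 32-bit load on little-endian targets, and into a
   load followed by __builtin_bswap32 on big-endian targets (provided the
   bswap optab is available).  */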
2855 pass_optimize_bswap::execute (function
*fun
)
2858 bool bswap32_p
, bswap64_p
;
2859 bool changed
= false;
2860 tree bswap32_type
= NULL_TREE
, bswap64_type
= NULL_TREE
;
2862 if (BITS_PER_UNIT
!= 8)
2865 bswap32_p
= (builtin_decl_explicit_p (BUILT_IN_BSWAP32
)
2866 && optab_handler (bswap_optab
, SImode
) != CODE_FOR_nothing
);
2867 bswap64_p
= (builtin_decl_explicit_p (BUILT_IN_BSWAP64
)
2868 && (optab_handler (bswap_optab
, DImode
) != CODE_FOR_nothing
2869 || (bswap32_p
&& word_mode
== SImode
)));
2871 /* Determine the argument type of the builtins. The code later on
2872 assumes that the return and argument type are the same. */
2875 tree fndecl
= builtin_decl_explicit (BUILT_IN_BSWAP32
);
2876 bswap32_type
= TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl
)));
2881 tree fndecl
= builtin_decl_explicit (BUILT_IN_BSWAP64
);
2882 bswap64_type
= TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl
)));
2885 memset (&nop_stats
, 0, sizeof (nop_stats
));
2886 memset (&bswap_stats
, 0, sizeof (bswap_stats
));
2887 calculate_dominance_info (CDI_DOMINATORS
);
2889 FOR_EACH_BB_FN (bb
, fun
)
2891 gimple_stmt_iterator gsi
;
2893 /* We do a reverse scan for bswap patterns to make sure we get the
2894 widest match. As bswap pattern matching doesn't handle previously
2895 inserted smaller bswap replacements as sub-patterns, the wider
2896 variant wouldn't be detected. */
2897 for (gsi
= gsi_last_bb (bb
); !gsi_end_p (gsi
);)
2899 gimple
*ins_stmt
, *cur_stmt
= gsi_stmt (gsi
);
2900 tree fndecl
= NULL_TREE
, bswap_type
= NULL_TREE
, load_type
;
2901 enum tree_code code
;
2902 struct symbolic_number n
;
          /* This gsi_prev (&gsi) is not part of the for loop because cur_stmt
             might be moved to a different basic block by bswap_replace and gsi
             must not point to it if that is the case.  Moving the gsi_prev
             there makes sure that gsi points to the statement previous to
             cur_stmt while still making sure that all statements are
             considered in this basic block.  */
2913 if (!is_gimple_assign (cur_stmt
))
2916 code
= gimple_assign_rhs_code (cur_stmt
);
2921 if (!tree_fits_uhwi_p (gimple_assign_rhs2 (cur_stmt
))
2922 || tree_to_uhwi (gimple_assign_rhs2 (cur_stmt
))
2932 ins_stmt
= find_bswap_or_nop (cur_stmt
, &n
, &bswap
);
2940 /* Already in canonical form, nothing to do. */
2941 if (code
== LROTATE_EXPR
|| code
== RROTATE_EXPR
)
2943 load_type
= bswap_type
= uint16_type_node
;
2946 load_type
= uint32_type_node
;
2949 fndecl
= builtin_decl_explicit (BUILT_IN_BSWAP32
);
2950 bswap_type
= bswap32_type
;
2954 load_type
= uint64_type_node
;
2957 fndecl
= builtin_decl_explicit (BUILT_IN_BSWAP64
);
2958 bswap_type
= bswap64_type
;
2965 if (bswap
&& !fndecl
&& n
.range
!= 16)
2968 if (bswap_replace (cur_stmt
, ins_stmt
, fndecl
, bswap_type
, load_type
,
2974 statistics_counter_event (fun
, "16-bit nop implementations found",
2975 nop_stats
.found_16bit
);
2976 statistics_counter_event (fun
, "32-bit nop implementations found",
2977 nop_stats
.found_32bit
);
2978 statistics_counter_event (fun
, "64-bit nop implementations found",
2979 nop_stats
.found_64bit
);
2980 statistics_counter_event (fun
, "16-bit bswap implementations found",
2981 bswap_stats
.found_16bit
);
2982 statistics_counter_event (fun
, "32-bit bswap implementations found",
2983 bswap_stats
.found_32bit
);
2984 statistics_counter_event (fun
, "64-bit bswap implementations found",
2985 bswap_stats
.found_64bit
);
2987 return (changed
? TODO_update_ssa
: 0);
gimple_opt_pass *
make_pass_optimize_bswap (gcc::context *ctxt)
{
  return new pass_optimize_bswap (ctxt);
}
2998 /* Return true if stmt is a type conversion operation that can be stripped
2999 when used in a widening multiply operation. */
3001 widening_mult_conversion_strippable_p (tree result_type
, gimple
*stmt
)
3003 enum tree_code rhs_code
= gimple_assign_rhs_code (stmt
);
3005 if (TREE_CODE (result_type
) == INTEGER_TYPE
)
3010 if (!CONVERT_EXPR_CODE_P (rhs_code
))
3013 op_type
= TREE_TYPE (gimple_assign_lhs (stmt
));
3015 /* If the type of OP has the same precision as the result, then
3016 we can strip this conversion. The multiply operation will be
3017 selected to create the correct extension as a by-product. */
3018 if (TYPE_PRECISION (result_type
) == TYPE_PRECISION (op_type
))
3021 /* We can also strip a conversion if it preserves the signed-ness of
3022 the operation and doesn't narrow the range. */
3023 inner_op_type
= TREE_TYPE (gimple_assign_rhs1 (stmt
));
3025 /* If the inner-most type is unsigned, then we can strip any
3026 intermediate widening operation. If it's signed, then the
3027 intermediate widening operation must also be signed. */
3028 if ((TYPE_UNSIGNED (inner_op_type
)
3029 || TYPE_UNSIGNED (op_type
) == TYPE_UNSIGNED (inner_op_type
))
3030 && TYPE_PRECISION (op_type
) > TYPE_PRECISION (inner_op_type
))
3036 return rhs_code
== FIXED_CONVERT_EXPR
;
3039 /* Return true if RHS is a suitable operand for a widening multiplication,
3040 assuming a target type of TYPE.
3041 There are two cases:
3043 - RHS makes some value at least twice as wide. Store that value
3044 in *NEW_RHS_OUT if so, and store its type in *TYPE_OUT.
3046 - RHS is an integer constant. Store that value in *NEW_RHS_OUT if so,
3047 but leave *TYPE_OUT untouched. */
3050 is_widening_mult_rhs_p (tree type
, tree rhs
, tree
*type_out
,
3056 if (TREE_CODE (rhs
) == SSA_NAME
)
3058 stmt
= SSA_NAME_DEF_STMT (rhs
);
3059 if (is_gimple_assign (stmt
))
3061 if (! widening_mult_conversion_strippable_p (type
, stmt
))
3065 rhs1
= gimple_assign_rhs1 (stmt
);
3067 if (TREE_CODE (rhs1
) == INTEGER_CST
)
3069 *new_rhs_out
= rhs1
;
3078 type1
= TREE_TYPE (rhs1
);
3080 if (TREE_CODE (type1
) != TREE_CODE (type
)
3081 || TYPE_PRECISION (type1
) * 2 > TYPE_PRECISION (type
))
3084 *new_rhs_out
= rhs1
;
3089 if (TREE_CODE (rhs
) == INTEGER_CST
)
3099 /* Return true if STMT performs a widening multiplication, assuming the
3100 output type is TYPE. If so, store the unwidened types of the operands
3101 in *TYPE1_OUT and *TYPE2_OUT respectively. Also fill *RHS1_OUT and
3102 *RHS2_OUT such that converting those operands to types *TYPE1_OUT
3103 and *TYPE2_OUT would give the operands of the multiplication. */
3106 is_widening_mult_p (gimple
*stmt
,
3107 tree
*type1_out
, tree
*rhs1_out
,
3108 tree
*type2_out
, tree
*rhs2_out
)
3110 tree type
= TREE_TYPE (gimple_assign_lhs (stmt
));
3112 if (TREE_CODE (type
) != INTEGER_TYPE
3113 && TREE_CODE (type
) != FIXED_POINT_TYPE
)
3116 if (!is_widening_mult_rhs_p (type
, gimple_assign_rhs1 (stmt
), type1_out
,
3120 if (!is_widening_mult_rhs_p (type
, gimple_assign_rhs2 (stmt
), type2_out
,
3124 if (*type1_out
== NULL
)
3126 if (*type2_out
== NULL
|| !int_fits_type_p (*rhs1_out
, *type2_out
))
3128 *type1_out
= *type2_out
;
3131 if (*type2_out
== NULL
)
3133 if (!int_fits_type_p (*rhs2_out
, *type1_out
))
3135 *type2_out
= *type1_out
;
3138 /* Ensure that the larger of the two operands comes first. */
3139 if (TYPE_PRECISION (*type1_out
) < TYPE_PRECISION (*type2_out
))
3141 std::swap (*type1_out
, *type2_out
);
3142 std::swap (*rhs1_out
, *rhs2_out
);
3148 /* Check to see if the CALL statement is an invocation of copysign
3149 with 1. being the first argument. */
3151 is_copysign_call_with_1 (gimple
*call
)
3153 gcall
*c
= dyn_cast
<gcall
*> (call
);
3157 enum combined_fn code
= gimple_call_combined_fn (c
);
3159 if (code
== CFN_LAST
)
3162 if (builtin_fn_p (code
))
3164 switch (as_builtin_fn (code
))
3166 CASE_FLT_FN (BUILT_IN_COPYSIGN
):
3167 CASE_FLT_FN_FLOATN_NX (BUILT_IN_COPYSIGN
):
3168 return real_onep (gimple_call_arg (c
, 0));
3174 if (internal_fn_p (code
))
3176 switch (as_internal_fn (code
))
3179 return real_onep (gimple_call_arg (c
, 0));
/* Try to expand the pattern x * copysign (1, y) into xorsign (x, y).
   This is done only if the xorsign optab is available.  If the pattern
   is not a xorsign pattern or if expansion fails, FALSE is returned;
   otherwise TRUE is returned.  */
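
/* For illustration only (user code, not part of GCC):

     double f (double x, double y)
     {
       return x * __builtin_copysign (1.0, y);
     }

   is rewritten into an IFN_XORSIGN internal call when the target provides the
   xorsign optab, which typically expands to clearing the sign bit of x and
   XORing in the sign bit of y, avoiding the multiplication.  */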
3193 convert_expand_mult_copysign (gimple
*stmt
, gimple_stmt_iterator
*gsi
)
3195 tree treeop0
, treeop1
, lhs
, type
;
3196 location_t loc
= gimple_location (stmt
);
3197 lhs
= gimple_assign_lhs (stmt
);
3198 treeop0
= gimple_assign_rhs1 (stmt
);
3199 treeop1
= gimple_assign_rhs2 (stmt
);
3200 type
= TREE_TYPE (lhs
);
3201 machine_mode mode
= TYPE_MODE (type
);
3203 if (HONOR_SNANS (type
) || !has_single_use (lhs
))
3206 if (TREE_CODE (treeop0
) == SSA_NAME
&& TREE_CODE (treeop1
) == SSA_NAME
)
3208 gimple
*call0
= SSA_NAME_DEF_STMT (treeop0
);
3209 if (!is_copysign_call_with_1 (call0
))
3211 call0
= SSA_NAME_DEF_STMT (treeop1
);
3212 if (!is_copysign_call_with_1 (call0
))
3218 if (optab_handler (xorsign_optab
, mode
) == CODE_FOR_nothing
)
3221 gcall
*c
= as_a
<gcall
*> (call0
);
3222 treeop0
= gimple_call_arg (c
, 1);
3225 = gimple_build_call_internal (IFN_XORSIGN
, 2, treeop1
, treeop0
);
3226 gimple_set_lhs (call_stmt
, lhs
);
3227 gimple_set_location (call_stmt
, loc
);
3228 gsi_replace (gsi
, call_stmt
, true);
3235 /* Process a single gimple statement STMT, which has a MULT_EXPR as
3236 its rhs, and try to convert it into a WIDEN_MULT_EXPR. The return
3237 value is true iff we converted the statement. */
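
/* For illustration only (user code, not part of GCC):

     int64_t widen_mul (int32_t a, int32_t b)
     {
       return (int64_t) a * (int64_t) b;
     }

   Both multiplication operands are extensions from a type at most half as
   wide as the result, so the MULT_EXPR is rewritten as WIDEN_MULT_EXPR <a, b>
   when the target provides a suitable widening multiply optab handler.  */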
3240 convert_mult_to_widen (gimple
*stmt
, gimple_stmt_iterator
*gsi
)
3242 tree lhs
, rhs1
, rhs2
, type
, type1
, type2
;
3243 enum insn_code handler
;
3244 machine_mode to_mode
, from_mode
, actual_mode
;
3246 int actual_precision
;
3247 location_t loc
= gimple_location (stmt
);
3248 bool from_unsigned1
, from_unsigned2
;
3250 lhs
= gimple_assign_lhs (stmt
);
3251 type
= TREE_TYPE (lhs
);
3252 if (TREE_CODE (type
) != INTEGER_TYPE
)
3255 if (!is_widening_mult_p (stmt
, &type1
, &rhs1
, &type2
, &rhs2
))
3258 to_mode
= TYPE_MODE (type
);
3259 from_mode
= TYPE_MODE (type1
);
3260 from_unsigned1
= TYPE_UNSIGNED (type1
);
3261 from_unsigned2
= TYPE_UNSIGNED (type2
);
3263 if (from_unsigned1
&& from_unsigned2
)
3264 op
= umul_widen_optab
;
3265 else if (!from_unsigned1
&& !from_unsigned2
)
3266 op
= smul_widen_optab
;
3268 op
= usmul_widen_optab
;
3270 handler
= find_widening_optab_handler_and_mode (op
, to_mode
, from_mode
,
3273 if (handler
== CODE_FOR_nothing
)
3275 if (op
!= smul_widen_optab
)
3277 /* We can use a signed multiply with unsigned types as long as
3278 there is a wider mode to use, or it is the smaller of the two
3279 types that is unsigned. Note that type1 >= type2, always. */
3280 if ((TYPE_UNSIGNED (type1
)
3281 && TYPE_PRECISION (type1
) == GET_MODE_PRECISION (from_mode
))
3282 || (TYPE_UNSIGNED (type2
)
3283 && TYPE_PRECISION (type2
) == GET_MODE_PRECISION (from_mode
)))
3285 from_mode
= GET_MODE_WIDER_MODE (from_mode
);
3286 if (GET_MODE_SIZE (to_mode
) <= GET_MODE_SIZE (from_mode
))
3290 op
= smul_widen_optab
;
3291 handler
= find_widening_optab_handler_and_mode (op
, to_mode
,
3295 if (handler
== CODE_FOR_nothing
)
3298 from_unsigned1
= from_unsigned2
= false;
  /* Ensure that the inputs to the handler are in the correct precision
     for the opcode.  This will be the full mode size.  */
3306 actual_precision
= GET_MODE_PRECISION (actual_mode
);
3307 if (2 * actual_precision
> TYPE_PRECISION (type
))
3309 if (actual_precision
!= TYPE_PRECISION (type1
)
3310 || from_unsigned1
!= TYPE_UNSIGNED (type1
))
3311 rhs1
= build_and_insert_cast (gsi
, loc
,
3312 build_nonstandard_integer_type
3313 (actual_precision
, from_unsigned1
), rhs1
);
3314 if (actual_precision
!= TYPE_PRECISION (type2
)
3315 || from_unsigned2
!= TYPE_UNSIGNED (type2
))
3316 rhs2
= build_and_insert_cast (gsi
, loc
,
3317 build_nonstandard_integer_type
3318 (actual_precision
, from_unsigned2
), rhs2
);
3320 /* Handle constants. */
3321 if (TREE_CODE (rhs1
) == INTEGER_CST
)
3322 rhs1
= fold_convert (type1
, rhs1
);
3323 if (TREE_CODE (rhs2
) == INTEGER_CST
)
3324 rhs2
= fold_convert (type2
, rhs2
);
3326 gimple_assign_set_rhs1 (stmt
, rhs1
);
3327 gimple_assign_set_rhs2 (stmt
, rhs2
);
3328 gimple_assign_set_rhs_code (stmt
, WIDEN_MULT_EXPR
);
3330 widen_mul_stats
.widen_mults_inserted
++;
/* Process a single gimple statement STMT, which is found at the
   iterator GSI and has either a PLUS_EXPR or a MINUS_EXPR as its
   rhs (given by CODE), and try to convert it into a
   WIDEN_MULT_PLUS_EXPR or a WIDEN_MULT_MINUS_EXPR.  The return value
   is true iff we converted the statement.  */
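
/* For illustration only (user code, not part of GCC):

     int64_t macc (int64_t acc, int32_t a, int32_t b)
     {
       return acc + (int64_t) a * (int64_t) b;
     }

   The PLUS_EXPR whose addend is a widening multiplication is rewritten as
   WIDEN_MULT_PLUS_EXPR <a, b, acc> when a widening multiply-and-accumulate
   optab handler exists for the mode/signedness combination.  */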
3341 convert_plusminus_to_widen (gimple_stmt_iterator
*gsi
, gimple
*stmt
,
3342 enum tree_code code
)
3344 gimple
*rhs1_stmt
= NULL
, *rhs2_stmt
= NULL
;
3345 gimple
*conv1_stmt
= NULL
, *conv2_stmt
= NULL
, *conv_stmt
;
3346 tree type
, type1
, type2
, optype
;
3347 tree lhs
, rhs1
, rhs2
, mult_rhs1
, mult_rhs2
, add_rhs
;
3348 enum tree_code rhs1_code
= ERROR_MARK
, rhs2_code
= ERROR_MARK
;
3350 enum tree_code wmult_code
;
3351 enum insn_code handler
;
3352 machine_mode to_mode
, from_mode
, actual_mode
;
3353 location_t loc
= gimple_location (stmt
);
3354 int actual_precision
;
3355 bool from_unsigned1
, from_unsigned2
;
3357 lhs
= gimple_assign_lhs (stmt
);
3358 type
= TREE_TYPE (lhs
);
3359 if (TREE_CODE (type
) != INTEGER_TYPE
3360 && TREE_CODE (type
) != FIXED_POINT_TYPE
)
3363 if (code
== MINUS_EXPR
)
3364 wmult_code
= WIDEN_MULT_MINUS_EXPR
;
3366 wmult_code
= WIDEN_MULT_PLUS_EXPR
;
3368 rhs1
= gimple_assign_rhs1 (stmt
);
3369 rhs2
= gimple_assign_rhs2 (stmt
);
3371 if (TREE_CODE (rhs1
) == SSA_NAME
)
3373 rhs1_stmt
= SSA_NAME_DEF_STMT (rhs1
);
3374 if (is_gimple_assign (rhs1_stmt
))
3375 rhs1_code
= gimple_assign_rhs_code (rhs1_stmt
);
3378 if (TREE_CODE (rhs2
) == SSA_NAME
)
3380 rhs2_stmt
= SSA_NAME_DEF_STMT (rhs2
);
3381 if (is_gimple_assign (rhs2_stmt
))
3382 rhs2_code
= gimple_assign_rhs_code (rhs2_stmt
);
  /* Allow for one conversion statement between the multiply
     and addition/subtraction statement.  If there is more than
     one conversion then we assume they would invalidate this
     transformation.  If that's not the case then they should have
     been folded before now.  */
3390 if (CONVERT_EXPR_CODE_P (rhs1_code
))
3392 conv1_stmt
= rhs1_stmt
;
3393 rhs1
= gimple_assign_rhs1 (rhs1_stmt
);
3394 if (TREE_CODE (rhs1
) == SSA_NAME
)
3396 rhs1_stmt
= SSA_NAME_DEF_STMT (rhs1
);
3397 if (is_gimple_assign (rhs1_stmt
))
3398 rhs1_code
= gimple_assign_rhs_code (rhs1_stmt
);
3403 if (CONVERT_EXPR_CODE_P (rhs2_code
))
3405 conv2_stmt
= rhs2_stmt
;
3406 rhs2
= gimple_assign_rhs1 (rhs2_stmt
);
3407 if (TREE_CODE (rhs2
) == SSA_NAME
)
3409 rhs2_stmt
= SSA_NAME_DEF_STMT (rhs2
);
3410 if (is_gimple_assign (rhs2_stmt
))
3411 rhs2_code
= gimple_assign_rhs_code (rhs2_stmt
);
  /* If code is WIDEN_MULT_EXPR then it would seem unnecessary to call
     is_widening_mult_p, but we still need the rhs values it returns.

     It might also appear that it would be sufficient to use the existing
     operands of the widening multiply, but that would limit the choice of
     multiply-and-accumulate instructions.

     If the widened-multiplication result has more than one use, it is
     probably wiser not to do the conversion.  */
3426 if (code
== PLUS_EXPR
3427 && (rhs1_code
== MULT_EXPR
|| rhs1_code
== WIDEN_MULT_EXPR
))
3429 if (!has_single_use (rhs1
)
3430 || !is_widening_mult_p (rhs1_stmt
, &type1
, &mult_rhs1
,
3431 &type2
, &mult_rhs2
))
3434 conv_stmt
= conv1_stmt
;
3436 else if (rhs2_code
== MULT_EXPR
|| rhs2_code
== WIDEN_MULT_EXPR
)
3438 if (!has_single_use (rhs2
)
3439 || !is_widening_mult_p (rhs2_stmt
, &type1
, &mult_rhs1
,
3440 &type2
, &mult_rhs2
))
3443 conv_stmt
= conv2_stmt
;
3448 to_mode
= TYPE_MODE (type
);
3449 from_mode
= TYPE_MODE (type1
);
3450 from_unsigned1
= TYPE_UNSIGNED (type1
);
3451 from_unsigned2
= TYPE_UNSIGNED (type2
);
3454 /* There's no such thing as a mixed sign madd yet, so use a wider mode. */
3455 if (from_unsigned1
!= from_unsigned2
)
3457 if (!INTEGRAL_TYPE_P (type
))
3459 /* We can use a signed multiply with unsigned types as long as
3460 there is a wider mode to use, or it is the smaller of the two
3461 types that is unsigned. Note that type1 >= type2, always. */
3463 && TYPE_PRECISION (type1
) == GET_MODE_PRECISION (from_mode
))
3465 && TYPE_PRECISION (type2
) == GET_MODE_PRECISION (from_mode
)))
3467 from_mode
= GET_MODE_WIDER_MODE (from_mode
);
3468 if (GET_MODE_SIZE (from_mode
) >= GET_MODE_SIZE (to_mode
))
3472 from_unsigned1
= from_unsigned2
= false;
3473 optype
= build_nonstandard_integer_type (GET_MODE_PRECISION (from_mode
),
  /* If there was a conversion between the multiply and addition
     then we need to make sure it fits a multiply-and-accumulate.
     There should be a single mode change which does not change the
     value.  */
3483 /* We use the original, unmodified data types for this. */
3484 tree from_type
= TREE_TYPE (gimple_assign_rhs1 (conv_stmt
));
3485 tree to_type
= TREE_TYPE (gimple_assign_lhs (conv_stmt
));
3486 int data_size
= TYPE_PRECISION (type1
) + TYPE_PRECISION (type2
);
3487 bool is_unsigned
= TYPE_UNSIGNED (type1
) && TYPE_UNSIGNED (type2
);
3489 if (TYPE_PRECISION (from_type
) > TYPE_PRECISION (to_type
))
3491 /* Conversion is a truncate. */
3492 if (TYPE_PRECISION (to_type
) < data_size
)
3495 else if (TYPE_PRECISION (from_type
) < TYPE_PRECISION (to_type
))
3497 /* Conversion is an extend. Check it's the right sort. */
3498 if (TYPE_UNSIGNED (from_type
) != is_unsigned
3499 && !(is_unsigned
&& TYPE_PRECISION (from_type
) > data_size
))
3502 /* else convert is a no-op for our purposes. */
3505 /* Verify that the machine can perform a widening multiply
3506 accumulate in this mode/signedness combination, otherwise
3507 this transformation is likely to pessimize code. */
3508 this_optab
= optab_for_tree_code (wmult_code
, optype
, optab_default
);
3509 handler
= find_widening_optab_handler_and_mode (this_optab
, to_mode
,
3510 from_mode
, 0, &actual_mode
);
3512 if (handler
== CODE_FOR_nothing
)
  /* Ensure that the inputs to the handler are in the correct precision
     for the opcode.  This will be the full mode size.  */
3517 actual_precision
= GET_MODE_PRECISION (actual_mode
);
3518 if (actual_precision
!= TYPE_PRECISION (type1
)
3519 || from_unsigned1
!= TYPE_UNSIGNED (type1
))
3520 mult_rhs1
= build_and_insert_cast (gsi
, loc
,
3521 build_nonstandard_integer_type
3522 (actual_precision
, from_unsigned1
),
3524 if (actual_precision
!= TYPE_PRECISION (type2
)
3525 || from_unsigned2
!= TYPE_UNSIGNED (type2
))
3526 mult_rhs2
= build_and_insert_cast (gsi
, loc
,
3527 build_nonstandard_integer_type
3528 (actual_precision
, from_unsigned2
),
3531 if (!useless_type_conversion_p (type
, TREE_TYPE (add_rhs
)))
3532 add_rhs
= build_and_insert_cast (gsi
, loc
, type
, add_rhs
);
3534 /* Handle constants. */
3535 if (TREE_CODE (mult_rhs1
) == INTEGER_CST
)
3536 mult_rhs1
= fold_convert (type1
, mult_rhs1
);
3537 if (TREE_CODE (mult_rhs2
) == INTEGER_CST
)
3538 mult_rhs2
= fold_convert (type2
, mult_rhs2
);
3540 gimple_assign_set_rhs_with_ops (gsi
, wmult_code
, mult_rhs1
, mult_rhs2
,
3542 update_stmt (gsi_stmt (*gsi
));
3543 widen_mul_stats
.maccs_inserted
++;
3547 /* Combine the multiplication at MUL_STMT with operands MULOP1 and MULOP2
3548 with uses in additions and subtractions to form fused multiply-add
3549 operations. Returns true if successful and MUL_STMT should be removed. */
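
/* For illustration only (user code, not part of GCC), compiled with
   -ffp-contract=fast:

     double muladd (double a, double b, double c)
     {
       return a * b + c;
     }

   If every use of the multiplication is such an addition or subtraction and
   the fma optab is available, the MULT_EXPR is removed and each use becomes
   an FMA_EXPR <a, b, c>, the equivalent of fma (a, b, c).  */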
3552 convert_mult_to_fma (gimple
*mul_stmt
, tree op1
, tree op2
)
3554 tree mul_result
= gimple_get_lhs (mul_stmt
);
3555 tree type
= TREE_TYPE (mul_result
);
3556 gimple
*use_stmt
, *neguse_stmt
;
3558 use_operand_p use_p
;
3559 imm_use_iterator imm_iter
;
3561 if (FLOAT_TYPE_P (type
)
3562 && flag_fp_contract_mode
== FP_CONTRACT_OFF
)
3565 /* We don't want to do bitfield reduction ops. */
3566 if (INTEGRAL_TYPE_P (type
)
3567 && (TYPE_PRECISION (type
)
3568 != GET_MODE_PRECISION (TYPE_MODE (type
))))
3571 /* If the target doesn't support it, don't generate it. We assume that
3572 if fma isn't available then fms, fnma or fnms are not either. */
3573 if (optab_handler (fma_optab
, TYPE_MODE (type
)) == CODE_FOR_nothing
)
  /* If the multiplication has zero uses, it is kept around probably because
     of -fnon-call-exceptions.  Don't optimize it away in that case,
     it is DCE's job.  */
3579 if (has_zero_uses (mul_result
))
  /* Make sure that the multiplication statement becomes dead after
     the transformation, so that all uses are transformed to FMAs.
     This means we assume that an FMA operation has the same cost
     as an addition.  */
3586 FOR_EACH_IMM_USE_FAST (use_p
, imm_iter
, mul_result
)
3588 enum tree_code use_code
;
3589 tree result
= mul_result
;
3590 bool negate_p
= false;
3592 use_stmt
= USE_STMT (use_p
);
3594 if (is_gimple_debug (use_stmt
))
      /* For now restrict these operations to single basic blocks.  In theory
         we would want to support sinking the multiplication in
         m = a*b;
         if ()
           ma = m + c;
         else
           d = m;
         to form a fma in the then block and sink the multiplication to the
         else block.  */
3606 if (gimple_bb (use_stmt
) != gimple_bb (mul_stmt
))
3609 if (!is_gimple_assign (use_stmt
))
3612 use_code
= gimple_assign_rhs_code (use_stmt
);
3614 /* A negate on the multiplication leads to FNMA. */
3615 if (use_code
== NEGATE_EXPR
)
3620 result
= gimple_assign_lhs (use_stmt
);
3622 /* Make sure the negate statement becomes dead with this
3623 single transformation. */
3624 if (!single_imm_use (gimple_assign_lhs (use_stmt
),
3625 &use_p
, &neguse_stmt
))
3628 /* Make sure the multiplication isn't also used on that stmt. */
3629 FOR_EACH_PHI_OR_STMT_USE (usep
, neguse_stmt
, iter
, SSA_OP_USE
)
3630 if (USE_FROM_PTR (usep
) == mul_result
)
3634 use_stmt
= neguse_stmt
;
3635 if (gimple_bb (use_stmt
) != gimple_bb (mul_stmt
))
3637 if (!is_gimple_assign (use_stmt
))
3640 use_code
= gimple_assign_rhs_code (use_stmt
);
3647 if (gimple_assign_rhs2 (use_stmt
) == result
)
3648 negate_p
= !negate_p
;
3653 /* FMA can only be formed from PLUS and MINUS. */
3657 /* If the subtrahend (gimple_assign_rhs2 (use_stmt)) is computed
3658 by a MULT_EXPR that we'll visit later, we might be able to
3659 get a more profitable match with fnma.
         OTOH, if we don't, a negate / fma pair likely has lower latency
         than a mult / subtract pair.  */
3662 if (use_code
== MINUS_EXPR
&& !negate_p
3663 && gimple_assign_rhs1 (use_stmt
) == result
3664 && optab_handler (fms_optab
, TYPE_MODE (type
)) == CODE_FOR_nothing
3665 && optab_handler (fnma_optab
, TYPE_MODE (type
)) != CODE_FOR_nothing
)
3667 tree rhs2
= gimple_assign_rhs2 (use_stmt
);
3669 if (TREE_CODE (rhs2
) == SSA_NAME
)
3671 gimple
*stmt2
= SSA_NAME_DEF_STMT (rhs2
);
3672 if (has_single_use (rhs2
)
3673 && is_gimple_assign (stmt2
)
3674 && gimple_assign_rhs_code (stmt2
) == MULT_EXPR
)
3679 /* We can't handle a * b + a * b. */
3680 if (gimple_assign_rhs1 (use_stmt
) == gimple_assign_rhs2 (use_stmt
))
3683 /* While it is possible to validate whether or not the exact form
3684 that we've recognized is available in the backend, the assumption
3685 is that the transformation is never a loss. For instance, suppose
3686 the target only has the plain FMA pattern available. Consider
3687 a*b-c -> fma(a,b,-c): we've exchanged MUL+SUB for FMA+NEG, which
3688 is still two operations. Consider -(a*b)-c -> fma(-a,b,-c): we
3689 still have 3 operations, but in the FMA form the two NEGs are
3690 independent and could be run in parallel. */
3693 FOR_EACH_IMM_USE_STMT (use_stmt
, imm_iter
, mul_result
)
3695 gimple_stmt_iterator gsi
= gsi_for_stmt (use_stmt
);
3696 enum tree_code use_code
;
3697 tree addop
, mulop1
= op1
, result
= mul_result
;
3698 bool negate_p
= false;
3700 if (is_gimple_debug (use_stmt
))
3703 use_code
= gimple_assign_rhs_code (use_stmt
);
3704 if (use_code
== NEGATE_EXPR
)
3706 result
= gimple_assign_lhs (use_stmt
);
3707 single_imm_use (gimple_assign_lhs (use_stmt
), &use_p
, &neguse_stmt
);
3708 gsi_remove (&gsi
, true);
3709 release_defs (use_stmt
);
3711 use_stmt
= neguse_stmt
;
3712 gsi
= gsi_for_stmt (use_stmt
);
3713 use_code
= gimple_assign_rhs_code (use_stmt
);
3717 if (gimple_assign_rhs1 (use_stmt
) == result
)
3719 addop
= gimple_assign_rhs2 (use_stmt
);
3720 /* a * b - c -> a * b + (-c) */
3721 if (gimple_assign_rhs_code (use_stmt
) == MINUS_EXPR
)
3722 addop
= force_gimple_operand_gsi (&gsi
,
3723 build1 (NEGATE_EXPR
,
3725 true, NULL_TREE
, true,
3730 addop
= gimple_assign_rhs1 (use_stmt
);
3731 /* a - b * c -> (-b) * c + a */
3732 if (gimple_assign_rhs_code (use_stmt
) == MINUS_EXPR
)
3733 negate_p
= !negate_p
;
3737 mulop1
= force_gimple_operand_gsi (&gsi
,
3738 build1 (NEGATE_EXPR
,
3740 true, NULL_TREE
, true,
3743 fma_stmt
= gimple_build_assign (gimple_assign_lhs (use_stmt
),
3744 FMA_EXPR
, mulop1
, op2
, addop
);
3745 gsi_replace (&gsi
, fma_stmt
, true);
3746 widen_mul_stats
.fmas_inserted
++;
/* Helper function of match_uaddsub_overflow.  Return 1
   if USE_STMT is unsigned overflow check ovf != 0 for
   STMT, -1 if USE_STMT is unsigned overflow check ovf == 0
   and 0 otherwise.  */
3759 uaddsub_overflow_check_p (gimple
*stmt
, gimple
*use_stmt
)
3761 enum tree_code ccode
= ERROR_MARK
;
3762 tree crhs1
= NULL_TREE
, crhs2
= NULL_TREE
;
3763 if (gimple_code (use_stmt
) == GIMPLE_COND
)
3765 ccode
= gimple_cond_code (use_stmt
);
3766 crhs1
= gimple_cond_lhs (use_stmt
);
3767 crhs2
= gimple_cond_rhs (use_stmt
);
3769 else if (is_gimple_assign (use_stmt
))
3771 if (gimple_assign_rhs_class (use_stmt
) == GIMPLE_BINARY_RHS
)
3773 ccode
= gimple_assign_rhs_code (use_stmt
);
3774 crhs1
= gimple_assign_rhs1 (use_stmt
);
3775 crhs2
= gimple_assign_rhs2 (use_stmt
);
3777 else if (gimple_assign_rhs_code (use_stmt
) == COND_EXPR
)
3779 tree cond
= gimple_assign_rhs1 (use_stmt
);
3780 if (COMPARISON_CLASS_P (cond
))
3782 ccode
= TREE_CODE (cond
);
3783 crhs1
= TREE_OPERAND (cond
, 0);
3784 crhs2
= TREE_OPERAND (cond
, 1);
3795 if (TREE_CODE_CLASS (ccode
) != tcc_comparison
)
3798 enum tree_code code
= gimple_assign_rhs_code (stmt
);
3799 tree lhs
= gimple_assign_lhs (stmt
);
3800 tree rhs1
= gimple_assign_rhs1 (stmt
);
3801 tree rhs2
= gimple_assign_rhs2 (stmt
);
3807 /* r = a - b; r > a or r <= a
3808 r = a + b; a > r or a <= r or b > r or b <= r. */
3809 if ((code
== MINUS_EXPR
&& crhs1
== lhs
&& crhs2
== rhs1
)
3810 || (code
== PLUS_EXPR
&& (crhs1
== rhs1
|| crhs1
== rhs2
)
3812 return ccode
== GT_EXPR
? 1 : -1;
3816 /* r = a - b; a < r or a >= r
3817 r = a + b; r < a or r >= a or r < b or r >= b. */
3818 if ((code
== MINUS_EXPR
&& crhs1
== rhs1
&& crhs2
== lhs
)
3819 || (code
== PLUS_EXPR
&& crhs1
== lhs
3820 && (crhs2
== rhs1
|| crhs2
== rhs2
)))
3821 return ccode
== LT_EXPR
? 1 : -1;
/* Recognize for unsigned x
   x = y - z;
   if (x > y)
   where there are other uses of x and replace it with
   _7 = SUB_OVERFLOW (y, z);
   x = REALPART_EXPR <_7>;
   _8 = IMAGPART_EXPR <_7>;
   if (_8)
   and similarly for addition.  */
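
/* For illustration only (user code, not part of GCC):

     unsigned sub (unsigned y, unsigned z, int *ovf)
     {
       unsigned x = y - z;
       *ovf = x > y;
       return x;
     }

   The comparison x > y is the overflow check recognized above.  Both the
   subtraction and the check are rewritten to use a single IFN_SUB_OVERFLOW
   call (the internal-function form of __builtin_sub_overflow), whose real
   part is the result and whose imaginary part is the overflow flag.  */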
3840 match_uaddsub_overflow (gimple_stmt_iterator
*gsi
, gimple
*stmt
,
3841 enum tree_code code
)
3843 tree lhs
= gimple_assign_lhs (stmt
);
3844 tree type
= TREE_TYPE (lhs
);
3845 use_operand_p use_p
;
3846 imm_use_iterator iter
;
3847 bool use_seen
= false;
3848 bool ovf_use_seen
= false;
3851 gcc_checking_assert (code
== PLUS_EXPR
|| code
== MINUS_EXPR
);
3852 if (!INTEGRAL_TYPE_P (type
)
3853 || !TYPE_UNSIGNED (type
)
3854 || has_zero_uses (lhs
)
3855 || has_single_use (lhs
)
3856 || optab_handler (code
== PLUS_EXPR
? uaddv4_optab
: usubv4_optab
,
3857 TYPE_MODE (type
)) == CODE_FOR_nothing
)
3860 FOR_EACH_IMM_USE_FAST (use_p
, iter
, lhs
)
3862 use_stmt
= USE_STMT (use_p
);
3863 if (is_gimple_debug (use_stmt
))
3866 if (uaddsub_overflow_check_p (stmt
, use_stmt
))
3867 ovf_use_seen
= true;
3870 if (ovf_use_seen
&& use_seen
)
3874 if (!ovf_use_seen
|| !use_seen
)
3877 tree ctype
= build_complex_type (type
);
3878 tree rhs1
= gimple_assign_rhs1 (stmt
);
3879 tree rhs2
= gimple_assign_rhs2 (stmt
);
3880 gcall
*g
= gimple_build_call_internal (code
== PLUS_EXPR
3881 ? IFN_ADD_OVERFLOW
: IFN_SUB_OVERFLOW
,
3883 tree ctmp
= make_ssa_name (ctype
);
3884 gimple_call_set_lhs (g
, ctmp
);
3885 gsi_insert_before (gsi
, g
, GSI_SAME_STMT
);
3886 gassign
*g2
= gimple_build_assign (lhs
, REALPART_EXPR
,
3887 build1 (REALPART_EXPR
, type
, ctmp
));
3888 gsi_replace (gsi
, g2
, true);
3889 tree ovf
= make_ssa_name (type
);
3890 g2
= gimple_build_assign (ovf
, IMAGPART_EXPR
,
3891 build1 (IMAGPART_EXPR
, type
, ctmp
));
3892 gsi_insert_after (gsi
, g2
, GSI_NEW_STMT
);
3894 FOR_EACH_IMM_USE_STMT (use_stmt
, iter
, lhs
)
3896 if (is_gimple_debug (use_stmt
))
3899 int ovf_use
= uaddsub_overflow_check_p (stmt
, use_stmt
);
3902 if (gimple_code (use_stmt
) == GIMPLE_COND
)
3904 gcond
*cond_stmt
= as_a
<gcond
*> (use_stmt
);
3905 gimple_cond_set_lhs (cond_stmt
, ovf
);
3906 gimple_cond_set_rhs (cond_stmt
, build_int_cst (type
, 0));
3907 gimple_cond_set_code (cond_stmt
, ovf_use
== 1 ? NE_EXPR
: EQ_EXPR
);
3911 gcc_checking_assert (is_gimple_assign (use_stmt
));
3912 if (gimple_assign_rhs_class (use_stmt
) == GIMPLE_BINARY_RHS
)
3914 gimple_assign_set_rhs1 (use_stmt
, ovf
);
3915 gimple_assign_set_rhs2 (use_stmt
, build_int_cst (type
, 0));
3916 gimple_assign_set_rhs_code (use_stmt
,
3917 ovf_use
== 1 ? NE_EXPR
: EQ_EXPR
);
3921 gcc_checking_assert (gimple_assign_rhs_code (use_stmt
)
3923 tree cond
= build2 (ovf_use
== 1 ? NE_EXPR
: EQ_EXPR
,
3924 boolean_type_node
, ovf
,
3925 build_int_cst (type
, 0));
3926 gimple_assign_set_rhs1 (use_stmt
, cond
);
3929 update_stmt (use_stmt
);
3934 /* Return true if target has support for divmod. */
3937 target_supports_divmod_p (optab divmod_optab
, optab div_optab
, machine_mode mode
)
3939 /* If target supports hardware divmod insn, use it for divmod. */
3940 if (optab_handler (divmod_optab
, mode
) != CODE_FOR_nothing
)
3943 /* Check if libfunc for divmod is available. */
3944 rtx libfunc
= optab_libfunc (divmod_optab
, mode
);
3945 if (libfunc
!= NULL_RTX
)
3947 /* If optab_handler exists for div_optab, perhaps in a wider mode,
3948 we don't want to use the libfunc even if it exists for given mode. */
3949 for (machine_mode div_mode
= mode
;
3950 div_mode
!= VOIDmode
;
3951 div_mode
= GET_MODE_WIDER_MODE (div_mode
))
3952 if (optab_handler (div_optab
, div_mode
) != CODE_FOR_nothing
)
3955 return targetm
.expand_divmod_libfunc
!= NULL
;
/* Check if stmt is a candidate for the divmod transform.  */
3964 divmod_candidate_p (gassign
*stmt
)
3966 tree type
= TREE_TYPE (gimple_assign_lhs (stmt
));
3967 machine_mode mode
= TYPE_MODE (type
);
3968 optab divmod_optab
, div_optab
;
3970 if (TYPE_UNSIGNED (type
))
3972 divmod_optab
= udivmod_optab
;
3973 div_optab
= udiv_optab
;
3977 divmod_optab
= sdivmod_optab
;
3978 div_optab
= sdiv_optab
;
3981 tree op1
= gimple_assign_rhs1 (stmt
);
3982 tree op2
= gimple_assign_rhs2 (stmt
);
3984 /* Disable the transform if either is a constant, since division-by-constant
3985 may have specialized expansion. */
3986 if (CONSTANT_CLASS_P (op1
) || CONSTANT_CLASS_P (op2
))
3989 /* Exclude the case where TYPE_OVERFLOW_TRAPS (type) as that should
3990 expand using the [su]divv optabs. */
3991 if (TYPE_OVERFLOW_TRAPS (type
))
3994 if (!target_supports_divmod_p (divmod_optab
, div_optab
, mode
))
/* This function looks for:
   t1 = a TRUNC_DIV_EXPR b;
   t2 = a TRUNC_MOD_EXPR b;
   and transforms it to the following sequence:
   complex_tmp = DIVMOD (a, b);
   t1 = REALPART_EXPR (complex_tmp);
   t2 = IMAGPART_EXPR (complex_tmp);
   For conditions enabling the transform see divmod_candidate_p().

   The pass has three parts:
   1) Find top_stmt which is trunc_div or trunc_mod stmt and dominates all
      other trunc_div_expr and trunc_mod_expr stmts.
   2) Add top_stmt and all trunc_div and trunc_mod stmts dominated by top_stmt
      to stmts vector.
   3) Insert DIVMOD call just before top_stmt and update entries in
      stmts vector to use return value of DIVMOD (REALPART_EXPR for div,
      IMAGPART_EXPR for mod).  */
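
/* For illustration only (user code, not part of GCC):

     void quotrem (int a, int b, int *q, int *r)
     {
       *q = a / b;
       *r = a % b;
     }

   When target_supports_divmod_p holds for the mode, both statements are
   rewritten to use one complex-valued DIVMOD internal call, the quotient
   being extracted with REALPART_EXPR and the remainder with
   IMAGPART_EXPR.  */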
4019 convert_to_divmod (gassign
*stmt
)
4021 if (stmt_can_throw_internal (stmt
)
4022 || !divmod_candidate_p (stmt
))
4025 tree op1
= gimple_assign_rhs1 (stmt
);
4026 tree op2
= gimple_assign_rhs2 (stmt
);
4028 imm_use_iterator use_iter
;
4030 auto_vec
<gimple
*> stmts
;
4032 gimple
*top_stmt
= stmt
;
4033 basic_block top_bb
= gimple_bb (stmt
);
  /* Part 1: Try to set top_stmt to the "topmost" stmt that dominates
     at least stmt and possibly other trunc_div/trunc_mod stmts
     having the same operands as stmt.  */
4039 FOR_EACH_IMM_USE_STMT (use_stmt
, use_iter
, op1
)
4041 if (is_gimple_assign (use_stmt
)
4042 && (gimple_assign_rhs_code (use_stmt
) == TRUNC_DIV_EXPR
4043 || gimple_assign_rhs_code (use_stmt
) == TRUNC_MOD_EXPR
)
4044 && operand_equal_p (op1
, gimple_assign_rhs1 (use_stmt
), 0)
4045 && operand_equal_p (op2
, gimple_assign_rhs2 (use_stmt
), 0))
4047 if (stmt_can_throw_internal (use_stmt
))
4050 basic_block bb
= gimple_bb (use_stmt
);
4054 if (gimple_uid (use_stmt
) < gimple_uid (top_stmt
))
4055 top_stmt
= use_stmt
;
4057 else if (dominated_by_p (CDI_DOMINATORS
, top_bb
, bb
))
4060 top_stmt
= use_stmt
;
4065 tree top_op1
= gimple_assign_rhs1 (top_stmt
);
4066 tree top_op2
= gimple_assign_rhs2 (top_stmt
);
4068 stmts
.safe_push (top_stmt
);
4069 bool div_seen
= (gimple_assign_rhs_code (top_stmt
) == TRUNC_DIV_EXPR
);
  /* Part 2: Add all trunc_div/trunc_mod statements dominated by top_bb
     to stmts vector.  The 2nd loop will always add stmt to stmts vector, since
     gimple_bb (top_stmt) dominates gimple_bb (stmt), so the
     2nd loop ends up adding at least a single trunc_mod_expr stmt.  */
4076 FOR_EACH_IMM_USE_STMT (use_stmt
, use_iter
, top_op1
)
4078 if (is_gimple_assign (use_stmt
)
4079 && (gimple_assign_rhs_code (use_stmt
) == TRUNC_DIV_EXPR
4080 || gimple_assign_rhs_code (use_stmt
) == TRUNC_MOD_EXPR
)
4081 && operand_equal_p (top_op1
, gimple_assign_rhs1 (use_stmt
), 0)
4082 && operand_equal_p (top_op2
, gimple_assign_rhs2 (use_stmt
), 0))
4084 if (use_stmt
== top_stmt
4085 || stmt_can_throw_internal (use_stmt
)
4086 || !dominated_by_p (CDI_DOMINATORS
, gimple_bb (use_stmt
), top_bb
))
4089 stmts
.safe_push (use_stmt
);
4090 if (gimple_assign_rhs_code (use_stmt
) == TRUNC_DIV_EXPR
)
4098 /* Part 3: Create libcall to internal fn DIVMOD:
4099 divmod_tmp = DIVMOD (op1, op2). */
4101 gcall
*call_stmt
= gimple_build_call_internal (IFN_DIVMOD
, 2, op1
, op2
);
4102 tree res
= make_temp_ssa_name (build_complex_type (TREE_TYPE (op1
)),
4103 call_stmt
, "divmod_tmp");
4104 gimple_call_set_lhs (call_stmt
, res
);
4106 /* Insert the call before top_stmt. */
4107 gimple_stmt_iterator top_stmt_gsi
= gsi_for_stmt (top_stmt
);
4108 gsi_insert_before (&top_stmt_gsi
, call_stmt
, GSI_SAME_STMT
);
4110 widen_mul_stats
.divmod_calls_inserted
++;
4112 /* Update all statements in stmts vector:
4113 lhs = op1 TRUNC_DIV_EXPR op2 -> lhs = REALPART_EXPR<divmod_tmp>
4114 lhs = op1 TRUNC_MOD_EXPR op2 -> lhs = IMAGPART_EXPR<divmod_tmp>. */
4116 for (unsigned i
= 0; stmts
.iterate (i
, &use_stmt
); ++i
)
4120 switch (gimple_assign_rhs_code (use_stmt
))
4122 case TRUNC_DIV_EXPR
:
4123 new_rhs
= fold_build1 (REALPART_EXPR
, TREE_TYPE (op1
), res
);
4126 case TRUNC_MOD_EXPR
:
4127 new_rhs
= fold_build1 (IMAGPART_EXPR
, TREE_TYPE (op1
), res
);
4134 gimple_stmt_iterator gsi
= gsi_for_stmt (use_stmt
);
4135 gimple_assign_set_rhs_from_tree (&gsi
, new_rhs
);
4136 update_stmt (use_stmt
);
4142 /* Find integer multiplications where the operands are extended from
4143 smaller types, and replace the MULT_EXPR with a WIDEN_MULT_EXPR
4144 where appropriate. */
const pass_data pass_data_optimize_widening_mul =
{
  GIMPLE_PASS, /* type */
  "widening_mul", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};

class pass_optimize_widening_mul : public gimple_opt_pass
{
public:
  pass_optimize_widening_mul (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_optimize_widening_mul, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *)
    {
      return flag_expensive_optimizations && optimize;
    }

  virtual unsigned int execute (function *);

}; // class pass_optimize_widening_mul
4179 pass_optimize_widening_mul::execute (function
*fun
)
4182 bool cfg_changed
= false;
4184 memset (&widen_mul_stats
, 0, sizeof (widen_mul_stats
));
4185 calculate_dominance_info (CDI_DOMINATORS
);
4186 renumber_gimple_stmt_uids ();
4188 FOR_EACH_BB_FN (bb
, fun
)
4190 gimple_stmt_iterator gsi
;
4192 for (gsi
= gsi_after_labels (bb
); !gsi_end_p (gsi
);)
4194 gimple
*stmt
= gsi_stmt (gsi
);
4195 enum tree_code code
;
4197 if (is_gimple_assign (stmt
))
4199 code
= gimple_assign_rhs_code (stmt
);
4203 if (!convert_mult_to_widen (stmt
, &gsi
)
4204 && !convert_expand_mult_copysign (stmt
, &gsi
)
4205 && convert_mult_to_fma (stmt
,
4206 gimple_assign_rhs1 (stmt
),
4207 gimple_assign_rhs2 (stmt
)))
4209 gsi_remove (&gsi
, true);
4210 release_defs (stmt
);
4217 if (!convert_plusminus_to_widen (&gsi
, stmt
, code
))
4218 match_uaddsub_overflow (&gsi
, stmt
, code
);
4221 case TRUNC_MOD_EXPR
:
4222 convert_to_divmod (as_a
<gassign
*> (stmt
));
4228 else if (is_gimple_call (stmt
)
4229 && gimple_call_lhs (stmt
))
4231 tree fndecl
= gimple_call_fndecl (stmt
);
4233 && gimple_call_builtin_p (stmt
, BUILT_IN_NORMAL
))
4235 switch (DECL_FUNCTION_CODE (fndecl
))
4240 if (TREE_CODE (gimple_call_arg (stmt
, 1)) == REAL_CST
4242 (&TREE_REAL_CST (gimple_call_arg (stmt
, 1)),
4244 && convert_mult_to_fma (stmt
,
4245 gimple_call_arg (stmt
, 0),
4246 gimple_call_arg (stmt
, 0)))
4248 unlink_stmt_vdef (stmt
);
4249 if (gsi_remove (&gsi
, true)
4250 && gimple_purge_dead_eh_edges (bb
))
4252 release_defs (stmt
);
4265 statistics_counter_event (fun
, "widening multiplications inserted",
4266 widen_mul_stats
.widen_mults_inserted
);
4267 statistics_counter_event (fun
, "widening maccs inserted",
4268 widen_mul_stats
.maccs_inserted
);
4269 statistics_counter_event (fun
, "fused multiply-adds inserted",
4270 widen_mul_stats
.fmas_inserted
);
4271 statistics_counter_event (fun
, "divmod calls inserted",
4272 widen_mul_stats
.divmod_calls_inserted
);
4274 return cfg_changed
? TODO_cleanup_cfg
: 0;
gimple_opt_pass *
make_pass_optimize_widening_mul (gcc::context *ctxt)
{
  return new pass_optimize_widening_mul (ctxt);
}