/* Global, SSA-based optimizations using mathematical identities.
   Copyright (C) 2005-2015 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* Currently, the only mini-pass in this file tries to CSE reciprocal
   operations.  These are common in sequences such as this one:

	modulus = sqrt(x*x + y*y + z*z);
	x = x / modulus;
	y = y / modulus;
	z = z / modulus;

   that can be optimized to

	modulus = sqrt(x*x + y*y + z*z);
	rmodulus = 1.0 / modulus;
	x = x * rmodulus;
	y = y * rmodulus;
	z = z * rmodulus;

   We do this for loop invariant divisors, and with this pass whenever
   we notice that a division has the same divisor multiple times.

   Of course, like in PRE, we don't insert a division if a dominator
   already has one.  However, this cannot be done as an extension of
   PRE for several reasons.

   First of all, with some experiments it was found out that the
   transformation is not always useful if there are only two divisions
   by the same divisor.  This is probably because modern processors
   can pipeline the divisions; on older, in-order processors it should
   still be effective to optimize two divisions by the same number.
   We make this a param, and it shall be called N in the remainder of
   this comment.

   Second, if trapping math is active, we have less freedom on where
   to insert divisions: we can only do so in basic blocks that already
   contain one.  (If divisions don't trap, instead, we can insert
   divisions elsewhere, which will be in blocks that are common dominators
   of those that have the division).

   We really don't want to compute the reciprocal unless a division will
   be found.  To do this, we won't insert the division in a basic block
   that has less than N divisions *post-dominating* it.

   The algorithm constructs a subset of the dominator tree, holding the
   blocks containing the divisions and the common dominators to them,
   and walks it twice.  The first walk is in post-order, and it annotates
   each block with the number of divisions that post-dominate it: this
   gives information on where divisions can be inserted profitably.
   The second walk is in pre-order, and it inserts divisions as explained
   above, and replaces divisions by multiplications.

   In the best case, the cost of the pass is O(n_statements).  In the
   worst case, the cost is due to creating the dominator tree subset,
   with a cost of O(n_basic_blocks ^ 2); however this can only happen
   for n_statements / n_basic_blocks statements.  So, the amortized cost
   of creating the dominator tree subset is O(n_basic_blocks) and the
   worst-case cost of the pass is O(n_statements * n_basic_blocks).

   More practically, the cost will be small because there are few
   divisions, and they tend to be in the same basic block, so insert_bb
   is called very few times.

   If we did this using domwalk.c, an efficient implementation would have
   to work on all the variables in a single pass, because we could not
   work on just a subset of the dominator tree, as we do now, and the
   cost would also be something like O(n_statements * n_basic_blocks).
   The data structures would be more complex in order to work on all the
   variables in a single pass.  */
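/* Illustration only (a hypothetical, standalone sketch, not part of the
   pass): the source-level effect of CSE of reciprocals, assuming
   -freciprocal-math and at least N divisions by the same divisor.  */
#if 0
static void
recip_cse_example (double *x, double *y, double *z, double d)
{
  /* Before the pass: three divisions by the same divisor D,
     *x /= d; *y /= d; *z /= d;.  After the pass, conceptually:  */
  double recip = 1.0 / d;	/* one inserted reciprocal ...  */
  *x *= recip;			/* ... and N multiplications.  */
  *y *= recip;
  *z *= recip;
}
#endif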
#include "coretypes.h"
#include "fold-const.h"
#include "internal-fn.h"
#include "gimple-fold.h"
#include "gimple-iterator.h"
#include "gimplify.h"
#include "gimplify-me.h"
#include "stor-layout.h"
#include "tree-cfg.h"
#include "insn-config.h"
#include "emit-rtl.h"
#include "tree-dfa.h"
#include "tree-ssa.h"
#include "tree-pass.h"
#include "alloc-pool.h"
#include "gimple-pretty-print.h"
#include "builtins.h"

/* FIXME: RTL headers have to be included here for optabs.  */
#include "expr.h"		/* Because optabs.h wants sepops.  */
#include "insn-codes.h"
/* This structure represents one basic block that either computes a
   division, or is a common dominator for basic blocks that compute a
   division.  */
struct occurrence {
  /* The basic block represented by this structure.  */
  basic_block bb;

  /* If non-NULL, the SSA_NAME holding the definition for a reciprocal
     that was inserted in BB.  */
  tree recip_def;

  /* If non-NULL, the GIMPLE_ASSIGN for a reciprocal computation that
     was inserted in BB.  */
  gimple recip_def_stmt;

  /* Pointer to a list of "struct occurrence"s for blocks dominated
     by BB.  */
  struct occurrence *children;

  /* Pointer to the next "struct occurrence" in the list of blocks
     sharing a common dominator.  */
  struct occurrence *next;

  /* The number of divisions that are in BB before compute_merit.  The
     number of divisions that are in BB or post-dominate it after
     compute_merit.  */
  int num_divisions;

  /* True if the basic block has a division, false if it is a common
     dominator for basic blocks that do.  If it is false and trapping
     math is active, BB is not a candidate for inserting a reciprocal.  */
  bool bb_has_division;
};
static struct
{
  /* Number of 1.0/X ops inserted.  */
  int rdivs_inserted;

  /* Number of 1.0/FUNC ops inserted.  */
  int rfuncs_inserted;
} reciprocal_stats;

static struct
{
  /* Number of cexpi calls inserted.  */
  int inserted;
} sincos_stats;

static struct
{
  /* Number of hand-written 16-bit nop / bswaps found.  */
  int found_16bit;

  /* Number of hand-written 32-bit nop / bswaps found.  */
  int found_32bit;

  /* Number of hand-written 64-bit nop / bswaps found.  */
  int found_64bit;
} nop_stats, bswap_stats;

static struct
{
  /* Number of widening multiplication ops inserted.  */
  int widen_mults_inserted;

  /* Number of integer multiply-and-accumulate ops inserted.  */
  int maccs_inserted;

  /* Number of fp fused multiply-add ops inserted.  */
  int fmas_inserted;
} widen_mul_stats;
/* The instance of "struct occurrence" representing the highest
   interesting block in the dominator tree.  */
static struct occurrence *occ_head;

/* Allocation pool for getting instances of "struct occurrence".  */
static object_allocator<occurrence> *occ_pool;
/* Allocate and return a new struct occurrence for basic block BB, and
   whose children list is headed by CHILDREN.  */
static struct occurrence *
occ_new (basic_block bb, struct occurrence *children)
{
  struct occurrence *occ;

  bb->aux = occ = occ_pool->allocate ();
  memset (occ, 0, sizeof (struct occurrence));

  occ->bb = bb;
  occ->children = children;
  return occ;
}
/* Insert NEW_OCC into our subset of the dominator tree.  P_HEAD points to a
   list of "struct occurrence"s, one per basic block, having IDOM as
   their common dominator.

   We try to insert NEW_OCC as deep as possible in the tree, and we also
   insert any other block that is a common dominator for BB and one
   block already in the tree.  */

static void
insert_bb (struct occurrence *new_occ, basic_block idom,
	   struct occurrence **p_head)
{
  struct occurrence *occ, **p_occ;

  for (p_occ = p_head; (occ = *p_occ) != NULL; )
    {
      basic_block bb = new_occ->bb, occ_bb = occ->bb;
      basic_block dom = nearest_common_dominator (CDI_DOMINATORS, occ_bb, bb);
      if (dom == bb)
	{
	  /* BB dominates OCC_BB.  OCC becomes NEW_OCC's child: remove OCC
	     from its list.  */
	  *p_occ = occ->next;
	  occ->next = new_occ->children;
	  new_occ->children = occ;

	  /* Try the next block (it may as well be dominated by BB).  */
	}

      else if (dom == occ_bb)
	{
	  /* OCC_BB dominates BB.  Tail recurse to look deeper.  */
	  insert_bb (new_occ, dom, &occ->children);
	  return;
	}

      else if (dom != idom)
	{
	  gcc_assert (!dom->aux);

	  /* There is a dominator between IDOM and BB, add it and make
	     two children out of NEW_OCC and OCC.  First, remove OCC from
	     its list.  */
	  *p_occ = occ->next;
	  new_occ->next = NULL;
	  occ->next = NULL;

	  /* None of the previous blocks has DOM as a dominator: if we tail
	     recursed, we would reexamine them uselessly. Just switch BB with
	     DOM, and go on looking for blocks dominated by DOM.  */
	  new_occ = occ_new (dom, new_occ);
	}

      else
	{
	  /* Nothing special, go on with the next element.  */
	  p_occ = &occ->next;
	}
    }

  /* No place was found as a child of IDOM.  Make BB a sibling of IDOM.  */
  new_occ->next = *p_head;
  *p_head = new_occ;
}
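/* Illustration only (a hypothetical shape of the subset): inserting
   division blocks B5, B7 and B9 whose nearest common dominator is B3
   produces

	ENTRY -> B3 -> { B5, B7, B9 }

   adding B3 itself even if it contains no division; compute_merit below
   then credits B3 with the divisions that post-dominate it.  */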
/* Register that we found a division in BB.  */

static inline void
register_division_in (basic_block bb)
{
  struct occurrence *occ;

  occ = (struct occurrence *) bb->aux;
  if (!occ)
    {
      occ = occ_new (bb, NULL);
      insert_bb (occ, ENTRY_BLOCK_PTR_FOR_FN (cfun), &occ_head);
    }

  occ->bb_has_division = true;
  occ->num_divisions++;
}
/* Compute the number of divisions that postdominate each block in OCC and
   its children.  */

static void
compute_merit (struct occurrence *occ)
{
  struct occurrence *occ_child;
  basic_block dom = occ->bb;

  for (occ_child = occ->children; occ_child; occ_child = occ_child->next)
    {
      basic_block bb;
      if (occ_child->children)
	compute_merit (occ_child);

      if (flag_exceptions)
	/* Don't clobber the EH landing pad!  Leave BB the same.  */
	bb = occ_child->bb;
      else
	bb = single_noncomplex_succ (dom);

      if (dominated_by_p (CDI_POST_DOMINATORS, bb, occ_child->bb))
	occ->num_divisions += occ_child->num_divisions;
    }
}
/* Return whether USE_STMT is a floating-point division by DEF.  */

static inline bool
is_division_by (gimple use_stmt, tree def)
{
  return is_gimple_assign (use_stmt)
	 && gimple_assign_rhs_code (use_stmt) == RDIV_EXPR
	 && gimple_assign_rhs2 (use_stmt) == def
	 /* Do not recognize x / x as valid division, as we are getting
	    confused later by replacing all immediate uses x in such
	    a stmt.  */
	 && gimple_assign_rhs1 (use_stmt) != def;
}
/* Walk the subset of the dominator tree rooted at OCC, setting the
   RECIP_DEF field to a definition of 1.0 / DEF that can be used in
   the given basic block.  The field may be left NULL, of course,
   if it is not possible or profitable to do the optimization.

   DEF_BSI is an iterator pointing at the statement defining DEF.
   If RECIP_DEF is set, a dominator already has a computation that can
   be used.  */

static void
insert_reciprocals (gimple_stmt_iterator *def_gsi, struct occurrence *occ,
		    tree def, tree recip_def, int threshold)
{
  tree type;
  gassign *new_stmt;
  gimple_stmt_iterator gsi;
  struct occurrence *occ_child;

  if (!recip_def
      && (occ->bb_has_division || !flag_trapping_math)
      && occ->num_divisions >= threshold)
    {
      /* Make a variable with the replacement and substitute it.  */
      type = TREE_TYPE (def);
      recip_def = create_tmp_reg (type, "reciptmp");
      new_stmt = gimple_build_assign (recip_def, RDIV_EXPR,
				      build_one_cst (type), def);

      if (occ->bb_has_division)
	{
	  /* Case 1: insert before an existing division.  */
	  gsi = gsi_after_labels (occ->bb);
	  while (!gsi_end_p (gsi) && !is_division_by (gsi_stmt (gsi), def))
	    gsi_next (&gsi);

	  gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
	}
      else if (def_gsi && occ->bb == def_gsi->bb)
	{
	  /* Case 2: insert right after the definition.  Note that this will
	     never happen if the definition statement can throw, because in
	     that case the sole successor of the statement's basic block will
	     dominate all the uses as well.  */
	  gsi_insert_after (def_gsi, new_stmt, GSI_NEW_STMT);
	}
      else
	{
	  /* Case 3: insert in a basic block not containing defs/uses.  */
	  gsi = gsi_after_labels (occ->bb);
	  gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
	}

      reciprocal_stats.rdivs_inserted++;

      occ->recip_def_stmt = new_stmt;
    }

  occ->recip_def = recip_def;
  for (occ_child = occ->children; occ_child; occ_child = occ_child->next)
    insert_reciprocals (def_gsi, occ_child, def, recip_def, threshold);
}
/* Replace the division at USE_P with a multiplication by the reciprocal, if
   possible.  */

static inline void
replace_reciprocal (use_operand_p use_p)
{
  gimple use_stmt = USE_STMT (use_p);
  basic_block bb = gimple_bb (use_stmt);
  struct occurrence *occ = (struct occurrence *) bb->aux;

  if (optimize_bb_for_speed_p (bb)
      && occ->recip_def && use_stmt != occ->recip_def_stmt)
    {
      gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
      gimple_assign_set_rhs_code (use_stmt, MULT_EXPR);
      SET_USE (use_p, occ->recip_def);
      fold_stmt_inplace (&gsi);
      update_stmt (use_stmt);
    }
}
/* Free OCC and return one more "struct occurrence" to be freed.  */

static struct occurrence *
free_bb (struct occurrence *occ)
{
  struct occurrence *child, *next;

  /* First get the two pointers hanging off OCC.  */
  next = occ->next;
  child = occ->children;
  occ->bb->aux = NULL;
  occ_pool->remove (occ);

  /* Now ensure that we don't recurse unless it is necessary.  */
  if (!child)
    return next;
  else
    {
      while (next)
	next = free_bb (next);

      return child;
    }
}
/* Look for floating-point divisions among DEF's uses, and try to
   replace them by multiplications with the reciprocal.  Add
   as many statements computing the reciprocal as needed.

   DEF must be a GIMPLE register of a floating-point type.  */

static void
execute_cse_reciprocals_1 (gimple_stmt_iterator *def_gsi, tree def)
{
  use_operand_p use_p;
  imm_use_iterator use_iter;
  struct occurrence *occ;
  int count = 0, threshold;

  gcc_assert (FLOAT_TYPE_P (TREE_TYPE (def)) && is_gimple_reg (def));

  FOR_EACH_IMM_USE_FAST (use_p, use_iter, def)
    {
      gimple use_stmt = USE_STMT (use_p);
      if (is_division_by (use_stmt, def))
	{
	  register_division_in (gimple_bb (use_stmt));
	  count++;
	}
    }

  /* Do the expensive part only if we can hope to optimize something.  */
  threshold = targetm.min_divisions_for_recip_mul (TYPE_MODE (TREE_TYPE (def)));
  if (count >= threshold)
    {
      gimple use_stmt;
      for (occ = occ_head; occ; occ = occ->next)
	{
	  compute_merit (occ);
	  insert_reciprocals (def_gsi, occ, def, NULL, threshold);
	}

      FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, def)
	{
	  if (is_division_by (use_stmt, def))
	    {
	      FOR_EACH_IMM_USE_ON_STMT (use_p, use_iter)
		replace_reciprocal (use_p);
	    }
	}
    }

  for (occ = occ_head; occ; )
    occ = free_bb (occ);

  occ_head = NULL;
}
/* Go through all the floating-point SSA_NAMEs, and call
   execute_cse_reciprocals_1 on each of them.  */

namespace {

const pass_data pass_data_cse_reciprocals =
{
  GIMPLE_PASS, /* type */
  "recip", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};
class pass_cse_reciprocals : public gimple_opt_pass
{
public:
  pass_cse_reciprocals (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_cse_reciprocals, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return optimize && flag_reciprocal_math; }
  virtual unsigned int execute (function *);

}; // class pass_cse_reciprocals
unsigned int
pass_cse_reciprocals::execute (function *fun)
{
  basic_block bb;
  tree arg;

  occ_pool = new object_allocator<occurrence>
    ("dominators for recip", n_basic_blocks_for_fn (fun) / 3 + 1);

  memset (&reciprocal_stats, 0, sizeof (reciprocal_stats));
  calculate_dominance_info (CDI_DOMINATORS);
  calculate_dominance_info (CDI_POST_DOMINATORS);

#ifdef ENABLE_CHECKING
  FOR_EACH_BB_FN (bb, fun)
    gcc_assert (!bb->aux);
#endif

  for (arg = DECL_ARGUMENTS (fun->decl); arg; arg = DECL_CHAIN (arg))
    if (FLOAT_TYPE_P (TREE_TYPE (arg))
	&& is_gimple_reg (arg))
      {
	tree name = ssa_default_def (fun, arg);
	if (name)
	  execute_cse_reciprocals_1 (NULL, name);
      }

  FOR_EACH_BB_FN (bb, fun)
    {
      tree def;

      for (gphi_iterator gsi = gsi_start_phis (bb); !gsi_end_p (gsi);
	   gsi_next (&gsi))
	{
	  gphi *phi = gsi.phi ();
	  def = PHI_RESULT (phi);
	  if (! virtual_operand_p (def)
	      && FLOAT_TYPE_P (TREE_TYPE (def)))
	    execute_cse_reciprocals_1 (NULL, def);
	}

      for (gimple_stmt_iterator gsi = gsi_after_labels (bb); !gsi_end_p (gsi);
	   gsi_next (&gsi))
	{
	  gimple stmt = gsi_stmt (gsi);

	  if (gimple_has_lhs (stmt)
	      && (def = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_DEF)) != NULL
	      && FLOAT_TYPE_P (TREE_TYPE (def))
	      && TREE_CODE (def) == SSA_NAME)
	    execute_cse_reciprocals_1 (&gsi, def);
	}

      if (optimize_bb_for_size_p (bb))
	continue;

      /* Scan for a/func(b) and convert it to reciprocal a*rfunc(b).  */
      for (gimple_stmt_iterator gsi = gsi_after_labels (bb); !gsi_end_p (gsi);
	   gsi_next (&gsi))
	{
	  gimple stmt = gsi_stmt (gsi);
	  tree fndecl;

	  if (is_gimple_assign (stmt)
	      && gimple_assign_rhs_code (stmt) == RDIV_EXPR)
	    {
	      tree arg1 = gimple_assign_rhs2 (stmt);
	      gimple stmt1;

	      if (TREE_CODE (arg1) != SSA_NAME)
		continue;

	      stmt1 = SSA_NAME_DEF_STMT (arg1);

	      if (is_gimple_call (stmt1)
		  && gimple_call_lhs (stmt1)
		  && (fndecl = gimple_call_fndecl (stmt1))
		  && (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
		      || DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD))
		{
		  enum built_in_function code;
		  bool md_code, fail;
		  imm_use_iterator ui;
		  use_operand_p use_p;

		  code = DECL_FUNCTION_CODE (fndecl);
		  md_code = DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD;

		  fndecl = targetm.builtin_reciprocal (code, md_code, false);
		  if (!fndecl)
		    continue;

		  /* Check that all uses of the SSA name are divisions,
		     otherwise replacing the defining statement will do
		     the wrong thing.  */
		  fail = false;
		  FOR_EACH_IMM_USE_FAST (use_p, ui, arg1)
		    {
		      gimple stmt2 = USE_STMT (use_p);
		      if (is_gimple_debug (stmt2))
			continue;
		      if (!is_gimple_assign (stmt2)
			  || gimple_assign_rhs_code (stmt2) != RDIV_EXPR
			  || gimple_assign_rhs1 (stmt2) == arg1
			  || gimple_assign_rhs2 (stmt2) != arg1)
			{
			  fail = true;
			  break;
			}
		    }
		  if (fail)
		    continue;

		  gimple_replace_ssa_lhs (stmt1, arg1);
		  gimple_call_set_fndecl (stmt1, fndecl);
		  update_stmt (stmt1);
		  reciprocal_stats.rfuncs_inserted++;

		  FOR_EACH_IMM_USE_STMT (stmt, ui, arg1)
		    {
		      gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
		      gimple_assign_set_rhs_code (stmt, MULT_EXPR);
		      fold_stmt_inplace (&gsi);
		      update_stmt (stmt);
		    }
		}
	    }
	}
    }

  statistics_counter_event (fun, "reciprocal divs inserted",
			    reciprocal_stats.rdivs_inserted);
  statistics_counter_event (fun, "reciprocal functions inserted",
			    reciprocal_stats.rfuncs_inserted);

  free_dominance_info (CDI_DOMINATORS);
  free_dominance_info (CDI_POST_DOMINATORS);

  delete occ_pool;
  return 0;
}
} // anon namespace

gimple_opt_pass *
make_pass_cse_reciprocals (gcc::context *ctxt)
{
  return new pass_cse_reciprocals (ctxt);
}
/* Records an occurrence at statement USE_STMT in the vector of trees
   STMTS if it is dominated by *TOP_BB or dominates it or this basic block
   is not yet initialized.  Returns true if the occurrence was pushed on
   the vector.  Adjusts *TOP_BB to be the basic block dominating all
   statements in the vector.  */

static bool
maybe_record_sincos (vec<gimple> *stmts,
		     basic_block *top_bb, gimple use_stmt)
{
  basic_block use_bb = gimple_bb (use_stmt);
  if (*top_bb
      && (*top_bb == use_bb
	  || dominated_by_p (CDI_DOMINATORS, use_bb, *top_bb)))
    stmts->safe_push (use_stmt);
  else if (!*top_bb
	   || dominated_by_p (CDI_DOMINATORS, *top_bb, use_bb))
    {
      stmts->safe_push (use_stmt);
      *top_bb = use_bb;
    }
  else
    return false;

  return true;
}
/* Look for sin, cos and cexpi calls with the same argument NAME and
   create a single call to cexpi CSEing the result in this case.
   We first walk over all immediate uses of the argument collecting
   statements that we can CSE in a vector and in a second pass replace
   the statement rhs with a REALPART or IMAGPART expression on the
   result of the cexpi call we insert before the use statement that
   dominates all other candidates.  */

static bool
execute_cse_sincos_1 (tree name)
{
  gimple_stmt_iterator gsi;
  imm_use_iterator use_iter;
  tree fndecl, res, type;
  gimple def_stmt, use_stmt, stmt;
  int seen_cos = 0, seen_sin = 0, seen_cexpi = 0;
  auto_vec<gimple> stmts;
  basic_block top_bb = NULL;
  int i;
  bool cfg_changed = false;

  type = TREE_TYPE (name);
  FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, name)
    {
      if (gimple_code (use_stmt) != GIMPLE_CALL
	  || !gimple_call_lhs (use_stmt)
	  || !(fndecl = gimple_call_fndecl (use_stmt))
	  || DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_NORMAL)
	continue;

      switch (DECL_FUNCTION_CODE (fndecl))
	{
	CASE_FLT_FN (BUILT_IN_COS):
	  seen_cos |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
	  break;

	CASE_FLT_FN (BUILT_IN_SIN):
	  seen_sin |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
	  break;

	CASE_FLT_FN (BUILT_IN_CEXPI):
	  seen_cexpi |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
	  break;

	default:;
	}
    }

  if (seen_cos + seen_sin + seen_cexpi <= 1)
    return false;

  /* Simply insert cexpi at the beginning of top_bb but not earlier than
     the name def statement.  */
  fndecl = mathfn_built_in (type, BUILT_IN_CEXPI);
  if (!fndecl)
    return false;
  stmt = gimple_build_call (fndecl, 1, name);
  res = make_temp_ssa_name (TREE_TYPE (TREE_TYPE (fndecl)), stmt, "sincostmp");
  gimple_call_set_lhs (stmt, res);

  def_stmt = SSA_NAME_DEF_STMT (name);
  if (!SSA_NAME_IS_DEFAULT_DEF (name)
      && gimple_code (def_stmt) != GIMPLE_PHI
      && gimple_bb (def_stmt) == top_bb)
    {
      gsi = gsi_for_stmt (def_stmt);
      gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
    }
  else
    {
      gsi = gsi_after_labels (top_bb);
      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
    }
  sincos_stats.inserted++;

  /* And adjust the recorded old call sites.  */
  for (i = 0; stmts.iterate (i, &use_stmt); ++i)
    {
      tree rhs = NULL;
      fndecl = gimple_call_fndecl (use_stmt);

      switch (DECL_FUNCTION_CODE (fndecl))
	{
	CASE_FLT_FN (BUILT_IN_COS):
	  rhs = fold_build1 (REALPART_EXPR, type, res);
	  break;

	CASE_FLT_FN (BUILT_IN_SIN):
	  rhs = fold_build1 (IMAGPART_EXPR, type, res);
	  break;

	CASE_FLT_FN (BUILT_IN_CEXPI):
	  rhs = res;
	  break;

	default:
	  gcc_unreachable ();
	}

      /* Replace call with a copy.  */
      stmt = gimple_build_assign (gimple_call_lhs (use_stmt), rhs);

      gsi = gsi_for_stmt (use_stmt);
      gsi_replace (&gsi, stmt, true);
      if (gimple_purge_dead_eh_edges (gimple_bb (stmt)))
	cfg_changed = true;
    }

  return cfg_changed;
}
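/* Illustration only (a hypothetical, standalone sketch): the source-level
   effect of the sincos CSE above.  Calls to cos and sin sharing the
   argument X collapse into one __builtin_cexpi call, whose real and
   imaginary parts replace the original calls.  */
#if 0
static void
sincos_example (double x, double *s, double *c)
{
  _Complex double t = __builtin_cexpi (x);	/* single combined call */
  *c = __real__ t;	/* replaces cos (x): REALPART_EXPR of the result */
  *s = __imag__ t;	/* replaces sin (x): IMAGPART_EXPR of the result */
}
#endif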
/* To evaluate powi(x,n), the floating point value x raised to the
   constant integer exponent n, we use a hybrid algorithm that
   combines the "window method" with look-up tables.  For an
   introduction to exponentiation algorithms and "addition chains",
   see section 4.6.3, "Evaluation of Powers" of Donald E. Knuth,
   "Seminumerical Algorithms", Vol. 2, "The Art of Computer Programming",
   3rd Edition, 1998, and Daniel M. Gordon, "A Survey of Fast Exponentiation
   Methods", Journal of Algorithms, Vol. 27, pp. 129-146, 1998.  */

/* Provide a default value for POWI_MAX_MULTS, the maximum number of
   multiplications to inline before calling the system library's pow
   function.  powi(x,n) requires at worst 2*bits(n)-2 multiplications,
   so this default never requires calling pow, powf or powl.  */
#ifndef POWI_MAX_MULTS
#define POWI_MAX_MULTS  (2*HOST_BITS_PER_WIDE_INT-2)
#endif

/* The size of the "optimal power tree" lookup table.  All
   exponents less than this value are simply looked up in the
   powi_table below.  This threshold is also used to size the
   cache of pseudo registers that hold intermediate results.  */
#define POWI_TABLE_SIZE 256

/* The size, in bits of the window, used in the "window method"
   exponentiation algorithm.  This is equivalent to a radix of
   (1<<POWI_WINDOW_SIZE) in the corresponding "m-ary method".  */
#define POWI_WINDOW_SIZE 3
/* The following table is an efficient representation of an
   "optimal power tree".  For each value, i, the corresponding
   value, j, in the table states that an optimal evaluation
   sequence for calculating pow(x,i) can be found by evaluating
   pow(x,j)*pow(x,i-j).  An optimal power tree for the first
   100 integers is given in Knuth's "Seminumerical algorithms".  */

static const unsigned char powi_table[POWI_TABLE_SIZE] =
  {
      0,   1,   1,   2,   2,   3,   3,   4,  /*   0 -   7 */
      4,   6,   5,   6,   6,  10,   7,   9,  /*   8 -  15 */
      8,  16,   9,  16,  10,  12,  11,  13,  /*  16 -  23 */
     12,  17,  13,  18,  14,  24,  15,  26,  /*  24 -  31 */
     16,  17,  17,  19,  18,  33,  19,  26,  /*  32 -  39 */
     20,  25,  21,  40,  22,  27,  23,  44,  /*  40 -  47 */
     24,  32,  25,  34,  26,  29,  27,  44,  /*  48 -  55 */
     28,  31,  29,  34,  30,  60,  31,  36,  /*  56 -  63 */
     32,  64,  33,  34,  34,  46,  35,  37,  /*  64 -  71 */
     36,  65,  37,  50,  38,  48,  39,  69,  /*  72 -  79 */
     40,  49,  41,  43,  42,  51,  43,  58,  /*  80 -  87 */
     44,  64,  45,  47,  46,  59,  47,  76,  /*  88 -  95 */
     48,  65,  49,  66,  50,  67,  51,  66,  /*  96 - 103 */
     52,  70,  53,  74,  54, 104,  55,  74,  /* 104 - 111 */
     56,  64,  57,  69,  58,  78,  59,  68,  /* 112 - 119 */
     60,  61,  61,  80,  62,  75,  63,  68,  /* 120 - 127 */
     64,  65,  65, 128,  66, 129,  67,  90,  /* 128 - 135 */
     68,  73,  69, 131,  70,  94,  71,  88,  /* 136 - 143 */
     72, 128,  73,  98,  74, 132,  75, 121,  /* 144 - 151 */
     76, 102,  77, 124,  78, 132,  79, 106,  /* 152 - 159 */
     80,  97,  81, 160,  82,  99,  83, 134,  /* 160 - 167 */
     84,  86,  85,  95,  86, 160,  87, 100,  /* 168 - 175 */
     88, 113,  89,  98,  90, 107,  91, 122,  /* 176 - 183 */
     92, 111,  93, 102,  94, 126,  95, 150,  /* 184 - 191 */
     96, 128,  97, 130,  98, 133,  99, 195,  /* 192 - 199 */
    100, 128, 101, 123, 102, 164, 103, 138,  /* 200 - 207 */
    104, 145, 105, 146, 106, 109, 107, 149,  /* 208 - 215 */
    108, 200, 109, 146, 110, 170, 111, 157,  /* 216 - 223 */
    112, 128, 113, 130, 114, 182, 115, 132,  /* 224 - 231 */
    116, 200, 117, 132, 118, 158, 119, 206,  /* 232 - 239 */
    120, 240, 121, 162, 122, 147, 123, 152,  /* 240 - 247 */
    124, 166, 125, 214, 126, 138, 127, 153,  /* 248 - 255 */
  };
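/* Illustration only (a hypothetical, uncached reading of the table above):
   powi_table[7] == 4, so x**7 = x**4 * x**3; recursing, x**4 = x**2 * x**2
   and x**3 = x**2 * x, for four multiplications in total.  The real
   expansion in powi_as_mults_1 below additionally caches shared
   subexpressions such as x**2 in SSA temporaries.  */
#if 0
static double
powi_table_example (double x, unsigned int n)	/* needs n < POWI_TABLE_SIZE */
{
  if (n == 0)
    return 1.0;
  if (n == 1)
    return x;
  /* Split the exponent exactly as the table prescribes.  */
  return powi_table_example (x, n - powi_table[n])
	 * powi_table_example (x, powi_table[n]);
}
#endif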
/* Return the number of multiplications required to calculate
   powi(x,n) where n is less than POWI_TABLE_SIZE.  This is a
   subroutine of powi_cost.  CACHE is an array indicating
   which exponents have already been calculated.  */

static int
powi_lookup_cost (unsigned HOST_WIDE_INT n, bool *cache)
{
  /* If we've already calculated this exponent, then this evaluation
     doesn't require any additional multiplications.  */
  if (cache[n])
    return 0;

  cache[n] = true;
  return powi_lookup_cost (n - powi_table[n], cache)
	 + powi_lookup_cost (powi_table[n], cache) + 1;
}
/* Return the number of multiplications required to calculate
   powi(x,n) for an arbitrary x, given the exponent N.  This
   function needs to be kept in sync with powi_as_mults below.  */

static int
powi_cost (HOST_WIDE_INT n)
{
  bool cache[POWI_TABLE_SIZE];
  unsigned HOST_WIDE_INT digit;
  unsigned HOST_WIDE_INT val;
  int result;

  if (n == 0)
    return 0;

  /* Ignore the reciprocal when calculating the cost.  */
  val = (n < 0) ? -n : n;

  /* Initialize the exponent cache.  */
  memset (cache, 0, POWI_TABLE_SIZE * sizeof (bool));
  cache[1] = true;

  result = 0;

  while (val >= POWI_TABLE_SIZE)
    {
      if (val & 1)
	{
	  digit = val & ((1 << POWI_WINDOW_SIZE) - 1);
	  result += powi_lookup_cost (digit, cache)
		    + POWI_WINDOW_SIZE + 1;
	  val >>= POWI_WINDOW_SIZE;
	}
      else
	{
	  val >>= 1;
	  result++;
	}
    }

  return result + powi_lookup_cost (val, cache);
}
/* Recursive subroutine of powi_as_mults.  This function takes the
   array, CACHE, of already calculated exponents and an exponent N and
   returns a tree that corresponds to CACHE[1]**N, with type TYPE.  */

static tree
powi_as_mults_1 (gimple_stmt_iterator *gsi, location_t loc, tree type,
		 HOST_WIDE_INT n, tree *cache)
{
  tree op0, op1, ssa_target;
  unsigned HOST_WIDE_INT digit;
  gassign *mult_stmt;

  if (n < POWI_TABLE_SIZE && cache[n])
    return cache[n];

  ssa_target = make_temp_ssa_name (type, NULL, "powmult");

  if (n < POWI_TABLE_SIZE)
    {
      cache[n] = ssa_target;
      op0 = powi_as_mults_1 (gsi, loc, type, n - powi_table[n], cache);
      op1 = powi_as_mults_1 (gsi, loc, type, powi_table[n], cache);
    }
  else if (n & 1)
    {
      digit = n & ((1 << POWI_WINDOW_SIZE) - 1);
      op0 = powi_as_mults_1 (gsi, loc, type, n - digit, cache);
      op1 = powi_as_mults_1 (gsi, loc, type, digit, cache);
    }
  else
    {
      op0 = powi_as_mults_1 (gsi, loc, type, n >> 1, cache);
      op1 = op0;
    }

  mult_stmt = gimple_build_assign (ssa_target, MULT_EXPR, op0, op1);
  gimple_set_location (mult_stmt, loc);
  gsi_insert_before (gsi, mult_stmt, GSI_SAME_STMT);

  return ssa_target;
}
/* Convert ARG0**N to a tree of multiplications of ARG0 with itself.
   This function needs to be kept in sync with powi_cost above.  */

static tree
powi_as_mults (gimple_stmt_iterator *gsi, location_t loc,
	       tree arg0, HOST_WIDE_INT n)
{
  tree cache[POWI_TABLE_SIZE], result, type = TREE_TYPE (arg0);
  gassign *div_stmt;
  tree target;

  if (n == 0)
    return build_real (type, dconst1);

  memset (cache, 0, sizeof (cache));
  cache[1] = arg0;

  result = powi_as_mults_1 (gsi, loc, type, (n < 0) ? -n : n, cache);
  if (n >= 0)
    return result;

  /* If the original exponent was negative, reciprocate the result.  */
  target = make_temp_ssa_name (type, NULL, "powmult");
  div_stmt = gimple_build_assign (target, RDIV_EXPR,
				  build_real (type, dconst1), result);
  gimple_set_location (div_stmt, loc);
  gsi_insert_before (gsi, div_stmt, GSI_SAME_STMT);

  return target;
}
/* ARG0 and N are the two arguments to a powi builtin in GSI with
   location info LOC.  If the arguments are appropriate, create an
   equivalent sequence of statements prior to GSI using an optimal
   number of multiplications, and return an expression holding the
   result.  */

static tree
gimple_expand_builtin_powi (gimple_stmt_iterator *gsi, location_t loc,
			    tree arg0, HOST_WIDE_INT n)
{
  /* Avoid largest negative number.  */
  if (n != -n
      && ((n >= -1 && n <= 2)
	  || (optimize_function_for_speed_p (cfun)
	      && powi_cost (n) <= POWI_MAX_MULTS)))
    return powi_as_mults (gsi, loc, arg0, n);

  return NULL_TREE;
}
/* Build a gimple call statement that calls FN with argument ARG.
   Set the lhs of the call statement to a fresh SSA name.  Insert the
   statement prior to GSI's current position, and return the fresh
   SSA name.  */

static tree
build_and_insert_call (gimple_stmt_iterator *gsi, location_t loc,
		       tree fn, tree arg)
{
  gcall *call_stmt;
  tree ssa_target;

  call_stmt = gimple_build_call (fn, 1, arg);
  ssa_target = make_temp_ssa_name (TREE_TYPE (arg), NULL, "powroot");
  gimple_set_lhs (call_stmt, ssa_target);
  gimple_set_location (call_stmt, loc);
  gsi_insert_before (gsi, call_stmt, GSI_SAME_STMT);

  return ssa_target;
}
/* Build a gimple binary operation with the given CODE and arguments
   ARG0, ARG1, assigning the result to a new SSA name for variable
   TARGET.  Insert the statement prior to GSI's current position, and
   return the fresh SSA name.  */

static tree
build_and_insert_binop (gimple_stmt_iterator *gsi, location_t loc,
			const char *name, enum tree_code code,
			tree arg0, tree arg1)
{
  tree result = make_temp_ssa_name (TREE_TYPE (arg0), NULL, name);
  gassign *stmt = gimple_build_assign (result, code, arg0, arg1);
  gimple_set_location (stmt, loc);
  gsi_insert_before (gsi, stmt, GSI_SAME_STMT);

  return result;
}
/* Build a gimple reference operation with the given CODE and argument
   ARG, assigning the result to a new SSA name of TYPE with NAME.
   Insert the statement prior to GSI's current position, and return
   the fresh SSA name.  */

static inline tree
build_and_insert_ref (gimple_stmt_iterator *gsi, location_t loc, tree type,
		      const char *name, enum tree_code code, tree arg0)
{
  tree result = make_temp_ssa_name (type, NULL, name);
  gimple stmt = gimple_build_assign (result, build1 (code, type, arg0));
  gimple_set_location (stmt, loc);
  gsi_insert_before (gsi, stmt, GSI_SAME_STMT);

  return result;
}
/* Build a gimple assignment to cast VAL to TYPE.  Insert the statement
   prior to GSI's current position, and return the fresh SSA name.  */

static tree
build_and_insert_cast (gimple_stmt_iterator *gsi, location_t loc,
		       tree type, tree val)
{
  tree result = make_ssa_name (type);
  gassign *stmt = gimple_build_assign (result, NOP_EXPR, val);
  gimple_set_location (stmt, loc);
  gsi_insert_before (gsi, stmt, GSI_SAME_STMT);

  return result;
}
struct pow_synth_sqrt_info
{
  bool *factors;
  unsigned int deepest;
  unsigned int num_mults;
};
/* Return true iff the real value C can be represented as a
   sum of powers of 0.5 up to N.  That is:
   C == SUM<i from 1..N> (a[i]*(0.5**i)) where a[i] is either 0 or 1.
   Record in INFO the various parameters of the synthesis algorithm such
   as the factors a[i], the maximum 0.5 power and the number of
   multiplications that will be required.  */

static bool
representable_as_half_series_p (REAL_VALUE_TYPE c, unsigned n,
				struct pow_synth_sqrt_info *info)
{
  REAL_VALUE_TYPE factor = dconsthalf;
  REAL_VALUE_TYPE remainder = c;

  info->deepest = 0;
  info->num_mults = 0;
  memset (info->factors, 0, n * sizeof (bool));

  for (unsigned i = 0; i < n; i++)
    {
      REAL_VALUE_TYPE res;

      /* If something inexact happened bail out now.  */
      if (REAL_ARITHMETIC (res, MINUS_EXPR, remainder, factor))
	return false;

      /* We have hit zero.  The number is representable as a sum
	 of powers of 0.5.  */
      if (REAL_VALUES_EQUAL (res, dconst0))
	{
	  info->factors[i] = true;
	  info->deepest = i + 1;
	  return true;
	}
      else if (!REAL_VALUE_NEGATIVE (res))
	{
	  remainder = res;
	  info->factors[i] = true;
	  info->num_mults++;
	}
      else
	info->factors[i] = false;

      REAL_ARITHMETIC (factor, MULT_EXPR, factor, dconsthalf);
    }
  return false;
}
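/* Worked example (illustration only): 0.625 = 0.5 + 0.125
   = 0.5**1 + 0.5**3, so for C == 0.625 and N >= 3 the function above
   records factors[] = { true, false, true }, deepest == 3 and
   num_mults == 1, and pow (x, 0.625) can later be synthesized as
   sqrt (x) * sqrt (sqrt (sqrt (x))).  */
#if 0
static int
half_series_example (void)
{
  /* The arithmetic the recorded factors stand for.  */
  return 0.5 + 0.125 == 0.625;
}
#endif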
/* Return the tree corresponding to FN being applied
   to ARG N times at GSI and LOC.
   Look up previous results from CACHE if need be.
   cache[0] should contain just plain ARG i.e. FN applied to ARG 0 times.  */

static tree
get_fn_chain (tree arg, unsigned int n, gimple_stmt_iterator *gsi,
	      tree fn, location_t loc, tree *cache)
{
  tree res = cache[n];
  if (!res)
    {
      tree prev = get_fn_chain (arg, n - 1, gsi, fn, loc, cache);
      res = build_and_insert_call (gsi, loc, fn, prev);
      cache[n] = res;
    }

  return res;
}
/* Print to STREAM the repeated application of function FNAME to ARG
   N times.  So, for FNAME = "foo", ARG = "x", N = 2 it would print:
   foo (foo (x)).  */

static void
print_nested_fn (FILE* stream, const char *fname, const char* arg,
		 unsigned int n)
{
  if (n == 0)
    fprintf (stream, "%s", arg);
  else
    {
      fprintf (stream, "%s (", fname);
      print_nested_fn (stream, fname, arg, n - 1);
      fprintf (stream, ")");
    }
}
/* Print to STREAM the fractional sequence of sqrt chains
   applied to ARG, described by INFO.  Used for the dump file.  */

static void
dump_fractional_sqrt_sequence (FILE *stream, const char *arg,
			       struct pow_synth_sqrt_info *info)
{
  for (unsigned int i = 0; i < info->deepest; i++)
    {
      bool is_set = info->factors[i];
      if (is_set)
	{
	  print_nested_fn (stream, "sqrt", arg, i + 1);
	  if (i != info->deepest - 1)
	    fprintf (stream, " * ");
	}
    }
}
/* Print to STREAM a representation of raising ARG to an integer
   power N.  Used for the dump file.  */

static void
dump_integer_part (FILE *stream, const char* arg, HOST_WIDE_INT n)
{
  if (n > 1)
    fprintf (stream, "powi (%s, " HOST_WIDE_INT_PRINT_DEC ")", arg, n);
  else if (n == 1)
    fprintf (stream, "%s", arg);
}
/* Attempt to synthesize a POW[F] (ARG0, ARG1) call using chains of
   square roots.  Place at GSI and LOC.  Limit the maximum depth
   of the sqrt chains to MAX_DEPTH.  Return the tree holding the
   result of the expanded sequence or NULL_TREE if the expansion failed.

   This routine assumes that ARG1 is a real number with a fractional part
   (the integer exponent case will have been handled earlier in
   gimple_expand_builtin_pow).

   For ARG1 > 0.0:
   * For ARG1 composed of a whole part WHOLE_PART and a fractional part
     FRAC_PART i.e. WHOLE_PART == floor (ARG1) and
     FRAC_PART == ARG1 - WHOLE_PART:
     Produce POWI (ARG0, WHOLE_PART) * POW (ARG0, FRAC_PART) where
     POW (ARG0, FRAC_PART) is expanded as a product of square root chains
     if it can be expressed as such, that is if FRAC_PART satisfies:
     FRAC_PART == <SUM from i = 1 until MAX_DEPTH> (a[i] * (0.5**i))
     where integer a[i] is either 0 or 1.

     Example:
     POW (x, 3.625) == POWI (x, 3) * POW (x, 0.625)
       --> POWI (x, 3) * SQRT (x) * SQRT (SQRT (SQRT (x)))

   For ARG1 < 0.0 there are two approaches:
   * (A) Expand to 1.0 / POW (ARG0, -ARG1) where POW (ARG0, -ARG1)
	 is calculated as above.

	 Example:
	 POW (x, -5.625) == 1.0 / POW (x, 5.625)
	   --> 1.0 / (POWI (x, 5) * SQRT (x) * SQRT (SQRT (SQRT (x))))

   * (B) : WHOLE_PART := - ceil (abs (ARG1))
	   FRAC_PART := ARG1 - WHOLE_PART
	   and expand to POW (x, FRAC_PART) / POWI (x, WHOLE_PART).
	 Example:
	 POW (x, -5.875) == POW (x, 0.125) / POWI (X, 6)
	   --> SQRT (SQRT (SQRT (x))) / (POWI (x, 6))

   For ARG1 < 0.0 we choose between (A) and (B) depending on
   how many multiplications we'd have to do.
   So, for the example in (B): POW (x, -5.875), if we were to
   follow algorithm (A) we would produce:
   1.0 / POWI (X, 5) * SQRT (X) * SQRT (SQRT (X)) * SQRT (SQRT (SQRT (X)))
   which contains more multiplications than approach (B).

   Hopefully, this approach will eliminate potentially expensive POW library
   calls when unsafe floating point math is enabled and allow the compiler to
   further optimise the multiplies, square roots and divides produced by this
   function.  */

static tree
expand_pow_as_sqrts (gimple_stmt_iterator *gsi, location_t loc,
		     tree arg0, tree arg1, HOST_WIDE_INT max_depth)
{
  tree type = TREE_TYPE (arg0);
  machine_mode mode = TYPE_MODE (type);
  tree sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT);
  bool one_over = true;

  if (!sqrtfn)
    return NULL_TREE;

  if (TREE_CODE (arg1) != REAL_CST)
    return NULL_TREE;

  REAL_VALUE_TYPE exp_init = TREE_REAL_CST (arg1);

  gcc_assert (max_depth > 0);
  tree *cache = XALLOCAVEC (tree, max_depth + 1);

  struct pow_synth_sqrt_info synth_info;
  synth_info.factors = XALLOCAVEC (bool, max_depth + 1);
  synth_info.deepest = 0;
  synth_info.num_mults = 0;

  bool neg_exp = REAL_VALUE_NEGATIVE (exp_init);
  REAL_VALUE_TYPE exp = real_value_abs (&exp_init);

  /* The whole and fractional parts of exp.  */
  REAL_VALUE_TYPE whole_part;
  REAL_VALUE_TYPE frac_part;

  real_floor (&whole_part, mode, &exp);
  REAL_ARITHMETIC (frac_part, MINUS_EXPR, exp, whole_part);

  REAL_VALUE_TYPE ceil_whole = dconst0;
  REAL_VALUE_TYPE ceil_fract = dconst0;

  if (neg_exp)
    {
      real_ceil (&ceil_whole, mode, &exp);
      REAL_ARITHMETIC (ceil_fract, MINUS_EXPR, ceil_whole, exp);
    }

  if (!representable_as_half_series_p (frac_part, max_depth, &synth_info))
    return NULL_TREE;

  /* Check whether it's more profitable to not use 1.0 / ...  */
  if (neg_exp)
    {
      struct pow_synth_sqrt_info alt_synth_info;
      alt_synth_info.factors = XALLOCAVEC (bool, max_depth + 1);
      alt_synth_info.deepest = 0;
      alt_synth_info.num_mults = 0;

      if (representable_as_half_series_p (ceil_fract, max_depth,
					  &alt_synth_info)
	  && alt_synth_info.deepest <= synth_info.deepest
	  && alt_synth_info.num_mults < synth_info.num_mults)
	{
	  whole_part = ceil_whole;
	  frac_part = ceil_fract;
	  synth_info.deepest = alt_synth_info.deepest;
	  synth_info.num_mults = alt_synth_info.num_mults;
	  memcpy (synth_info.factors, alt_synth_info.factors,
		  (max_depth + 1) * sizeof (bool));
	  one_over = false;
	}
    }

  HOST_WIDE_INT n = real_to_integer (&whole_part);
  REAL_VALUE_TYPE cint;
  real_from_integer (&cint, VOIDmode, n, SIGNED);

  if (!real_identical (&whole_part, &cint))
    return NULL_TREE;

  if (powi_cost (n) + synth_info.num_mults > POWI_MAX_MULTS)
    return NULL_TREE;

  memset (cache, 0, (max_depth + 1) * sizeof (tree));

  tree integer_res = n == 0 ? build_real (type, dconst1) : arg0;

  /* Calculate the integer part of the exponent.  */
  if (n > 1)
    {
      integer_res = gimple_expand_builtin_powi (gsi, loc, arg0, n);
      if (!integer_res)
	return NULL_TREE;
    }

  if (dump_file)
    {
      char string[64];

      real_to_decimal (string, &exp_init, sizeof (string), 0, 1);
      fprintf (dump_file, "synthesizing pow (x, %s) as:\n", string);

      if (neg_exp)
	{
	  if (one_over)
	    {
	      fprintf (dump_file, "1.0 / (");
	      dump_integer_part (dump_file, "x", n);
	      if (n > 0)
		fprintf (dump_file, " * ");
	      dump_fractional_sqrt_sequence (dump_file, "x", &synth_info);
	      fprintf (dump_file, ")");
	    }
	  else
	    {
	      dump_fractional_sqrt_sequence (dump_file, "x", &synth_info);
	      fprintf (dump_file, " / (");
	      dump_integer_part (dump_file, "x", n);
	      fprintf (dump_file, ")");
	    }
	}
      else
	{
	  dump_fractional_sqrt_sequence (dump_file, "x", &synth_info);
	  if (n > 0)
	    fprintf (dump_file, " * ");
	  dump_integer_part (dump_file, "x", n);
	}

      fprintf (dump_file, "\ndeepest sqrt chain: %d\n", synth_info.deepest);
    }

  tree fract_res = NULL_TREE;
  cache[0] = arg0;

  /* Calculate the fractional part of the exponent.  */
  for (unsigned i = 0; i < synth_info.deepest; i++)
    {
      if (synth_info.factors[i])
	{
	  tree sqrt_chain = get_fn_chain (arg0, i + 1, gsi, sqrtfn, loc, cache);

	  if (!fract_res)
	    fract_res = sqrt_chain;
	  else
	    fract_res = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
						fract_res, sqrt_chain);
	}
    }

  tree res = NULL_TREE;

  if (neg_exp)
    {
      if (one_over)
	{
	  if (n > 0)
	    res = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
					  fract_res, integer_res);
	  else
	    res = fract_res;

	  res = build_and_insert_binop (gsi, loc, "powrootrecip", RDIV_EXPR,
					build_real (type, dconst1), res);
	}
      else
	{
	  res = build_and_insert_binop (gsi, loc, "powroot", RDIV_EXPR,
					fract_res, integer_res);
	}
    }
  else
    res = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
				  fract_res, integer_res);
  return res;
}
/* ARG0 and ARG1 are the two arguments to a pow builtin call in GSI
   with location info LOC.  If possible, create an equivalent and
   less expensive sequence of statements prior to GSI, and return an
   expression holding the result.  */

static tree
gimple_expand_builtin_pow (gimple_stmt_iterator *gsi, location_t loc,
			   tree arg0, tree arg1)
{
  REAL_VALUE_TYPE c, cint, dconst1_3, dconst1_4, dconst1_6;
  REAL_VALUE_TYPE c2, dconst3;
  HOST_WIDE_INT n;
  tree type, sqrtfn, cbrtfn, sqrt_arg0, result, cbrt_x, powi_cbrt_x;
  machine_mode mode;
  bool speed_p = optimize_bb_for_speed_p (gsi_bb (*gsi));
  bool hw_sqrt_exists, c_is_int, c2_is_int;

  dconst1_4 = dconst1;
  SET_REAL_EXP (&dconst1_4, REAL_EXP (&dconst1_4) - 2);

  /* If the exponent isn't a constant, there's nothing of interest
     to be done.  */
  if (TREE_CODE (arg1) != REAL_CST)
    return NULL_TREE;

  /* If the exponent is equivalent to an integer, expand to an optimal
     multiplication sequence when profitable.  */
  c = TREE_REAL_CST (arg1);
  n = real_to_integer (&c);
  real_from_integer (&cint, VOIDmode, n, SIGNED);
  c_is_int = real_identical (&c, &cint);

  if (c_is_int
      && ((n >= -1 && n <= 2)
	  || (flag_unsafe_math_optimizations
	      && speed_p
	      && powi_cost (n) <= POWI_MAX_MULTS)))
    return gimple_expand_builtin_powi (gsi, loc, arg0, n);

  /* Attempt various optimizations using sqrt and cbrt.  */
  type = TREE_TYPE (arg0);
  mode = TYPE_MODE (type);
  sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT);

  /* Optimize pow(x,0.5) = sqrt(x).  This replacement is always safe
     unless signed zeros must be maintained.  pow(-0,0.5) = +0, while
     sqrt(-0) = -0.  */
  if (sqrtfn
      && REAL_VALUES_EQUAL (c, dconsthalf)
      && !HONOR_SIGNED_ZEROS (mode))
    return build_and_insert_call (gsi, loc, sqrtfn, arg0);

  hw_sqrt_exists = optab_handler (sqrt_optab, mode) != CODE_FOR_nothing;

  /* Optimize pow(x,1./3.) = cbrt(x).  This requires unsafe math
     optimizations since 1./3. is not exactly representable.  If x
     is negative and finite, the correct value of pow(x,1./3.) is
     a NaN with the "invalid" exception raised, because the value
     of 1./3. actually has an even denominator.  The correct value
     of cbrt(x) is a negative real value.  */
  cbrtfn = mathfn_built_in (type, BUILT_IN_CBRT);
  dconst1_3 = real_value_truncate (mode, dconst_third ());

  if (flag_unsafe_math_optimizations
      && cbrtfn
      && (gimple_val_nonnegative_real_p (arg0) || !HONOR_NANS (mode))
      && REAL_VALUES_EQUAL (c, dconst1_3))
    return build_and_insert_call (gsi, loc, cbrtfn, arg0);

  /* Optimize pow(x,1./6.) = cbrt(sqrt(x)).  Don't do this optimization
     if we don't have a hardware sqrt insn.  */
  dconst1_6 = dconst1_3;
  SET_REAL_EXP (&dconst1_6, REAL_EXP (&dconst1_6) - 1);

  if (flag_unsafe_math_optimizations
      && sqrtfn
      && cbrtfn
      && (gimple_val_nonnegative_real_p (arg0) || !HONOR_NANS (mode))
      && speed_p
      && hw_sqrt_exists
      && REAL_VALUES_EQUAL (c, dconst1_6))
    {
      /* sqrt(x)  */
      sqrt_arg0 = build_and_insert_call (gsi, loc, sqrtfn, arg0);

      /* cbrt(sqrt(x))  */
      return build_and_insert_call (gsi, loc, cbrtfn, sqrt_arg0);
    }

  /* Attempt to expand the POW as a product of square root chains.
     Expand the 0.25 case even when optimising for size.  */
  if (flag_unsafe_math_optimizations
      && sqrtfn
      && hw_sqrt_exists
      && (speed_p || REAL_VALUES_EQUAL (c, dconst1_4))
      && !HONOR_SIGNED_ZEROS (mode))
    {
      unsigned int max_depth = speed_p
				? PARAM_VALUE (PARAM_MAX_POW_SQRT_DEPTH)
				: 2;

      tree expand_with_sqrts
	= expand_pow_as_sqrts (gsi, loc, arg0, arg1, max_depth);

      if (expand_with_sqrts)
	return expand_with_sqrts;
    }

  real_arithmetic (&c2, MULT_EXPR, &c, &dconst2);
  n = real_to_integer (&c2);
  real_from_integer (&cint, VOIDmode, n, SIGNED);
  c2_is_int = real_identical (&c2, &cint);

  /* Optimize pow(x,c), where 3c = n for some nonzero integer n, into

     powi(x, n/3) * powi(cbrt(x), n%3),                    n > 0;
     1.0 / (powi(x, abs(n)/3) * powi(cbrt(x), abs(n)%3)),  n < 0.

     Do not calculate the first factor when n/3 = 0.  As cbrt(x) is
     different from pow(x, 1./3.) due to rounding and behavior with
     negative x, we need to constrain this transformation to unsafe
     math and positive x or finite math.  */
  real_from_integer (&dconst3, VOIDmode, 3, SIGNED);
  real_arithmetic (&c2, MULT_EXPR, &c, &dconst3);
  real_round (&c2, mode, &c2);
  n = real_to_integer (&c2);
  real_from_integer (&cint, VOIDmode, n, SIGNED);
  real_arithmetic (&c2, RDIV_EXPR, &cint, &dconst3);
  real_convert (&c2, mode, &c2);

  if (flag_unsafe_math_optimizations
      && cbrtfn
      && (gimple_val_nonnegative_real_p (arg0) || !HONOR_NANS (mode))
      && real_identical (&c2, &c)
      && !c2_is_int
      && optimize_function_for_speed_p (cfun)
      && powi_cost (n / 3) <= POWI_MAX_MULTS)
    {
      tree powi_x_ndiv3 = NULL_TREE;

      /* Attempt to fold powi(arg0, abs(n/3)) into multiplies.  If not
	 possible or profitable, give up.  Skip the degenerate case when
	 abs(n) < 3, where the result is always 1.  */
      if (absu_hwi (n) >= 3)
	{
	  powi_x_ndiv3 = gimple_expand_builtin_powi (gsi, loc, arg0,
						     abs_hwi (n / 3));
	  if (!powi_x_ndiv3)
	    return NULL_TREE;
	}

      /* Calculate powi(cbrt(x), n%3).  Don't use gimple_expand_builtin_powi
	 as that creates an unnecessary variable.  Instead, just produce
	 either cbrt(x) or cbrt(x) * cbrt(x).  */
      cbrt_x = build_and_insert_call (gsi, loc, cbrtfn, arg0);

      if (absu_hwi (n) % 3 == 1)
	powi_cbrt_x = cbrt_x;
      else
	powi_cbrt_x = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
					      cbrt_x, cbrt_x);

      /* Multiply the two subexpressions, unless powi(x,abs(n)/3) = 1.  */
      if (absu_hwi (n) < 3)
	result = powi_cbrt_x;
      else
	result = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
					 powi_x_ndiv3, powi_cbrt_x);

      /* If n is negative, reciprocate the result.  */
      if (n < 0)
	result = build_and_insert_binop (gsi, loc, "powroot", RDIV_EXPR,
					 build_real (type, dconst1), result);

      return result;
    }

  /* No optimizations succeeded.  */
  return NULL_TREE;
}
/* ARG is the argument to a cabs builtin call in GSI with location info
   LOC.  Create a sequence of statements prior to GSI that calculates
   sqrt(R*R + I*I), where R and I are the real and imaginary components
   of ARG, respectively.  Return an expression holding the result.  */

static tree
gimple_expand_builtin_cabs (gimple_stmt_iterator *gsi, location_t loc, tree arg)
{
  tree real_part, imag_part, addend1, addend2, sum, result;
  tree type = TREE_TYPE (TREE_TYPE (arg));
  tree sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT);
  machine_mode mode = TYPE_MODE (type);

  if (!flag_unsafe_math_optimizations
      || !optimize_bb_for_speed_p (gimple_bb (gsi_stmt (*gsi)))
      || !sqrtfn
      || optab_handler (sqrt_optab, mode) == CODE_FOR_nothing)
    return NULL_TREE;

  real_part = build_and_insert_ref (gsi, loc, type, "cabs",
				    REALPART_EXPR, arg);
  addend1 = build_and_insert_binop (gsi, loc, "cabs", MULT_EXPR,
				    real_part, real_part);
  imag_part = build_and_insert_ref (gsi, loc, type, "cabs",
				    IMAGPART_EXPR, arg);
  addend2 = build_and_insert_binop (gsi, loc, "cabs", MULT_EXPR,
				    imag_part, imag_part);
  sum = build_and_insert_binop (gsi, loc, "cabs", PLUS_EXPR, addend1, addend2);
  result = build_and_insert_call (gsi, loc, sqrtfn, sum);

  return result;
}
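/* Illustration only (a hypothetical, standalone sketch): the inserted
   statements compute the equivalent of the function below.  Note that
   r*r + i*i can overflow where a library cabs would not, which is one
   reason the expansion is guarded by flag_unsafe_math_optimizations.  */
#if 0
static double
cabs_example (double r, double i)
{
  return __builtin_sqrt (r * r + i * i);
}
#endif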
/* Go through all calls to sin, cos and cexpi and call execute_cse_sincos_1
   on the SSA_NAME argument of each of them.  Also expand powi(x,n) into
   an optimal number of multiplies, when n is a constant.  */

namespace {

const pass_data pass_data_cse_sincos =
{
  GIMPLE_PASS, /* type */
  "sincos", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};

class pass_cse_sincos : public gimple_opt_pass
{
public:
  pass_cse_sincos (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_cse_sincos, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *)
    {
      /* We no longer require either sincos or cexp, since powi expansion
	 piggybacks on this pass.  */
      return optimize;
    }

  virtual unsigned int execute (function *);

}; // class pass_cse_sincos
unsigned int
pass_cse_sincos::execute (function *fun)
{
  basic_block bb;
  bool cfg_changed = false;

  calculate_dominance_info (CDI_DOMINATORS);
  memset (&sincos_stats, 0, sizeof (sincos_stats));

  FOR_EACH_BB_FN (bb, fun)
    {
      gimple_stmt_iterator gsi;
      bool cleanup_eh = false;

      for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple stmt = gsi_stmt (gsi);
	  tree fndecl;

	  /* Only the last stmt in a bb could throw, no need to call
	     gimple_purge_dead_eh_edges if we change something in the middle
	     of a basic block.  */
	  cleanup_eh = false;

	  if (is_gimple_call (stmt)
	      && gimple_call_lhs (stmt)
	      && (fndecl = gimple_call_fndecl (stmt))
	      && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
	    {
	      tree arg, arg0, arg1, result;
	      HOST_WIDE_INT n;
	      location_t loc;

	      switch (DECL_FUNCTION_CODE (fndecl))
		{
		CASE_FLT_FN (BUILT_IN_COS):
		CASE_FLT_FN (BUILT_IN_SIN):
		CASE_FLT_FN (BUILT_IN_CEXPI):
		  /* Make sure we have either sincos or cexp.  */
		  if (!targetm.libc_has_function (function_c99_math_complex)
		      && !targetm.libc_has_function (function_sincos))
		    break;

		  arg = gimple_call_arg (stmt, 0);
		  if (TREE_CODE (arg) == SSA_NAME)
		    cfg_changed |= execute_cse_sincos_1 (arg);
		  break;

		CASE_FLT_FN (BUILT_IN_POW):
		  arg0 = gimple_call_arg (stmt, 0);
		  arg1 = gimple_call_arg (stmt, 1);

		  loc = gimple_location (stmt);
		  result = gimple_expand_builtin_pow (&gsi, loc, arg0, arg1);

		  if (result)
		    {
		      tree lhs = gimple_get_lhs (stmt);
		      gassign *new_stmt = gimple_build_assign (lhs, result);
		      gimple_set_location (new_stmt, loc);
		      unlink_stmt_vdef (stmt);
		      gsi_replace (&gsi, new_stmt, true);
		      cleanup_eh = true;
		      if (gimple_vdef (stmt))
			release_ssa_name (gimple_vdef (stmt));
		    }
		  break;

		CASE_FLT_FN (BUILT_IN_POWI):
		  arg0 = gimple_call_arg (stmt, 0);
		  arg1 = gimple_call_arg (stmt, 1);
		  loc = gimple_location (stmt);

		  if (real_minus_onep (arg0))
		    {
		      tree t0, t1, cond, one, minus_one;
		      gassign *stmt;

		      t0 = TREE_TYPE (arg0);
		      t1 = TREE_TYPE (arg1);
		      one = build_real (t0, dconst1);
		      minus_one = build_real (t0, dconstm1);

		      cond = make_temp_ssa_name (t1, NULL, "powi_cond");
		      stmt = gimple_build_assign (cond, BIT_AND_EXPR,
						  arg1, build_int_cst (t1, 1));
		      gimple_set_location (stmt, loc);
		      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);

		      result = make_temp_ssa_name (t0, NULL, "powi");
		      stmt = gimple_build_assign (result, COND_EXPR, cond,
						  minus_one, one);
		      gimple_set_location (stmt, loc);
		      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
		    }
		  else
		    {
		      if (!tree_fits_shwi_p (arg1))
			break;

		      n = tree_to_shwi (arg1);
		      result = gimple_expand_builtin_powi (&gsi, loc, arg0, n);
		    }

		  if (result)
		    {
		      tree lhs = gimple_get_lhs (stmt);
		      gassign *new_stmt = gimple_build_assign (lhs, result);
		      gimple_set_location (new_stmt, loc);
		      unlink_stmt_vdef (stmt);
		      gsi_replace (&gsi, new_stmt, true);
		      cleanup_eh = true;
		      if (gimple_vdef (stmt))
			release_ssa_name (gimple_vdef (stmt));
		    }
		  break;

		CASE_FLT_FN (BUILT_IN_CABS):
		  arg0 = gimple_call_arg (stmt, 0);
		  loc = gimple_location (stmt);
		  result = gimple_expand_builtin_cabs (&gsi, loc, arg0);

		  if (result)
		    {
		      tree lhs = gimple_get_lhs (stmt);
		      gassign *new_stmt = gimple_build_assign (lhs, result);
		      gimple_set_location (new_stmt, loc);
		      unlink_stmt_vdef (stmt);
		      gsi_replace (&gsi, new_stmt, true);
		      cleanup_eh = true;
		      if (gimple_vdef (stmt))
			release_ssa_name (gimple_vdef (stmt));
		    }
		  break;

		default:;
		}
	    }
	}
      if (cleanup_eh)
	cfg_changed |= gimple_purge_dead_eh_edges (bb);
    }

  statistics_counter_event (fun, "sincos statements inserted",
			    sincos_stats.inserted);

  free_dominance_info (CDI_DOMINATORS);
  return cfg_changed ? TODO_cleanup_cfg : 0;
}
} // anon namespace

gimple_opt_pass *
make_pass_cse_sincos (gcc::context *ctxt)
{
  return new pass_cse_sincos (ctxt);
}
/* A symbolic number is used to detect byte permutation and selection
   patterns.  Therefore the field N contains an artificial number
   consisting of octet sized markers:

   0       - target byte has the value 0
   FF      - target byte has an unknown value (eg. due to sign extension)
   1..size - marker value is the target byte index minus one.

   To detect permutations on memory sources (arrays and structures), a symbolic
   number is also associated a base address (the array or structure the load is
   made from), an offset from the base address and a range which gives the
   difference between the highest and lowest accessed memory location to make
   such a symbolic number.  The range is thus different from size which reflects
   the size of the type of current expression.  Note that for non memory source,
   range holds the same value as size.

   For instance, for an array char a[], (short) a[0] | (short) a[3] would have
   a size of 2 but a range of 4 while (short) a[0] | ((short) a[0] << 1) would
   still have a size of 2 but this time a range of 1.  */

struct symbolic_number {
  uint64_t n;
  tree type;
  tree base_addr;
  tree offset;
  HOST_WIDE_INT bytepos;
  tree alias_set;
  tree vuse;
  unsigned HOST_WIDE_INT range;
};

#define BITS_PER_MARKER 8
#define MARKER_MASK ((1 << BITS_PER_MARKER) - 1)
#define MARKER_BYTE_UNKNOWN MARKER_MASK
#define HEAD_MARKER(n, size) \
  ((n) & ((uint64_t) MARKER_MASK << (((size) - 1) * BITS_PER_MARKER)))
/* The number which the find_bswap_or_nop_1 result should match in
   order to have a nop.  The number is masked according to the size of
   the symbolic number before using it.  */
#define CMPNOP (sizeof (int64_t) < 8 ? 0 : \
  (uint64_t)0x08070605 << 32 | 0x04030201)

/* The number which the find_bswap_or_nop_1 result should match in
   order to have a byte swap.  The number is masked according to the
   size of the symbolic number before using it.  */
#define CMPXCHG (sizeof (int64_t) < 8 ? 0 : \
  (uint64_t)0x01020304 << 32 | 0x05060708)
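/* Illustration only (a hypothetical, standalone sketch): a hand-written
   32-bit byte swap of the kind this pass recognizes.  Tracking each
   result byte with a marker yields 0x01020304, which matches CMPXCHG
   masked to 4 bytes; an identity rearrangement would instead yield
   0x04030201 and match CMPNOP.  */
#if 0
static unsigned int
bswap32_example (unsigned int x)
{
  return ((x & 0x000000ffU) << 24)
	 | ((x & 0x0000ff00U) << 8)
	 | ((x & 0x00ff0000U) >> 8)
	 | ((x & 0xff000000U) >> 24);
}
#endif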
/* Perform a SHIFT or ROTATE operation by COUNT bits on symbolic
   number N.  Return false if the requested operation is not permitted
   on a symbolic number.  */

static inline bool
do_shift_rotate (enum tree_code code,
		 struct symbolic_number *n,
		 int count)
{
  int i, size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
  unsigned head_marker;

  if (count % BITS_PER_UNIT != 0)
    return false;
  count = (count / BITS_PER_UNIT) * BITS_PER_MARKER;

  /* Zero out the extra bits of N in order to avoid them being shifted
     into the significant bits.  */
  if (size < 64 / BITS_PER_MARKER)
    n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;

  switch (code)
    {
    case LSHIFT_EXPR:
      n->n <<= count;
      break;
    case RSHIFT_EXPR:
      head_marker = HEAD_MARKER (n->n, size);
      n->n >>= count;
      /* Arithmetic shift of signed type: result is dependent on the value.  */
      if (!TYPE_UNSIGNED (n->type) && head_marker)
	for (i = 0; i < count / BITS_PER_MARKER; i++)
	  n->n |= (uint64_t) MARKER_BYTE_UNKNOWN
		  << ((size - 1 - i) * BITS_PER_MARKER);
      break;
    case LROTATE_EXPR:
      n->n = (n->n << count) | (n->n >> ((size * BITS_PER_MARKER) - count));
      break;
    case RROTATE_EXPR:
      n->n = (n->n >> count) | (n->n << ((size * BITS_PER_MARKER) - count));
      break;
    default:
      return false;
    }
  /* Zero unused bits for size.  */
  if (size < 64 / BITS_PER_MARKER)
    n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
  return true;
}
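/* Worked example (illustration only): for a 4-byte unsigned value the
   markers start as 0x04030201 (CMPNOP masked to 4 bytes).  An
   LSHIFT_EXPR by 8 bits moves every marker up one position and shifts
   in a 0 marker, giving 0x03020100: the least significant byte is now
   known to be zero.  An RSHIFT_EXPR on a signed type with a set head
   marker instead replicates MARKER_BYTE_UNKNOWN into the vacated
   positions, modelling sign extension.  */
#if 0
static int
shift_marker_example (void)
{
  uint64_t markers = 0x04030201;		/* size == 4 */
  markers = (markers << 8) & 0xffffffff;	/* LSHIFT_EXPR by 8 bits */
  return markers == 0x03020100;			/* byte 0 is known zero */
}
#endif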
/* Perform sanity checking for the symbolic number N and the gimple
   statement STMT.  */

static inline bool
verify_symbolic_number_p (struct symbolic_number *n, gimple stmt)
{
  tree lhs_type;

  lhs_type = gimple_expr_type (stmt);

  if (TREE_CODE (lhs_type) != INTEGER_TYPE)
    return false;

  if (TYPE_PRECISION (lhs_type) != TYPE_PRECISION (n->type))
    return false;

  return true;
}
/* Initialize the symbolic number N for the bswap pass from the base element
   SRC manipulated by the bitwise OR expression.  */

static bool
init_symbolic_number (struct symbolic_number *n, tree src)
{
  int size;

  n->base_addr = n->offset = n->alias_set = n->vuse = NULL_TREE;

  /* Set up the symbolic number N by setting each byte to a value between 1 and
     the byte size of rhs1.  The highest order byte is set to n->size and the
     lowest order byte to 1.  */
  n->type = TREE_TYPE (src);
  size = TYPE_PRECISION (n->type);
  if (size % BITS_PER_UNIT != 0)
    return false;
  size /= BITS_PER_UNIT;
  if (size > 64 / BITS_PER_MARKER)
    return false;
  n->range = size;
  n->n = CMPNOP;

  if (size < 64 / BITS_PER_MARKER)
    n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;

  return true;
}
/* Check if STMT might be a byte swap or a nop from a memory source and
   return the answer.  If so, REF is that memory source and the base of the
   memory area accessed and the offset of the access from that base are
   recorded in N.  */

bool
find_bswap_or_nop_load (gimple stmt, tree ref, struct symbolic_number *n)
{
  /* Leaf node is an array or component ref.  Memorize its base and
     offset from base to compare to other such leaf nodes.  */
  HOST_WIDE_INT bitsize, bitpos;
  machine_mode mode;
  int unsignedp, volatilep;
  tree offset, base_addr;

  /* Not prepared to handle PDP endian.  */
  if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
    return false;

  if (!gimple_assign_load_p (stmt) || gimple_has_volatile_ops (stmt))
    return false;

  base_addr = get_inner_reference (ref, &bitsize, &bitpos, &offset, &mode,
                                   &unsignedp, &volatilep, false);

  if (TREE_CODE (base_addr) == MEM_REF)
    {
      offset_int bit_offset = 0;
      tree off = TREE_OPERAND (base_addr, 1);

      if (!integer_zerop (off))
        {
          offset_int boff, coff = mem_ref_offset (base_addr);
          boff = wi::lshift (coff, LOG2_BITS_PER_UNIT);
          bit_offset += boff;
        }

      base_addr = TREE_OPERAND (base_addr, 0);

      /* Avoid returning a negative bitpos as this may wreak havoc later.  */
      if (wi::neg_p (bit_offset))
        {
          offset_int mask = wi::mask <offset_int> (LOG2_BITS_PER_UNIT, false);
          offset_int tem = bit_offset.and_not (mask);
          /* TEM is the bitpos rounded to BITS_PER_UNIT towards -Inf.
             Subtract it from BIT_OFFSET and add it (scaled) to OFFSET.  */
          bit_offset -= tem;
          tem = wi::arshift (tem, LOG2_BITS_PER_UNIT);
          if (offset)
            offset = size_binop (PLUS_EXPR, offset,
                                 wide_int_to_tree (sizetype, tem));
          else
            offset = wide_int_to_tree (sizetype, tem);
        }

      bitpos += bit_offset.to_shwi ();
    }

  if (bitpos % BITS_PER_UNIT)
    return false;
  if (bitsize % BITS_PER_UNIT)
    return false;

  if (!init_symbolic_number (n, ref))
    return false;
  n->base_addr = base_addr;
  n->offset = offset;
  n->bytepos = bitpos / BITS_PER_UNIT;
  n->alias_set = reference_alias_ptr_type (ref);
  n->vuse = gimple_vuse (stmt);
  return true;
}
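
/* For instance, for a load such as ((unsigned char *) &x)[2], roughly
   speaking this records &x as N->base_addr, 2 as N->bytepos and the
   statement's virtual use in N->vuse, so that other one-byte loads from
   the same base can later be compared and merged by
   perform_symbolic_merge.  (Illustrative example; the exact tree shape
   depends on how the reference was written.)  */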
/* Compute the symbolic number N representing the result of a bitwise OR on
   the 2 symbolic numbers N1 and N2 whose source statements are respectively
   SOURCE_STMT1 and SOURCE_STMT2.  */

static gimple
perform_symbolic_merge (gimple source_stmt1, struct symbolic_number *n1,
                        gimple source_stmt2, struct symbolic_number *n2,
                        struct symbolic_number *n)
{
  int i, size;
  uint64_t mask;
  gimple source_stmt;
  struct symbolic_number *n_start;

  /* Sources are different, cancel bswap if they are not memory locations
     with the same base (array, structure, ...).  */
  if (gimple_assign_rhs1 (source_stmt1) != gimple_assign_rhs1 (source_stmt2))
    {
      uint64_t inc;
      HOST_WIDE_INT start_sub, end_sub, end1, end2, end;
      struct symbolic_number *toinc_n_ptr, *n_end;

      if (!n1->base_addr || !n2->base_addr
          || !operand_equal_p (n1->base_addr, n2->base_addr, 0))
        return NULL;

      /* BASE_ADDR are the same, check whether the offsets are the same.  */
      if (!n1->offset != !n2->offset
          || (n1->offset && !operand_equal_p (n1->offset, n2->offset, 0)))
        return NULL;

      if (n1->bytepos < n2->bytepos)
        {
          n_start = n1;
          start_sub = n2->bytepos - n1->bytepos;
          source_stmt = source_stmt1;
        }
      else
        {
          n_start = n2;
          start_sub = n1->bytepos - n2->bytepos;
          source_stmt = source_stmt2;
        }

      /* Find the highest address at which a load is performed and
         compute related info.  */
      end1 = n1->bytepos + (n1->range - 1);
      end2 = n2->bytepos + (n2->range - 1);
      if (end1 < end2)
        {
          end = end2;
          end_sub = end2 - end1;
        }
      else
        {
          end = end1;
          end_sub = end1 - end2;
        }
      n_end = (end2 > end1) ? n2 : n1;

      /* Find symbolic number whose lsb is the most significant.  */
      if (BYTES_BIG_ENDIAN)
        toinc_n_ptr = (n_end == n1) ? n2 : n1;
      else
        toinc_n_ptr = (n_start == n1) ? n2 : n1;

      n->range = end - n_start->bytepos + 1;

      /* Check that the range of memory covered can be represented by
         a symbolic number.  */
      if (n->range > 64 / BITS_PER_MARKER)
        return NULL;

      /* Reinterpret byte marks in symbolic number holding the value of
         bigger weight according to target endianness.  */
      inc = BYTES_BIG_ENDIAN ? end_sub : start_sub;
      size = TYPE_PRECISION (n1->type) / BITS_PER_UNIT;
      for (i = 0; i < size; i++, inc <<= BITS_PER_MARKER)
        {
          uint64_t marker
            = (toinc_n_ptr->n >> (i * BITS_PER_MARKER)) & MARKER_MASK;
          if (marker && marker != MARKER_BYTE_UNKNOWN)
            toinc_n_ptr->n += inc;
        }
    }
  else
    {
      n->range = n1->range;
      n_start = n1;
      source_stmt = source_stmt1;
    }

  if (!n1->alias_set
      || alias_ptr_types_compatible_p (n1->alias_set, n2->alias_set))
    n->alias_set = n1->alias_set;
  else
    n->alias_set = ptr_type_node;
  n->vuse = n_start->vuse;
  n->base_addr = n_start->base_addr;
  n->offset = n_start->offset;
  n->bytepos = n_start->bytepos;
  n->type = n_start->type;
  size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;

  for (i = 0, mask = MARKER_MASK; i < size; i++, mask <<= BITS_PER_MARKER)
    {
      uint64_t masked1, masked2;

      masked1 = n1->n & mask;
      masked2 = n2->n & mask;
      if (masked1 && masked2 && masked1 != masked2)
        return NULL;
    }
  n->n = n1->n | n2->n;

  return source_stmt;
}
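
/* For example, merging a 2-byte load at bytepos 0 with a 2-byte load at
   bytepos 2 of the same base yields a symbolic number with range 4; the
   markers of the number whose lsb names the more significant bytes are
   incremented by START_SUB (or END_SUB on big-endian targets) so that
   the merged markers again name bytes 1..4 of the combined area.  */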
/* find_bswap_or_nop_1 invokes itself recursively with N and tries to perform
   the operation given by the rhs of STMT on the result.  If the operation
   could successfully be executed the function returns a gimple stmt whose
   rhs's first tree is the expression of the source operand and NULL
   otherwise.  */

static gimple
find_bswap_or_nop_1 (gimple stmt, struct symbolic_number *n, int limit)
{
  enum tree_code code;
  tree rhs1, rhs2 = NULL;
  gimple rhs1_stmt, rhs2_stmt, source_stmt1;
  enum gimple_rhs_class rhs_class;

  if (!limit || !is_gimple_assign (stmt))
    return NULL;

  rhs1 = gimple_assign_rhs1 (stmt);

  if (find_bswap_or_nop_load (stmt, rhs1, n))
    return stmt;

  if (TREE_CODE (rhs1) != SSA_NAME)
    return NULL;

  code = gimple_assign_rhs_code (stmt);
  rhs_class = gimple_assign_rhs_class (stmt);
  rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);

  if (rhs_class == GIMPLE_BINARY_RHS)
    rhs2 = gimple_assign_rhs2 (stmt);

  /* Handle unary rhs and binary rhs with integer constants as second
     operand.  */

  if (rhs_class == GIMPLE_UNARY_RHS
      || (rhs_class == GIMPLE_BINARY_RHS
          && TREE_CODE (rhs2) == INTEGER_CST))
    {
      if (code != BIT_AND_EXPR
          && code != LSHIFT_EXPR
          && code != RSHIFT_EXPR
          && code != LROTATE_EXPR
          && code != RROTATE_EXPR
          && !CONVERT_EXPR_CODE_P (code))
        return NULL;

      source_stmt1 = find_bswap_or_nop_1 (rhs1_stmt, n, limit - 1);

      /* If find_bswap_or_nop_1 returned NULL, STMT is a leaf node and
         we have to initialize the symbolic number.  */
      if (!source_stmt1)
        {
          if (gimple_assign_load_p (stmt)
              || !init_symbolic_number (n, rhs1))
            return NULL;
          source_stmt1 = stmt;
        }

      switch (code)
        {
        case BIT_AND_EXPR:
          {
            int i, size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
            uint64_t val = int_cst_value (rhs2), mask = 0;
            uint64_t tmp = (1 << BITS_PER_UNIT) - 1;

            /* Only constants masking full bytes are allowed.  */
            for (i = 0; i < size; i++, tmp <<= BITS_PER_UNIT)
              if ((val & tmp) != 0 && (val & tmp) != tmp)
                return NULL;
              else if (val & tmp)
                mask |= (uint64_t) MARKER_MASK << (i * BITS_PER_MARKER);

            n->n &= mask;
          }
          break;
        case LSHIFT_EXPR:
        case RSHIFT_EXPR:
        case LROTATE_EXPR:
        case RROTATE_EXPR:
          if (!do_shift_rotate (code, n, (int) TREE_INT_CST_LOW (rhs2)))
            return NULL;
          break;
        CASE_CONVERT:
          {
            int i, type_size, old_type_size;
            tree type;

            type = gimple_expr_type (stmt);
            type_size = TYPE_PRECISION (type);
            if (type_size % BITS_PER_UNIT != 0)
              return NULL;
            type_size /= BITS_PER_UNIT;
            if (type_size > 64 / BITS_PER_MARKER)
              return NULL;

            /* Sign extension: result is dependent on the value.  */
            old_type_size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
            if (!TYPE_UNSIGNED (n->type) && type_size > old_type_size
                && HEAD_MARKER (n->n, old_type_size))
              for (i = 0; i < type_size - old_type_size; i++)
                n->n |= (uint64_t) MARKER_BYTE_UNKNOWN
                        << ((type_size - 1 - i) * BITS_PER_MARKER);

            if (type_size < 64 / BITS_PER_MARKER)
              {
                /* If STMT casts to a smaller type mask out the bits not
                   belonging to the target type.  */
                n->n &= ((uint64_t) 1 << (type_size * BITS_PER_MARKER)) - 1;
              }
            n->type = type;
            if (!n->base_addr)
              n->range = type_size;
          }
          break;
        default:
          return NULL;
        }
      return verify_symbolic_number_p (n, stmt) ? source_stmt1 : NULL;
    }

  /* Handle binary rhs.  */

  if (rhs_class == GIMPLE_BINARY_RHS)
    {
      struct symbolic_number n1, n2;
      gimple source_stmt, source_stmt2;

      if (code != BIT_IOR_EXPR)
        return NULL;

      if (TREE_CODE (rhs2) != SSA_NAME)
        return NULL;

      rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);

      source_stmt1 = find_bswap_or_nop_1 (rhs1_stmt, &n1, limit - 1);
      if (!source_stmt1)
        return NULL;

      source_stmt2 = find_bswap_or_nop_1 (rhs2_stmt, &n2, limit - 1);
      if (!source_stmt2)
        return NULL;

      if (TYPE_PRECISION (n1.type) != TYPE_PRECISION (n2.type))
        return NULL;

      if (!n1.vuse != !n2.vuse
          || (n1.vuse && !operand_equal_p (n1.vuse, n2.vuse, 0)))
        return NULL;

      source_stmt
        = perform_symbolic_merge (source_stmt1, &n1, source_stmt2, &n2, n);
      if (!source_stmt)
        return NULL;

      if (!verify_symbolic_number_p (n, stmt))
        return NULL;

      return source_stmt;
    }
  return NULL;
}
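
/* As an illustration, the recursion above recognizes the manual 32-bit
   byte swap

     tmp1 = x >> 24;
     tmp2 = (x >> 8) & 0x0000ff00;
     tmp3 = (x << 8) & 0x00ff0000;
     tmp4 = x << 24;
     r = tmp1 | tmp2 | tmp3 | tmp4;

   Starting from the symbolic number 0x04030201 for x, the shifts and
   ANDs leave 0x00000004, 0x00000300, 0x00020000 and 0x01000000, and the
   BIT_IOR_EXPRs merge them into 0x01020304, i.e. CMPXCHG for a 4-byte
   value.  */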
/* Check if STMT completes a bswap implementation or a read in a given
   endianness consisting of ORs, SHIFTs and ANDs and sets *BSWAP
   accordingly.  It also sets N to represent the kind of operations
   performed: size of the resulting expression and whether it works on
   a memory source, and if so alias-set and vuse.  Finally, the
   function returns a stmt whose rhs's first tree is the source
   expression.  */

static gimple
find_bswap_or_nop (gimple stmt, struct symbolic_number *n, bool *bswap)
{
  /* The number which the find_bswap_or_nop_1 result should match in order
     to have a full byte swap.  The number is shifted to the right
     according to the size of the symbolic number before using it.  */
  uint64_t cmpxchg = CMPXCHG;
  uint64_t cmpnop = CMPNOP;

  gimple source_stmt;
  int limit;

  /* The last parameter determines the depth search limit.  It usually
     correlates directly to the number n of bytes to be touched.  We
     increase that number by log2(n) + 1 here in order to also
     cover signed -> unsigned conversions of the src operand as can be seen
     in libgcc, and for initial shift/and operation of the src operand.  */
  limit = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (gimple_expr_type (stmt)));
  limit += 1 + (int) ceil_log2 ((unsigned HOST_WIDE_INT) limit);
  source_stmt = find_bswap_or_nop_1 (stmt, n, limit);

  if (!source_stmt)
    return NULL;

  /* Find real size of result (highest non-zero byte).  */
  if (n->base_addr)
    {
      int rsize;
      uint64_t tmpn;

      for (tmpn = n->n, rsize = 0; tmpn; tmpn >>= BITS_PER_MARKER, rsize++);
      n->range = rsize;
    }

  /* Zero out the extra bits of N and CMP*.  */
  if (n->range < (int) sizeof (int64_t))
    {
      uint64_t mask;

      mask = ((uint64_t) 1 << (n->range * BITS_PER_MARKER)) - 1;
      cmpxchg >>= (64 / BITS_PER_MARKER - n->range) * BITS_PER_MARKER;
      cmpnop &= mask;
    }

  /* A complete byte swap should make the symbolic number start with
     the largest digit in the highest order byte.  An unchanged symbolic
     number indicates a read with the same endianness as the target
     architecture.  */
  if (n->n == cmpnop)
    *bswap = false;
  else if (n->n == cmpxchg)
    *bswap = true;
  else
    return NULL;

  /* Useless bit manipulation performed by code.  */
  if (!n->base_addr && n->n == cmpnop)
    return NULL;

  n->range *= BITS_PER_UNIT;
  return source_stmt;
}
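
/* With the formula above, a 4-byte expression gets a depth limit of
   4 + 1 + ceil_log2 (4) == 7 statements, enough for the OR tree over
   the four bytes plus an extra conversion and an initial shift or AND
   of the source operand.  */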
const pass_data pass_data_optimize_bswap =
{
  GIMPLE_PASS, /* type */
  "bswap", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_optimize_bswap : public gimple_opt_pass
{
public:
  pass_optimize_bswap (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_optimize_bswap, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *)
    {
      return flag_expensive_optimizations && optimize;
    }

  virtual unsigned int execute (function *);

}; // class pass_optimize_bswap
/* Perform the bswap optimization: replace the expression computed in the rhs
   of CUR_STMT by an equivalent bswap, load or load + bswap expression.
   Which of these alternatives replaces the rhs is given by N->base_addr
   (non null if a load is needed) and BSWAP.  The type, VUSE and alias-set of
   the load to perform are also given in N while the builtin bswap invocation
   is given in FNDECL.  Finally, if a load is involved, SRC_STMT refers to one
   of the load statements involved to construct the rhs in CUR_STMT and
   N->range gives the size of the rhs expression for maintaining some
   statistics.

   Note that if the replacement involves a load, CUR_STMT is moved just after
   SRC_STMT to do the load with the same VUSE, which can lead to CUR_STMT
   changing basic block.  */

static bool
bswap_replace (gimple cur_stmt, gimple src_stmt, tree fndecl, tree bswap_type,
               tree load_type, struct symbolic_number *n, bool bswap)
{
  gimple_stmt_iterator gsi;
  tree src, tmp, tgt;
  gimple bswap_stmt;

  gsi = gsi_for_stmt (cur_stmt);
  src = gimple_assign_rhs1 (src_stmt);
  tgt = gimple_assign_lhs (cur_stmt);

  /* Need to load the value from memory first.  */
  if (n->base_addr)
    {
      gimple_stmt_iterator gsi_ins = gsi_for_stmt (src_stmt);
      tree addr_expr, addr_tmp, val_expr, val_tmp;
      tree load_offset_ptr, aligned_load_type;
      gimple addr_stmt, load_stmt;
      unsigned align;
      HOST_WIDE_INT load_offset = 0;

      align = get_object_alignment (src);
      /* If the new access is smaller than the original one, we need
         to perform big endian adjustment.  */
      if (BYTES_BIG_ENDIAN)
        {
          HOST_WIDE_INT bitsize, bitpos;
          machine_mode mode;
          int unsignedp, volatilep;
          tree offset;

          get_inner_reference (src, &bitsize, &bitpos, &offset, &mode,
                               &unsignedp, &volatilep, false);
          if (n->range < (unsigned HOST_WIDE_INT) bitsize)
            {
              load_offset = (bitsize - n->range) / BITS_PER_UNIT;
              unsigned HOST_WIDE_INT l
                = (load_offset * BITS_PER_UNIT) & (align - 1);
              if (l)
                align = l & -l;
            }
        }

      if (bswap
          && align < GET_MODE_ALIGNMENT (TYPE_MODE (load_type))
          && SLOW_UNALIGNED_ACCESS (TYPE_MODE (load_type), align))
        return false;

      /* Move cur_stmt just before one of the loads of the original
         to ensure it has the same VUSE.  See PR61517 for what could
         go wrong.  */
      gsi_move_before (&gsi, &gsi_ins);
      gsi = gsi_for_stmt (cur_stmt);

      /* Compute address to load from and cast according to the size
         of the load.  */
      addr_expr = build_fold_addr_expr (unshare_expr (src));
      if (is_gimple_mem_ref_addr (addr_expr))
        addr_tmp = addr_expr;
      else
        {
          addr_tmp = make_temp_ssa_name (TREE_TYPE (addr_expr), NULL,
                                         "load_src");
          addr_stmt = gimple_build_assign (addr_tmp, addr_expr);
          gsi_insert_before (&gsi, addr_stmt, GSI_SAME_STMT);
        }

      /* Perform the load.  */
      aligned_load_type = load_type;
      if (align < TYPE_ALIGN (load_type))
        aligned_load_type = build_aligned_type (load_type, align);
      load_offset_ptr = build_int_cst (n->alias_set, load_offset);
      val_expr = fold_build2 (MEM_REF, aligned_load_type, addr_tmp,
                              load_offset_ptr);

      if (!bswap)
        {
          if (n->range == 16)
            nop_stats.found_16bit++;
          else if (n->range == 32)
            nop_stats.found_32bit++;
          else
            {
              gcc_assert (n->range == 64);
              nop_stats.found_64bit++;
            }

          /* Convert the result of load if necessary.  */
          if (!useless_type_conversion_p (TREE_TYPE (tgt), load_type))
            {
              val_tmp = make_temp_ssa_name (aligned_load_type, NULL,
                                            "load_dst");
              load_stmt = gimple_build_assign (val_tmp, val_expr);
              gimple_set_vuse (load_stmt, n->vuse);
              gsi_insert_before (&gsi, load_stmt, GSI_SAME_STMT);
              gimple_assign_set_rhs_with_ops (&gsi, NOP_EXPR, val_tmp);
            }
          else
            {
              gimple_assign_set_rhs_with_ops (&gsi, MEM_REF, val_expr);
              gimple_set_vuse (cur_stmt, n->vuse);
            }
          update_stmt (cur_stmt);

          if (dump_file)
            {
              fprintf (dump_file,
                       "%d bit load in target endianness found at: ",
                       (int) n->range);
              print_gimple_stmt (dump_file, cur_stmt, 0, 0);
            }
          return true;
        }
      else
        {
          val_tmp = make_temp_ssa_name (aligned_load_type, NULL, "load_dst");
          load_stmt = gimple_build_assign (val_tmp, val_expr);
          gimple_set_vuse (load_stmt, n->vuse);
          gsi_insert_before (&gsi, load_stmt, GSI_SAME_STMT);
          src = val_tmp;
        }
    }

  if (n->range == 16)
    bswap_stats.found_16bit++;
  else if (n->range == 32)
    bswap_stats.found_32bit++;
  else
    {
      gcc_assert (n->range == 64);
      bswap_stats.found_64bit++;
    }

  tmp = src;

  /* Convert the src expression if necessary.  */
  if (!useless_type_conversion_p (TREE_TYPE (tmp), bswap_type))
    {
      gimple convert_stmt;

      tmp = make_temp_ssa_name (bswap_type, NULL, "bswapsrc");
      convert_stmt = gimple_build_assign (tmp, NOP_EXPR, src);
      gsi_insert_before (&gsi, convert_stmt, GSI_SAME_STMT);
    }

  /* Canonical form for 16 bit bswap is a rotate expression.  Only 16-bit
     values are considered, as rotation of 2N-bit values by N bits is
     generally not equivalent to a bswap.  Consider for instance
     0x01020304 r>> 16 which gives 0x03040102 while a bswap for that value
     is 0x04030201.  */
  if (bswap && n->range == 16)
    {
      tree count = build_int_cst (NULL, BITS_PER_UNIT);
      src = fold_build2 (LROTATE_EXPR, bswap_type, tmp, count);
      bswap_stmt = gimple_build_assign (NULL, src);
    }
  else
    bswap_stmt = gimple_build_call (fndecl, 1, tmp);

  tmp = tgt;

  /* Convert the result if necessary.  */
  if (!useless_type_conversion_p (TREE_TYPE (tgt), bswap_type))
    {
      gimple convert_stmt;

      tmp = make_temp_ssa_name (bswap_type, NULL, "bswapdst");
      convert_stmt = gimple_build_assign (tgt, NOP_EXPR, tmp);
      gsi_insert_after (&gsi, convert_stmt, GSI_SAME_STMT);
    }

  gimple_set_lhs (bswap_stmt, tmp);

  if (dump_file)
    {
      fprintf (dump_file, "%d bit bswap implementation found at: ",
               (int) n->range);
      print_gimple_stmt (dump_file, cur_stmt, 0, 0);
    }

  gsi_insert_after (&gsi, bswap_stmt, GSI_SAME_STMT);
  gsi_remove (&gsi, true);
  return true;
}
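
/* The replacement thus takes one of three shapes: a plain builtin call
   (lhs = __builtin_bswap32 (src)), a load in target endianness
   (lhs = MEM[...]), or a load followed by a builtin call on the loaded
   value.  16-bit byte swaps are emitted as a rotate by 8 bits instead,
   as per the canonical form described above.  */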
/* Find manual byte swap implementations as well as loads in a given
   endianness.  Byte swaps are turned into a bswap builtin invocation
   while endian loads are converted to a bswap builtin invocation or a
   simple load according to the target endianness.  */

unsigned int
pass_optimize_bswap::execute (function *fun)
{
  basic_block bb;
  bool bswap32_p, bswap64_p;
  bool changed = false;
  tree bswap32_type = NULL_TREE, bswap64_type = NULL_TREE;

  if (BITS_PER_UNIT != 8)
    return 0;

  bswap32_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP32)
               && optab_handler (bswap_optab, SImode) != CODE_FOR_nothing);
  bswap64_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP64)
               && (optab_handler (bswap_optab, DImode) != CODE_FOR_nothing
                   || (bswap32_p && word_mode == SImode)));

  /* Determine the argument type of the builtins.  The code later on
     assumes that the return and argument type are the same.  */
  if (bswap32_p)
    {
      tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
      bswap32_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
    }

  if (bswap64_p)
    {
      tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
      bswap64_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
    }

  memset (&nop_stats, 0, sizeof (nop_stats));
  memset (&bswap_stats, 0, sizeof (bswap_stats));

  FOR_EACH_BB_FN (bb, fun)
    {
      gimple_stmt_iterator gsi;

      /* We do a reverse scan for bswap patterns to make sure we get the
         widest match.  As bswap pattern matching doesn't handle previously
         inserted smaller bswap replacements as sub-patterns, the wider
         variant wouldn't be detected.  */
      for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi);)
        {
          gimple src_stmt, cur_stmt = gsi_stmt (gsi);
          tree fndecl = NULL_TREE, bswap_type = NULL_TREE, load_type;
          enum tree_code code;
          struct symbolic_number n;
          bool bswap;

          /* This gsi_prev (&gsi) is not part of the for loop because cur_stmt
             might be moved to a different basic block by bswap_replace and
             gsi must not point to it if that's the case.  Moving the gsi_prev
             here makes sure that gsi points to the statement previous to
             cur_stmt while still making sure that all statements are
             considered in this basic block.  */
          gsi_prev (&gsi);

          if (!is_gimple_assign (cur_stmt))
            continue;

          code = gimple_assign_rhs_code (cur_stmt);
          switch (code)
            {
            case LROTATE_EXPR:
            case RROTATE_EXPR:
              if (!tree_fits_uhwi_p (gimple_assign_rhs2 (cur_stmt))
                  || tree_to_uhwi (gimple_assign_rhs2 (cur_stmt))
                     % BITS_PER_UNIT)
                continue;
              /* Fall through.  */
            case BIT_IOR_EXPR:
              break;
            default:
              continue;
            }

          src_stmt = find_bswap_or_nop (cur_stmt, &n, &bswap);
          if (!src_stmt)
            continue;

          switch (n.range)
            {
            case 16:
              /* Already in canonical form, nothing to do.  */
              if (code == LROTATE_EXPR || code == RROTATE_EXPR)
                continue;
              load_type = bswap_type = uint16_type_node;
              break;
            case 32:
              load_type = uint32_type_node;
              if (bswap32_p)
                {
                  fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
                  bswap_type = bswap32_type;
                }
              break;
            case 64:
              load_type = uint64_type_node;
              if (bswap64_p)
                {
                  fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
                  bswap_type = bswap64_type;
                }
              break;
            default:
              continue;
            }

          if (bswap && !fndecl && n.range != 16)
            continue;

          if (bswap_replace (cur_stmt, src_stmt, fndecl, bswap_type,
                             load_type, &n, bswap))
            changed = true;
        }
    }

  statistics_counter_event (fun, "16-bit nop implementations found",
                            nop_stats.found_16bit);
  statistics_counter_event (fun, "32-bit nop implementations found",
                            nop_stats.found_32bit);
  statistics_counter_event (fun, "64-bit nop implementations found",
                            nop_stats.found_64bit);
  statistics_counter_event (fun, "16-bit bswap implementations found",
                            bswap_stats.found_16bit);
  statistics_counter_event (fun, "32-bit bswap implementations found",
                            bswap_stats.found_32bit);
  statistics_counter_event (fun, "64-bit bswap implementations found",
                            bswap_stats.found_64bit);

  return (changed ? TODO_update_ssa : 0);
}

gimple_opt_pass *
make_pass_optimize_bswap (gcc::context *ctxt)
{
  return new pass_optimize_bswap (ctxt);
}
/* Return true if STMT is a type conversion operation that can be stripped
   when used in a widening multiply operation.  */
static bool
widening_mult_conversion_strippable_p (tree result_type, gimple stmt)
{
  enum tree_code rhs_code = gimple_assign_rhs_code (stmt);

  if (TREE_CODE (result_type) == INTEGER_TYPE)
    {
      tree op_type;
      tree inner_op_type;

      if (!CONVERT_EXPR_CODE_P (rhs_code))
        return false;

      op_type = TREE_TYPE (gimple_assign_lhs (stmt));

      /* If the type of OP has the same precision as the result, then
         we can strip this conversion.  The multiply operation will be
         selected to create the correct extension as a by-product.  */
      if (TYPE_PRECISION (result_type) == TYPE_PRECISION (op_type))
        return true;

      /* We can also strip a conversion if it preserves the signed-ness of
         the operation and doesn't narrow the range.  */
      inner_op_type = TREE_TYPE (gimple_assign_rhs1 (stmt));

      /* If the inner-most type is unsigned, then we can strip any
         intermediate widening operation.  If it's signed, then the
         intermediate widening operation must also be signed.  */
      if ((TYPE_UNSIGNED (inner_op_type)
           || TYPE_UNSIGNED (op_type) == TYPE_UNSIGNED (inner_op_type))
          && TYPE_PRECISION (op_type) > TYPE_PRECISION (inner_op_type))
        return true;

      return false;
    }

  return rhs_code == FIXED_CONVERT_EXPR;
}
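
/* For instance, in (long long) a * (long long) b with int operands A and
   B, each cast widens to the precision of the result and therefore
   satisfies the first test above, so both conversions can be stripped and
   the widening multiply re-creates the extension itself.  */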
/* Return true if RHS is a suitable operand for a widening multiplication,
   assuming a target type of TYPE.
   There are two cases:

     - RHS makes some value at least twice as wide.  Store that value
       in *NEW_RHS_OUT if so, and store its type in *TYPE_OUT.

     - RHS is an integer constant.  Store that value in *NEW_RHS_OUT if so,
       but leave *TYPE_OUT untouched.  */

static bool
is_widening_mult_rhs_p (tree type, tree rhs, tree *type_out,
                        tree *new_rhs_out)
{
  gimple stmt;
  tree type1, rhs1;

  if (TREE_CODE (rhs) == SSA_NAME)
    {
      stmt = SSA_NAME_DEF_STMT (rhs);
      if (is_gimple_assign (stmt))
        {
          if (! widening_mult_conversion_strippable_p (type, stmt))
            rhs1 = rhs;
          else
            {
              rhs1 = gimple_assign_rhs1 (stmt);

              if (TREE_CODE (rhs1) == INTEGER_CST)
                {
                  *new_rhs_out = rhs1;
                  *type_out = NULL;
                  return true;
                }
            }
        }
      else
        rhs1 = rhs;

      type1 = TREE_TYPE (rhs1);

      if (TREE_CODE (type1) != TREE_CODE (type)
          || TYPE_PRECISION (type1) * 2 > TYPE_PRECISION (type))
        return false;

      *new_rhs_out = rhs1;
      *type_out = type1;
      return true;
    }

  if (TREE_CODE (rhs) == INTEGER_CST)
    {
      *new_rhs_out = rhs;
      *type_out = NULL;
      return true;
    }

  return false;
}
/* Return true if STMT performs a widening multiplication, assuming the
   output type is TYPE.  If so, store the unwidened types of the operands
   in *TYPE1_OUT and *TYPE2_OUT respectively.  Also fill *RHS1_OUT and
   *RHS2_OUT such that converting those operands to types *TYPE1_OUT
   and *TYPE2_OUT would give the operands of the multiplication.  */

static bool
is_widening_mult_p (gimple stmt,
                    tree *type1_out, tree *rhs1_out,
                    tree *type2_out, tree *rhs2_out)
{
  tree type = TREE_TYPE (gimple_assign_lhs (stmt));

  if (TREE_CODE (type) != INTEGER_TYPE
      && TREE_CODE (type) != FIXED_POINT_TYPE)
    return false;

  if (!is_widening_mult_rhs_p (type, gimple_assign_rhs1 (stmt), type1_out,
                               rhs1_out))
    return false;

  if (!is_widening_mult_rhs_p (type, gimple_assign_rhs2 (stmt), type2_out,
                               rhs2_out))
    return false;

  if (*type1_out == NULL)
    {
      if (*type2_out == NULL || !int_fits_type_p (*rhs1_out, *type2_out))
        return false;
      *type1_out = *type2_out;
    }

  if (*type2_out == NULL)
    {
      if (!int_fits_type_p (*rhs2_out, *type1_out))
        return false;
      *type2_out = *type1_out;
    }

  /* Ensure that the larger of the two operands comes first.  */
  if (TYPE_PRECISION (*type1_out) < TYPE_PRECISION (*type2_out))
    {
      std::swap (*type1_out, *type2_out);
      std::swap (*rhs1_out, *rhs2_out);
    }

  return true;
}
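
/* So, for int A and B, the statement

     c = (long long) a * (long long) b;

   yields *TYPE1_OUT == *TYPE2_OUT == int with *RHS1_OUT and *RHS2_OUT the
   uncast operands; a constant operand is accepted too, provided it fits
   the type of the other operand.  */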
/* Process a single gimple statement STMT, which has a MULT_EXPR as
   its rhs, and try to convert it into a WIDEN_MULT_EXPR.  The return
   value is true iff we converted the statement.  */

static bool
convert_mult_to_widen (gimple stmt, gimple_stmt_iterator *gsi)
{
  tree lhs, rhs1, rhs2, type, type1, type2;
  enum insn_code handler;
  machine_mode to_mode, from_mode, actual_mode;
  optab op;
  int actual_precision;
  location_t loc = gimple_location (stmt);
  bool from_unsigned1, from_unsigned2;

  lhs = gimple_assign_lhs (stmt);
  type = TREE_TYPE (lhs);
  if (TREE_CODE (type) != INTEGER_TYPE)
    return false;

  if (!is_widening_mult_p (stmt, &type1, &rhs1, &type2, &rhs2))
    return false;

  to_mode = TYPE_MODE (type);
  from_mode = TYPE_MODE (type1);
  from_unsigned1 = TYPE_UNSIGNED (type1);
  from_unsigned2 = TYPE_UNSIGNED (type2);

  if (from_unsigned1 && from_unsigned2)
    op = umul_widen_optab;
  else if (!from_unsigned1 && !from_unsigned2)
    op = smul_widen_optab;
  else
    op = usmul_widen_optab;

  handler = find_widening_optab_handler_and_mode (op, to_mode, from_mode,
                                                  0, &actual_mode);

  if (handler == CODE_FOR_nothing)
    {
      if (op != smul_widen_optab)
        {
          /* We can use a signed multiply with unsigned types as long as
             there is a wider mode to use, or it is the smaller of the two
             types that is unsigned.  Note that type1 >= type2, always.  */
          if ((TYPE_UNSIGNED (type1)
               && TYPE_PRECISION (type1) == GET_MODE_PRECISION (from_mode))
              || (TYPE_UNSIGNED (type2)
                  && TYPE_PRECISION (type2)
                     == GET_MODE_PRECISION (from_mode)))
            {
              from_mode = GET_MODE_WIDER_MODE (from_mode);
              if (GET_MODE_SIZE (to_mode) <= GET_MODE_SIZE (from_mode))
                return false;
            }

          op = smul_widen_optab;
          handler = find_widening_optab_handler_and_mode (op, to_mode,
                                                          from_mode, 0,
                                                          &actual_mode);

          if (handler == CODE_FOR_nothing)
            return false;

          from_unsigned1 = from_unsigned2 = false;
        }
      else
        return false;
    }

  /* Ensure that the inputs to the handler are in the correct precision
     for the opcode.  This will be the full mode size.  */
  actual_precision = GET_MODE_PRECISION (actual_mode);
  if (2 * actual_precision > TYPE_PRECISION (type))
    return false;
  if (actual_precision != TYPE_PRECISION (type1)
      || from_unsigned1 != TYPE_UNSIGNED (type1))
    rhs1 = build_and_insert_cast (gsi, loc,
                                  build_nonstandard_integer_type
                                    (actual_precision, from_unsigned1), rhs1);
  if (actual_precision != TYPE_PRECISION (type2)
      || from_unsigned2 != TYPE_UNSIGNED (type2))
    rhs2 = build_and_insert_cast (gsi, loc,
                                  build_nonstandard_integer_type
                                    (actual_precision, from_unsigned2), rhs2);

  /* Handle constants.  */
  if (TREE_CODE (rhs1) == INTEGER_CST)
    rhs1 = fold_convert (type1, rhs1);
  if (TREE_CODE (rhs2) == INTEGER_CST)
    rhs2 = fold_convert (type2, rhs2);

  gimple_assign_set_rhs1 (stmt, rhs1);
  gimple_assign_set_rhs2 (stmt, rhs2);
  gimple_assign_set_rhs_code (stmt, WIDEN_MULT_EXPR);
  update_stmt (stmt);
  widen_mul_stats.widen_mults_inserted++;
  return true;
}
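
/* On a target whose optab table provides a 32x32->64 widening multiply,
   the above turns, e.g.,

     _1 = (long long) a;
     _2 = (long long) b;
     c = _1 * _2;

   into c = a w* b (a WIDEN_MULT_EXPR), avoiding the full 64-bit
   multiply.  */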
/* Process a single gimple statement STMT, which is found at the
   iterator GSI and has either a PLUS_EXPR or a MINUS_EXPR as its
   rhs (given by CODE), and try to convert it into a
   WIDEN_MULT_PLUS_EXPR or a WIDEN_MULT_MINUS_EXPR.  The return value
   is true iff we converted the statement.  */

static bool
convert_plusminus_to_widen (gimple_stmt_iterator *gsi, gimple stmt,
                            enum tree_code code)
{
  gimple rhs1_stmt = NULL, rhs2_stmt = NULL;
  gimple conv1_stmt = NULL, conv2_stmt = NULL, conv_stmt;
  tree type, type1, type2, optype;
  tree lhs, rhs1, rhs2, mult_rhs1, mult_rhs2, add_rhs;
  enum tree_code rhs1_code = ERROR_MARK, rhs2_code = ERROR_MARK;
  optab this_optab;
  enum tree_code wmult_code;
  enum insn_code handler;
  machine_mode to_mode, from_mode, actual_mode;
  location_t loc = gimple_location (stmt);
  int actual_precision;
  bool from_unsigned1, from_unsigned2;

  lhs = gimple_assign_lhs (stmt);
  type = TREE_TYPE (lhs);
  if (TREE_CODE (type) != INTEGER_TYPE
      && TREE_CODE (type) != FIXED_POINT_TYPE)
    return false;

  if (code == MINUS_EXPR)
    wmult_code = WIDEN_MULT_MINUS_EXPR;
  else
    wmult_code = WIDEN_MULT_PLUS_EXPR;

  rhs1 = gimple_assign_rhs1 (stmt);
  rhs2 = gimple_assign_rhs2 (stmt);

  if (TREE_CODE (rhs1) == SSA_NAME)
    {
      rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
      if (is_gimple_assign (rhs1_stmt))
        rhs1_code = gimple_assign_rhs_code (rhs1_stmt);
    }

  if (TREE_CODE (rhs2) == SSA_NAME)
    {
      rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
      if (is_gimple_assign (rhs2_stmt))
        rhs2_code = gimple_assign_rhs_code (rhs2_stmt);
    }

  /* Allow for one conversion statement between the multiply
     and addition/subtraction statement.  If there are more than
     one conversion then we assume they would invalidate this
     transformation.  If that's not the case then they should have
     been folded before now.  */
  if (CONVERT_EXPR_CODE_P (rhs1_code))
    {
      conv1_stmt = rhs1_stmt;
      rhs1 = gimple_assign_rhs1 (rhs1_stmt);
      if (TREE_CODE (rhs1) == SSA_NAME)
        {
          rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
          if (is_gimple_assign (rhs1_stmt))
            rhs1_code = gimple_assign_rhs_code (rhs1_stmt);
        }
      else
        return false;
    }
  if (CONVERT_EXPR_CODE_P (rhs2_code))
    {
      conv2_stmt = rhs2_stmt;
      rhs2 = gimple_assign_rhs1 (rhs2_stmt);
      if (TREE_CODE (rhs2) == SSA_NAME)
        {
          rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
          if (is_gimple_assign (rhs2_stmt))
            rhs2_code = gimple_assign_rhs_code (rhs2_stmt);
        }
      else
        return false;
    }

  /* If code is WIDEN_MULT_EXPR then it would seem unnecessary to call
     is_widening_mult_p, but we still need the rhs returns.

     It might also appear that it would be sufficient to use the existing
     operands of the widening multiply, but that would limit the choice of
     multiply-and-accumulate instructions.

     If the widened-multiplication result has more than one use, it is
     probably wiser not to do the conversion.  */
  if (code == PLUS_EXPR
      && (rhs1_code == MULT_EXPR || rhs1_code == WIDEN_MULT_EXPR))
    {
      if (!has_single_use (rhs1)
          || !is_widening_mult_p (rhs1_stmt, &type1, &mult_rhs1,
                                  &type2, &mult_rhs2))
        return false;
      add_rhs = rhs2;
      conv_stmt = conv1_stmt;
    }
  else if (rhs2_code == MULT_EXPR || rhs2_code == WIDEN_MULT_EXPR)
    {
      if (!has_single_use (rhs2)
          || !is_widening_mult_p (rhs2_stmt, &type1, &mult_rhs1,
                                  &type2, &mult_rhs2))
        return false;
      add_rhs = rhs1;
      conv_stmt = conv2_stmt;
    }
  else
    return false;

  to_mode = TYPE_MODE (type);
  from_mode = TYPE_MODE (type1);
  from_unsigned1 = TYPE_UNSIGNED (type1);
  from_unsigned2 = TYPE_UNSIGNED (type2);
  optype = type1;

  /* There's no such thing as a mixed sign madd yet, so use a wider mode.  */
  if (from_unsigned1 != from_unsigned2)
    {
      if (!INTEGRAL_TYPE_P (type))
        return false;
      /* We can use a signed multiply with unsigned types as long as
         there is a wider mode to use, or it is the smaller of the two
         types that is unsigned.  Note that type1 >= type2, always.  */
      if ((from_unsigned1
           && TYPE_PRECISION (type1) == GET_MODE_PRECISION (from_mode))
          || (from_unsigned2
              && TYPE_PRECISION (type2) == GET_MODE_PRECISION (from_mode)))
        {
          from_mode = GET_MODE_WIDER_MODE (from_mode);
          if (GET_MODE_SIZE (from_mode) >= GET_MODE_SIZE (to_mode))
            return false;
        }

      from_unsigned1 = from_unsigned2 = false;
      optype = build_nonstandard_integer_type (GET_MODE_PRECISION (from_mode),
                                               false);
    }

  /* If there was a conversion between the multiply and addition
     then we need to make sure it fits a multiply-and-accumulate.
     There should be a single mode change which does not change the
     value.  */
  if (conv_stmt)
    {
      /* We use the original, unmodified data types for this.  */
      tree from_type = TREE_TYPE (gimple_assign_rhs1 (conv_stmt));
      tree to_type = TREE_TYPE (gimple_assign_lhs (conv_stmt));
      int data_size = TYPE_PRECISION (type1) + TYPE_PRECISION (type2);
      bool is_unsigned = TYPE_UNSIGNED (type1) && TYPE_UNSIGNED (type2);

      if (TYPE_PRECISION (from_type) > TYPE_PRECISION (to_type))
        {
          /* Conversion is a truncate.  */
          if (TYPE_PRECISION (to_type) < data_size)
            return false;
        }
      else if (TYPE_PRECISION (from_type) < TYPE_PRECISION (to_type))
        {
          /* Conversion is an extend.  Check it's the right sort.  */
          if (TYPE_UNSIGNED (from_type) != is_unsigned
              && !(is_unsigned && TYPE_PRECISION (from_type) > data_size))
            return false;
        }
      /* else convert is a no-op for our purposes.  */
    }

  /* Verify that the machine can perform a widening multiply
     accumulate in this mode/signedness combination, otherwise
     this transformation is likely to pessimize code.  */
  this_optab = optab_for_tree_code (wmult_code, optype, optab_default);
  handler = find_widening_optab_handler_and_mode (this_optab, to_mode,
                                                  from_mode, 0, &actual_mode);

  if (handler == CODE_FOR_nothing)
    return false;

  /* Ensure that the inputs to the handler are in the correct precision
     for the opcode.  This will be the full mode size.  */
  actual_precision = GET_MODE_PRECISION (actual_mode);
  if (actual_precision != TYPE_PRECISION (type1)
      || from_unsigned1 != TYPE_UNSIGNED (type1))
    mult_rhs1 = build_and_insert_cast (gsi, loc,
                                       build_nonstandard_integer_type
                                         (actual_precision, from_unsigned1),
                                       mult_rhs1);
  if (actual_precision != TYPE_PRECISION (type2)
      || from_unsigned2 != TYPE_UNSIGNED (type2))
    mult_rhs2 = build_and_insert_cast (gsi, loc,
                                       build_nonstandard_integer_type
                                         (actual_precision, from_unsigned2),
                                       mult_rhs2);

  if (!useless_type_conversion_p (type, TREE_TYPE (add_rhs)))
    add_rhs = build_and_insert_cast (gsi, loc, type, add_rhs);

  /* Handle constants.  */
  if (TREE_CODE (mult_rhs1) == INTEGER_CST)
    mult_rhs1 = fold_convert (type1, mult_rhs1);
  if (TREE_CODE (mult_rhs2) == INTEGER_CST)
    mult_rhs2 = fold_convert (type2, mult_rhs2);

  gimple_assign_set_rhs_with_ops (gsi, wmult_code, mult_rhs1, mult_rhs2,
                                  add_rhs);
  update_stmt (gsi_stmt (*gsi));
  widen_mul_stats.maccs_inserted++;
  return true;
}
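
/* For example, with int A, B and long long ACC,

     acc = acc + (long long) a * (long long) b;

   becomes WIDEN_MULT_PLUS_EXPR <a, b, acc> when optab_for_tree_code
   reports a handler for it in this mode/signedness combination;
   otherwise the multiply is left for convert_mult_to_widen.  */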
/* Combine the multiplication at MUL_STMT with operands MULOP1 and MULOP2
   with uses in additions and subtractions to form fused multiply-add
   operations.  Returns true if successful and MUL_STMT should be removed.  */

static bool
convert_mult_to_fma (gimple mul_stmt, tree op1, tree op2)
{
  tree mul_result = gimple_get_lhs (mul_stmt);
  tree type = TREE_TYPE (mul_result);
  gimple use_stmt, neguse_stmt;
  gassign *fma_stmt;
  use_operand_p use_p;
  imm_use_iterator imm_iter;

  if (FLOAT_TYPE_P (type)
      && flag_fp_contract_mode == FP_CONTRACT_OFF)
    return false;

  /* We don't want to do bitfield reduction ops.  */
  if (INTEGRAL_TYPE_P (type)
      && (TYPE_PRECISION (type)
          != GET_MODE_PRECISION (TYPE_MODE (type))))
    return false;

  /* If the target doesn't support it, don't generate it.  We assume that
     if fma isn't available then fms, fnma or fnms are not either.  */
  if (optab_handler (fma_optab, TYPE_MODE (type)) == CODE_FOR_nothing)
    return false;

  /* If the multiplication has zero uses, it is kept around probably because
     of -fnon-call-exceptions.  Don't optimize it away in that case,
     that is DCE's job.  */
  if (has_zero_uses (mul_result))
    return false;

  /* Make sure that the multiplication statement becomes dead after
     the transformation, thus that all uses are transformed to FMAs.
     This means we assume that an FMA operation has the same cost
     as an addition.  */
  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, mul_result)
    {
      enum tree_code use_code;
      tree result = mul_result;
      bool negate_p = false;

      use_stmt = USE_STMT (use_p);

      if (is_gimple_debug (use_stmt))
        continue;

      /* For now restrict these operations to single basic blocks.  In theory
         we would want to support sinking the multiplication in

           m = a*b;
           if ()
             ma = m + c;
           else
             d = m;

         to form an FMA in the then block and sink the multiplication to the
         else block.  */
      if (gimple_bb (use_stmt) != gimple_bb (mul_stmt))
        return false;

      if (!is_gimple_assign (use_stmt))
        return false;

      use_code = gimple_assign_rhs_code (use_stmt);

      /* A negate on the multiplication leads to FNMA.  */
      if (use_code == NEGATE_EXPR)
        {
          ssa_op_iter iter;
          use_operand_p usep;

          result = gimple_assign_lhs (use_stmt);

          /* Make sure the negate statement becomes dead with this
             single transformation.  */
          if (!single_imm_use (gimple_assign_lhs (use_stmt),
                               &use_p, &neguse_stmt))
            return false;

          /* Make sure the multiplication isn't also used on that stmt.  */
          FOR_EACH_PHI_OR_STMT_USE (usep, neguse_stmt, iter, SSA_OP_USE)
            if (USE_FROM_PTR (usep) == mul_result)
              return false;

          /* Re-validate.  */
          use_stmt = neguse_stmt;
          if (gimple_bb (use_stmt) != gimple_bb (mul_stmt))
            return false;
          if (!is_gimple_assign (use_stmt))
            return false;

          use_code = gimple_assign_rhs_code (use_stmt);
          negate_p = true;
        }

      switch (use_code)
        {
        case MINUS_EXPR:
          if (gimple_assign_rhs2 (use_stmt) == result)
            negate_p = !negate_p;
          break;
        case PLUS_EXPR:
          break;
        default:
          /* FMA can only be formed from PLUS and MINUS.  */
          return false;
        }

      /* If the subtrahend (gimple_assign_rhs2 (use_stmt)) is computed
         by a MULT_EXPR that we'll visit later, we might be able to
         get a more profitable match with fnma.
         OTOH, if we don't, a negate / fma pair has likely lower latency
         than a mult / subtract pair.  */
      if (use_code == MINUS_EXPR && !negate_p
          && gimple_assign_rhs1 (use_stmt) == result
          && optab_handler (fms_optab, TYPE_MODE (type)) == CODE_FOR_nothing
          && optab_handler (fnma_optab, TYPE_MODE (type)) != CODE_FOR_nothing)
        {
          tree rhs2 = gimple_assign_rhs2 (use_stmt);

          if (TREE_CODE (rhs2) == SSA_NAME)
            {
              gimple stmt2 = SSA_NAME_DEF_STMT (rhs2);
              if (has_single_use (rhs2)
                  && is_gimple_assign (stmt2)
                  && gimple_assign_rhs_code (stmt2) == MULT_EXPR)
                return false;
            }
        }

      /* We can't handle a * b + a * b.  */
      if (gimple_assign_rhs1 (use_stmt) == gimple_assign_rhs2 (use_stmt))
        return false;

      /* While it is possible to validate whether or not the exact form
         that we've recognized is available in the backend, the assumption
         is that the transformation is never a loss.  For instance, suppose
         the target only has the plain FMA pattern available.  Consider
         a*b-c -> fma(a,b,-c): we've exchanged MUL+SUB for FMA+NEG, which
         is still two operations.  Consider -(a*b)-c -> fma(-a,b,-c): we
         still have 3 operations, but in the FMA form the two NEGs are
         independent and could be run in parallel.  */
    }

  FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, mul_result)
    {
      gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
      enum tree_code use_code;
      tree addop, mulop1 = op1, result = mul_result;
      bool negate_p = false;

      if (is_gimple_debug (use_stmt))
        continue;

      use_code = gimple_assign_rhs_code (use_stmt);
      if (use_code == NEGATE_EXPR)
        {
          result = gimple_assign_lhs (use_stmt);
          single_imm_use (gimple_assign_lhs (use_stmt), &use_p, &neguse_stmt);
          gsi_remove (&gsi, true);
          release_defs (use_stmt);

          use_stmt = neguse_stmt;
          gsi = gsi_for_stmt (use_stmt);
          use_code = gimple_assign_rhs_code (use_stmt);
          negate_p = true;
        }

      if (gimple_assign_rhs1 (use_stmt) == result)
        {
          addop = gimple_assign_rhs2 (use_stmt);
          /* a * b - c -> a * b + (-c)  */
          if (gimple_assign_rhs_code (use_stmt) == MINUS_EXPR)
            addop = force_gimple_operand_gsi (&gsi,
                                              build1 (NEGATE_EXPR,
                                                      type, addop),
                                              true, NULL_TREE, true,
                                              GSI_SAME_STMT);
        }
      else
        {
          addop = gimple_assign_rhs1 (use_stmt);
          /* a - b * c -> (-b) * c + a */
          if (gimple_assign_rhs_code (use_stmt) == MINUS_EXPR)
            negate_p = !negate_p;
        }

      if (negate_p)
        mulop1 = force_gimple_operand_gsi (&gsi,
                                           build1 (NEGATE_EXPR,
                                                   type, mulop1),
                                           true, NULL_TREE, true,
                                           GSI_SAME_STMT);

      fma_stmt = gimple_build_assign (gimple_assign_lhs (use_stmt),
                                      FMA_EXPR, mulop1, op2, addop);
      gsi_replace (&gsi, fma_stmt, true);
      widen_mul_stats.fmas_inserted++;
    }

  return true;
}
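
/* To summarize the rewrite above, for t = a * b with all uses eligible:

     t + c  ->  FMA_EXPR <a, b, c>
     t - c  ->  FMA_EXPR <a, b, -c>
     c - t  ->  FMA_EXPR <-a, b, c>

   and a negate of T folds into a negated MULOP1 the same way.  */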
/* Find integer multiplications where the operands are extended from
   smaller types, and replace the MULT_EXPR with a WIDEN_MULT_EXPR
   where appropriate.  */

const pass_data pass_data_optimize_widening_mul =
{
  GIMPLE_PASS, /* type */
  "widening_mul", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};

class pass_optimize_widening_mul : public gimple_opt_pass
{
public:
  pass_optimize_widening_mul (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_optimize_widening_mul, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *)
    {
      return flag_expensive_optimizations && optimize;
    }

  virtual unsigned int execute (function *);

}; // class pass_optimize_widening_mul
unsigned int
pass_optimize_widening_mul::execute (function *fun)
{
  basic_block bb;
  bool cfg_changed = false;

  memset (&widen_mul_stats, 0, sizeof (widen_mul_stats));

  FOR_EACH_BB_FN (bb, fun)
    {
      gimple_stmt_iterator gsi;

      for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi);)
        {
          gimple stmt = gsi_stmt (gsi);
          enum tree_code code;

          if (is_gimple_assign (stmt))
            {
              code = gimple_assign_rhs_code (stmt);
              switch (code)
                {
                case MULT_EXPR:
                  if (!convert_mult_to_widen (stmt, &gsi)
                      && convert_mult_to_fma (stmt,
                                              gimple_assign_rhs1 (stmt),
                                              gimple_assign_rhs2 (stmt)))
                    {
                      gsi_remove (&gsi, true);
                      release_defs (stmt);
                      continue;
                    }
                  break;

                case PLUS_EXPR:
                case MINUS_EXPR:
                  convert_plusminus_to_widen (&gsi, stmt, code);
                  break;

                default:;
                }
            }
          else if (is_gimple_call (stmt)
                   && gimple_call_lhs (stmt))
            {
              tree fndecl = gimple_call_fndecl (stmt);
              if (fndecl
                  && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
                {
                  switch (DECL_FUNCTION_CODE (fndecl))
                    {
                    case BUILT_IN_POWF:
                    case BUILT_IN_POW:
                    case BUILT_IN_POWL:
                      if (TREE_CODE (gimple_call_arg (stmt, 1)) == REAL_CST
                          && REAL_VALUES_EQUAL
                               (TREE_REAL_CST (gimple_call_arg (stmt, 1)),
                                dconst2)
                          && convert_mult_to_fma (stmt,
                                                  gimple_call_arg (stmt, 0),
                                                  gimple_call_arg (stmt, 0)))
                        {
                          unlink_stmt_vdef (stmt);
                          if (gsi_remove (&gsi, true)
                              && gimple_purge_dead_eh_edges (bb))
                            cfg_changed = true;
                          release_defs (stmt);
                          continue;
                        }
                      break;

                    default:;
                    }
                }
            }
          gsi_next (&gsi);
        }
    }

  statistics_counter_event (fun, "widening multiplications inserted",
                            widen_mul_stats.widen_mults_inserted);
  statistics_counter_event (fun, "widening maccs inserted",
                            widen_mul_stats.maccs_inserted);
  statistics_counter_event (fun, "fused multiply-adds inserted",
                            widen_mul_stats.fmas_inserted);

  return cfg_changed ? TODO_cleanup_cfg : 0;
}

gimple_opt_pass *
make_pass_optimize_widening_mul (gcc::context *ctxt)
{
  return new pass_optimize_widening_mul (ctxt);
}