/* Global, SSA-based optimizations using mathematical identities.
   Copyright (C) 2005-2014 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* Currently, the only mini-pass in this file tries to CSE reciprocal
   operations.  These are common in sequences such as this one:

	modulus = sqrt(x*x + y*y + z*z);
	x = x / modulus;
	y = y / modulus;
	z = z / modulus;

   that can be optimized to

	modulus = sqrt(x*x + y*y + z*z);
	rmodulus = 1.0 / modulus;
	x = x * rmodulus;
	y = y * rmodulus;
	z = z * rmodulus;

   We do this for loop invariant divisors, and with this pass whenever
   we notice that a division has the same divisor multiple times.

   Of course, like in PRE, we don't insert a division if a dominator
   already has one.  However, this cannot be done as an extension of
   PRE for several reasons.

   First of all, experiments showed that the transformation is not
   always useful if there are only two divisions by the same divisor.
   This is probably because modern processors can pipeline the
   divisions; on older, in-order processors it should still be
   effective to optimize two divisions by the same number.  We make
   this a param, and it shall be called N in the remainder of this
   comment.

   Second, if trapping math is active, we have less freedom on where
   to insert divisions: we can only do so in basic blocks that already
   contain one.  (If divisions don't trap, instead, we can insert
   divisions elsewhere, which will be in blocks that are common dominators
   of those that have the division).

   We really don't want to compute the reciprocal unless a division will
   be found.  To do this, we won't insert the division in a basic block
   that has less than N divisions *post-dominating* it.

   The algorithm constructs a subset of the dominator tree, holding the
   blocks containing the divisions and the common dominators to them,
   and walks it twice.  The first walk is in post-order, and it annotates
   each block with the number of divisions that post-dominate it: this
   gives information on where divisions can be inserted profitably.
   The second walk is in pre-order, and it inserts divisions as explained
   above, and replaces divisions by multiplications.

   In the best case, the cost of the pass is O(n_statements).  In the
   worst-case, the cost is due to creating the dominator tree subset,
   with a cost of O(n_basic_blocks ^ 2); however this can only happen
   for n_statements / n_basic_blocks statements.  So, the amortized cost
   of creating the dominator tree subset is O(n_basic_blocks) and the
   worst-case cost of the pass is O(n_statements * n_basic_blocks).

   More practically, the cost will be small because there are few
   divisions, and they tend to be in the same basic block, so insert_bb
   is called very few times.

   If we did this using domwalk.c, an efficient implementation would have
   to work on all the variables in a single pass, because we could not
   work on just a subset of the dominator tree, as we do now, and the
   cost would also be something like O(n_statements * n_basic_blocks).
   The data structures would be more complex in order to work on all the
   variables in a single pass.  */
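
/* As a concrete illustration of the threshold rule above (this example
   is not from the original sources): assume N = 2 and non-trapping
   math.  In

	a = x / d;
	b = y / d;

   both divisions live in one basic block, so that block is annotated
   with two post-dominating divisions, and the pass inserts t = 1.0 / d
   before the first division and rewrites both into multiplications.
   By contrast, in

	if (p)
	  a = x / d;
	else
	  b = y / d;

   neither arm post-dominates the condition block, so no block reaches
   the threshold and the code is left untouched.  */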
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "flags.h"
#include "tree.h"
#include "basic-block.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-fold.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimple-iterator.h"
#include "gimplify.h"
#include "gimplify-me.h"
#include "stor-layout.h"
#include "gimple-ssa.h"
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "expr.h"
#include "tree-dfa.h"
#include "tree-ssa.h"
#include "tree-pass.h"
#include "alloc-pool.h"
#include "target.h"
#include "gimple-pretty-print.h"
#include "builtins.h"

/* FIXME: RTL headers have to be included here for optabs.  */
#include "rtl.h"		/* Because optabs.h wants enum rtx_code.  */
#include "expr.h"		/* Because optabs.h wants sepops.  */
#include "optabs.h"
/* This structure represents one basic block that either computes a
   division, or is a common dominator for basic blocks that compute a
   division.  */
struct occurrence {
  /* The basic block represented by this structure.  */
  basic_block bb;

  /* If non-NULL, the SSA_NAME holding the definition for a reciprocal
     inserted in BB.  */
  tree recip_def;

  /* If non-NULL, the GIMPLE_ASSIGN for a reciprocal computation that
     was inserted in BB.  */
  gimple recip_def_stmt;

  /* Pointer to a list of "struct occurrence"s for blocks dominated
     by BB.  */
  struct occurrence *children;

  /* Pointer to the next "struct occurrence" in the list of blocks
     sharing a common dominator.  */
  struct occurrence *next;

  /* The number of divisions that are in BB before compute_merit.  The
     number of divisions that are in BB or post-dominate it after
     compute_merit.  */
  int num_divisions;

  /* True if the basic block has a division, false if it is a common
     dominator for basic blocks that do.  If it is false and trapping
     math is active, BB is not a candidate for inserting a reciprocal.  */
  bool bb_has_division;
};
static struct
{
  /* Number of 1.0/X ops inserted.  */
  int rdivs_inserted;

  /* Number of 1.0/FUNC ops inserted.  */
  int rfuncs_inserted;
} reciprocal_stats;

static struct
{
  /* Number of cexpi calls inserted.  */
  int inserted;
} sincos_stats;

static struct
{
  /* Number of hand-written 16-bit nop / bswaps found.  */
  int found_16bit;

  /* Number of hand-written 32-bit nop / bswaps found.  */
  int found_32bit;

  /* Number of hand-written 64-bit nop / bswaps found.  */
  int found_64bit;
} nop_stats, bswap_stats;

static struct
{
  /* Number of widening multiplication ops inserted.  */
  int widen_mults_inserted;

  /* Number of integer multiply-and-accumulate ops inserted.  */
  int maccs_inserted;

  /* Number of fp fused multiply-add ops inserted.  */
  int fmas_inserted;
} widen_mul_stats;

/* The instance of "struct occurrence" representing the highest
   interesting block in the dominator tree.  */
static struct occurrence *occ_head;

/* Allocation pool for getting instances of "struct occurrence".  */
static alloc_pool occ_pool;
/* Allocate and return a new struct occurrence for basic block BB, and
   whose children list is headed by CHILDREN.  */
static struct occurrence *
occ_new (basic_block bb, struct occurrence *children)
{
  struct occurrence *occ;

  bb->aux = occ = (struct occurrence *) pool_alloc (occ_pool);
  memset (occ, 0, sizeof (struct occurrence));

  occ->bb = bb;
  occ->children = children;
  return occ;
}

/* Insert NEW_OCC into our subset of the dominator tree.  P_HEAD points to a
   list of "struct occurrence"s, one per basic block, having IDOM as
   their common dominator.

   We try to insert NEW_OCC as deep as possible in the tree, and we also
   insert any other block that is a common dominator for BB and one
   block already in the tree.  */

static void
insert_bb (struct occurrence *new_occ, basic_block idom,
	   struct occurrence **p_head)
{
  struct occurrence *occ, **p_occ;

  for (p_occ = p_head; (occ = *p_occ) != NULL; )
    {
      basic_block bb = new_occ->bb, occ_bb = occ->bb;
      basic_block dom = nearest_common_dominator (CDI_DOMINATORS, occ_bb, bb);
      if (dom == bb)
	{
	  /* BB dominates OCC_BB.  OCC becomes NEW_OCC's child: remove OCC
	     from its list.  */
	  *p_occ = occ->next;
	  occ->next = new_occ->children;
	  new_occ->children = occ;

	  /* Try the next block (it may as well be dominated by BB).  */
	}

      else if (dom == occ_bb)
	{
	  /* OCC_BB dominates BB.  Tail recurse to look deeper.  */
	  insert_bb (new_occ, dom, &occ->children);
	  return;
	}

      else if (dom != idom)
	{
	  gcc_assert (!dom->aux);

	  /* There is a dominator between IDOM and BB, add it and make
	     two children out of NEW_OCC and OCC.  First, remove OCC from
	     its list.  */
	  *p_occ = occ->next;
	  new_occ->next = occ;
	  occ->next = NULL;

	  /* None of the previous blocks has DOM as a dominator: if we tail
	     recursed, we would reexamine them uselessly.  Just switch BB with
	     DOM, and go on looking for blocks dominated by DOM.  */
	  new_occ = occ_new (dom, new_occ);
	}

      else
	{
	  /* Nothing special, go on with the next element.  */
	  p_occ = &occ->next;
	}
    }

  /* No place was found as a child of IDOM.  Make BB a sibling of IDOM.  */
  new_occ->next = *p_head;
  *p_head = new_occ;
}

/* Register that we found a division in BB.  */

static inline void
register_division_in (basic_block bb)
{
  struct occurrence *occ;

  occ = (struct occurrence *) bb->aux;
  if (!occ)
    {
      occ = occ_new (bb, NULL);
      insert_bb (occ, ENTRY_BLOCK_PTR_FOR_FN (cfun), &occ_head);
    }

  occ->bb_has_division = true;
  occ->num_divisions++;
}

/* Compute the number of divisions that postdominate each block in OCC and
   its children.  */

static void
compute_merit (struct occurrence *occ)
{
  struct occurrence *occ_child;
  basic_block dom = occ->bb;

  for (occ_child = occ->children; occ_child; occ_child = occ_child->next)
    {
      basic_block bb;
      if (occ_child->children)
	compute_merit (occ_child);

      if (flag_exceptions)
	bb = single_noncomplex_succ (dom);
      else
	bb = dom;

      if (dominated_by_p (CDI_POST_DOMINATORS, bb, occ_child->bb))
	occ->num_divisions += occ_child->num_divisions;
    }
}

/* Return whether USE_STMT is a floating-point division by DEF.  */
static inline bool
is_division_by (gimple use_stmt, tree def)
{
  return is_gimple_assign (use_stmt)
	 && gimple_assign_rhs_code (use_stmt) == RDIV_EXPR
	 && gimple_assign_rhs2 (use_stmt) == def
	 /* Do not recognize x / x as valid division, as we are getting
	    confused later by replacing all immediate uses x in such
	    a stmt.  */
	 && gimple_assign_rhs1 (use_stmt) != def;
}
/* Walk the subset of the dominator tree rooted at OCC, setting the
   RECIP_DEF field to a definition of 1.0 / DEF that can be used in
   the given basic block.  The field may be left NULL, of course,
   if it is not possible or profitable to do the optimization.

   DEF_GSI is an iterator pointing at the statement defining DEF.
   If RECIP_DEF is set, a dominator already has a computation that can
   be used.  */

static void
insert_reciprocals (gimple_stmt_iterator *def_gsi, struct occurrence *occ,
		    tree def, tree recip_def, int threshold)
{
  tree type;
  gimple new_stmt;
  gimple_stmt_iterator gsi;
  struct occurrence *occ_child;

  if (!recip_def
      && (occ->bb_has_division || !flag_trapping_math)
      && occ->num_divisions >= threshold)
    {
      /* Make a variable with the replacement and substitute it.  */
      type = TREE_TYPE (def);
      recip_def = create_tmp_reg (type, "reciptmp");
      new_stmt = gimple_build_assign_with_ops (RDIV_EXPR, recip_def,
					       build_one_cst (type), def);

      if (occ->bb_has_division)
	{
	  /* Case 1: insert before an existing division.  */
	  gsi = gsi_after_labels (occ->bb);
	  while (!gsi_end_p (gsi) && !is_division_by (gsi_stmt (gsi), def))
	    gsi_next (&gsi);

	  gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
	}
      else if (def_gsi && occ->bb == def_gsi->bb)
	{
	  /* Case 2: insert right after the definition.  Note that this will
	     never happen if the definition statement can throw, because in
	     that case the sole successor of the statement's basic block will
	     dominate all the uses as well.  */
	  gsi_insert_after (def_gsi, new_stmt, GSI_NEW_STMT);
	}
      else
	{
	  /* Case 3: insert in a basic block not containing defs/uses.  */
	  gsi = gsi_after_labels (occ->bb);
	  gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
	}

      reciprocal_stats.rdivs_inserted++;

      occ->recip_def_stmt = new_stmt;
    }

  occ->recip_def = recip_def;
  for (occ_child = occ->children; occ_child; occ_child = occ_child->next)
    insert_reciprocals (def_gsi, occ_child, def, recip_def, threshold);
}
/* Replace the division at USE_P with a multiplication by the reciprocal, if
   possible.  */

static inline void
replace_reciprocal (use_operand_p use_p)
{
  gimple use_stmt = USE_STMT (use_p);
  basic_block bb = gimple_bb (use_stmt);
  struct occurrence *occ = (struct occurrence *) bb->aux;

  if (optimize_bb_for_speed_p (bb)
      && occ->recip_def && use_stmt != occ->recip_def_stmt)
    {
      gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
      gimple_assign_set_rhs_code (use_stmt, MULT_EXPR);
      SET_USE (use_p, occ->recip_def);
      fold_stmt_inplace (&gsi);
      update_stmt (use_stmt);
    }
}

/* Free OCC and return one more "struct occurrence" to be freed.  */

static struct occurrence *
free_bb (struct occurrence *occ)
{
  struct occurrence *child, *next;

  /* First get the two pointers hanging off OCC.  */
  next = occ->next;
  child = occ->children;
  occ->bb->aux = NULL;
  pool_free (occ_pool, occ);

  /* Now ensure that we don't recurse unless it is necessary.  */
  if (!child)
    return next;
  else
    {
      while (next)
	next = free_bb (next);

      return child;
    }
}

/* Look for floating-point divisions among DEF's uses, and try to
   replace them by multiplications with the reciprocal.  Add
   as many statements computing the reciprocal as needed.

   DEF must be a GIMPLE register of a floating-point type.  */

static void
execute_cse_reciprocals_1 (gimple_stmt_iterator *def_gsi, tree def)
{
  use_operand_p use_p;
  imm_use_iterator use_iter;
  struct occurrence *occ;
  int count = 0, threshold;

  gcc_assert (FLOAT_TYPE_P (TREE_TYPE (def)) && is_gimple_reg (def));

  FOR_EACH_IMM_USE_FAST (use_p, use_iter, def)
    {
      gimple use_stmt = USE_STMT (use_p);
      if (is_division_by (use_stmt, def))
	{
	  register_division_in (gimple_bb (use_stmt));
	  count++;
	}
    }

  /* Do the expensive part only if we can hope to optimize something.  */
  threshold = targetm.min_divisions_for_recip_mul (TYPE_MODE (TREE_TYPE (def)));
  if (count >= threshold)
    {
      gimple use_stmt;
      for (occ = occ_head; occ; occ = occ->next)
	{
	  compute_merit (occ);
	  insert_reciprocals (def_gsi, occ, def, NULL, threshold);
	}

      FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, def)
	{
	  if (is_division_by (use_stmt, def))
	    {
	      FOR_EACH_IMM_USE_ON_STMT (use_p, use_iter)
		replace_reciprocal (use_p);
	    }
	}
    }

  for (occ = occ_head; occ; )
    occ = free_bb (occ);

  occ_head = NULL;
}
/* Go through all the floating-point SSA_NAMEs, and call
   execute_cse_reciprocals_1 on each of them.  */
namespace {

const pass_data pass_data_cse_reciprocals =
{
  GIMPLE_PASS, /* type */
  "recip", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};

class pass_cse_reciprocals : public gimple_opt_pass
{
public:
  pass_cse_reciprocals (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_cse_reciprocals, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return optimize && flag_reciprocal_math; }
  virtual unsigned int execute (function *);

}; // class pass_cse_reciprocals
unsigned int
pass_cse_reciprocals::execute (function *fun)
{
  basic_block bb;
  tree arg;

  occ_pool = create_alloc_pool ("dominators for recip",
				sizeof (struct occurrence),
				n_basic_blocks_for_fn (fun) / 3 + 1);

  memset (&reciprocal_stats, 0, sizeof (reciprocal_stats));
  calculate_dominance_info (CDI_DOMINATORS);
  calculate_dominance_info (CDI_POST_DOMINATORS);

#ifdef ENABLE_CHECKING
  FOR_EACH_BB_FN (bb, fun)
    gcc_assert (!bb->aux);
#endif

  for (arg = DECL_ARGUMENTS (fun->decl); arg; arg = DECL_CHAIN (arg))
    if (FLOAT_TYPE_P (TREE_TYPE (arg))
	&& is_gimple_reg (arg))
      {
	tree name = ssa_default_def (fun, arg);
	if (name)
	  execute_cse_reciprocals_1 (NULL, name);
      }

  FOR_EACH_BB_FN (bb, fun)
    {
      gimple_stmt_iterator gsi;
      gimple phi;
      tree def;

      for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  phi = gsi_stmt (gsi);
	  def = PHI_RESULT (phi);
	  if (! virtual_operand_p (def)
	      && FLOAT_TYPE_P (TREE_TYPE (def)))
	    execute_cse_reciprocals_1 (NULL, def);
	}

      for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple stmt = gsi_stmt (gsi);

	  if (gimple_has_lhs (stmt)
	      && (def = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_DEF)) != NULL
	      && FLOAT_TYPE_P (TREE_TYPE (def))
	      && TREE_CODE (def) == SSA_NAME)
	    execute_cse_reciprocals_1 (&gsi, def);
	}

      if (optimize_bb_for_size_p (bb))
	continue;

      /* Scan for a/func(b) and convert it to reciprocal a*rfunc(b).  */
      for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple stmt = gsi_stmt (gsi);
	  tree fndecl;

	  if (is_gimple_assign (stmt)
	      && gimple_assign_rhs_code (stmt) == RDIV_EXPR)
	    {
	      tree arg1 = gimple_assign_rhs2 (stmt);
	      gimple stmt1;

	      if (TREE_CODE (arg1) != SSA_NAME)
		continue;

	      stmt1 = SSA_NAME_DEF_STMT (arg1);

	      if (is_gimple_call (stmt1)
		  && gimple_call_lhs (stmt1)
		  && (fndecl = gimple_call_fndecl (stmt1))
		  && (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
		      || DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD))
		{
		  enum built_in_function code;
		  bool md_code, fail;
		  imm_use_iterator ui;
		  use_operand_p use_p;

		  code = DECL_FUNCTION_CODE (fndecl);
		  md_code = DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD;

		  fndecl = targetm.builtin_reciprocal (code, md_code, false);
		  if (!fndecl)
		    continue;

		  /* Check that all uses of the SSA name are divisions,
		     otherwise replacing the defining statement will do
		     the wrong thing.  */
		  fail = false;
		  FOR_EACH_IMM_USE_FAST (use_p, ui, arg1)
		    {
		      gimple stmt2 = USE_STMT (use_p);
		      if (is_gimple_debug (stmt2))
			continue;
		      if (!is_gimple_assign (stmt2)
			  || gimple_assign_rhs_code (stmt2) != RDIV_EXPR
			  || gimple_assign_rhs1 (stmt2) == arg1
			  || gimple_assign_rhs2 (stmt2) != arg1)
			{
			  fail = true;
			  break;
			}
		    }
		  if (fail)
		    continue;

		  gimple_replace_ssa_lhs (stmt1, arg1);
		  gimple_call_set_fndecl (stmt1, fndecl);
		  update_stmt (stmt1);
		  reciprocal_stats.rfuncs_inserted++;

		  FOR_EACH_IMM_USE_STMT (stmt, ui, arg1)
		    {
		      gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
		      gimple_assign_set_rhs_code (stmt, MULT_EXPR);
		      fold_stmt_inplace (&gsi);
		      update_stmt (stmt);
		    }
		}
	    }
	}
    }

  statistics_counter_event (fun, "reciprocal divs inserted",
			    reciprocal_stats.rdivs_inserted);
  statistics_counter_event (fun, "reciprocal functions inserted",
			    reciprocal_stats.rfuncs_inserted);

  free_dominance_info (CDI_DOMINATORS);
  free_dominance_info (CDI_POST_DOMINATORS);
  free_alloc_pool (occ_pool);
  return 0;
}
} // anon namespace

gimple_opt_pass *
make_pass_cse_reciprocals (gcc::context *ctxt)
{
  return new pass_cse_reciprocals (ctxt);
}
/* Records an occurrence at statement USE_STMT in the vector STMTS if
   it is dominated by *TOP_BB or dominates it or this basic block is
   not yet initialized.  Returns true if the occurrence was pushed on
   the vector.  Adjusts *TOP_BB to be the basic block dominating all
   statements in the vector.  */

static bool
maybe_record_sincos (vec<gimple> *stmts,
		     basic_block *top_bb, gimple use_stmt)
{
  basic_block use_bb = gimple_bb (use_stmt);
  if (*top_bb
      && (*top_bb == use_bb
	  || dominated_by_p (CDI_DOMINATORS, use_bb, *top_bb)))
    stmts->safe_push (use_stmt);
  else if (!*top_bb
	   || dominated_by_p (CDI_DOMINATORS, *top_bb, use_bb))
    {
      stmts->safe_push (use_stmt);
      *top_bb = use_bb;
    }
  else
    return false;

  return true;
}
/* Look for sin, cos and cexpi calls with the same argument NAME and
   create a single call to cexpi CSEing the result in this case.
   We first walk over all immediate uses of the argument collecting
   statements that we can CSE in a vector and in a second pass replace
   the statement rhs with a REALPART or IMAGPART expression on the
   result of the cexpi call we insert before the use statement that
   dominates all other candidates.  */

static bool
execute_cse_sincos_1 (tree name)
{
  gimple_stmt_iterator gsi;
  imm_use_iterator use_iter;
  tree fndecl, res, type;
  gimple def_stmt, use_stmt, stmt;
  int seen_cos = 0, seen_sin = 0, seen_cexpi = 0;
  vec<gimple> stmts = vNULL;
  basic_block top_bb = NULL;
  int i;
  bool cfg_changed = false;

  type = TREE_TYPE (name);
  FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, name)
    {
      if (gimple_code (use_stmt) != GIMPLE_CALL
	  || !gimple_call_lhs (use_stmt)
	  || !(fndecl = gimple_call_fndecl (use_stmt))
	  || DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_NORMAL)
	continue;

      switch (DECL_FUNCTION_CODE (fndecl))
	{
	CASE_FLT_FN (BUILT_IN_COS):
	  seen_cos |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
	  break;

	CASE_FLT_FN (BUILT_IN_SIN):
	  seen_sin |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
	  break;

	CASE_FLT_FN (BUILT_IN_CEXPI):
	  seen_cexpi |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
	  break;

	default:;
	}
    }

  if (seen_cos + seen_sin + seen_cexpi <= 1)
    {
      stmts.release ();
      return false;
    }

  /* Simply insert cexpi at the beginning of top_bb but not earlier than
     the name def statement.  */
  fndecl = mathfn_built_in (type, BUILT_IN_CEXPI);
  if (!fndecl)
    return false;
  stmt = gimple_build_call (fndecl, 1, name);
  res = make_temp_ssa_name (TREE_TYPE (TREE_TYPE (fndecl)), stmt, "sincostmp");
  gimple_call_set_lhs (stmt, res);

  def_stmt = SSA_NAME_DEF_STMT (name);
  if (!SSA_NAME_IS_DEFAULT_DEF (name)
      && gimple_code (def_stmt) != GIMPLE_PHI
      && gimple_bb (def_stmt) == top_bb)
    {
      gsi = gsi_for_stmt (def_stmt);
      gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
    }
  else
    {
      gsi = gsi_after_labels (top_bb);
      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
    }
  sincos_stats.inserted++;

  /* And adjust the recorded old call sites.  */
  for (i = 0; stmts.iterate (i, &use_stmt); ++i)
    {
      tree rhs = NULL;
      fndecl = gimple_call_fndecl (use_stmt);

      switch (DECL_FUNCTION_CODE (fndecl))
	{
	CASE_FLT_FN (BUILT_IN_COS):
	  rhs = fold_build1 (REALPART_EXPR, type, res);
	  break;

	CASE_FLT_FN (BUILT_IN_SIN):
	  rhs = fold_build1 (IMAGPART_EXPR, type, res);
	  break;

	CASE_FLT_FN (BUILT_IN_CEXPI):
	  rhs = res;
	  break;

	default:;
	  gcc_unreachable ();
	}

      /* Replace call with a copy.  */
      stmt = gimple_build_assign (gimple_call_lhs (use_stmt), rhs);

      gsi = gsi_for_stmt (use_stmt);
      gsi_replace (&gsi, stmt, true);
      if (gimple_purge_dead_eh_edges (gimple_bb (stmt)))
	cfg_changed = true;
    }

  stmts.release ();

  return cfg_changed;
}
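
/* For illustration (not part of the original sources), given

     s = sinf (x);
     c = cosf (x);

   execute_cse_sincos_1 rewrites both calls in terms of one cexpi call:

     sincostmp = __builtin_cexpif (x);
     s = IMAGPART_EXPR <sincostmp>;
     c = REALPART_EXPR <sincostmp>;

   which the expander can then turn into a single sincos or cexp
   library call.  */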
/* To evaluate powi(x,n), the floating point value x raised to the
   constant integer exponent n, we use a hybrid algorithm that
   combines the "window method" with look-up tables.  For an
   introduction to exponentiation algorithms and "addition chains",
   see section 4.6.3, "Evaluation of Powers" of Donald E. Knuth,
   "Seminumerical Algorithms", Vol. 2, "The Art of Computer Programming",
   3rd Edition, 1998, and Daniel M. Gordon, "A Survey of Fast Exponentiation
   Methods", Journal of Algorithms, Vol. 27, pp. 129-146, 1998.  */

/* Provide a default value for POWI_MAX_MULTS, the maximum number of
   multiplications to inline before calling the system library's pow
   function.  powi(x,n) requires at worst 2*bits(n)-2 multiplications,
   so this default never requires calling pow, powf or powl.  */

#ifndef POWI_MAX_MULTS
#define POWI_MAX_MULTS  (2*HOST_BITS_PER_WIDE_INT-2)
#endif

/* The size of the "optimal power tree" lookup table.  All
   exponents less than this value are simply looked up in the
   powi_table below.  This threshold is also used to size the
   cache of pseudo registers that hold intermediate results.  */
#define POWI_TABLE_SIZE 256

/* The size, in bits of the window, used in the "window method"
   exponentiation algorithm.  This is equivalent to a radix of
   (1<<POWI_WINDOW_SIZE) in the corresponding "m-ary method".  */
#define POWI_WINDOW_SIZE 3

/* The following table is an efficient representation of an
   "optimal power tree".  For each value, i, the corresponding
   value, j, in the table states that an optimal evaluation
   sequence for calculating pow(x,i) can be found by evaluating
   pow(x,j)*pow(x,i-j).  An optimal power tree for the first
   100 integers is given in Knuth's "Seminumerical algorithms".  */

static const unsigned char powi_table[POWI_TABLE_SIZE] =
  {
      0,   1,   1,   2,   2,   3,   3,   4,  /*   0 -   7 */
      4,   6,   5,   6,   6,  10,   7,   9,  /*   8 -  15 */
      8,  16,   9,  16,  10,  12,  11,  13,  /*  16 -  23 */
     12,  17,  13,  18,  14,  24,  15,  26,  /*  24 -  31 */
     16,  17,  17,  19,  18,  33,  19,  26,  /*  32 -  39 */
     20,  25,  21,  40,  22,  27,  23,  44,  /*  40 -  47 */
     24,  32,  25,  34,  26,  29,  27,  44,  /*  48 -  55 */
     28,  31,  29,  34,  30,  60,  31,  36,  /*  56 -  63 */
     32,  64,  33,  34,  34,  46,  35,  37,  /*  64 -  71 */
     36,  65,  37,  50,  38,  48,  39,  69,  /*  72 -  79 */
     40,  49,  41,  43,  42,  51,  43,  58,  /*  80 -  87 */
     44,  64,  45,  47,  46,  59,  47,  76,  /*  88 -  95 */
     48,  65,  49,  66,  50,  67,  51,  66,  /*  96 - 103 */
     52,  70,  53,  74,  54, 104,  55,  74,  /* 104 - 111 */
     56,  64,  57,  69,  58,  78,  59,  68,  /* 112 - 119 */
     60,  61,  61,  80,  62,  75,  63,  68,  /* 120 - 127 */
     64,  65,  65, 128,  66, 129,  67,  90,  /* 128 - 135 */
     68,  73,  69, 131,  70,  94,  71,  88,  /* 136 - 143 */
     72, 128,  73,  98,  74, 132,  75, 121,  /* 144 - 151 */
     76, 102,  77, 124,  78, 132,  79, 106,  /* 152 - 159 */
     80,  97,  81, 160,  82,  99,  83, 134,  /* 160 - 167 */
     84,  86,  85,  95,  86, 160,  87, 100,  /* 168 - 175 */
     88, 113,  89,  98,  90, 107,  91, 122,  /* 176 - 183 */
     92, 111,  93, 102,  94, 126,  95, 150,  /* 184 - 191 */
     96, 128,  97, 130,  98, 133,  99, 195,  /* 192 - 199 */
    100, 128, 101, 123, 102, 164, 103, 138,  /* 200 - 207 */
    104, 145, 105, 146, 106, 109, 107, 149,  /* 208 - 215 */
    108, 200, 109, 146, 110, 170, 111, 157,  /* 216 - 223 */
    112, 128, 113, 130, 114, 182, 115, 132,  /* 224 - 231 */
    116, 200, 117, 132, 118, 158, 119, 206,  /* 232 - 239 */
    120, 240, 121, 162, 122, 147, 123, 152,  /* 240 - 247 */
    124, 166, 125, 214, 126, 138, 127, 153,  /* 248 - 255 */
  };
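
/* Worked example (not from the original sources): powi_table[5] = 3,
   so x**5 is evaluated as x**3 * x**2; recursively, powi_table[3] = 2
   and powi_table[2] = 1, giving a three-multiply chain such as

     powmult_1 = x * x;			   x**2
     powmult_2 = powmult_1 * x;		   x**3
     powmult_3 = powmult_2 * powmult_1;	   x**5

   which matches powi_cost (5) == 3 below.  */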
/* Return the number of multiplications required to calculate
   powi(x,n) where n is less than POWI_TABLE_SIZE.  This is a
   subroutine of powi_cost.  CACHE is an array indicating
   which exponents have already been calculated.  */

static int
powi_lookup_cost (unsigned HOST_WIDE_INT n, bool *cache)
{
  /* If we've already calculated this exponent, then this evaluation
     doesn't require any additional multiplications.  */
  if (cache[n])
    return 0;

  cache[n] = true;
  return powi_lookup_cost (n - powi_table[n], cache)
	 + powi_lookup_cost (powi_table[n], cache) + 1;
}

/* Return the number of multiplications required to calculate
   powi(x,n) for an arbitrary x, given the exponent N.  This
   function needs to be kept in sync with powi_as_mults below.  */

static int
powi_cost (HOST_WIDE_INT n)
{
  bool cache[POWI_TABLE_SIZE];
  unsigned HOST_WIDE_INT digit;
  unsigned HOST_WIDE_INT val;
  int result;

  if (n == 0)
    return 0;

  /* Ignore the reciprocal when calculating the cost.  */
  val = (n < 0) ? -n : n;

  /* Initialize the exponent cache.  */
  memset (cache, 0, POWI_TABLE_SIZE * sizeof (bool));
  cache[1] = true;

  result = 0;

  while (val >= POWI_TABLE_SIZE)
    {
      if (val & 1)
	{
	  digit = val & ((1 << POWI_WINDOW_SIZE) - 1);
	  result += powi_lookup_cost (digit, cache)
		    + POWI_WINDOW_SIZE + 1;
	  val >>= POWI_WINDOW_SIZE;
	}
      else
	{
	  val >>= 1;
	  result++;
	}
    }

  return result + powi_lookup_cost (val, cache);
}
/* Recursive subroutine of powi_as_mults.  This function takes the
   array, CACHE, of already calculated exponents and an exponent N and
   returns a tree that corresponds to CACHE[1]**N, with type TYPE.  */

static tree
powi_as_mults_1 (gimple_stmt_iterator *gsi, location_t loc, tree type,
		 HOST_WIDE_INT n, tree *cache)
{
  tree op0, op1, ssa_target;
  unsigned HOST_WIDE_INT digit;
  gimple mult_stmt;

  if (n < POWI_TABLE_SIZE && cache[n])
    return cache[n];

  ssa_target = make_temp_ssa_name (type, NULL, "powmult");

  if (n < POWI_TABLE_SIZE)
    {
      cache[n] = ssa_target;
      op0 = powi_as_mults_1 (gsi, loc, type, n - powi_table[n], cache);
      op1 = powi_as_mults_1 (gsi, loc, type, powi_table[n], cache);
    }
  else if (n & 1)
    {
      digit = n & ((1 << POWI_WINDOW_SIZE) - 1);
      op0 = powi_as_mults_1 (gsi, loc, type, n - digit, cache);
      op1 = powi_as_mults_1 (gsi, loc, type, digit, cache);
    }
  else
    {
      op0 = powi_as_mults_1 (gsi, loc, type, n >> 1, cache);
      op1 = op0;
    }

  mult_stmt = gimple_build_assign_with_ops (MULT_EXPR, ssa_target, op0, op1);
  gimple_set_location (mult_stmt, loc);
  gsi_insert_before (gsi, mult_stmt, GSI_SAME_STMT);

  return ssa_target;
}

/* Convert ARG0**N to a tree of multiplications of ARG0 with itself.
   This function needs to be kept in sync with powi_cost above.  */

static tree
powi_as_mults (gimple_stmt_iterator *gsi, location_t loc,
	       tree arg0, HOST_WIDE_INT n)
{
  tree cache[POWI_TABLE_SIZE], result, type = TREE_TYPE (arg0);
  gimple div_stmt;
  tree target;

  if (n == 0)
    return build_real (type, dconst1);

  memset (cache, 0, sizeof (cache));
  cache[1] = arg0;

  result = powi_as_mults_1 (gsi, loc, type, (n < 0) ? -n : n, cache);
  if (n >= 0)
    return result;

  /* If the original exponent was negative, reciprocate the result.  */
  target = make_temp_ssa_name (type, NULL, "powmult");
  div_stmt = gimple_build_assign_with_ops (RDIV_EXPR, target,
					   build_real (type, dconst1),
					   result);
  gimple_set_location (div_stmt, loc);
  gsi_insert_before (gsi, div_stmt, GSI_SAME_STMT);

  return target;
}
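
/* Continuing the example above (not from the original sources), a
   negative exponent such as powi (x, -5) first builds the x**5
   multiply chain and then appends a single reciprocal at the end:

     powmult_4 = 1.0 / powmult_3;	   x**-5  */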
/* ARG0 and N are the two arguments to a powi builtin in GSI with
   location info LOC.  If the arguments are appropriate, create an
   equivalent sequence of statements prior to GSI using an optimal
   number of multiplications, and return an expression holding the
   result.  */

static tree
gimple_expand_builtin_powi (gimple_stmt_iterator *gsi, location_t loc,
			    tree arg0, HOST_WIDE_INT n)
{
  /* Avoid largest negative number.  */
  if (n != -n
      && ((n >= -1 && n <= 2)
	  || (optimize_function_for_speed_p (cfun)
	      && powi_cost (n) <= POWI_MAX_MULTS)))
    return powi_as_mults (gsi, loc, arg0, n);

  return NULL_TREE;
}
/* Build a gimple call statement that calls FN with argument ARG.
   Set the lhs of the call statement to a fresh SSA name.  Insert the
   statement prior to GSI's current position, and return the fresh
   SSA name.  */

static tree
build_and_insert_call (gimple_stmt_iterator *gsi, location_t loc,
		       tree fn, tree arg)
{
  gimple call_stmt;
  tree ssa_target;

  call_stmt = gimple_build_call (fn, 1, arg);
  ssa_target = make_temp_ssa_name (TREE_TYPE (arg), NULL, "powroot");
  gimple_set_lhs (call_stmt, ssa_target);
  gimple_set_location (call_stmt, loc);
  gsi_insert_before (gsi, call_stmt, GSI_SAME_STMT);

  return ssa_target;
}

/* Build a gimple binary operation with the given CODE and arguments
   ARG0, ARG1, assigning the result to a new SSA name built from NAME.
   Insert the statement prior to GSI's current position, and return
   the fresh SSA name.  */

static tree
build_and_insert_binop (gimple_stmt_iterator *gsi, location_t loc,
			const char *name, enum tree_code code,
			tree arg0, tree arg1)
{
  tree result = make_temp_ssa_name (TREE_TYPE (arg0), NULL, name);
  gimple stmt = gimple_build_assign_with_ops (code, result, arg0, arg1);
  gimple_set_location (stmt, loc);
  gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
  return result;
}

/* Build a gimple reference operation with the given CODE and argument
   ARG, assigning the result to a new SSA name of TYPE with NAME.
   Insert the statement prior to GSI's current position, and return
   the fresh SSA name.  */

static inline tree
build_and_insert_ref (gimple_stmt_iterator *gsi, location_t loc, tree type,
		      const char *name, enum tree_code code, tree arg0)
{
  tree result = make_temp_ssa_name (type, NULL, name);
  gimple stmt = gimple_build_assign (result, build1 (code, type, arg0));
  gimple_set_location (stmt, loc);
  gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
  return result;
}

/* Build a gimple assignment to cast VAL to TYPE.  Insert the statement
   prior to GSI's current position, and return the fresh SSA name.  */

static tree
build_and_insert_cast (gimple_stmt_iterator *gsi, location_t loc,
		       tree type, tree val)
{
  tree result = make_ssa_name (type, NULL);
  gimple stmt = gimple_build_assign_with_ops (NOP_EXPR, result, val, NULL_TREE);
  gimple_set_location (stmt, loc);
  gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
  return result;
}
/* ARG0 and ARG1 are the two arguments to a pow builtin call in GSI
   with location info LOC.  If possible, create an equivalent and
   less expensive sequence of statements prior to GSI, and return an
   expression holding the result.  */

static tree
gimple_expand_builtin_pow (gimple_stmt_iterator *gsi, location_t loc,
			   tree arg0, tree arg1)
{
  REAL_VALUE_TYPE c, cint, dconst1_4, dconst3_4, dconst1_3, dconst1_6;
  REAL_VALUE_TYPE c2, dconst3;
  HOST_WIDE_INT n;
  tree type, sqrtfn, cbrtfn, sqrt_arg0, sqrt_sqrt, result, cbrt_x, powi_cbrt_x;
  enum machine_mode mode;
  bool hw_sqrt_exists, c_is_int, c2_is_int;

  /* If the exponent isn't a constant, there's nothing of interest
     to be done.  */
  if (TREE_CODE (arg1) != REAL_CST)
    return NULL_TREE;

  /* If the exponent is equivalent to an integer, expand to an optimal
     multiplication sequence when profitable.  */
  c = TREE_REAL_CST (arg1);
  n = real_to_integer (&c);
  real_from_integer (&cint, VOIDmode, n, SIGNED);
  c_is_int = real_identical (&c, &cint);

  if (c_is_int
      && ((n >= -1 && n <= 2)
	  || (flag_unsafe_math_optimizations
	      && optimize_bb_for_speed_p (gsi_bb (*gsi))
	      && powi_cost (n) <= POWI_MAX_MULTS)))
    return gimple_expand_builtin_powi (gsi, loc, arg0, n);

  /* Attempt various optimizations using sqrt and cbrt.  */
  type = TREE_TYPE (arg0);
  mode = TYPE_MODE (type);
  sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT);

  /* Optimize pow(x,0.5) = sqrt(x).  This replacement is always safe
     unless signed zeros must be maintained.  pow(-0,0.5) = +0, while
     sqrt(-0) = -0.  */
  if (sqrtfn
      && REAL_VALUES_EQUAL (c, dconsthalf)
      && !HONOR_SIGNED_ZEROS (mode))
    return build_and_insert_call (gsi, loc, sqrtfn, arg0);

  /* Optimize pow(x,0.25) = sqrt(sqrt(x)).  Assume on most machines that
     a builtin sqrt instruction is smaller than a call to pow with 0.25,
     so do this optimization even if -Os.  Don't do this optimization
     if we don't have a hardware sqrt insn.  */
  dconst1_4 = dconst1;
  SET_REAL_EXP (&dconst1_4, REAL_EXP (&dconst1_4) - 2);
  hw_sqrt_exists = optab_handler (sqrt_optab, mode) != CODE_FOR_nothing;

  if (flag_unsafe_math_optimizations
      && sqrtfn
      && REAL_VALUES_EQUAL (c, dconst1_4)
      && hw_sqrt_exists)
    {
      /* sqrt(x)  */
      sqrt_arg0 = build_and_insert_call (gsi, loc, sqrtfn, arg0);

      /* sqrt(sqrt(x))  */
      return build_and_insert_call (gsi, loc, sqrtfn, sqrt_arg0);
    }

  /* Optimize pow(x,0.75) = sqrt(x) * sqrt(sqrt(x)) unless we are
     optimizing for space.  Don't do this optimization if we don't have
     a hardware sqrt insn.  */
  real_from_integer (&dconst3_4, VOIDmode, 3, SIGNED);
  SET_REAL_EXP (&dconst3_4, REAL_EXP (&dconst3_4) - 2);

  if (flag_unsafe_math_optimizations
      && sqrtfn
      && optimize_function_for_speed_p (cfun)
      && REAL_VALUES_EQUAL (c, dconst3_4)
      && hw_sqrt_exists)
    {
      /* sqrt(x)  */
      sqrt_arg0 = build_and_insert_call (gsi, loc, sqrtfn, arg0);

      /* sqrt(sqrt(x))  */
      sqrt_sqrt = build_and_insert_call (gsi, loc, sqrtfn, sqrt_arg0);

      /* sqrt(x) * sqrt(sqrt(x))  */
      return build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
				     sqrt_arg0, sqrt_sqrt);
    }

  /* Optimize pow(x,1./3.) = cbrt(x).  This requires unsafe math
     optimizations since 1./3. is not exactly representable.  If x
     is negative and finite, the correct value of pow(x,1./3.) is
     a NaN with the "invalid" exception raised, because the value
     of 1./3. actually has an even denominator.  The correct value
     of cbrt(x) is a negative real value.  */
  cbrtfn = mathfn_built_in (type, BUILT_IN_CBRT);
  dconst1_3 = real_value_truncate (mode, dconst_third ());

  if (flag_unsafe_math_optimizations
      && cbrtfn
      && (gimple_val_nonnegative_real_p (arg0) || !HONOR_NANS (mode))
      && REAL_VALUES_EQUAL (c, dconst1_3))
    return build_and_insert_call (gsi, loc, cbrtfn, arg0);

  /* Optimize pow(x,1./6.) = cbrt(sqrt(x)).  Don't do this optimization
     if we don't have a hardware sqrt insn.  */
  dconst1_6 = dconst1_3;
  SET_REAL_EXP (&dconst1_6, REAL_EXP (&dconst1_6) - 1);

  if (flag_unsafe_math_optimizations
      && sqrtfn
      && cbrtfn
      && (gimple_val_nonnegative_real_p (arg0) || !HONOR_NANS (mode))
      && optimize_function_for_speed_p (cfun)
      && hw_sqrt_exists
      && REAL_VALUES_EQUAL (c, dconst1_6))
    {
      /* sqrt(x)  */
      sqrt_arg0 = build_and_insert_call (gsi, loc, sqrtfn, arg0);

      /* cbrt(sqrt(x))  */
      return build_and_insert_call (gsi, loc, cbrtfn, sqrt_arg0);
    }

  /* Optimize pow(x,c), where n = 2c for some nonzero integer n
     and c not an integer, into

       sqrt(x) * powi(x, n/2),                n > 0;
       1.0 / (sqrt(x) * powi(x, abs(n/2))),   n < 0.

     Do not calculate the powi factor when n/2 = 0.  */
  real_arithmetic (&c2, MULT_EXPR, &c, &dconst2);
  n = real_to_integer (&c2);
  real_from_integer (&cint, VOIDmode, n, SIGNED);
  c2_is_int = real_identical (&c2, &cint);

  if (flag_unsafe_math_optimizations
      && sqrtfn
      && c2_is_int
      && !c_is_int
      && optimize_function_for_speed_p (cfun))
    {
      tree powi_x_ndiv2 = NULL_TREE;

      /* Attempt to fold powi(arg0, abs(n/2)) into multiplies.  If not
	 possible or profitable, give up.  Skip the degenerate case when
	 n is 1 or -1, where the result is always 1.  */
      if (absu_hwi (n) != 1)
	{
	  powi_x_ndiv2 = gimple_expand_builtin_powi (gsi, loc, arg0,
						     abs_hwi (n / 2));
	  if (!powi_x_ndiv2)
	    return NULL_TREE;
	}

      /* Calculate sqrt(x).  When n is not 1 or -1, multiply it by the
	 result of the optimal multiply sequence just calculated.  */
      sqrt_arg0 = build_and_insert_call (gsi, loc, sqrtfn, arg0);

      if (absu_hwi (n) == 1)
	result = sqrt_arg0;
      else
	result = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
					 sqrt_arg0, powi_x_ndiv2);

      /* If n is negative, reciprocate the result.  */
      if (n < 0)
	result = build_and_insert_binop (gsi, loc, "powroot", RDIV_EXPR,
					 build_real (type, dconst1), result);
      return result;
    }
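
  /* Example of the preceding transformation (not from the original
     sources): for pow (x, 3.5) we get c = 3.5 and n = 2c = 7, so the
     result is assembled as powi (x, 3) * sqrt (x):

       powmult_1 = x * x;
       powmult_2 = powmult_1 * x;		   x**3
       powroot_1 = __builtin_sqrt (x);
       powroot_2 = powroot_1 * powmult_2;	   x**3.5  */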
  /* Optimize pow(x,c), where 3c = n for some nonzero integer n, into

       powi(x, n/3) * powi(cbrt(x), n%3),                    n > 0;
       1.0 / (powi(x, abs(n)/3) * powi(cbrt(x), abs(n)%3)),  n < 0.

     Do not calculate the first factor when n/3 = 0.  As cbrt(x) is
     different from pow(x, 1./3.) due to rounding and behavior with
     negative x, we need to constrain this transformation to unsafe
     math and positive x or finite math.  */
  real_from_integer (&dconst3, VOIDmode, 3, SIGNED);
  real_arithmetic (&c2, MULT_EXPR, &c, &dconst3);
  real_round (&c2, mode, &c2);
  n = real_to_integer (&c2);
  real_from_integer (&cint, VOIDmode, n, SIGNED);
  real_arithmetic (&c2, RDIV_EXPR, &cint, &dconst3);
  real_convert (&c2, mode, &c2);

  if (flag_unsafe_math_optimizations
      && cbrtfn
      && (gimple_val_nonnegative_real_p (arg0) || !HONOR_NANS (mode))
      && real_identical (&c2, &c)
      && !c2_is_int
      && optimize_function_for_speed_p (cfun)
      && powi_cost (n / 3) <= POWI_MAX_MULTS)
    {
      tree powi_x_ndiv3 = NULL_TREE;

      /* Attempt to fold powi(arg0, abs(n/3)) into multiplies.  If not
	 possible or profitable, give up.  Skip the degenerate case when
	 abs(n) < 3, where the result is always 1.  */
      if (absu_hwi (n) >= 3)
	{
	  powi_x_ndiv3 = gimple_expand_builtin_powi (gsi, loc, arg0,
						     abs_hwi (n / 3));
	  if (!powi_x_ndiv3)
	    return NULL_TREE;
	}

      /* Calculate powi(cbrt(x), n%3).  Don't use gimple_expand_builtin_powi
	 as that creates an unnecessary variable.  Instead, just produce
	 either cbrt(x) or cbrt(x) * cbrt(x).  */
      cbrt_x = build_and_insert_call (gsi, loc, cbrtfn, arg0);

      if (absu_hwi (n) % 3 == 1)
	powi_cbrt_x = cbrt_x;
      else
	powi_cbrt_x = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
					      cbrt_x, cbrt_x);

      /* Multiply the two subexpressions, unless powi(x,abs(n)/3) = 1.  */
      if (absu_hwi (n) < 3)
	result = powi_cbrt_x;
      else
	result = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
					 powi_x_ndiv3, powi_cbrt_x);

      /* If n is negative, reciprocate the result.  */
      if (n < 0)
	result = build_and_insert_binop (gsi, loc, "powroot", RDIV_EXPR,
					 build_real (type, dconst1), result);

      return result;
    }

  /* No optimizations succeeded.  */
  return NULL_TREE;
}
/* ARG is the argument to a cabs builtin call in GSI with location info
   LOC.  Create a sequence of statements prior to GSI that calculates
   sqrt(R*R + I*I), where R and I are the real and imaginary components
   of ARG, respectively.  Return an expression holding the result.  */

static tree
gimple_expand_builtin_cabs (gimple_stmt_iterator *gsi, location_t loc, tree arg)
{
  tree real_part, imag_part, addend1, addend2, sum, result;
  tree type = TREE_TYPE (TREE_TYPE (arg));
  tree sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT);
  enum machine_mode mode = TYPE_MODE (type);

  if (!flag_unsafe_math_optimizations
      || !optimize_bb_for_speed_p (gimple_bb (gsi_stmt (*gsi)))
      || !sqrtfn
      || optab_handler (sqrt_optab, mode) == CODE_FOR_nothing)
    return NULL_TREE;

  real_part = build_and_insert_ref (gsi, loc, type, "cabs",
				    REALPART_EXPR, arg);
  addend1 = build_and_insert_binop (gsi, loc, "cabs", MULT_EXPR,
				    real_part, real_part);
  imag_part = build_and_insert_ref (gsi, loc, type, "cabs",
				    IMAGPART_EXPR, arg);
  addend2 = build_and_insert_binop (gsi, loc, "cabs", MULT_EXPR,
				    imag_part, imag_part);
  sum = build_and_insert_binop (gsi, loc, "cabs", PLUS_EXPR, addend1, addend2);
  result = build_and_insert_call (gsi, loc, sqrtfn, sum);

  return result;
}
/* Go through all calls to sin, cos and cexpi and call execute_cse_sincos_1
   on the SSA_NAME argument of each of them.  Also expand powi(x,n) into
   an optimal number of multiplies, when n is a constant.  */

namespace {

const pass_data pass_data_cse_sincos =
{
  GIMPLE_PASS, /* type */
  "sincos", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};

class pass_cse_sincos : public gimple_opt_pass
{
public:
  pass_cse_sincos (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_cse_sincos, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *)
    {
      /* We no longer require either sincos or cexp, since powi expansion
	 piggybacks on this pass.  */
      return optimize;
    }

  virtual unsigned int execute (function *);

}; // class pass_cse_sincos
unsigned int
pass_cse_sincos::execute (function *fun)
{
  basic_block bb;
  bool cfg_changed = false;

  calculate_dominance_info (CDI_DOMINATORS);
  memset (&sincos_stats, 0, sizeof (sincos_stats));

  FOR_EACH_BB_FN (bb, fun)
    {
      gimple_stmt_iterator gsi;
      bool cleanup_eh = false;

      for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple stmt = gsi_stmt (gsi);
	  tree fndecl;

	  /* Only the last stmt in a bb could throw, no need to call
	     gimple_purge_dead_eh_edges if we change something in the middle
	     of a basic block.  */
	  cleanup_eh = false;

	  if (is_gimple_call (stmt)
	      && gimple_call_lhs (stmt)
	      && (fndecl = gimple_call_fndecl (stmt))
	      && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
	    {
	      tree arg, arg0, arg1, result;
	      HOST_WIDE_INT n;
	      location_t loc;

	      switch (DECL_FUNCTION_CODE (fndecl))
		{
		CASE_FLT_FN (BUILT_IN_COS):
		CASE_FLT_FN (BUILT_IN_SIN):
		CASE_FLT_FN (BUILT_IN_CEXPI):
		  /* Make sure we have either sincos or cexp.  */
		  if (!targetm.libc_has_function (function_c99_math_complex)
		      && !targetm.libc_has_function (function_sincos))
		    break;

		  arg = gimple_call_arg (stmt, 0);
		  if (TREE_CODE (arg) == SSA_NAME)
		    cfg_changed |= execute_cse_sincos_1 (arg);
		  break;

		CASE_FLT_FN (BUILT_IN_POW):
		  arg0 = gimple_call_arg (stmt, 0);
		  arg1 = gimple_call_arg (stmt, 1);

		  loc = gimple_location (stmt);
		  result = gimple_expand_builtin_pow (&gsi, loc, arg0, arg1);

		  if (result)
		    {
		      tree lhs = gimple_get_lhs (stmt);
		      gimple new_stmt = gimple_build_assign (lhs, result);
		      gimple_set_location (new_stmt, loc);
		      unlink_stmt_vdef (stmt);
		      gsi_replace (&gsi, new_stmt, true);
		      cleanup_eh = true;
		      if (gimple_vdef (stmt))
			release_ssa_name (gimple_vdef (stmt));
		    }
		  break;

		CASE_FLT_FN (BUILT_IN_POWI):
		  arg0 = gimple_call_arg (stmt, 0);
		  arg1 = gimple_call_arg (stmt, 1);
		  loc = gimple_location (stmt);

		  if (real_minus_onep (arg0))
		    {
		      tree t0, t1, cond, one, minus_one;
		      gimple stmt;

		      t0 = TREE_TYPE (arg0);
		      t1 = TREE_TYPE (arg1);
		      one = build_real (t0, dconst1);
		      minus_one = build_real (t0, dconstm1);

		      cond = make_temp_ssa_name (t1, NULL, "powi_cond");
		      stmt = gimple_build_assign_with_ops (BIT_AND_EXPR, cond,
							   arg1,
							   build_int_cst (t1,
									  1));
		      gimple_set_location (stmt, loc);
		      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);

		      result = make_temp_ssa_name (t0, NULL, "powi");
		      stmt = gimple_build_assign_with_ops (COND_EXPR, result,
							   cond,
							   minus_one, one);
		      gimple_set_location (stmt, loc);
		      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
		    }
		  else
		    {
		      if (!tree_fits_shwi_p (arg1))
			break;

		      n = tree_to_shwi (arg1);
		      result = gimple_expand_builtin_powi (&gsi, loc, arg0, n);
		    }

		  if (result)
		    {
		      tree lhs = gimple_get_lhs (stmt);
		      gimple new_stmt = gimple_build_assign (lhs, result);
		      gimple_set_location (new_stmt, loc);
		      unlink_stmt_vdef (stmt);
		      gsi_replace (&gsi, new_stmt, true);
		      cleanup_eh = true;
		      if (gimple_vdef (stmt))
			release_ssa_name (gimple_vdef (stmt));
		    }
		  break;

		CASE_FLT_FN (BUILT_IN_CABS):
		  arg0 = gimple_call_arg (stmt, 0);
		  loc = gimple_location (stmt);
		  result = gimple_expand_builtin_cabs (&gsi, loc, arg0);

		  if (result)
		    {
		      tree lhs = gimple_get_lhs (stmt);
		      gimple new_stmt = gimple_build_assign (lhs, result);
		      gimple_set_location (new_stmt, loc);
		      unlink_stmt_vdef (stmt);
		      gsi_replace (&gsi, new_stmt, true);
		      cleanup_eh = true;
		      if (gimple_vdef (stmt))
			release_ssa_name (gimple_vdef (stmt));
		    }
		  break;

		default:;
		}
	    }
	}
      if (cleanup_eh)
	cfg_changed |= gimple_purge_dead_eh_edges (bb);
    }

  statistics_counter_event (fun, "sincos statements inserted",
			    sincos_stats.inserted);

  free_dominance_info (CDI_DOMINATORS);
  return cfg_changed ? TODO_cleanup_cfg : 0;
}

} // anon namespace

gimple_opt_pass *
make_pass_cse_sincos (gcc::context *ctxt)
{
  return new pass_cse_sincos (ctxt);
}
/* A symbolic number is used to detect byte permutation and selection
   patterns.  Therefore the field N contains an artificial number
   consisting of byte size markers:

   0	   - byte has the value 0
   1..size - byte contains the content of the byte
	     number indexed with that value minus one.

   To detect permutations on memory sources (arrays and structures), a
   symbolic number is also associated with a base address (the array or
   structure the load is made from), an offset from the base address and a
   range which gives the difference between the highest and lowest accessed
   memory location to make such a symbolic number.  The range is thus
   different from size, which reflects the size of the type of the current
   expression.  Note that for non-memory sources, range holds the same
   value as size.

   For instance, for an array char a[], (short) a[0] | (short) a[3] would have
   a size of 2 but a range of 4 while (short) a[0] | ((short) a[0] << 1) would
   still have a size of 2 but this time a range of 1.  */

struct symbolic_number {
  uint64_t n;
  tree type;
  tree base_addr;
  tree offset;
  HOST_WIDE_INT bytepos;
  tree alias_set;
  tree vuse;
  unsigned HOST_WIDE_INT range;
};

/* The number which the find_bswap_or_nop_1 result should match in
   order to have a nop.  The number is masked according to the size of
   the symbolic number before using it.  */
#define CMPNOP (sizeof (int64_t) < 8 ? 0 : \
  (uint64_t)0x08070605 << 32 | 0x04030201)

/* The number which the find_bswap_or_nop_1 result should match in
   order to have a byte swap.  The number is masked according to the
   size of the symbolic number before using it.  */
#define CMPXCHG (sizeof (int64_t) < 8 ? 0 : \
  (uint64_t)0x01020304 << 32 | 0x05060708)
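
/* For example (not from the original sources), a 4-byte expression
   uses only the low four marker bytes: a final symbolic number of
   0x04030201 (CMPNOP masked to 32 bits) means every byte still sits
   in its original position, while 0x01020304 (CMPXCHG masked to 32
   bits) means the byte order has been fully reversed, i.e. a bswap.  */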
/* Perform a SHIFT or ROTATE operation by COUNT bits on symbolic
   number N.  Return false if the requested operation is not permitted
   on a symbolic number.  */

static inline bool
do_shift_rotate (enum tree_code code,
		 struct symbolic_number *n,
		 int count)
{
  int bitsize = TYPE_PRECISION (n->type);

  if (count % 8 != 0)
    return false;

  /* Zero out the extra bits of N in order to avoid them being shifted
     into the significant bits.  */
  if (bitsize < 8 * (int)sizeof (int64_t))
    n->n &= ((uint64_t)1 << bitsize) - 1;

  switch (code)
    {
    case LSHIFT_EXPR:
      n->n <<= count;
      break;
    case RSHIFT_EXPR:
      /* Arithmetic shift of signed type: result is dependent on the value.  */
      if (!TYPE_UNSIGNED (n->type)
	  && (n->n & ((uint64_t) 0xff << (bitsize - 8))))
	return false;
      n->n >>= count;
      break;
    case LROTATE_EXPR:
      n->n = (n->n << count) | (n->n >> (bitsize - count));
      break;
    case RROTATE_EXPR:
      n->n = (n->n >> count) | (n->n << (bitsize - count));
      break;
    default:
      return false;
    }
  /* Zero unused bits for size.  */
  if (bitsize < 8 * (int)sizeof (int64_t))
    n->n &= ((uint64_t)1 << bitsize) - 1;
  return true;
}
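
/* Worked example (not from the original sources): for a 32-bit
   unsigned X, the hand-written byte swap

     (X >> 24) | ((X >> 8) & 0xff00) | ((X << 8) & 0xff0000) | (X << 24)

   starts from the symbolic number 0x04030201; the four operands
   evaluate to 0x00000004, 0x00000300, 0x00020000 and 0x01000000
   after the shifts and byte masks, and ORing them gives 0x01020304,
   which matches CMPXCHG masked to 32 bits, so find_bswap_or_nop_1
   below recognizes a bswap.  */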
/* Perform sanity checking for the symbolic number N and the gimple
   statement STMT.  */

static inline bool
verify_symbolic_number_p (struct symbolic_number *n, gimple stmt)
{
  tree lhs_type;

  lhs_type = gimple_expr_type (stmt);

  if (TREE_CODE (lhs_type) != INTEGER_TYPE)
    return false;

  if (TYPE_PRECISION (lhs_type) != TYPE_PRECISION (n->type))
    return false;

  return true;
}

/* Initialize the symbolic number N for the bswap pass from the base element
   SRC manipulated by the bitwise OR expression.  */

static bool
init_symbolic_number (struct symbolic_number *n, tree src)
{
  int size;

  n->base_addr = n->offset = n->alias_set = n->vuse = NULL_TREE;

  /* Set up the symbolic number N by setting each byte to a value between 1 and
     the byte size of rhs1.  The highest order byte is set to n->size and the
     lowest order byte to 1.  */
  n->type = TREE_TYPE (src);
  size = TYPE_PRECISION (n->type);
  if (size % BITS_PER_UNIT != 0)
    return false;
  size /= BITS_PER_UNIT;
  if (size > (int)sizeof (uint64_t))
    return false;
  n->range = size;
  n->n = CMPNOP;

  if (size < (int)sizeof (int64_t))
    n->n &= ((uint64_t)1 << (size * BITS_PER_UNIT)) - 1;

  return true;
}
/* Check if STMT might be a byte swap or a nop from a memory source and
   return the answer.  If so, REF is that memory source and the base of the
   memory area accessed and the offset of the access from that base are
   recorded in N.  */

bool
find_bswap_or_nop_load (gimple stmt, tree ref, struct symbolic_number *n)
{
  /* Leaf node is an array or component ref.  Memorize its base and
     offset from base to compare to other such leaf node.  */
  HOST_WIDE_INT bitsize, bitpos;
  enum machine_mode mode;
  int unsignedp, volatilep;
  tree offset, base_addr;

  if (!gimple_assign_load_p (stmt) || gimple_has_volatile_ops (stmt))
    return false;

  base_addr = get_inner_reference (ref, &bitsize, &bitpos, &offset, &mode,
				   &unsignedp, &volatilep, false);

  if (TREE_CODE (base_addr) == MEM_REF)
    {
      offset_int bit_offset = 0;
      tree off = TREE_OPERAND (base_addr, 1);

      if (!integer_zerop (off))
	{
	  offset_int boff, coff = mem_ref_offset (base_addr);
	  boff = wi::lshift (coff, LOG2_BITS_PER_UNIT);
	  bit_offset += boff;
	}

      base_addr = TREE_OPERAND (base_addr, 0);

      /* Avoid returning a negative bitpos as this may wreak havoc later.  */
      if (wi::neg_p (bit_offset))
	{
	  offset_int mask = wi::mask <offset_int> (LOG2_BITS_PER_UNIT, false);
	  offset_int tem = bit_offset.and_not (mask);
	  /* TEM is the bitpos rounded to BITS_PER_UNIT towards -Inf.
	     Subtract it from BIT_OFFSET and add it (scaled) to OFFSET.  */
	  bit_offset -= tem;
	  tem = wi::arshift (tem, LOG2_BITS_PER_UNIT);
	  if (offset)
	    offset = size_binop (PLUS_EXPR, offset,
				 wide_int_to_tree (sizetype, tem));
	  else
	    offset = wide_int_to_tree (sizetype, tem);
	}

      bitpos += bit_offset.to_shwi ();
    }

  if (bitpos % BITS_PER_UNIT)
    return false;
  if (bitsize % BITS_PER_UNIT)
    return false;

  if (!init_symbolic_number (n, ref))
    return false;
  n->base_addr = base_addr;
  n->offset = offset;
  n->bytepos = bitpos / BITS_PER_UNIT;
  n->alias_set = reference_alias_ptr_type (ref);
  n->vuse = gimple_vuse (stmt);
  return true;
}
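
/* For instance (not from the original sources), for a load such as
   tmp = ((unsigned char *) p)[3], the recorded state is base_addr = p,
   bytepos = 3, range = 1 and n = 0x01; the BIT_IOR_EXPR handling in
   find_bswap_or_nop_1 below can then check that several such loads
   read adjacent bytes from the same base before combining them.  */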
1806 /* find_bswap_or_nop_1 invokes itself recursively with N and tries to perform
1807 the operation given by the rhs of STMT on the result. If the operation
1808 could successfully be executed the function returns a gimple stmt whose
1809 rhs's first tree is the expression of the source operand and NULL
1810 otherwise. */
1812 static gimple
1813 find_bswap_or_nop_1 (gimple stmt, struct symbolic_number *n, int limit)
1815 enum tree_code code;
1816 tree rhs1, rhs2 = NULL;
1817 gimple rhs1_stmt, rhs2_stmt, source_stmt1;
1818 enum gimple_rhs_class rhs_class;
1820 if (!limit || !is_gimple_assign (stmt))
1821 return NULL;
1823 rhs1 = gimple_assign_rhs1 (stmt);
1825 if (find_bswap_or_nop_load (stmt, rhs1, n))
1826 return stmt;
1828 if (TREE_CODE (rhs1) != SSA_NAME)
1829 return NULL;
1831 code = gimple_assign_rhs_code (stmt);
1832 rhs_class = gimple_assign_rhs_class (stmt);
1833 rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
1835 if (rhs_class == GIMPLE_BINARY_RHS)
1836 rhs2 = gimple_assign_rhs2 (stmt);
1838 /* Handle unary rhs and binary rhs with integer constants as second
1839 operand. */
1841 if (rhs_class == GIMPLE_UNARY_RHS
1842 || (rhs_class == GIMPLE_BINARY_RHS
1843 && TREE_CODE (rhs2) == INTEGER_CST))
1845 if (code != BIT_AND_EXPR
1846 && code != LSHIFT_EXPR
1847 && code != RSHIFT_EXPR
1848 && code != LROTATE_EXPR
1849 && code != RROTATE_EXPR
1850 && code != NOP_EXPR
1851 && code != CONVERT_EXPR)
1852 return NULL;
1854 source_stmt1 = find_bswap_or_nop_1 (rhs1_stmt, n, limit - 1);
1856 /* If find_bswap_or_nop_1 returned NULL, STMT is a leaf node and
1857 we have to initialize the symbolic number. */
1858 if (!source_stmt1)
1860 if (gimple_assign_load_p (stmt)
1861 || !init_symbolic_number (n, rhs1))
1862 return NULL;
1863 source_stmt1 = stmt;
1866 switch (code)
1868 case BIT_AND_EXPR:
1870 int i, size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
1871 uint64_t val = int_cst_value (rhs2);
1872 uint64_t tmp = val;
1874 /* Only constants masking full bytes are allowed. */
1875 for (i = 0; i < size; i++, tmp >>= BITS_PER_UNIT)
1876 if ((tmp & 0xff) != 0 && (tmp & 0xff) != 0xff)
1877 return NULL;
1879 n->n &= val;
1881 break;
1882 case LSHIFT_EXPR:
1883 case RSHIFT_EXPR:
1884 case LROTATE_EXPR:
1885 case RROTATE_EXPR:
1886 if (!do_shift_rotate (code, n, (int)TREE_INT_CST_LOW (rhs2)))
1887 return NULL;
1888 break;
1889 CASE_CONVERT:
1891 int type_size, old_type_size;
1892 tree type;
1894 type = gimple_expr_type (stmt);
1895 type_size = TYPE_PRECISION (type);
1896 if (type_size % BITS_PER_UNIT != 0)
1897 return NULL;
1898 if (type_size > (int)sizeof (uint64_t) * 8)
1899 return NULL;
1901 /* Sign extension: result is dependent on the value. */
1902 old_type_size = TYPE_PRECISION (n->type);
1903 if (!TYPE_UNSIGNED (n->type)
1904 && type_size > old_type_size
1905 && n->n & ((uint64_t) 0xff << (old_type_size - 8)))
1906 return NULL;
1908 if (type_size / BITS_PER_UNIT < (int)(sizeof (int64_t)))
1910 /* If STMT casts to a smaller type, mask out the bits not
1911 belonging to the target type. */
1912 n->n &= ((uint64_t)1 << type_size) - 1;
1914 n->type = type;
1915 if (!n->base_addr)
1916 n->range = type_size / BITS_PER_UNIT;
1918 break;
1919 default:
1920 return NULL;
1922 return verify_symbolic_number_p (n, stmt) ? source_stmt1 : NULL;
1925 /* Handle binary rhs. */
1927 if (rhs_class == GIMPLE_BINARY_RHS)
1929 int i, size;
1930 struct symbolic_number n1, n2;
1931 uint64_t mask;
1932 gimple source_stmt2;
1934 if (code != BIT_IOR_EXPR)
1935 return NULL;
1937 if (TREE_CODE (rhs2) != SSA_NAME)
1938 return NULL;
1940 rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
1942 switch (code)
1944 case BIT_IOR_EXPR:
1945 source_stmt1 = find_bswap_or_nop_1 (rhs1_stmt, &n1, limit - 1);
1947 if (!source_stmt1)
1948 return NULL;
1950 source_stmt2 = find_bswap_or_nop_1 (rhs2_stmt, &n2, limit - 1);
1952 if (!source_stmt2)
1953 return NULL;
1955 if (TYPE_PRECISION (n1.type) != TYPE_PRECISION (n2.type))
1956 return NULL;
1958 if (!n1.vuse != !n2.vuse
1959     || (n1.vuse && !operand_equal_p (n1.vuse, n2.vuse, 0)))
1960 return NULL;
1962 if (gimple_assign_rhs1 (source_stmt1)
1963 != gimple_assign_rhs1 (source_stmt2))
1965 int64_t inc, mask;
1966 unsigned i;
1967 HOST_WIDE_INT off_sub;
1968 struct symbolic_number *n_ptr;
1970 if (!n1.base_addr || !n2.base_addr
1971 || !operand_equal_p (n1.base_addr, n2.base_addr, 0))
1972 return NULL;
1973 if (!n1.offset != !n2.offset
1974     || (n1.offset && !operand_equal_p (n1.offset, n2.offset, 0)))
1975 return NULL;
1977 /* We swap n1 with n2 to have n1 < n2. */
1978 if (n2.bytepos < n1.bytepos)
1980 struct symbolic_number tmpn;
1982 tmpn = n2;
1983 n2 = n1;
1984 n1 = tmpn;
1985 source_stmt1 = source_stmt2;
1988 off_sub = n2.bytepos - n1.bytepos;
1990 /* Check that the range of memory covered does not exceed the biggest int size. */
1991 if (off_sub + n2.range > (int) sizeof (int64_t))
1992 return NULL;
1993 n->range = n2.range + off_sub;
1995 /* Reinterpret the byte marks in the symbolic number holding the value
1996 of bigger weight according to the target endianness. */
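/* Worked example, assuming the byte-mark encoding used earlier in this
   file (byte K of the source gets mark K + 1): for the little-endian
   combine  x = (uint32_t) q[0] | ((uint32_t) q[1] << 16)  of two
   uint16_t loads, n1.n == 0x0201 (bytepos 0) and n2.n == 0x02010000
   (bytepos 2), so off_sub == 2 and each non-zero mark byte of n2 is
   incremented by 2, giving 0x04030000.  The OR below then yields
   0x04030201, the marks of a plain 4-byte read.  */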
1997 inc = BYTES_BIG_ENDIAN ? off_sub + n2.range - n1.range : off_sub;
1998 mask = 0xFF;
1999 if (BYTES_BIG_ENDIAN)
2000 n_ptr = &n1;
2001 else
2002 n_ptr = &n2;
2003 for (i = 0; i < sizeof (int64_t); i++, inc <<= 8,
2004 mask <<= 8)
2006 if (n_ptr->n & mask)
2007 n_ptr->n += inc;
2010 else
2011 n->range = n1.range;
2013 if (!n1.alias_set
2014 || alias_ptr_types_compatible_p (n1.alias_set, n2.alias_set))
2015 n->alias_set = n1.alias_set;
2016 else
2017 n->alias_set = ptr_type_node;
2018 n->vuse = n1.vuse;
2019 n->base_addr = n1.base_addr;
2020 n->offset = n1.offset;
2021 n->bytepos = n1.bytepos;
2022 n->type = n1.type;
2023 size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
2024 for (i = 0, mask = 0xff; i < size; i++, mask <<= BITS_PER_UNIT)
2026 uint64_t masked1, masked2;
2028 masked1 = n1.n & mask;
2029 masked2 = n2.n & mask;
2030 if (masked1 && masked2 && masked1 != masked2)
2031 return NULL;
2033 n->n = n1.n | n2.n;
2035 if (!verify_symbolic_number_p (n, stmt))
2036 return NULL;
2038 break;
2039 default:
2040 return NULL;
2042 return source_stmt1;
2044 return NULL;
2047 /* Check if STMT completes a bswap implementation, or a read in a given
2048 endianness, consisting of ORs, SHIFTs and ANDs, and set *BSWAP
2049 accordingly.  It also sets N to describe the operations performed:
2050 the size of the resulting expression and whether it works on a
2051 memory source, and if so the alias set and vuse.  Finally, the
2052 function returns a stmt whose rhs's first tree is the source
2053 expression. */
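/* For example, the function recognizes a manual 32-bit byte swap of the
   following shape (an illustrative sketch, not from the original
   sources):

     uint32_t out = ((in & 0x000000ff) << 24)
                    | ((in & 0x0000ff00) << 8)
                    | ((in & 0x00ff0000) >> 8)
                    | ((in & 0xff000000) >> 24);

   as well as the nop counterpart in which every byte stays in place.  */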
2055 static gimple
2056 find_bswap_or_nop (gimple stmt, struct symbolic_number *n, bool *bswap)
2058 /* The number which the find_bswap_or_nop_1 result should match in order
2059 to have a full byte swap. The number is shifted to the right
2060 according to the size of the symbolic number before using it. */
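/* A worked illustration: for a 4-byte access, CMPXCHG is shifted right
   by (8 - 4) * 8 == 32 bits and CMPNOP is masked to its low four bytes
   before the comparison below.  Assuming the byte-mark encoding used
   earlier in this file (byte K of the source gets mark K + 1), the nop
   pattern to match is then 0x04030201.  */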
2061 uint64_t cmpxchg = CMPXCHG;
2062 uint64_t cmpnop = CMPNOP;
2064 gimple source_stmt;
2065 int limit;
2067 /* The last parameter determines the search depth limit.  It usually
2068 correlates directly to the number n of bytes to be touched.  We
2069 increase that number by log2(n) + 1 here in order to also cover
2070 signed -> unsigned conversions of the src operand as can be seen in
2071 libgcc, and for an initial shift/and operation of the src operand (e.g. an 8-byte access yields a limit of 8 + 1 + 3 = 12). */
2072 limit = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (gimple_expr_type (stmt)));
2073 limit += 1 + (int) ceil_log2 ((unsigned HOST_WIDE_INT) limit);
2074 source_stmt = find_bswap_or_nop_1 (stmt, n, limit);
2076 if (!source_stmt)
2077 return NULL;
2079 /* Find the real size of the result (highest non-zero byte). */
2080 if (n->base_addr)
2082 int rsize;
2083 uint64_t tmpn;
2085 for (tmpn = n->n, rsize = 0; tmpn; tmpn >>= BITS_PER_UNIT, rsize++);
2086 n->range = rsize;
2089 /* Zero out the extra bits of N and CMP*. */
2090 if (n->range < (int)sizeof (int64_t))
2092 uint64_t mask;
2094 mask = ((uint64_t)1 << (n->range * BITS_PER_UNIT)) - 1;
2095 cmpxchg >>= (sizeof (int64_t) - n->range) * BITS_PER_UNIT;
2096 cmpnop &= mask;
2099 /* A complete byte swap should make the symbolic number start with
2100 the largest digit in the highest-order byte.  An unchanged symbolic
2101 number indicates a read with the same endianness as the target. */
2102 if (n->n == cmpnop)
2103 *bswap = false;
2104 else if (n->n == cmpxchg)
2105 *bswap = true;
2106 else
2107 return NULL;
2109 /* Useless bit manipulation performed by the code: a nop on a non-memory source leaves nothing to replace. */
2110 if (!n->base_addr && n->n == cmpnop)
2111 return NULL;
2113 n->range *= BITS_PER_UNIT;
2114 return source_stmt;
2117 namespace {
2119 const pass_data pass_data_optimize_bswap =
2121 GIMPLE_PASS, /* type */
2122 "bswap", /* name */
2123 OPTGROUP_NONE, /* optinfo_flags */
2124 TV_NONE, /* tv_id */
2125 PROP_ssa, /* properties_required */
2126 0, /* properties_provided */
2127 0, /* properties_destroyed */
2128 0, /* todo_flags_start */
2129 0, /* todo_flags_finish */
2132 class pass_optimize_bswap : public gimple_opt_pass
2134 public:
2135 pass_optimize_bswap (gcc::context *ctxt)
2136 : gimple_opt_pass (pass_data_optimize_bswap, ctxt)
2139 /* opt_pass methods: */
2140 virtual bool gate (function *)
2142 return flag_expensive_optimizations && optimize;
2145 virtual unsigned int execute (function *);
2147 }; // class pass_optimize_bswap
2149 /* Perform the bswap optimization: replace the statement CUR_STMT at
2150 GSI with a load of the type, VUSE and alias set described by N if a
2151 memory source is involved (N->base_addr is non-null), followed by
2152 the builtin bswap invocation in FNDECL if BSWAP is true.  SRC_STMT
2153 gives where the replacement should be made.  It also gives the
2154 source on which CUR_STMT is operating via its rhs's first tree, and
2155 N->range gives the size of the expression involved for maintaining
2156 some statistics. */
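/* A before/after sketch under stated assumptions (little-endian target,
   uint8_t *p, bswap32 available):

     x = ((uint32_t) p[0] << 24) | ((uint32_t) p[1] << 16)
         | ((uint32_t) p[2] << 8) | (uint32_t) p[3];

   becomes

     load_dst = *(uint32_t *) p;
     x = __builtin_bswap32 (load_dst);

   or just the load when the composed read already matches the target
   endianness.  */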
2158 static bool
2159 bswap_replace (gimple cur_stmt, gimple_stmt_iterator gsi, gimple src_stmt,
2160 tree fndecl, tree bswap_type, tree load_type,
2161 struct symbolic_number *n, bool bswap)
2163 tree src, tmp, tgt;
2164 gimple call;
2166 src = gimple_assign_rhs1 (src_stmt);
2167 tgt = gimple_assign_lhs (cur_stmt);
2169 /* Need to load the value from memory first. */
2170 if (n->base_addr)
2172 gimple_stmt_iterator gsi_ins = gsi_for_stmt (src_stmt);
2173 tree addr_expr, addr_tmp, val_expr, val_tmp;
2174 tree load_offset_ptr, aligned_load_type;
2175 gimple addr_stmt, load_stmt;
2176 unsigned align;
2178 align = get_object_alignment (src);
2179 if (bswap
2180 && align < GET_MODE_ALIGNMENT (TYPE_MODE (load_type))
2181 && SLOW_UNALIGNED_ACCESS (TYPE_MODE (load_type), align))
2182 return false;
2184 gsi_move_before (&gsi, &gsi_ins);
2185 gsi = gsi_for_stmt (cur_stmt);
2187 /* Compute address to load from and cast according to the size
2188 of the load. */
2189 addr_expr = build_fold_addr_expr (unshare_expr (src));
2190 if (is_gimple_min_invariant (addr_expr))
2191 addr_tmp = addr_expr;
2192 else
2194 addr_tmp = make_temp_ssa_name (TREE_TYPE (addr_expr), NULL,
2195 "load_src");
2196 addr_stmt = gimple_build_assign (addr_tmp, addr_expr);
2197 gsi_insert_before (&gsi, addr_stmt, GSI_SAME_STMT);
2200 /* Perform the load. */
2201 aligned_load_type = load_type;
2202 if (align < TYPE_ALIGN (load_type))
2203 aligned_load_type = build_aligned_type (load_type, align);
2204 load_offset_ptr = build_int_cst (n->alias_set, 0);
2205 val_expr = fold_build2 (MEM_REF, aligned_load_type, addr_tmp,
2206 load_offset_ptr);
2208 if (!bswap)
2210 if (n->range == 16)
2211 nop_stats.found_16bit++;
2212 else if (n->range == 32)
2213 nop_stats.found_32bit++;
2214 else
2216 gcc_assert (n->range == 64);
2217 nop_stats.found_64bit++;
2220 /* Convert the result of the load if necessary. */
2221 if (!useless_type_conversion_p (TREE_TYPE (tgt), load_type))
2223 val_tmp = make_temp_ssa_name (aligned_load_type, NULL,
2224 "load_dst");
2225 load_stmt = gimple_build_assign (val_tmp, val_expr);
2226 gimple_set_vuse (load_stmt, n->vuse);
2227 gsi_insert_before (&gsi, load_stmt, GSI_SAME_STMT);
2228 gimple_assign_set_rhs_with_ops_1 (&gsi, NOP_EXPR, val_tmp,
2229 NULL_TREE, NULL_TREE);
2231 else
2233 gimple_assign_set_rhs_with_ops_1 (&gsi, MEM_REF, val_expr,
2234 NULL_TREE, NULL_TREE);
2235 gimple_set_vuse (cur_stmt, n->vuse);
2237 update_stmt (cur_stmt);
2239 if (dump_file)
2241 fprintf (dump_file,
2242 "%d bit load in target endianness found at: ",
2243 (int)n->range);
2244 print_gimple_stmt (dump_file, cur_stmt, 0, 0);
2246 return true;
2248 else
2250 val_tmp = make_temp_ssa_name (aligned_load_type, NULL, "load_dst");
2251 load_stmt = gimple_build_assign (val_tmp, val_expr);
2252 gimple_set_vuse (load_stmt, n->vuse);
2253 gsi_insert_before (&gsi, load_stmt, GSI_SAME_STMT);
2255 src = val_tmp;
2258 if (n->range == 16)
2259 bswap_stats.found_16bit++;
2260 else if (n->range == 32)
2261 bswap_stats.found_32bit++;
2262 else
2264 gcc_assert (n->range == 64);
2265 bswap_stats.found_64bit++;
2268 tmp = src;
2270 /* Convert the src expression if necessary. */
2271 if (!useless_type_conversion_p (TREE_TYPE (tmp), bswap_type))
2273 gimple convert_stmt;
2274 tmp = make_temp_ssa_name (bswap_type, NULL, "bswapsrc");
2275 convert_stmt = gimple_build_assign_with_ops (NOP_EXPR, tmp, src, NULL);
2276 gsi_insert_before (&gsi, convert_stmt, GSI_SAME_STMT);
2279 call = gimple_build_call (fndecl, 1, tmp);
2281 tmp = tgt;
2283 /* Convert the result if necessary. */
2284 if (!useless_type_conversion_p (TREE_TYPE (tgt), bswap_type))
2286 gimple convert_stmt;
2287 tmp = make_temp_ssa_name (bswap_type, NULL, "bswapdst");
2288 convert_stmt = gimple_build_assign_with_ops (NOP_EXPR, tgt, tmp, NULL);
2289 gsi_insert_after (&gsi, convert_stmt, GSI_SAME_STMT);
2292 gimple_call_set_lhs (call, tmp);
2294 if (dump_file)
2296 fprintf (dump_file, "%d bit bswap implementation found at: ",
2297 (int)n->range);
2298 print_gimple_stmt (dump_file, cur_stmt, 0, 0);
2301 gsi_insert_after (&gsi, call, GSI_SAME_STMT);
2302 gsi_remove (&gsi, true);
2303 return true;
2306 /* Find manual byte swap implementations as well as loads in a given
2307 endianness.  Byte swaps are turned into a bswap builtin invocation,
2308 while endian loads are converted to a bswap builtin invocation or a
2309 simple load according to the target endianness. */
2311 unsigned int
2312 pass_optimize_bswap::execute (function *fun)
2314 basic_block bb;
2315 bool bswap16_p, bswap32_p, bswap64_p;
2316 bool changed = false;
2317 tree bswap16_type = NULL_TREE, bswap32_type = NULL_TREE, bswap64_type = NULL_TREE;
2319 if (BITS_PER_UNIT != 8)
2320 return 0;
2322 bswap16_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP16)
2323 && optab_handler (bswap_optab, HImode) != CODE_FOR_nothing);
2324 bswap32_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP32)
2325 && optab_handler (bswap_optab, SImode) != CODE_FOR_nothing);
2326 bswap64_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP64)
2327 && (optab_handler (bswap_optab, DImode) != CODE_FOR_nothing
2328 || (bswap32_p && word_mode == SImode)));
2330 /* Determine the argument type of the builtins. The code later on
2331 assumes that the return and argument type are the same. */
2332 if (bswap16_p)
2334 tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP16);
2335 bswap16_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
2338 if (bswap32_p)
2340 tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
2341 bswap32_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
2344 if (bswap64_p)
2346 tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
2347 bswap64_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
2350 memset (&nop_stats, 0, sizeof (nop_stats));
2351 memset (&bswap_stats, 0, sizeof (bswap_stats));
2353 FOR_EACH_BB_FN (bb, fun)
2355 gimple_stmt_iterator gsi;
2357 /* We do a reverse scan for bswap patterns to make sure we get the
2358 widest match. As bswap pattern matching doesn't handle
2359 previously inserted smaller bswap replacements as sub-
2360 patterns, the wider variant wouldn't be detected. */
2361 for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
2363 gimple src_stmt, cur_stmt = gsi_stmt (gsi);
2364 tree fndecl = NULL_TREE, bswap_type = NULL_TREE, load_type;
2365 struct symbolic_number n;
2366 bool bswap;
2368 if (!is_gimple_assign (cur_stmt)
2369 || gimple_assign_rhs_code (cur_stmt) != BIT_IOR_EXPR)
2370 continue;
2372 src_stmt = find_bswap_or_nop (cur_stmt, &n, &bswap);
2374 if (!src_stmt)
2375 continue;
2377 switch (n.range)
2379 case 16:
2380 load_type = uint16_type_node;
2381 if (bswap16_p)
2383 fndecl = builtin_decl_explicit (BUILT_IN_BSWAP16);
2384 bswap_type = bswap16_type;
2386 break;
2387 case 32:
2388 load_type = uint32_type_node;
2389 if (bswap32_p)
2391 fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
2392 bswap_type = bswap32_type;
2394 break;
2395 case 64:
2396 load_type = uint64_type_node;
2397 if (bswap64_p)
2399 fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
2400 bswap_type = bswap64_type;
2402 break;
2403 default:
2404 continue;
2407 if (bswap && !fndecl)
2408 continue;
2410 if (bswap_replace (cur_stmt, gsi, src_stmt, fndecl, bswap_type,
2411 load_type, &n, bswap))
2412 changed = true;
2416 statistics_counter_event (fun, "16-bit nop implementations found",
2417 nop_stats.found_16bit);
2418 statistics_counter_event (fun, "32-bit nop implementations found",
2419 nop_stats.found_32bit);
2420 statistics_counter_event (fun, "64-bit nop implementations found",
2421 nop_stats.found_64bit);
2422 statistics_counter_event (fun, "16-bit bswap implementations found",
2423 bswap_stats.found_16bit);
2424 statistics_counter_event (fun, "32-bit bswap implementations found",
2425 bswap_stats.found_32bit);
2426 statistics_counter_event (fun, "64-bit bswap implementations found",
2427 bswap_stats.found_64bit);
2429 return (changed ? TODO_update_ssa : 0);
2432 } // anon namespace
2434 gimple_opt_pass *
2435 make_pass_optimize_bswap (gcc::context *ctxt)
2437 return new pass_optimize_bswap (ctxt);
2440 /* Return true if STMT is a type conversion operation that can be stripped
2441 when used in a widening multiply operation. */
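/* Illustrative sketch (not from the original sources): in

     short a, b;
     int c = (int) a * (int) b;

   the two conversions feeding the multiply have the same precision as
   the result and can be stripped; the widening multiply itself then
   supplies the extension.  */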
2442 static bool
2443 widening_mult_conversion_strippable_p (tree result_type, gimple stmt)
2445 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
2447 if (TREE_CODE (result_type) == INTEGER_TYPE)
2449 tree op_type;
2450 tree inner_op_type;
2452 if (!CONVERT_EXPR_CODE_P (rhs_code))
2453 return false;
2455 op_type = TREE_TYPE (gimple_assign_lhs (stmt));
2457 /* If the type of OP has the same precision as the result, then
2458 we can strip this conversion. The multiply operation will be
2459 selected to create the correct extension as a by-product. */
2460 if (TYPE_PRECISION (result_type) == TYPE_PRECISION (op_type))
2461 return true;
2463 /* We can also strip a conversion if it preserves the signedness of
2464 the operation and doesn't narrow the range. */
2465 inner_op_type = TREE_TYPE (gimple_assign_rhs1 (stmt));
2467 /* If the inner-most type is unsigned, then we can strip any
2468 intermediate widening operation. If it's signed, then the
2469 intermediate widening operation must also be signed. */
2470 if ((TYPE_UNSIGNED (inner_op_type)
2471 || TYPE_UNSIGNED (op_type) == TYPE_UNSIGNED (inner_op_type))
2472 && TYPE_PRECISION (op_type) > TYPE_PRECISION (inner_op_type))
2473 return true;
2475 return false;
2478 return rhs_code == FIXED_CONVERT_EXPR;
2481 /* Return true if RHS is a suitable operand for a widening multiplication,
2482 assuming a target type of TYPE.
2483 There are two cases:
2485 - RHS makes some value at least twice as wide. Store that value
2486 in *NEW_RHS_OUT if so, and store its type in *TYPE_OUT.
2488 - RHS is an integer constant. Store that value in *NEW_RHS_OUT if so,
2489 but leave *TYPE_OUT untouched. */
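/* For instance (names invented for illustration), with TYPE being a
   64-bit integer type, an RHS of the form (int64_t) some_int32 matches
   the first case: *NEW_RHS_OUT is SOME_INT32 and *TYPE_OUT is its
   32-bit type.  */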
2491 static bool
2492 is_widening_mult_rhs_p (tree type, tree rhs, tree *type_out,
2493 tree *new_rhs_out)
2495 gimple stmt;
2496 tree type1, rhs1;
2498 if (TREE_CODE (rhs) == SSA_NAME)
2500 stmt = SSA_NAME_DEF_STMT (rhs);
2501 if (is_gimple_assign (stmt))
2503 if (! widening_mult_conversion_strippable_p (type, stmt))
2504 rhs1 = rhs;
2505 else
2507 rhs1 = gimple_assign_rhs1 (stmt);
2509 if (TREE_CODE (rhs1) == INTEGER_CST)
2511 *new_rhs_out = rhs1;
2512 *type_out = NULL;
2513 return true;
2517 else
2518 rhs1 = rhs;
2520 type1 = TREE_TYPE (rhs1);
2522 if (TREE_CODE (type1) != TREE_CODE (type)
2523 || TYPE_PRECISION (type1) * 2 > TYPE_PRECISION (type))
2524 return false;
2526 *new_rhs_out = rhs1;
2527 *type_out = type1;
2528 return true;
2531 if (TREE_CODE (rhs) == INTEGER_CST)
2533 *new_rhs_out = rhs;
2534 *type_out = NULL;
2535 return true;
2538 return false;
2541 /* Return true if STMT performs a widening multiplication, assuming the
2542 output type is TYPE. If so, store the unwidened types of the operands
2543 in *TYPE1_OUT and *TYPE2_OUT respectively. Also fill *RHS1_OUT and
2544 *RHS2_OUT such that converting those operands to types *TYPE1_OUT
2545 and *TYPE2_OUT would give the operands of the multiplication. */
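/* Illustrative sketch (names invented): for

     long long r = (long long) i * 2;

   with a 32-bit int I on a 64-bit target, *TYPE1_OUT and *TYPE2_OUT
   both end up as the 32-bit type, *RHS1_OUT is I, and *RHS2_OUT is the
   constant 2, which fits that type.  */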
2547 static bool
2548 is_widening_mult_p (gimple stmt,
2549 tree *type1_out, tree *rhs1_out,
2550 tree *type2_out, tree *rhs2_out)
2552 tree type = TREE_TYPE (gimple_assign_lhs (stmt));
2554 if (TREE_CODE (type) != INTEGER_TYPE
2555 && TREE_CODE (type) != FIXED_POINT_TYPE)
2556 return false;
2558 if (!is_widening_mult_rhs_p (type, gimple_assign_rhs1 (stmt), type1_out,
2559 rhs1_out))
2560 return false;
2562 if (!is_widening_mult_rhs_p (type, gimple_assign_rhs2 (stmt), type2_out,
2563 rhs2_out))
2564 return false;
2566 if (*type1_out == NULL)
2568 if (*type2_out == NULL || !int_fits_type_p (*rhs1_out, *type2_out))
2569 return false;
2570 *type1_out = *type2_out;
2573 if (*type2_out == NULL)
2575 if (!int_fits_type_p (*rhs2_out, *type1_out))
2576 return false;
2577 *type2_out = *type1_out;
2580 /* Ensure that the larger of the two operands comes first. */
2581 if (TYPE_PRECISION (*type1_out) < TYPE_PRECISION (*type2_out))
2583 tree tmp;
2584 tmp = *type1_out;
2585 *type1_out = *type2_out;
2586 *type2_out = tmp;
2587 tmp = *rhs1_out;
2588 *rhs1_out = *rhs2_out;
2589 *rhs2_out = tmp;
2592 return true;
2595 /* Process a single gimple statement STMT, which has a MULT_EXPR as
2596 its rhs, and try to convert it into a WIDEN_MULT_EXPR. The return
2597 value is true iff we converted the statement. */
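/* Sketch of the rewrite on a GIMPLE-like pseudo-dump (names invented,
   a_s and b_s of type short):

     _1 = (int) a_s;
     _2 = (int) b_s;
     r = _1 * _2;

   becomes  r = a_s w* b_s  (a WIDEN_MULT_EXPR).  */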
2599 static bool
2600 convert_mult_to_widen (gimple stmt, gimple_stmt_iterator *gsi)
2602 tree lhs, rhs1, rhs2, type, type1, type2;
2603 enum insn_code handler;
2604 enum machine_mode to_mode, from_mode, actual_mode;
2605 optab op;
2606 int actual_precision;
2607 location_t loc = gimple_location (stmt);
2608 bool from_unsigned1, from_unsigned2;
2610 lhs = gimple_assign_lhs (stmt);
2611 type = TREE_TYPE (lhs);
2612 if (TREE_CODE (type) != INTEGER_TYPE)
2613 return false;
2615 if (!is_widening_mult_p (stmt, &type1, &rhs1, &type2, &rhs2))
2616 return false;
2618 to_mode = TYPE_MODE (type);
2619 from_mode = TYPE_MODE (type1);
2620 from_unsigned1 = TYPE_UNSIGNED (type1);
2621 from_unsigned2 = TYPE_UNSIGNED (type2);
2623 if (from_unsigned1 && from_unsigned2)
2624 op = umul_widen_optab;
2625 else if (!from_unsigned1 && !from_unsigned2)
2626 op = smul_widen_optab;
2627 else
2628 op = usmul_widen_optab;
2630 handler = find_widening_optab_handler_and_mode (op, to_mode, from_mode,
2631 0, &actual_mode);
2633 if (handler == CODE_FOR_nothing)
2635 if (op != smul_widen_optab)
2637 /* We can use a signed multiply with unsigned types as long as
2638 there is a wider mode to use, or it is the smaller of the two
2639 types that is unsigned. Note that type1 >= type2, always. */
2640 if ((TYPE_UNSIGNED (type1)
2641 && TYPE_PRECISION (type1) == GET_MODE_PRECISION (from_mode))
2642 || (TYPE_UNSIGNED (type2)
2643 && TYPE_PRECISION (type2) == GET_MODE_PRECISION (from_mode)))
2645 from_mode = GET_MODE_WIDER_MODE (from_mode);
2646 if (GET_MODE_SIZE (to_mode) <= GET_MODE_SIZE (from_mode))
2647 return false;
2650 op = smul_widen_optab;
2651 handler = find_widening_optab_handler_and_mode (op, to_mode,
2652 from_mode, 0,
2653 &actual_mode);
2655 if (handler == CODE_FOR_nothing)
2656 return false;
2658 from_unsigned1 = from_unsigned2 = false;
2660 else
2661 return false;
2664 /* Ensure that the inputs to the handler are in the correct precision
2665 for the opcode. This will be the full mode size. */
2666 actual_precision = GET_MODE_PRECISION (actual_mode);
2667 if (2 * actual_precision > TYPE_PRECISION (type))
2668 return false;
2669 if (actual_precision != TYPE_PRECISION (type1)
2670 || from_unsigned1 != TYPE_UNSIGNED (type1))
2671 rhs1 = build_and_insert_cast (gsi, loc,
2672 build_nonstandard_integer_type
2673 (actual_precision, from_unsigned1), rhs1);
2674 if (actual_precision != TYPE_PRECISION (type2)
2675 || from_unsigned2 != TYPE_UNSIGNED (type2))
2676 rhs2 = build_and_insert_cast (gsi, loc,
2677 build_nonstandard_integer_type
2678 (actual_precision, from_unsigned2), rhs2);
2680 /* Handle constants. */
2681 if (TREE_CODE (rhs1) == INTEGER_CST)
2682 rhs1 = fold_convert (type1, rhs1);
2683 if (TREE_CODE (rhs2) == INTEGER_CST)
2684 rhs2 = fold_convert (type2, rhs2);
2686 gimple_assign_set_rhs1 (stmt, rhs1);
2687 gimple_assign_set_rhs2 (stmt, rhs2);
2688 gimple_assign_set_rhs_code (stmt, WIDEN_MULT_EXPR);
2689 update_stmt (stmt);
2690 widen_mul_stats.widen_mults_inserted++;
2691 return true;
2694 /* Process a single gimple statement STMT, which is found at the
2695 iterator GSI and has either a PLUS_EXPR or a MINUS_EXPR as its
2696 rhs (given by CODE), and try to convert it into a
2697 WIDEN_MULT_PLUS_EXPR or a WIDEN_MULT_MINUS_EXPR. The return value
2698 is true iff we converted the statement. */
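/* Illustrative sketch (names invented): for 32-bit ints A and B and a
   64-bit accumulator ACC,

     acc = (long long) a * (long long) b + acc;

   is rewritten as  acc = WIDEN_MULT_PLUS_EXPR <a, b, acc>  when the
   target provides a widening multiply-accumulate pattern.  */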
2700 static bool
2701 convert_plusminus_to_widen (gimple_stmt_iterator *gsi, gimple stmt,
2702 enum tree_code code)
2704 gimple rhs1_stmt = NULL, rhs2_stmt = NULL;
2705 gimple conv1_stmt = NULL, conv2_stmt = NULL, conv_stmt;
2706 tree type, type1, type2, optype;
2707 tree lhs, rhs1, rhs2, mult_rhs1, mult_rhs2, add_rhs;
2708 enum tree_code rhs1_code = ERROR_MARK, rhs2_code = ERROR_MARK;
2709 optab this_optab;
2710 enum tree_code wmult_code;
2711 enum insn_code handler;
2712 enum machine_mode to_mode, from_mode, actual_mode;
2713 location_t loc = gimple_location (stmt);
2714 int actual_precision;
2715 bool from_unsigned1, from_unsigned2;
2717 lhs = gimple_assign_lhs (stmt);
2718 type = TREE_TYPE (lhs);
2719 if (TREE_CODE (type) != INTEGER_TYPE
2720 && TREE_CODE (type) != FIXED_POINT_TYPE)
2721 return false;
2723 if (code == MINUS_EXPR)
2724 wmult_code = WIDEN_MULT_MINUS_EXPR;
2725 else
2726 wmult_code = WIDEN_MULT_PLUS_EXPR;
2728 rhs1 = gimple_assign_rhs1 (stmt);
2729 rhs2 = gimple_assign_rhs2 (stmt);
2731 if (TREE_CODE (rhs1) == SSA_NAME)
2733 rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
2734 if (is_gimple_assign (rhs1_stmt))
2735 rhs1_code = gimple_assign_rhs_code (rhs1_stmt);
2738 if (TREE_CODE (rhs2) == SSA_NAME)
2740 rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
2741 if (is_gimple_assign (rhs2_stmt))
2742 rhs2_code = gimple_assign_rhs_code (rhs2_stmt);
2745 /* Allow for one conversion statement between the multiply
2746 and the addition/subtraction statement.  If there are multiple
2747 conversions then we assume they would invalidate this
2748 transformation. If that's not the case then they should have
2749 been folded before now. */
2750 if (CONVERT_EXPR_CODE_P (rhs1_code))
2752 conv1_stmt = rhs1_stmt;
2753 rhs1 = gimple_assign_rhs1 (rhs1_stmt);
2754 if (TREE_CODE (rhs1) == SSA_NAME)
2756 rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
2757 if (is_gimple_assign (rhs1_stmt))
2758 rhs1_code = gimple_assign_rhs_code (rhs1_stmt);
2760 else
2761 return false;
2763 if (CONVERT_EXPR_CODE_P (rhs2_code))
2765 conv2_stmt = rhs2_stmt;
2766 rhs2 = gimple_assign_rhs1 (rhs2_stmt);
2767 if (TREE_CODE (rhs2) == SSA_NAME)
2769 rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
2770 if (is_gimple_assign (rhs2_stmt))
2771 rhs2_code = gimple_assign_rhs_code (rhs2_stmt);
2773 else
2774 return false;
2777 /* If code is WIDEN_MULT_EXPR then it would seem unnecessary to call
2778 is_widening_mult_p, but we still need the rhs values it returns.
2780 It might also appear that it would be sufficient to use the existing
2781 operands of the widening multiply, but that would limit the choice of
2782 multiply-and-accumulate instructions.
2784 If the widened-multiplication result has more than one use, it is
2785 probably wiser not to do the conversion. */
2786 if (code == PLUS_EXPR
2787 && (rhs1_code == MULT_EXPR || rhs1_code == WIDEN_MULT_EXPR))
2789 if (!has_single_use (rhs1)
2790 || !is_widening_mult_p (rhs1_stmt, &type1, &mult_rhs1,
2791 &type2, &mult_rhs2))
2792 return false;
2793 add_rhs = rhs2;
2794 conv_stmt = conv1_stmt;
2796 else if (rhs2_code == MULT_EXPR || rhs2_code == WIDEN_MULT_EXPR)
2798 if (!has_single_use (rhs2)
2799 || !is_widening_mult_p (rhs2_stmt, &type1, &mult_rhs1,
2800 &type2, &mult_rhs2))
2801 return false;
2802 add_rhs = rhs1;
2803 conv_stmt = conv2_stmt;
2805 else
2806 return false;
2808 to_mode = TYPE_MODE (type);
2809 from_mode = TYPE_MODE (type1);
2810 from_unsigned1 = TYPE_UNSIGNED (type1);
2811 from_unsigned2 = TYPE_UNSIGNED (type2);
2812 optype = type1;
2814 /* There's no such thing as a mixed sign madd yet, so use a wider mode. */
2815 if (from_unsigned1 != from_unsigned2)
2817 if (!INTEGRAL_TYPE_P (type))
2818 return false;
2819 /* We can use a signed multiply with unsigned types as long as
2820 there is a wider mode to use, or it is the smaller of the two
2821 types that is unsigned. Note that type1 >= type2, always. */
2822 if ((from_unsigned1
2823 && TYPE_PRECISION (type1) == GET_MODE_PRECISION (from_mode))
2824 || (from_unsigned2
2825 && TYPE_PRECISION (type2) == GET_MODE_PRECISION (from_mode)))
2827 from_mode = GET_MODE_WIDER_MODE (from_mode);
2828 if (GET_MODE_SIZE (from_mode) >= GET_MODE_SIZE (to_mode))
2829 return false;
2832 from_unsigned1 = from_unsigned2 = false;
2833 optype = build_nonstandard_integer_type (GET_MODE_PRECISION (from_mode),
2834 false);
2837 /* If there was a conversion between the multiply and addition
2838 then we need to make sure it fits a multiply-and-accumulate.
2839 There should be a single mode change which does not change the
2840 value. */
2841 if (conv_stmt)
2843 /* We use the original, unmodified data types for this. */
2844 tree from_type = TREE_TYPE (gimple_assign_rhs1 (conv_stmt));
2845 tree to_type = TREE_TYPE (gimple_assign_lhs (conv_stmt));
2846 int data_size = TYPE_PRECISION (type1) + TYPE_PRECISION (type2);
2847 bool is_unsigned = TYPE_UNSIGNED (type1) && TYPE_UNSIGNED (type2);
2849 if (TYPE_PRECISION (from_type) > TYPE_PRECISION (to_type))
2851 /* Conversion is a truncate. */
2852 if (TYPE_PRECISION (to_type) < data_size)
2853 return false;
2855 else if (TYPE_PRECISION (from_type) < TYPE_PRECISION (to_type))
2857 /* Conversion is an extend. Check it's the right sort. */
2858 if (TYPE_UNSIGNED (from_type) != is_unsigned
2859 && !(is_unsigned && TYPE_PRECISION (from_type) > data_size))
2860 return false;
2862 /* else convert is a no-op for our purposes. */
2865 /* Verify that the machine can perform a widening multiply
2866 accumulate in this mode/signedness combination, otherwise
2867 this transformation is likely to pessimize code. */
2868 this_optab = optab_for_tree_code (wmult_code, optype, optab_default);
2869 handler = find_widening_optab_handler_and_mode (this_optab, to_mode,
2870 from_mode, 0, &actual_mode);
2872 if (handler == CODE_FOR_nothing)
2873 return false;
2875 /* Ensure that the inputs to the handler are in the correct precision
2876 for the opcode. This will be the full mode size. */
2877 actual_precision = GET_MODE_PRECISION (actual_mode);
2878 if (actual_precision != TYPE_PRECISION (type1)
2879 || from_unsigned1 != TYPE_UNSIGNED (type1))
2880 mult_rhs1 = build_and_insert_cast (gsi, loc,
2881 build_nonstandard_integer_type
2882 (actual_precision, from_unsigned1),
2883 mult_rhs1);
2884 if (actual_precision != TYPE_PRECISION (type2)
2885 || from_unsigned2 != TYPE_UNSIGNED (type2))
2886 mult_rhs2 = build_and_insert_cast (gsi, loc,
2887 build_nonstandard_integer_type
2888 (actual_precision, from_unsigned2),
2889 mult_rhs2);
2891 if (!useless_type_conversion_p (type, TREE_TYPE (add_rhs)))
2892 add_rhs = build_and_insert_cast (gsi, loc, type, add_rhs);
2894 /* Handle constants. */
2895 if (TREE_CODE (mult_rhs1) == INTEGER_CST)
2896 mult_rhs1 = fold_convert (type1, mult_rhs1);
2897 if (TREE_CODE (mult_rhs2) == INTEGER_CST)
2898 mult_rhs2 = fold_convert (type2, mult_rhs2);
2900 gimple_assign_set_rhs_with_ops_1 (gsi, wmult_code, mult_rhs1, mult_rhs2,
2901 add_rhs);
2902 update_stmt (gsi_stmt (*gsi));
2903 widen_mul_stats.maccs_inserted++;
2904 return true;
2907 /* Combine the multiplication at MUL_STMT with operands MULOP1 and MULOP2
2908 with uses in additions and subtractions to form fused multiply-add
2909 operations. Returns true if successful and MUL_STMT should be removed. */
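/* Illustrative sketch (names invented): the pair

     t = a * b;
     d = t + c;

   is fused into  d = FMA <a, b, c>  when the target implements fma for
   the type, provided all uses of T can be turned into FMAs so that the
   multiplication becomes dead.  */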
2911 static bool
2912 convert_mult_to_fma (gimple mul_stmt, tree op1, tree op2)
2914 tree mul_result = gimple_get_lhs (mul_stmt);
2915 tree type = TREE_TYPE (mul_result);
2916 gimple use_stmt, neguse_stmt, fma_stmt;
2917 use_operand_p use_p;
2918 imm_use_iterator imm_iter;
2920 if (FLOAT_TYPE_P (type)
2921 && flag_fp_contract_mode == FP_CONTRACT_OFF)
2922 return false;
2924 /* We don't want to do bitfield reduction ops. */
2925 if (INTEGRAL_TYPE_P (type)
2926 && (TYPE_PRECISION (type)
2927 != GET_MODE_PRECISION (TYPE_MODE (type))))
2928 return false;
2930 /* If the target doesn't support it, don't generate it. We assume that
2931 if fma isn't available then fms, fnma or fnms are not either. */
2932 if (optab_handler (fma_optab, TYPE_MODE (type)) == CODE_FOR_nothing)
2933 return false;
2935 /* If the multiplication has zero uses, it is kept around probably because
2936 of -fnon-call-exceptions.  Don't optimize it away in that case;
2937 that is DCE's job. */
2938 if (has_zero_uses (mul_result))
2939 return false;
2941 /* Make sure that the multiplication statement becomes dead after
2942 the transformation, i.e. that all uses are transformed to FMAs.
2943 This means we assume that an FMA operation has the same cost
2944 as an addition. */
2945 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, mul_result)
2947 enum tree_code use_code;
2948 tree result = mul_result;
2949 bool negate_p = false;
2951 use_stmt = USE_STMT (use_p);
2953 if (is_gimple_debug (use_stmt))
2954 continue;
2956 /* For now, restrict this operation to a single basic block.  In theory
2957 we would want to support sinking the multiplication in
2958 m = a*b;
2959 if ()
2960 ma = m + c;
2961 else
2962 d = m;
2963 to form a fma in the then block and sink the multiplication to the
2964 else block. */
2965 if (gimple_bb (use_stmt) != gimple_bb (mul_stmt))
2966 return false;
2968 if (!is_gimple_assign (use_stmt))
2969 return false;
2971 use_code = gimple_assign_rhs_code (use_stmt);
2973 /* A negate on the multiplication leads to FNMA. */
2974 if (use_code == NEGATE_EXPR)
2976 ssa_op_iter iter;
2977 use_operand_p usep;
2979 result = gimple_assign_lhs (use_stmt);
2981 /* Make sure the negate statement becomes dead with this
2982 single transformation. */
2983 if (!single_imm_use (gimple_assign_lhs (use_stmt),
2984 &use_p, &neguse_stmt))
2985 return false;
2987 /* Make sure the multiplication isn't also used on that stmt. */
2988 FOR_EACH_PHI_OR_STMT_USE (usep, neguse_stmt, iter, SSA_OP_USE)
2989 if (USE_FROM_PTR (usep) == mul_result)
2990 return false;
2992 /* Re-validate. */
2993 use_stmt = neguse_stmt;
2994 if (gimple_bb (use_stmt) != gimple_bb (mul_stmt))
2995 return false;
2996 if (!is_gimple_assign (use_stmt))
2997 return false;
2999 use_code = gimple_assign_rhs_code (use_stmt);
3000 negate_p = true;
3003 switch (use_code)
3005 case MINUS_EXPR:
3006 if (gimple_assign_rhs2 (use_stmt) == result)
3007 negate_p = !negate_p;
3008 break;
3009 case PLUS_EXPR:
3010 break;
3011 default:
3012 /* FMA can only be formed from PLUS and MINUS. */
3013 return false;
3016 /* If the subtrahend (gimple_assign_rhs2 (use_stmt)) is computed
3017 by a MULT_EXPR that we'll visit later, we might be able to
3018 get a more profitable match with fnma.
3019 OTOH, if we don't, a negate / fma pair likely has lower latency
3020 than a mult / subtract pair. */
3021 if (use_code == MINUS_EXPR && !negate_p
3022 && gimple_assign_rhs1 (use_stmt) == result
3023 && optab_handler (fms_optab, TYPE_MODE (type)) == CODE_FOR_nothing
3024 && optab_handler (fnma_optab, TYPE_MODE (type)) != CODE_FOR_nothing)
3026 tree rhs2 = gimple_assign_rhs2 (use_stmt);
3028 if (TREE_CODE (rhs2) == SSA_NAME)
3030 gimple stmt2 = SSA_NAME_DEF_STMT (rhs2);
3031 if (has_single_use (rhs2)
3032 && is_gimple_assign (stmt2)
3033 && gimple_assign_rhs_code (stmt2) == MULT_EXPR)
3034 return false;
3038 /* We can't handle a * b + a * b. */
3039 if (gimple_assign_rhs1 (use_stmt) == gimple_assign_rhs2 (use_stmt))
3040 return false;
3042 /* While it is possible to validate whether or not the exact form
3043 that we've recognized is available in the backend, the assumption
3044 is that the transformation is never a loss. For instance, suppose
3045 the target only has the plain FMA pattern available. Consider
3046 a*b-c -> fma(a,b,-c): we've exchanged MUL+SUB for FMA+NEG, which
3047 is still two operations. Consider -(a*b)-c -> fma(-a,b,-c): we
3048 still have 3 operations, but in the FMA form the two NEGs are
3049 independent and could be run in parallel. */
3052 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, mul_result)
3054 gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
3055 enum tree_code use_code;
3056 tree addop, mulop1 = op1, result = mul_result;
3057 bool negate_p = false;
3059 if (is_gimple_debug (use_stmt))
3060 continue;
3062 use_code = gimple_assign_rhs_code (use_stmt);
3063 if (use_code == NEGATE_EXPR)
3065 result = gimple_assign_lhs (use_stmt);
3066 single_imm_use (gimple_assign_lhs (use_stmt), &use_p, &neguse_stmt);
3067 gsi_remove (&gsi, true);
3068 release_defs (use_stmt);
3070 use_stmt = neguse_stmt;
3071 gsi = gsi_for_stmt (use_stmt);
3072 use_code = gimple_assign_rhs_code (use_stmt);
3073 negate_p = true;
3076 if (gimple_assign_rhs1 (use_stmt) == result)
3078 addop = gimple_assign_rhs2 (use_stmt);
3079 /* a * b - c -> a * b + (-c) */
3080 if (gimple_assign_rhs_code (use_stmt) == MINUS_EXPR)
3081 addop = force_gimple_operand_gsi (&gsi,
3082 build1 (NEGATE_EXPR,
3083 type, addop),
3084 true, NULL_TREE, true,
3085 GSI_SAME_STMT);
3087 else
3089 addop = gimple_assign_rhs1 (use_stmt);
3090 /* a - b * c -> (-b) * c + a */
3091 if (gimple_assign_rhs_code (use_stmt) == MINUS_EXPR)
3092 negate_p = !negate_p;
3095 if (negate_p)
3096 mulop1 = force_gimple_operand_gsi (&gsi,
3097 build1 (NEGATE_EXPR,
3098 type, mulop1),
3099 true, NULL_TREE, true,
3100 GSI_SAME_STMT);
3102 fma_stmt = gimple_build_assign_with_ops (FMA_EXPR,
3103 gimple_assign_lhs (use_stmt),
3104 mulop1, op2,
3105 addop);
3106 gsi_replace (&gsi, fma_stmt, true);
3107 widen_mul_stats.fmas_inserted++;
3110 return true;
3113 /* Find integer multiplications where the operands are extended from
3114 smaller types, and replace the MULT_EXPR with a WIDEN_MULT_EXPR
3115 where appropriate. */
3117 namespace {
3119 const pass_data pass_data_optimize_widening_mul =
3121 GIMPLE_PASS, /* type */
3122 "widening_mul", /* name */
3123 OPTGROUP_NONE, /* optinfo_flags */
3124 TV_NONE, /* tv_id */
3125 PROP_ssa, /* properties_required */
3126 0, /* properties_provided */
3127 0, /* properties_destroyed */
3128 0, /* todo_flags_start */
3129 TODO_update_ssa, /* todo_flags_finish */
3132 class pass_optimize_widening_mul : public gimple_opt_pass
3134 public:
3135 pass_optimize_widening_mul (gcc::context *ctxt)
3136 : gimple_opt_pass (pass_data_optimize_widening_mul, ctxt)
3139 /* opt_pass methods: */
3140 virtual bool gate (function *)
3142 return flag_expensive_optimizations && optimize;
3145 virtual unsigned int execute (function *);
3147 }; // class pass_optimize_widening_mul
3149 unsigned int
3150 pass_optimize_widening_mul::execute (function *fun)
3152 basic_block bb;
3153 bool cfg_changed = false;
3155 memset (&widen_mul_stats, 0, sizeof (widen_mul_stats));
3157 FOR_EACH_BB_FN (bb, fun)
3159 gimple_stmt_iterator gsi;
3161 for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi);)
3163 gimple stmt = gsi_stmt (gsi);
3164 enum tree_code code;
3166 if (is_gimple_assign (stmt))
3168 code = gimple_assign_rhs_code (stmt);
3169 switch (code)
3171 case MULT_EXPR:
3172 if (!convert_mult_to_widen (stmt, &gsi)
3173 && convert_mult_to_fma (stmt,
3174 gimple_assign_rhs1 (stmt),
3175 gimple_assign_rhs2 (stmt)))
3177 gsi_remove (&gsi, true);
3178 release_defs (stmt);
3179 continue;
3181 break;
3183 case PLUS_EXPR:
3184 case MINUS_EXPR:
3185 convert_plusminus_to_widen (&gsi, stmt, code);
3186 break;
3188 default:;
3191 else if (is_gimple_call (stmt)
3192 && gimple_call_lhs (stmt))
3194 tree fndecl = gimple_call_fndecl (stmt);
3195 if (fndecl
3196 && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
3198 switch (DECL_FUNCTION_CODE (fndecl))
3200 case BUILT_IN_POWF:
3201 case BUILT_IN_POW:
3202 case BUILT_IN_POWL:
3203 if (TREE_CODE (gimple_call_arg (stmt, 1)) == REAL_CST
3204 && REAL_VALUES_EQUAL
3205 (TREE_REAL_CST (gimple_call_arg (stmt, 1)),
3206 dconst2)
3207 && convert_mult_to_fma (stmt,
3208 gimple_call_arg (stmt, 0),
3209 gimple_call_arg (stmt, 0)))
3211 unlink_stmt_vdef (stmt);
3212 if (gsi_remove (&gsi, true)
3213 && gimple_purge_dead_eh_edges (bb))
3214 cfg_changed = true;
3215 release_defs (stmt);
3216 continue;
3218 break;
3220 default:;
3224 gsi_next (&gsi);
3228 statistics_counter_event (fun, "widening multiplications inserted",
3229 widen_mul_stats.widen_mults_inserted);
3230 statistics_counter_event (fun, "widening maccs inserted",
3231 widen_mul_stats.maccs_inserted);
3232 statistics_counter_event (fun, "fused multiply-adds inserted",
3233 widen_mul_stats.fmas_inserted);
3235 return cfg_changed ? TODO_cleanup_cfg : 0;
3238 } // anon namespace
3240 gimple_opt_pass *
3241 make_pass_optimize_widening_mul (gcc::context *ctxt)
3243 return new pass_optimize_widening_mul (ctxt);