/* Global, SSA-based optimizations using mathematical identities.
   Copyright (C) 2005-2018 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* Currently, the only mini-pass in this file tries to CSE reciprocal
   operations.  These are common in sequences such as this one:

	modulus = sqrt(x*x + y*y + z*z);
	x = x / modulus;
	y = y / modulus;
	z = z / modulus;

   that can be optimized to

	modulus = sqrt(x*x + y*y + z*z);
	rmodulus = 1.0 / modulus;
	x = x * rmodulus;
	y = y * rmodulus;
	z = z * rmodulus;

   We do this for loop-invariant divisors, and with this pass whenever
   we notice that a division has the same divisor multiple times.

   Of course, like in PRE, we don't insert a division if a dominator
   already has one.  However, this cannot be done as an extension of
   PRE for several reasons.

   First of all, experiments showed that the transformation is not
   always useful if there are only two divisions by the same divisor.
   This is probably because modern processors can pipeline the
   divisions; on older, in-order processors it should still be
   effective to optimize two divisions by the same number.  We make
   this a param, and it shall be called N in the remainder of this
   comment.

   Second, if trapping math is active, we have less freedom on where
   to insert divisions: we can only do so in basic blocks that already
   contain one.  (If divisions don't trap, instead, we can insert
   divisions elsewhere, which will be in blocks that are common dominators
   of those that have the division).

   We really don't want to compute the reciprocal unless a division will
   be found.  To do this, we won't insert the division in a basic block
   that has fewer than N divisions *post-dominating* it.

   The algorithm constructs a subset of the dominator tree, holding the
   blocks containing the divisions and the common dominators to them,
   and walks it twice.  The first walk is in post-order, and it annotates
   each block with the number of divisions that post-dominate it: this
   gives information on where divisions can be inserted profitably.
   The second walk is in pre-order, and it inserts divisions as explained
   above, and replaces divisions by multiplications.

   In the best case, the cost of the pass is O(n_statements).  In the
   worst case, the cost is due to creating the dominator tree subset,
   with a cost of O(n_basic_blocks ^ 2); however this can only happen
   for n_statements / n_basic_blocks statements.  So, the amortized cost
   of creating the dominator tree subset is O(n_basic_blocks) and the
   worst-case cost of the pass is O(n_statements * n_basic_blocks).

   More practically, the cost will be small because there are few
   divisions, and they tend to be in the same basic block, so insert_bb
   is called very few times.

   If we did this using domwalk.c, an efficient implementation would have
   to work on all the variables in a single pass, because we could not
   work on just a subset of the dominator tree, as we do now, and the
   cost would also be something like O(n_statements * n_basic_blocks).
   The data structures would be more complex in order to work on all the
   variables in a single pass.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "predict.h"
#include "alloc-pool.h"
#include "tree-pass.h"
#include "ssa.h"
#include "optabs-tree.h"
#include "gimple-pretty-print.h"
#include "alias.h"
#include "fold-const.h"
#include "gimple-fold.h"
#include "gimple-iterator.h"
#include "gimplify.h"
#include "gimplify-me.h"
#include "stor-layout.h"
#include "tree-cfg.h"
#include "tree-dfa.h"
#include "tree-ssa.h"
#include "builtins.h"
#include "params.h"
#include "internal-fn.h"
#include "case-cfn-macros.h"
#include "optabs-libfuncs.h"
#include "tree-eh.h"
#include "targhooks.h"
#include "domwalk.h"

/* This structure represents one basic block that either computes a
   division, or is a common dominator for basic blocks that compute a
   division.  */
struct occurrence {
  /* The basic block represented by this structure.  */
  basic_block bb;

  /* If non-NULL, the SSA_NAME holding the definition for a reciprocal
     inserted in BB.  */
  tree recip_def;

  /* If non-NULL, the SSA_NAME holding the definition for a squared
     reciprocal inserted in BB.  */
  tree square_recip_def;

  /* If non-NULL, the GIMPLE_ASSIGN for a reciprocal computation that
     was inserted in BB.  */
  gimple *recip_def_stmt;

  /* Pointer to a list of "struct occurrence"s for blocks dominated
     by BB.  */
  struct occurrence *children;

  /* Pointer to the next "struct occurrence"s in the list of blocks
     sharing a common dominator.  */
  struct occurrence *next;

  /* The number of divisions that are in BB before compute_merit.  The
     number of divisions that are in BB or post-dominate it after
     compute_merit.  */
  int num_divisions;

  /* True if the basic block has a division, false if it is a common
     dominator for basic blocks that do.  If it is false and trapping
     math is active, BB is not a candidate for inserting a reciprocal.  */
  bool bb_has_division;
};

static struct
{
  /* Number of 1.0/X ops inserted.  */
  int rdivs_inserted;

  /* Number of 1.0/FUNC ops inserted.  */
  int rfuncs_inserted;
} reciprocal_stats;

static struct
{
  /* Number of cexpi calls inserted.  */
  int inserted;
} sincos_stats;

static struct
{
  /* Number of widening multiplication ops inserted.  */
  int widen_mults_inserted;

  /* Number of integer multiply-and-accumulate ops inserted.  */
  int maccs_inserted;

  /* Number of fp fused multiply-add ops inserted.  */
  int fmas_inserted;

  /* Number of divmod calls inserted.  */
  int divmod_calls_inserted;
} widen_mul_stats;

/* The instance of "struct occurrence" representing the highest
   interesting block in the dominator tree.  */
static struct occurrence *occ_head;

/* Allocation pool for getting instances of "struct occurrence".  */
static object_allocator<occurrence> *occ_pool;

/* Allocate and return a new struct occurrence for basic block BB, and
   whose children list is headed by CHILDREN.  */

static struct occurrence *
occ_new (basic_block bb, struct occurrence *children)
{
  struct occurrence *occ;

  bb->aux = occ = occ_pool->allocate ();
  memset (occ, 0, sizeof (struct occurrence));

  occ->bb = bb;
  occ->children = children;
  return occ;
}

/* Insert NEW_OCC into our subset of the dominator tree.  P_HEAD points to a
   list of "struct occurrence"s, one per basic block, having IDOM as
   their common dominator.

   We try to insert NEW_OCC as deep as possible in the tree, and we also
   insert any other block that is a common dominator for BB and one
   block already in the tree.  */

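/* For illustration (a sketch, not part of the original sources): if the
   tree already holds a block B1 and we insert a block B2 whose nearest
   common dominator with B1 is a third block D (distinct from IDOM), the
   loop below creates an occurrence for D on the fly and re-files both
   B1 and B2 as its children:

	IDOM		IDOM
	  |		  |
	  B1	 ==>	  D
			 / \
			B1  B2

   so the subset stays a tree mirroring the dominance relation.  */
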
static void
insert_bb (struct occurrence *new_occ, basic_block idom,
	   struct occurrence **p_head)
{
  struct occurrence *occ, **p_occ;

  for (p_occ = p_head; (occ = *p_occ) != NULL; )
    {
      basic_block bb = new_occ->bb, occ_bb = occ->bb;
      basic_block dom = nearest_common_dominator (CDI_DOMINATORS, occ_bb, bb);
      if (dom == bb)
	{
	  /* BB dominates OCC_BB.  OCC becomes NEW_OCC's child: remove OCC
	     from its list.  */
	  *p_occ = occ->next;
	  occ->next = new_occ->children;
	  new_occ->children = occ;

	  /* Try the next block (it may as well be dominated by BB).  */
	}

      else if (dom == occ_bb)
	{
	  /* OCC_BB dominates BB.  Tail recurse to look deeper.  */
	  insert_bb (new_occ, dom, &occ->children);
	  return;
	}

      else if (dom != idom)
	{
	  gcc_assert (!dom->aux);

	  /* There is a dominator between IDOM and BB, add it and make
	     two children out of NEW_OCC and OCC.  First, remove OCC from
	     its list.  */
	  *p_occ = occ->next;
	  new_occ->next = occ;
	  occ->next = NULL;

	  /* None of the previous blocks has DOM as a dominator: if we tail
	     recursed, we would reexamine them uselessly.  Just switch BB with
	     DOM, and go on looking for blocks dominated by DOM.  */
	  new_occ = occ_new (dom, new_occ);
	}

      else
	{
	  /* Nothing special, go on with the next element.  */
	  p_occ = &occ->next;
	}
    }

  /* No place was found as a child of IDOM.  Make BB a sibling of IDOM.  */
  new_occ->next = *p_head;
  *p_head = new_occ;
}

/* Register that we found a division in BB.
   IMPORTANCE is a measure of how much weighting to give
   that division.  Use IMPORTANCE = 2 to register a single
   division.  If the division is going to be found multiple
   times use 1 (as it is with squares).  */

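/* For example (illustrative, not part of the original sources): a plain
   division "t = a / x" is registered once with IMPORTANCE = 2, while a
   division "t = a / (x * x)" is discovered twice while walking the uses
   of x and of x*x, so each discovery is registered with IMPORTANCE = 1.
   Either way a block's num_divisions grows by 2 per division, which is
   why insert_reciprocals later divides the count by two.  */
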
static inline void
register_division_in (basic_block bb, int importance)
{
  struct occurrence *occ;

  occ = (struct occurrence *) bb->aux;
  if (!occ)
    {
      occ = occ_new (bb, NULL);
      insert_bb (occ, ENTRY_BLOCK_PTR_FOR_FN (cfun), &occ_head);
    }

  occ->bb_has_division = true;
  occ->num_divisions += importance;
}

/* Compute the number of divisions that postdominate each block in OCC and
   its children.  */

static void
compute_merit (struct occurrence *occ)
{
  struct occurrence *occ_child;
  basic_block dom = occ->bb;

  for (occ_child = occ->children; occ_child; occ_child = occ_child->next)
    {
      basic_block bb;
      if (occ_child->children)
	compute_merit (occ_child);

      if (flag_exceptions)
	bb = single_noncomplex_succ (dom);
      else
	bb = dom;

      if (dominated_by_p (CDI_POST_DOMINATORS, bb, occ_child->bb))
	occ->num_divisions += occ_child->num_divisions;
    }
}

/* Return whether USE_STMT is a floating-point division by DEF.  */

static inline bool
is_division_by (gimple *use_stmt, tree def)
{
  return is_gimple_assign (use_stmt)
	 && gimple_assign_rhs_code (use_stmt) == RDIV_EXPR
	 && gimple_assign_rhs2 (use_stmt) == def
	 /* Do not recognize x / x as a valid division, as we would get
	    confused later by replacing all immediate uses of x in such
	    a stmt.  */
	 && gimple_assign_rhs1 (use_stmt) != def;
}

/* Return TRUE if USE_STMT is a multiplication of DEF by A.  */

static inline bool
is_mult_by (gimple *use_stmt, tree def, tree a)
{
  if (gimple_code (use_stmt) == GIMPLE_ASSIGN
      && gimple_assign_rhs_code (use_stmt) == MULT_EXPR)
    {
      tree op0 = gimple_assign_rhs1 (use_stmt);
      tree op1 = gimple_assign_rhs2 (use_stmt);

      return (op0 == def && op1 == a)
	      || (op0 == a && op1 == def);
    }
  return false;
}

/* Return whether USE_STMT is DEF * DEF.  */

static inline bool
is_square_of (gimple *use_stmt, tree def)
{
  return is_mult_by (use_stmt, def, def);
}

/* Return whether USE_STMT is a floating-point division by
   DEF * DEF.  */

static inline bool
is_division_by_square (gimple *use_stmt, tree def)
{
  if (gimple_code (use_stmt) == GIMPLE_ASSIGN
      && gimple_assign_rhs_code (use_stmt) == RDIV_EXPR
      && gimple_assign_rhs1 (use_stmt) != gimple_assign_rhs2 (use_stmt))
    {
      tree denominator = gimple_assign_rhs2 (use_stmt);
      if (TREE_CODE (denominator) == SSA_NAME)
	return is_square_of (SSA_NAME_DEF_STMT (denominator), def);
    }
  return false;
}

/* Walk the subset of the dominator tree rooted at OCC, setting the
   RECIP_DEF field to a definition of 1.0 / DEF that can be used in
   the given basic block.  The field may be left NULL, of course,
   if it is not possible or profitable to do the optimization.

   DEF_GSI is an iterator pointing at the statement defining DEF.
   If RECIP_DEF is set, a dominator already has a computation that can
   be used.

   If SHOULD_INSERT_SQUARE_RECIP is set, then this also inserts
   the square of the reciprocal immediately after the definition
   of the reciprocal.  */

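/* Sketch of the three insertion cases handled below (illustrative, not
   part of the original sources), for DEF = x once the threshold is met:

     Case 1: the block already divides by x (or by x*x); insert
	     "reciptmp = 1.0 / x" just before the first such division.
     Case 2: the block is the one defining x; insert right after DEF_GSI.
     Case 3: the block is only a common dominator of divisions by x;
	     insert at the top of the block, after any labels.  */
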
static void
insert_reciprocals (gimple_stmt_iterator *def_gsi, struct occurrence *occ,
		    tree def, tree recip_def, tree square_recip_def,
		    int should_insert_square_recip, int threshold)
{
  tree type;
  gassign *new_stmt, *new_square_stmt;
  gimple_stmt_iterator gsi;
  struct occurrence *occ_child;

  if (!recip_def
      && (occ->bb_has_division || !flag_trapping_math)
      /* Divide by two as all divisions are counted twice in
	 the costing loop.  */
      && occ->num_divisions / 2 >= threshold)
    {
      /* Make a variable with the replacement and substitute it.  */
      type = TREE_TYPE (def);
      recip_def = create_tmp_reg (type, "reciptmp");
      new_stmt = gimple_build_assign (recip_def, RDIV_EXPR,
				      build_one_cst (type), def);

      if (should_insert_square_recip)
	{
	  square_recip_def = create_tmp_reg (type, "powmult_reciptmp");
	  new_square_stmt = gimple_build_assign (square_recip_def, MULT_EXPR,
						 recip_def, recip_def);
	}

      if (occ->bb_has_division)
	{
	  /* Case 1: insert before an existing division.  */
	  gsi = gsi_after_labels (occ->bb);
	  while (!gsi_end_p (gsi)
		 && (!is_division_by (gsi_stmt (gsi), def))
		 && (!is_division_by_square (gsi_stmt (gsi), def)))
	    gsi_next (&gsi);

	  gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
	  if (should_insert_square_recip)
	    gsi_insert_before (&gsi, new_square_stmt, GSI_SAME_STMT);
	}
      else if (def_gsi && occ->bb == def_gsi->bb)
	{
	  /* Case 2: insert right after the definition.  Note that this will
	     never happen if the definition statement can throw, because in
	     that case the sole successor of the statement's basic block will
	     dominate all the uses as well.  */
	  gsi_insert_after (def_gsi, new_stmt, GSI_NEW_STMT);
	  if (should_insert_square_recip)
	    gsi_insert_after (def_gsi, new_square_stmt, GSI_NEW_STMT);
	}
      else
	{
	  /* Case 3: insert in a basic block not containing defs/uses.  */
	  gsi = gsi_after_labels (occ->bb);
	  gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
	  if (should_insert_square_recip)
	    gsi_insert_before (&gsi, new_square_stmt, GSI_SAME_STMT);
	}

      reciprocal_stats.rdivs_inserted++;

      occ->recip_def_stmt = new_stmt;
    }

  occ->recip_def = recip_def;
  occ->square_recip_def = square_recip_def;
  for (occ_child = occ->children; occ_child; occ_child = occ_child->next)
    insert_reciprocals (def_gsi, occ_child, def, recip_def,
			square_recip_def, should_insert_square_recip,
			threshold);
}

/* Replace occurrences of expr / (x * x) with expr * ((1 / x) * (1 / x)).
   Take as argument the use for (x * x).  */

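/* For example (illustrative, not part of the original sources), given
   the statements inserted earlier by insert_reciprocals

     reciptmp = 1.0 / x;
     powmult_reciptmp = reciptmp * reciptmp;

   a division "t = a / d" where d = x * x is rewritten in place to
   "t = a * powmult_reciptmp" by switching the rhs code to MULT_EXPR
   and redirecting the use of d.  */
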
static inline void
replace_reciprocal_squares (use_operand_p use_p)
{
  gimple *use_stmt = USE_STMT (use_p);
  basic_block bb = gimple_bb (use_stmt);
  struct occurrence *occ = (struct occurrence *) bb->aux;

  if (optimize_bb_for_speed_p (bb) && occ->square_recip_def
      && occ->recip_def)
    {
      gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
      gimple_assign_set_rhs_code (use_stmt, MULT_EXPR);
      gimple_assign_set_rhs2 (use_stmt, occ->square_recip_def);
      SET_USE (use_p, occ->square_recip_def);
      fold_stmt_inplace (&gsi);
      update_stmt (use_stmt);
    }
}

/* Replace the division at USE_P with a multiplication by the reciprocal, if
   possible.  */

static inline void
replace_reciprocal (use_operand_p use_p)
{
  gimple *use_stmt = USE_STMT (use_p);
  basic_block bb = gimple_bb (use_stmt);
  struct occurrence *occ = (struct occurrence *) bb->aux;

  if (optimize_bb_for_speed_p (bb)
      && occ->recip_def && use_stmt != occ->recip_def_stmt)
    {
      gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
      gimple_assign_set_rhs_code (use_stmt, MULT_EXPR);
      SET_USE (use_p, occ->recip_def);
      fold_stmt_inplace (&gsi);
      update_stmt (use_stmt);
    }
}

/* Free OCC and return one more "struct occurrence" to be freed.  */

static struct occurrence *
free_bb (struct occurrence *occ)
{
  struct occurrence *child, *next;

  /* First get the two pointers hanging off OCC.  */
  next = occ->next;
  child = occ->children;
  occ->bb->aux = NULL;
  occ_pool->remove (occ);

  /* Now ensure that we don't recurse unless it is necessary.  */
  if (!child)
    return next;
  else
    {
      while (next)
	next = free_bb (next);

      return child;
    }
}

/* Transform sequences like
     t = sqrt (a)
     x = 1.0 / t;
     r1 = x * x;
     r2 = a * x;
   into:
     t = sqrt (a)
     r1 = 1.0 / a;
     r2 = t;
     x = r1 * r2;
   depending on the uses of x, r1, r2.  This removes one multiplication and
   allows the sqrt and division operations to execute in parallel.
   DEF_GSI is the gsi of the initial division by sqrt that defines
   DEF (x in the example above).  */

static void
optimize_recip_sqrt (gimple_stmt_iterator *def_gsi, tree def)
{
  gimple *use_stmt;
  imm_use_iterator use_iter;
  gimple *stmt = gsi_stmt (*def_gsi);
  tree x = def;
  tree orig_sqrt_ssa_name = gimple_assign_rhs2 (stmt);
  tree div_rhs1 = gimple_assign_rhs1 (stmt);

  if (TREE_CODE (orig_sqrt_ssa_name) != SSA_NAME
      || TREE_CODE (div_rhs1) != REAL_CST
      || !real_equal (&TREE_REAL_CST (div_rhs1), &dconst1))
    return;

  gcall *sqrt_stmt
    = dyn_cast <gcall *> (SSA_NAME_DEF_STMT (orig_sqrt_ssa_name));

  if (!sqrt_stmt || !gimple_call_lhs (sqrt_stmt))
    return;

  switch (gimple_call_combined_fn (sqrt_stmt))
    {
    CASE_CFN_SQRT:
    CASE_CFN_SQRT_FN:
      break;

    default:
      return;
    }
  tree a = gimple_call_arg (sqrt_stmt, 0);

  /* We have 'a' and 'x'.  Now analyze the uses of 'x'.  */

  /* Statements that use x in x * x.  */
  auto_vec<gimple *> sqr_stmts;
  /* Statements that use x in a * x.  */
  auto_vec<gimple *> mult_stmts;
  bool has_other_use = false;
  bool mult_on_main_path = false;

  FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, x)
    {
      if (is_gimple_debug (use_stmt))
	continue;
      if (is_square_of (use_stmt, x))
	{
	  sqr_stmts.safe_push (use_stmt);
	  if (gimple_bb (use_stmt) == gimple_bb (stmt))
	    mult_on_main_path = true;
	}
      else if (is_mult_by (use_stmt, x, a))
	{
	  mult_stmts.safe_push (use_stmt);
	  if (gimple_bb (use_stmt) == gimple_bb (stmt))
	    mult_on_main_path = true;
	}
      else
	has_other_use = true;
    }

  /* In the x * x and a * x cases we just rewire stmt operands or
     remove multiplications.  In the has_other_use case we introduce
     a multiplication so make sure we don't introduce a multiplication
     on a path where there was none.  */
  if (has_other_use && !mult_on_main_path)
    return;

  if (sqr_stmts.is_empty () && mult_stmts.is_empty ())
    return;

  /* If x = 1.0 / sqrt (a) has uses other than those optimized here we want
     to be able to compose it from the sqr and mult cases.  */
  if (has_other_use && (sqr_stmts.is_empty () || mult_stmts.is_empty ()))
    return;

  if (dump_file)
    {
      fprintf (dump_file, "Optimizing reciprocal sqrt multiplications of\n");
      print_gimple_stmt (dump_file, sqrt_stmt, 0, TDF_NONE);
      print_gimple_stmt (dump_file, stmt, 0, TDF_NONE);
      fprintf (dump_file, "\n");
    }

  bool delete_div = !has_other_use;
  tree sqr_ssa_name = NULL_TREE;
  if (!sqr_stmts.is_empty ())
    {
      /* r1 = x * x.  Transform the original
	 x = 1.0 / t
	 into
	 tmp1 = 1.0 / a
	 r1 = tmp1.  */

      sqr_ssa_name
	= make_temp_ssa_name (TREE_TYPE (a), NULL, "recip_sqrt_sqr");

      if (dump_file)
	{
	  fprintf (dump_file, "Replacing original division\n");
	  print_gimple_stmt (dump_file, stmt, 0, TDF_NONE);
	  fprintf (dump_file, "with new division\n");
	}
      gimple_assign_set_lhs (stmt, sqr_ssa_name);
      gimple_assign_set_rhs2 (stmt, a);
      fold_stmt_inplace (def_gsi);
      update_stmt (stmt);

      if (dump_file)
	print_gimple_stmt (dump_file, stmt, 0, TDF_NONE);

      delete_div = false;
      gimple *sqr_stmt;
      unsigned int i;
      FOR_EACH_VEC_ELT (sqr_stmts, i, sqr_stmt)
	{
	  gimple_stmt_iterator gsi2 = gsi_for_stmt (sqr_stmt);
	  gimple_assign_set_rhs_from_tree (&gsi2, sqr_ssa_name);
	  update_stmt (sqr_stmt);
	}
    }
  if (!mult_stmts.is_empty ())
    {
      /* r2 = a * x.  Transform this into:
	 r2 = t (The original sqrt (a)).  */
      unsigned int i;
      gimple *mult_stmt = NULL;
      FOR_EACH_VEC_ELT (mult_stmts, i, mult_stmt)
	{
	  gimple_stmt_iterator gsi2 = gsi_for_stmt (mult_stmt);

	  if (dump_file)
	    {
	      fprintf (dump_file, "Replacing squaring multiplication\n");
	      print_gimple_stmt (dump_file, mult_stmt, 0, TDF_NONE);
	      fprintf (dump_file, "with assignment\n");
	    }
	  gimple_assign_set_rhs_from_tree (&gsi2, orig_sqrt_ssa_name);
	  fold_stmt_inplace (&gsi2);
	  update_stmt (mult_stmt);
	  if (dump_file)
	    print_gimple_stmt (dump_file, mult_stmt, 0, TDF_NONE);
	}
    }

  if (has_other_use)
    {
      /* Using the two temporaries tmp1, tmp2 from above
	 the original x is now:
	 x = tmp1 * tmp2.  */
      gcc_assert (orig_sqrt_ssa_name);
      gcc_assert (sqr_ssa_name);

      gimple *new_stmt
	= gimple_build_assign (x, MULT_EXPR,
			       orig_sqrt_ssa_name, sqr_ssa_name);
      gsi_insert_after (def_gsi, new_stmt, GSI_NEW_STMT);
      update_stmt (stmt);
    }
  else if (delete_div)
    {
      /* Remove the original division.  */
      gimple_stmt_iterator gsi2 = gsi_for_stmt (stmt);
      gsi_remove (&gsi2, true);
      release_defs (stmt);
    }
}

/* Look for floating-point divisions among DEF's uses, and try to
   replace them by multiplications with the reciprocal.  Add
   as many statements computing the reciprocal as needed.

   DEF must be a GIMPLE register of a floating-point type.  */

static void
execute_cse_reciprocals_1 (gimple_stmt_iterator *def_gsi, tree def)
{
  use_operand_p use_p, square_use_p;
  imm_use_iterator use_iter, square_use_iter;
  tree square_def;
  struct occurrence *occ;
  int count = 0;
  int threshold;
  int square_recip_count = 0;
  int sqrt_recip_count = 0;

  gcc_assert (FLOAT_TYPE_P (TREE_TYPE (def)) && TREE_CODE (def) == SSA_NAME);
  threshold = targetm.min_divisions_for_recip_mul (TYPE_MODE (TREE_TYPE (def)));

  /* If DEF is a square (x * x), count the number of divisions by x.
     If there are more divisions by x than by (DEF * DEF), prefer to optimize
     the reciprocal of x instead of DEF.  This improves cases like:
       def = x * x
       t0 = a / def
       t1 = b / def
       t2 = c / x
     Reciprocal optimization of x results in 1 division rather than 2 or 3.  */
  gimple *def_stmt = SSA_NAME_DEF_STMT (def);

  if (is_gimple_assign (def_stmt)
      && gimple_assign_rhs_code (def_stmt) == MULT_EXPR
      && TREE_CODE (gimple_assign_rhs1 (def_stmt)) == SSA_NAME
      && gimple_assign_rhs1 (def_stmt) == gimple_assign_rhs2 (def_stmt))
    {
      tree op0 = gimple_assign_rhs1 (def_stmt);

      FOR_EACH_IMM_USE_FAST (use_p, use_iter, op0)
	{
	  gimple *use_stmt = USE_STMT (use_p);
	  if (is_division_by (use_stmt, op0))
	    sqrt_recip_count++;
	}
    }

  FOR_EACH_IMM_USE_FAST (use_p, use_iter, def)
    {
      gimple *use_stmt = USE_STMT (use_p);
      if (is_division_by (use_stmt, def))
	{
	  register_division_in (gimple_bb (use_stmt), 2);
	  count++;
	}

      if (is_square_of (use_stmt, def))
	{
	  square_def = gimple_assign_lhs (use_stmt);
	  FOR_EACH_IMM_USE_FAST (square_use_p, square_use_iter, square_def)
	    {
	      gimple *square_use_stmt = USE_STMT (square_use_p);
	      if (is_division_by (square_use_stmt, square_def))
		{
		  /* This is executed twice for each division by a square.  */
		  register_division_in (gimple_bb (square_use_stmt), 1);
		  square_recip_count++;
		}
	    }
	}
    }

  /* Square reciprocals were counted twice above.  */
  square_recip_count /= 2;

  /* If it is more profitable to optimize 1 / x, don't optimize 1 / (x * x).  */
  if (sqrt_recip_count > square_recip_count)
    return;

  /* Do the expensive part only if we can hope to optimize something.  */
  if (count + square_recip_count >= threshold && count >= 1)
    {
      gimple *use_stmt;
      for (occ = occ_head; occ; occ = occ->next)
	{
	  compute_merit (occ);
	  insert_reciprocals (def_gsi, occ, def, NULL, NULL,
			      square_recip_count, threshold);
	}

      FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, def)
	{
	  if (is_division_by (use_stmt, def))
	    {
	      FOR_EACH_IMM_USE_ON_STMT (use_p, use_iter)
		replace_reciprocal (use_p);
	    }
	  else if (square_recip_count > 0 && is_square_of (use_stmt, def))
	    {
	      FOR_EACH_IMM_USE_ON_STMT (use_p, use_iter)
		{
		  /* Find all uses of the square that are divisions and
		     replace them by multiplications with the inverse.  */
		  imm_use_iterator square_iterator;
		  gimple *powmult_use_stmt = USE_STMT (use_p);
		  tree powmult_def_name = gimple_assign_lhs (powmult_use_stmt);

		  FOR_EACH_IMM_USE_STMT (powmult_use_stmt,
					 square_iterator, powmult_def_name)
		    FOR_EACH_IMM_USE_ON_STMT (square_use_p, square_iterator)
		      {
			gimple *powmult_use_stmt = USE_STMT (square_use_p);
			if (is_division_by (powmult_use_stmt, powmult_def_name))
			  replace_reciprocal_squares (square_use_p);
		      }
		}
	    }
	}
    }

  for (occ = occ_head; occ; )
    occ = free_bb (occ);

  occ_head = NULL;
}

/* Return an internal function that implements the reciprocal of CALL,
   or IFN_LAST if there is no such function that the target supports.  */

internal_fn
internal_fn_reciprocal (gcall *call)
{
  internal_fn ifn;

  switch (gimple_call_combined_fn (call))
    {
    CASE_CFN_SQRT:
    CASE_CFN_SQRT_FN:
      ifn = IFN_RSQRT;
      break;

    default:
      return IFN_LAST;
    }

  tree_pair types = direct_internal_fn_types (ifn, call);
  if (!direct_internal_fn_supported_p (ifn, types, OPTIMIZE_FOR_SPEED))
    return IFN_LAST;

  return ifn;
}

/* Go through all the floating-point SSA_NAMEs, and call
   execute_cse_reciprocals_1 on each of them.  */
namespace {

const pass_data pass_data_cse_reciprocals =
{
  GIMPLE_PASS, /* type */
  "recip", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_RECIP, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};

class pass_cse_reciprocals : public gimple_opt_pass
{
public:
  pass_cse_reciprocals (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_cse_reciprocals, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return optimize && flag_reciprocal_math; }
  virtual unsigned int execute (function *);

}; // class pass_cse_reciprocals

unsigned int
pass_cse_reciprocals::execute (function *fun)
{
  basic_block bb;
  tree arg;

  occ_pool = new object_allocator<occurrence> ("dominators for recip");

  memset (&reciprocal_stats, 0, sizeof (reciprocal_stats));
  calculate_dominance_info (CDI_DOMINATORS);
  calculate_dominance_info (CDI_POST_DOMINATORS);

  if (flag_checking)
    FOR_EACH_BB_FN (bb, fun)
      gcc_assert (!bb->aux);

  for (arg = DECL_ARGUMENTS (fun->decl); arg; arg = DECL_CHAIN (arg))
    if (FLOAT_TYPE_P (TREE_TYPE (arg))
	&& is_gimple_reg (arg))
      {
	tree name = ssa_default_def (fun, arg);
	if (name)
	  execute_cse_reciprocals_1 (NULL, name);
      }

  FOR_EACH_BB_FN (bb, fun)
    {
      tree def;

      for (gphi_iterator gsi = gsi_start_phis (bb); !gsi_end_p (gsi);
	   gsi_next (&gsi))
	{
	  gphi *phi = gsi.phi ();
	  def = PHI_RESULT (phi);
	  if (! virtual_operand_p (def)
	      && FLOAT_TYPE_P (TREE_TYPE (def)))
	    execute_cse_reciprocals_1 (NULL, def);
	}

      for (gimple_stmt_iterator gsi = gsi_after_labels (bb); !gsi_end_p (gsi);
	   gsi_next (&gsi))
	{
	  gimple *stmt = gsi_stmt (gsi);

	  if (gimple_has_lhs (stmt)
	      && (def = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_DEF)) != NULL
	      && FLOAT_TYPE_P (TREE_TYPE (def))
	      && TREE_CODE (def) == SSA_NAME)
	    {
	      execute_cse_reciprocals_1 (&gsi, def);
	      stmt = gsi_stmt (gsi);
	      if (flag_unsafe_math_optimizations
		  && is_gimple_assign (stmt)
		  && !stmt_can_throw_internal (stmt)
		  && gimple_assign_rhs_code (stmt) == RDIV_EXPR)
		optimize_recip_sqrt (&gsi, def);
	    }
	}

      if (optimize_bb_for_size_p (bb))
	continue;

      /* Scan for a/func(b) and convert it to reciprocal a*rfunc(b).  */
      for (gimple_stmt_iterator gsi = gsi_after_labels (bb); !gsi_end_p (gsi);
	   gsi_next (&gsi))
	{
	  gimple *stmt = gsi_stmt (gsi);

	  if (is_gimple_assign (stmt)
	      && gimple_assign_rhs_code (stmt) == RDIV_EXPR)
	    {
	      tree arg1 = gimple_assign_rhs2 (stmt);
	      gimple *stmt1;

	      if (TREE_CODE (arg1) != SSA_NAME)
		continue;

	      stmt1 = SSA_NAME_DEF_STMT (arg1);

	      if (is_gimple_call (stmt1)
		  && gimple_call_lhs (stmt1))
		{
		  bool fail;
		  imm_use_iterator ui;
		  use_operand_p use_p;
		  tree fndecl = NULL_TREE;

		  gcall *call = as_a <gcall *> (stmt1);
		  internal_fn ifn = internal_fn_reciprocal (call);
		  if (ifn == IFN_LAST)
		    {
		      fndecl = gimple_call_fndecl (call);
		      if (!fndecl
			  || !fndecl_built_in_p (fndecl, BUILT_IN_MD))
			continue;
		      fndecl = targetm.builtin_reciprocal (fndecl);
		      if (!fndecl)
			continue;
		    }

		  /* Check that all uses of the SSA name are divisions,
		     otherwise replacing the defining statement will do
		     the wrong thing.  */
		  fail = false;
		  FOR_EACH_IMM_USE_FAST (use_p, ui, arg1)
		    {
		      gimple *stmt2 = USE_STMT (use_p);
		      if (is_gimple_debug (stmt2))
			continue;
		      if (!is_gimple_assign (stmt2)
			  || gimple_assign_rhs_code (stmt2) != RDIV_EXPR
			  || gimple_assign_rhs1 (stmt2) == arg1
			  || gimple_assign_rhs2 (stmt2) != arg1)
			{
			  fail = true;
			  break;
			}
		    }
		  if (fail)
		    continue;

		  gimple_replace_ssa_lhs (call, arg1);
		  if (gimple_call_internal_p (call) != (ifn != IFN_LAST))
		    {
		      auto_vec<tree, 4> args;
		      for (unsigned int i = 0;
			   i < gimple_call_num_args (call); i++)
			args.safe_push (gimple_call_arg (call, i));
		      gcall *stmt2;
		      if (ifn == IFN_LAST)
			stmt2 = gimple_build_call_vec (fndecl, args);
		      else
			stmt2 = gimple_build_call_internal_vec (ifn, args);
		      gimple_call_set_lhs (stmt2, arg1);
		      if (gimple_vdef (call))
			{
			  gimple_set_vdef (stmt2, gimple_vdef (call));
			  SSA_NAME_DEF_STMT (gimple_vdef (stmt2)) = stmt2;
			}
		      gimple_call_set_nothrow (stmt2,
					       gimple_call_nothrow_p (call));
		      gimple_set_vuse (stmt2, gimple_vuse (call));
		      gimple_stmt_iterator gsi2 = gsi_for_stmt (call);
		      gsi_replace (&gsi2, stmt2, true);
		    }
		  else
		    {
		      if (ifn == IFN_LAST)
			gimple_call_set_fndecl (call, fndecl);
		      else
			gimple_call_set_internal_fn (call, ifn);
		      update_stmt (call);
		    }
		  reciprocal_stats.rfuncs_inserted++;

		  FOR_EACH_IMM_USE_STMT (stmt, ui, arg1)
		    {
		      gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
		      gimple_assign_set_rhs_code (stmt, MULT_EXPR);
		      fold_stmt_inplace (&gsi);
		      update_stmt (stmt);
		    }
		}
	    }
	}
    }

  statistics_counter_event (fun, "reciprocal divs inserted",
			    reciprocal_stats.rdivs_inserted);
  statistics_counter_event (fun, "reciprocal functions inserted",
			    reciprocal_stats.rfuncs_inserted);

  free_dominance_info (CDI_DOMINATORS);
  free_dominance_info (CDI_POST_DOMINATORS);
  delete occ_pool;
  return 0;
}

} // anon namespace

gimple_opt_pass *
make_pass_cse_reciprocals (gcc::context *ctxt)
{
  return new pass_cse_reciprocals (ctxt);
}

/* Records an occurrence at statement USE_STMT in the vector of trees
   STMTS if it is dominated by *TOP_BB or dominates it or this basic block
   is not yet initialized.  Returns true if the occurrence was pushed on
   the vector.  Adjusts *TOP_BB to be the basic block dominating all
   statements in the vector.  */

static bool
maybe_record_sincos (vec<gimple *> *stmts,
		     basic_block *top_bb, gimple *use_stmt)
{
  basic_block use_bb = gimple_bb (use_stmt);
  if (*top_bb
      && (*top_bb == use_bb
	  || dominated_by_p (CDI_DOMINATORS, use_bb, *top_bb)))
    stmts->safe_push (use_stmt);
  else if (!*top_bb
	   || dominated_by_p (CDI_DOMINATORS, *top_bb, use_bb))
    {
      stmts->safe_push (use_stmt);
      *top_bb = use_bb;
    }
  else
    return false;

  return true;
}

/* Look for sin, cos and cexpi calls with the same argument NAME and
   create a single call to cexpi CSEing the result in this case.
   We first walk over all immediate uses of the argument collecting
   statements that we can CSE in a vector and in a second pass replace
   the statement rhs with a REALPART or IMAGPART expression on the
   result of the cexpi call we insert before the use statement that
   dominates all other candidates.  */

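/* For example (an illustrative sketch, not part of the original
   sources):

     s = sin (a);
     c = cos (a);

   becomes

     sincostmp = cexpi (a);
     s = IMAGPART_EXPR <sincostmp>;
     c = REALPART_EXPR <sincostmp>;

   so a single cexpi (or sincos) library call computes both results.  */
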
static bool
execute_cse_sincos_1 (tree name)
{
  gimple_stmt_iterator gsi;
  imm_use_iterator use_iter;
  tree fndecl, res, type;
  gimple *def_stmt, *use_stmt, *stmt;
  int seen_cos = 0, seen_sin = 0, seen_cexpi = 0;
  auto_vec<gimple *> stmts;
  basic_block top_bb = NULL;
  int i;
  bool cfg_changed = false;

  type = TREE_TYPE (name);
  FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, name)
    {
      if (gimple_code (use_stmt) != GIMPLE_CALL
	  || !gimple_call_lhs (use_stmt))
	continue;

      switch (gimple_call_combined_fn (use_stmt))
	{
	CASE_CFN_COS:
	  seen_cos |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
	  break;

	CASE_CFN_SIN:
	  seen_sin |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
	  break;

	CASE_CFN_CEXPI:
	  seen_cexpi |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
	  break;

	default:;
	}
    }

  if (seen_cos + seen_sin + seen_cexpi <= 1)
    return false;

  /* Simply insert cexpi at the beginning of top_bb but not earlier than
     the name def statement.  */
  fndecl = mathfn_built_in (type, BUILT_IN_CEXPI);
  if (!fndecl)
    return false;
  stmt = gimple_build_call (fndecl, 1, name);
  res = make_temp_ssa_name (TREE_TYPE (TREE_TYPE (fndecl)), stmt, "sincostmp");
  gimple_call_set_lhs (stmt, res);

  def_stmt = SSA_NAME_DEF_STMT (name);
  if (!SSA_NAME_IS_DEFAULT_DEF (name)
      && gimple_code (def_stmt) != GIMPLE_PHI
      && gimple_bb (def_stmt) == top_bb)
    {
      gsi = gsi_for_stmt (def_stmt);
      gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
    }
  else
    {
      gsi = gsi_after_labels (top_bb);
      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
    }
  sincos_stats.inserted++;

  /* And adjust the recorded old call sites.  */
  for (i = 0; stmts.iterate (i, &use_stmt); ++i)
    {
      tree rhs = NULL;

      switch (gimple_call_combined_fn (use_stmt))
	{
	CASE_CFN_COS:
	  rhs = fold_build1 (REALPART_EXPR, type, res);
	  break;

	CASE_CFN_SIN:
	  rhs = fold_build1 (IMAGPART_EXPR, type, res);
	  break;

	CASE_CFN_CEXPI:
	  rhs = res;
	  break;

	default:;
	  gcc_unreachable ();
	}

      /* Replace call with a copy.  */
      stmt = gimple_build_assign (gimple_call_lhs (use_stmt), rhs);

      gsi = gsi_for_stmt (use_stmt);
      gsi_replace (&gsi, stmt, true);
      if (gimple_purge_dead_eh_edges (gimple_bb (stmt)))
	cfg_changed = true;
    }

  return cfg_changed;
}

/* To evaluate powi(x,n), the floating point value x raised to the
   constant integer exponent n, we use a hybrid algorithm that
   combines the "window method" with look-up tables.  For an
   introduction to exponentiation algorithms and "addition chains",
   see section 4.6.3, "Evaluation of Powers" of Donald E. Knuth,
   "Seminumerical Algorithms", Vol. 2, "The Art of Computer Programming",
   3rd Edition, 1998, and Daniel M. Gordon, "A Survey of Fast Exponentiation
   Methods", Journal of Algorithms, Vol. 27, pp. 129-146, 1998.  */

/* Provide a default value for POWI_MAX_MULTS, the maximum number of
   multiplications to inline before calling the system library's pow
   function.  powi(x,n) requires at worst 2*bits(n)-2 multiplications,
   so this default never requires calling pow, powf or powl.  */

#ifndef POWI_MAX_MULTS
#define POWI_MAX_MULTS  (2 * HOST_BITS_PER_WIDE_INT - 2)
#endif

/* The size of the "optimal power tree" lookup table.  All
   exponents less than this value are simply looked up in the
   powi_table below.  This threshold is also used to size the
   cache of pseudo registers that hold intermediate results.  */
#define POWI_TABLE_SIZE 256

/* The size, in bits of the window, used in the "window method"
   exponentiation algorithm.  This is equivalent to a radix of
   (1<<POWI_WINDOW_SIZE) in the corresponding "m-ary method".  */
#define POWI_WINDOW_SIZE 3

/* The following table is an efficient representation of an
   "optimal power tree".  For each value, i, the corresponding
   value, j, in the table states that an optimal evaluation
   sequence for calculating pow(x,i) can be found by evaluating
   pow(x,j)*pow(x,i-j).  An optimal power tree for the first
   100 integers is given in Knuth's "Seminumerical algorithms".  */

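/* For example (illustrative, not part of the original sources):
   powi_table[7] == 4, so pow(x,7) is evaluated as pow(x,4)*pow(x,3);
   recursing, pow(x,4) == pow(x,2)*pow(x,2) and pow(x,3) == pow(x,2)*x,
   so with the shared pow(x,2) the whole chain takes four multiplies
   instead of the six needed by repeated multiplication.  */
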
static const unsigned char powi_table[POWI_TABLE_SIZE] =
  {
      0,   1,   1,   2,   2,   3,   3,   4,  /*   0 -   7 */
      4,   6,   5,   6,   6,  10,   7,   9,  /*   8 -  15 */
      8,  16,   9,  16,  10,  12,  11,  13,  /*  16 -  23 */
     12,  17,  13,  18,  14,  24,  15,  26,  /*  24 -  31 */
     16,  17,  17,  19,  18,  33,  19,  26,  /*  32 -  39 */
     20,  25,  21,  40,  22,  27,  23,  44,  /*  40 -  47 */
     24,  32,  25,  34,  26,  29,  27,  44,  /*  48 -  55 */
     28,  31,  29,  34,  30,  60,  31,  36,  /*  56 -  63 */
     32,  64,  33,  34,  34,  46,  35,  37,  /*  64 -  71 */
     36,  65,  37,  50,  38,  48,  39,  69,  /*  72 -  79 */
     40,  49,  41,  43,  42,  51,  43,  58,  /*  80 -  87 */
     44,  64,  45,  47,  46,  59,  47,  76,  /*  88 -  95 */
     48,  65,  49,  66,  50,  67,  51,  66,  /*  96 - 103 */
     52,  70,  53,  74,  54, 104,  55,  74,  /* 104 - 111 */
     56,  64,  57,  69,  58,  78,  59,  68,  /* 112 - 119 */
     60,  61,  61,  80,  62,  75,  63,  68,  /* 120 - 127 */
     64,  65,  65, 128,  66, 129,  67,  90,  /* 128 - 135 */
     68,  73,  69, 131,  70,  94,  71,  88,  /* 136 - 143 */
     72, 128,  73,  98,  74, 132,  75, 121,  /* 144 - 151 */
     76, 102,  77, 124,  78, 132,  79, 106,  /* 152 - 159 */
     80,  97,  81, 160,  82,  99,  83, 134,  /* 160 - 167 */
     84,  86,  85,  95,  86, 160,  87, 100,  /* 168 - 175 */
     88, 113,  89,  98,  90, 107,  91, 122,  /* 176 - 183 */
     92, 111,  93, 102,  94, 126,  95, 150,  /* 184 - 191 */
     96, 128,  97, 130,  98, 133,  99, 195,  /* 192 - 199 */
    100, 128, 101, 123, 102, 164, 103, 138,  /* 200 - 207 */
    104, 145, 105, 146, 106, 109, 107, 149,  /* 208 - 215 */
    108, 200, 109, 146, 110, 170, 111, 157,  /* 216 - 223 */
    112, 128, 113, 130, 114, 182, 115, 132,  /* 224 - 231 */
    116, 200, 117, 132, 118, 158, 119, 206,  /* 232 - 239 */
    120, 240, 121, 162, 122, 147, 123, 152,  /* 240 - 247 */
    124, 166, 125, 214, 126, 138, 127, 153,  /* 248 - 255 */
  };

/* Return the number of multiplications required to calculate
   powi(x,n) where n is less than POWI_TABLE_SIZE.  This is a
   subroutine of powi_cost.  CACHE is an array indicating
   which exponents have already been calculated.  */

static int
powi_lookup_cost (unsigned HOST_WIDE_INT n, bool *cache)
{
  /* If we've already calculated this exponent, then this evaluation
     doesn't require any additional multiplications.  */
  if (cache[n])
    return 0;

  cache[n] = true;
  return powi_lookup_cost (n - powi_table[n], cache)
	 + powi_lookup_cost (powi_table[n], cache) + 1;
}

/* Return the number of multiplications required to calculate
   powi(x,n) for an arbitrary x, given the exponent N.  This
   function needs to be kept in sync with powi_as_mults below.  */

static int
powi_cost (HOST_WIDE_INT n)
{
  bool cache[POWI_TABLE_SIZE];
  unsigned HOST_WIDE_INT digit;
  unsigned HOST_WIDE_INT val;
  int result;

  if (n == 0)
    return 0;

  /* Ignore the reciprocal when calculating the cost.  */
  val = (n < 0) ? -n : n;

  /* Initialize the exponent cache.  */
  memset (cache, 0, POWI_TABLE_SIZE * sizeof (bool));
  cache[1] = true;

  result = 0;

  while (val >= POWI_TABLE_SIZE)
    {
      if (val & 1)
	{
	  digit = val & ((1 << POWI_WINDOW_SIZE) - 1);
	  result += powi_lookup_cost (digit, cache)
		    + POWI_WINDOW_SIZE + 1;
	  val >>= POWI_WINDOW_SIZE;
	}
      else
	{
	  val >>= 1;
	  result++;
	}
    }

  return result + powi_lookup_cost (val, cache);
}

/* Recursive subroutine of powi_as_mults.  This function takes the
   array, CACHE, of already calculated exponents and an exponent N and
   returns a tree that corresponds to CACHE[1]**N, with type TYPE.  */

static tree
powi_as_mults_1 (gimple_stmt_iterator *gsi, location_t loc, tree type,
		 HOST_WIDE_INT n, tree *cache)
{
  tree op0, op1, ssa_target;
  unsigned HOST_WIDE_INT digit;
  gassign *mult_stmt;

  if (n < POWI_TABLE_SIZE && cache[n])
    return cache[n];

  ssa_target = make_temp_ssa_name (type, NULL, "powmult");

  if (n < POWI_TABLE_SIZE)
    {
      cache[n] = ssa_target;
      op0 = powi_as_mults_1 (gsi, loc, type, n - powi_table[n], cache);
      op1 = powi_as_mults_1 (gsi, loc, type, powi_table[n], cache);
    }
  else if (n & 1)
    {
      digit = n & ((1 << POWI_WINDOW_SIZE) - 1);
      op0 = powi_as_mults_1 (gsi, loc, type, n - digit, cache);
      op1 = powi_as_mults_1 (gsi, loc, type, digit, cache);
    }
  else
    {
      op0 = powi_as_mults_1 (gsi, loc, type, n >> 1, cache);
      op1 = op0;
    }

  mult_stmt = gimple_build_assign (ssa_target, MULT_EXPR, op0, op1);
  gimple_set_location (mult_stmt, loc);
  gsi_insert_before (gsi, mult_stmt, GSI_SAME_STMT);

  return ssa_target;
}

/* Convert ARG0**N to a tree of multiplications of ARG0 with itself.
   This function needs to be kept in sync with powi_cost above.  */

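/* For instance (an illustrative sketch, not part of the original
   sources), for n = 5 this emits, using powi_table[5] == 3:

     powmult_1 = x * x;		      (x**2, cached)
     powmult_2 = powmult_1 * x;	      (x**3 == x**2 * x)
     powmult_3 = powmult_2 * powmult_1;  (x**5 == x**3 * x**2)

   and for a negative exponent the result is reciprocated once at the
   end, e.g. 1.0 / powmult_3 for n = -5.  */
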
static tree
powi_as_mults (gimple_stmt_iterator *gsi, location_t loc,
	       tree arg0, HOST_WIDE_INT n)
{
  tree cache[POWI_TABLE_SIZE], result, type = TREE_TYPE (arg0);
  gassign *div_stmt;
  tree target;

  if (n == 0)
    return build_real (type, dconst1);

  memset (cache, 0, sizeof (cache));
  cache[1] = arg0;

  result = powi_as_mults_1 (gsi, loc, type, (n < 0) ? -n : n, cache);
  if (n >= 0)
    return result;

  /* If the original exponent was negative, reciprocate the result.  */
  target = make_temp_ssa_name (type, NULL, "powmult");
  div_stmt = gimple_build_assign (target, RDIV_EXPR,
				  build_real (type, dconst1), result);
  gimple_set_location (div_stmt, loc);
  gsi_insert_before (gsi, div_stmt, GSI_SAME_STMT);

  return target;
}

/* ARG0 and N are the two arguments to a powi builtin in GSI with
   location info LOC.  If the arguments are appropriate, create an
   equivalent sequence of statements prior to GSI using an optimal
   number of multiplications, and return an expression holding the
   result.  */

static tree
gimple_expand_builtin_powi (gimple_stmt_iterator *gsi, location_t loc,
			    tree arg0, HOST_WIDE_INT n)
{
  /* Avoid largest negative number.  */
  if (n != -n
      && ((n >= -1 && n <= 2)
	  || (optimize_function_for_speed_p (cfun)
	      && powi_cost (n) <= POWI_MAX_MULTS)))
    return powi_as_mults (gsi, loc, arg0, n);

  return NULL_TREE;
}

/* Build a gimple call statement that calls FN with argument ARG.
   Set the lhs of the call statement to a fresh SSA name.  Insert the
   statement prior to GSI's current position, and return the fresh
   SSA name.  */

static tree
build_and_insert_call (gimple_stmt_iterator *gsi, location_t loc,
		       tree fn, tree arg)
{
  gcall *call_stmt;
  tree ssa_target;

  call_stmt = gimple_build_call (fn, 1, arg);
  ssa_target = make_temp_ssa_name (TREE_TYPE (arg), NULL, "powroot");
  gimple_set_lhs (call_stmt, ssa_target);
  gimple_set_location (call_stmt, loc);
  gsi_insert_before (gsi, call_stmt, GSI_SAME_STMT);

  return ssa_target;
}

/* Build a gimple binary operation with the given CODE and arguments
   ARG0, ARG1, assigning the result to a new SSA name built from NAME.
   Insert the statement prior to GSI's current position, and
   return the fresh SSA name.  */

static tree
build_and_insert_binop (gimple_stmt_iterator *gsi, location_t loc,
			const char *name, enum tree_code code,
			tree arg0, tree arg1)
{
  tree result = make_temp_ssa_name (TREE_TYPE (arg0), NULL, name);
  gassign *stmt = gimple_build_assign (result, code, arg0, arg1);
  gimple_set_location (stmt, loc);
  gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
  return result;
}

/* Build a gimple reference operation with the given CODE and argument
   ARG, assigning the result to a new SSA name of TYPE with NAME.
   Insert the statement prior to GSI's current position, and return
   the fresh SSA name.  */

static inline tree
build_and_insert_ref (gimple_stmt_iterator *gsi, location_t loc, tree type,
		      const char *name, enum tree_code code, tree arg0)
{
  tree result = make_temp_ssa_name (type, NULL, name);
  gimple *stmt = gimple_build_assign (result, build1 (code, type, arg0));
  gimple_set_location (stmt, loc);
  gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
  return result;
}

/* Build a gimple assignment to cast VAL to TYPE.  Insert the statement
   prior to GSI's current position, and return the fresh SSA name.  */

static tree
build_and_insert_cast (gimple_stmt_iterator *gsi, location_t loc,
		       tree type, tree val)
{
  tree result = make_ssa_name (type);
  gassign *stmt = gimple_build_assign (result, NOP_EXPR, val);
  gimple_set_location (stmt, loc);
  gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
  return result;
}

struct pow_synth_sqrt_info
{
  bool *factors;
  unsigned int deepest;
  unsigned int num_mults;
};

/* Return true iff the real value C can be represented as a
   sum of powers of 0.5 up to N.  That is:
   C == SUM<i from 1..N> (a[i]*(0.5**i)) where a[i] is either 0 or 1.
   Record in INFO the various parameters of the synthesis algorithm such
   as the factors a[i], the maximum 0.5 power and the number of
   multiplications that will be required.  */

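/* A worked example (illustrative, not part of the original sources):
   for C = 0.625 and N >= 3 the loop below subtracts 0.5 (factor set),
   skips 0.25 (subtracting it would go negative), and subtracts 0.125
   to reach exactly zero, so INFO records factors = {1, 0, 1} and
   deepest = 3, i.e. x**0.625 == sqrt (x) * sqrt (sqrt (sqrt (x))),
   matching the example in expand_pow_as_sqrts below.  */
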
bool
representable_as_half_series_p (REAL_VALUE_TYPE c, unsigned n,
				struct pow_synth_sqrt_info *info)
{
  REAL_VALUE_TYPE factor = dconsthalf;
  REAL_VALUE_TYPE remainder = c;

  info->deepest = 0;
  info->num_mults = 0;
  memset (info->factors, 0, n * sizeof (bool));

  for (unsigned i = 0; i < n; i++)
    {
      REAL_VALUE_TYPE res;

      /* If something inexact happened bail out now.  */
      if (real_arithmetic (&res, MINUS_EXPR, &remainder, &factor))
	return false;

      /* We have hit zero.  The number is representable as a sum
	 of powers of 0.5.  */
      if (real_equal (&res, &dconst0))
	{
	  info->factors[i] = true;
	  info->deepest = i + 1;
	  return true;
	}
      else if (!REAL_VALUE_NEGATIVE (res))
	{
	  remainder = res;
	  info->factors[i] = true;
	  info->num_mults++;
	}
      else
	info->factors[i] = false;

      real_arithmetic (&factor, MULT_EXPR, &factor, &dconsthalf);
    }
  return false;
}

/* Return the tree corresponding to FN being applied
   to ARG N times at GSI and LOC.
   Look up previous results from CACHE if need be.
   cache[0] should contain just plain ARG i.e. FN applied to ARG 0 times.  */

static tree
get_fn_chain (tree arg, unsigned int n, gimple_stmt_iterator *gsi,
	      tree fn, location_t loc, tree *cache)
{
  tree res = cache[n];
  if (!res)
    {
      tree prev = get_fn_chain (arg, n - 1, gsi, fn, loc, cache);
      res = build_and_insert_call (gsi, loc, fn, prev);
      cache[n] = res;
    }

  return res;
}

/* Print to STREAM the repeated application of function FNAME to ARG
   N times.  So, for FNAME = "foo", ARG = "x", N = 2 it would print:
   "foo (foo (x))".  */

static void
print_nested_fn (FILE* stream, const char *fname, const char* arg,
		 unsigned int n)
{
  if (n == 0)
    fprintf (stream, "%s", arg);
  else
    {
      fprintf (stream, "%s (", fname);
      print_nested_fn (stream, fname, arg, n - 1);
      fprintf (stream, ")");
    }
}

/* Print to STREAM the fractional sequence of sqrt chains
   applied to ARG, described by INFO.  Used for the dump file.  */

static void
dump_fractional_sqrt_sequence (FILE *stream, const char *arg,
			       struct pow_synth_sqrt_info *info)
{
  for (unsigned int i = 0; i < info->deepest; i++)
    {
      bool is_set = info->factors[i];
      if (is_set)
	{
	  print_nested_fn (stream, "sqrt", arg, i + 1);
	  if (i != info->deepest - 1)
	    fprintf (stream, " * ");
	}
    }
}

/* Print to STREAM a representation of raising ARG to an integer
   power N.  Used for the dump file.  */

static void
dump_integer_part (FILE *stream, const char* arg, HOST_WIDE_INT n)
{
  if (n > 1)
    fprintf (stream, "powi (%s, " HOST_WIDE_INT_PRINT_DEC ")", arg, n);
  else if (n == 1)
    fprintf (stream, "%s", arg);
}

/* Attempt to synthesize a POW[F] (ARG0, ARG1) call using chains of
   square roots.  Place at GSI and LOC.  Limit the maximum depth
   of the sqrt chains to MAX_DEPTH.  Return the tree holding the
   result of the expanded sequence or NULL_TREE if the expansion failed.

   This routine assumes that ARG1 is a real number with a fractional part
   (the integer exponent case will have been handled earlier in
   gimple_expand_builtin_pow).

   For ARG1 > 0.0:
   * For ARG1 composed of a whole part WHOLE_PART and a fractional part
     FRAC_PART i.e. WHOLE_PART == floor (ARG1) and
     FRAC_PART == ARG1 - WHOLE_PART:
     Produce POWI (ARG0, WHOLE_PART) * POW (ARG0, FRAC_PART) where
     POW (ARG0, FRAC_PART) is expanded as a product of square root chains
     if it can be expressed as such, that is if FRAC_PART satisfies:
     FRAC_PART == <SUM from i = 1 until MAX_DEPTH> (a[i] * (0.5**i))
     where integer a[i] is either 0 or 1.

     Example:
     POW (x, 3.625) == POWI (x, 3) * POW (x, 0.625)
       --> POWI (x, 3) * SQRT (x) * SQRT (SQRT (SQRT (x)))

   For ARG1 < 0.0 there are two approaches:
   * (A) Expand to 1.0 / POW (ARG0, -ARG1) where POW (ARG0, -ARG1)
	 is calculated as above.

     Example:
     POW (x, -5.625) == 1.0 / POW (x, 5.625)
       --> 1.0 / (POWI (x, 5) * SQRT (x) * SQRT (SQRT (SQRT (x))))

   * (B) : WHOLE_PART := - ceil (abs (ARG1))
	   FRAC_PART  := ARG1 - WHOLE_PART
	   and expand to POW (x, FRAC_PART) / POWI (x, WHOLE_PART).
     Example:
     POW (x, -5.875) == POW (x, 0.125) / POWI (X, 6)
       --> SQRT (SQRT (SQRT (x))) / (POWI (x, 6))

   For ARG1 < 0.0 we choose between (A) and (B) depending on
   how many multiplications we'd have to do.
   So, for the example in (B): POW (x, -5.875), if we were to
   follow algorithm (A) we would produce:
   1.0 / POWI (X, 5) * SQRT (X) * SQRT (SQRT (X)) * SQRT (SQRT (SQRT (X)))
   which contains more multiplications than approach (B).

   Hopefully, this approach will eliminate potentially expensive POW library
   calls when unsafe floating point math is enabled and allow the compiler to
   further optimise the multiplies, square roots and divides produced by this
   function.  */

static tree
expand_pow_as_sqrts (gimple_stmt_iterator *gsi, location_t loc,
		     tree arg0, tree arg1, HOST_WIDE_INT max_depth)
{
  tree type = TREE_TYPE (arg0);
  machine_mode mode = TYPE_MODE (type);
  tree sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT);
  bool one_over = true;

  if (!sqrtfn)
    return NULL_TREE;

  if (TREE_CODE (arg1) != REAL_CST)
    return NULL_TREE;

  REAL_VALUE_TYPE exp_init = TREE_REAL_CST (arg1);

  gcc_assert (max_depth > 0);
  tree *cache = XALLOCAVEC (tree, max_depth + 1);

  struct pow_synth_sqrt_info synth_info;
  synth_info.factors = XALLOCAVEC (bool, max_depth + 1);
  synth_info.deepest = 0;
  synth_info.num_mults = 0;

  bool neg_exp = REAL_VALUE_NEGATIVE (exp_init);
  REAL_VALUE_TYPE exp = real_value_abs (&exp_init);

  /* The whole and fractional parts of exp.  */
  REAL_VALUE_TYPE whole_part;
  REAL_VALUE_TYPE frac_part;

  real_floor (&whole_part, mode, &exp);
  real_arithmetic (&frac_part, MINUS_EXPR, &exp, &whole_part);

  REAL_VALUE_TYPE ceil_whole = dconst0;
  REAL_VALUE_TYPE ceil_fract = dconst0;

  if (neg_exp)
    {
      real_ceil (&ceil_whole, mode, &exp);
      real_arithmetic (&ceil_fract, MINUS_EXPR, &ceil_whole, &exp);
    }

  if (!representable_as_half_series_p (frac_part, max_depth, &synth_info))
    return NULL_TREE;

  /* Check whether it's more profitable to not use 1.0 / ...  */
  if (neg_exp)
    {
      struct pow_synth_sqrt_info alt_synth_info;
      alt_synth_info.factors = XALLOCAVEC (bool, max_depth + 1);
      alt_synth_info.deepest = 0;
      alt_synth_info.num_mults = 0;

      if (representable_as_half_series_p (ceil_fract, max_depth,
					  &alt_synth_info)
	  && alt_synth_info.deepest <= synth_info.deepest
	  && alt_synth_info.num_mults < synth_info.num_mults)
	{
	  whole_part = ceil_whole;
	  frac_part = ceil_fract;
	  synth_info.deepest = alt_synth_info.deepest;
	  synth_info.num_mults = alt_synth_info.num_mults;
	  memcpy (synth_info.factors, alt_synth_info.factors,
		  (max_depth + 1) * sizeof (bool));
	  one_over = false;
	}
    }

  HOST_WIDE_INT n = real_to_integer (&whole_part);
  REAL_VALUE_TYPE cint;
  real_from_integer (&cint, VOIDmode, n, SIGNED);

  if (!real_identical (&whole_part, &cint))
    return NULL_TREE;

  if (powi_cost (n) + synth_info.num_mults > POWI_MAX_MULTS)
    return NULL_TREE;

  memset (cache, 0, (max_depth + 1) * sizeof (tree));

  tree integer_res = n == 0 ? build_real (type, dconst1) : arg0;

  /* Calculate the integer part of the exponent.  */
  if (n > 1)
    {
      integer_res = gimple_expand_builtin_powi (gsi, loc, arg0, n);
      if (!integer_res)
	return NULL_TREE;
    }

  if (dump_file)
    {
      char string[64];

      real_to_decimal (string, &exp_init, sizeof (string), 0, 1);
      fprintf (dump_file, "synthesizing pow (x, %s) as:\n", string);

      if (neg_exp)
	{
	  if (one_over)
	    {
	      fprintf (dump_file, "1.0 / (");
	      dump_integer_part (dump_file, "x", n);
	      if (n > 0)
		fprintf (dump_file, " * ");
	      dump_fractional_sqrt_sequence (dump_file, "x", &synth_info);
	      fprintf (dump_file, ")");
	    }
	  else
	    {
	      dump_fractional_sqrt_sequence (dump_file, "x", &synth_info);
	      fprintf (dump_file, " / (");
	      dump_integer_part (dump_file, "x", n);
	      fprintf (dump_file, ")");
	    }
	}
      else
	{
	  dump_fractional_sqrt_sequence (dump_file, "x", &synth_info);
	  if (n > 0)
	    fprintf (dump_file, " * ");
	  dump_integer_part (dump_file, "x", n);
	}

      fprintf (dump_file, "\ndeepest sqrt chain: %d\n", synth_info.deepest);
    }

  tree fract_res = NULL_TREE;
  cache[0] = arg0;

  /* Calculate the fractional part of the exponent.  */
  for (unsigned i = 0; i < synth_info.deepest; i++)
    {
      if (synth_info.factors[i])
	{
	  tree sqrt_chain = get_fn_chain (arg0, i + 1, gsi, sqrtfn, loc, cache);

	  if (!fract_res)
	    fract_res = sqrt_chain;
	  else
	    fract_res = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
						fract_res, sqrt_chain);
	}
    }

  tree res = NULL_TREE;

  if (neg_exp)
    {
      if (one_over)
	{
	  if (n > 0)
	    res = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
					  fract_res, integer_res);
	  else
	    res = fract_res;

	  res = build_and_insert_binop (gsi, loc, "powrootrecip", RDIV_EXPR,
					build_real (type, dconst1), res);
	}
      else
	{
	  res = build_and_insert_binop (gsi, loc, "powroot", RDIV_EXPR,
					fract_res, integer_res);
	}
    }
  else
    res = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
				  fract_res, integer_res);
  return res;
}

/* ARG0 and ARG1 are the two arguments to a pow builtin call in GSI
   with location info LOC.  If possible, create an equivalent and
   less expensive sequence of statements prior to GSI, and return an
   expression holding the result.  */

1873 static tree
1874 gimple_expand_builtin_pow (gimple_stmt_iterator *gsi, location_t loc,
1875 tree arg0, tree arg1)
1877 REAL_VALUE_TYPE c, cint, dconst1_3, dconst1_4, dconst1_6;
1878 REAL_VALUE_TYPE c2, dconst3;
1879 HOST_WIDE_INT n;
1880 tree type, sqrtfn, cbrtfn, sqrt_arg0, result, cbrt_x, powi_cbrt_x;
1881 machine_mode mode;
1882 bool speed_p = optimize_bb_for_speed_p (gsi_bb (*gsi));
1883 bool hw_sqrt_exists, c_is_int, c2_is_int;
1885 dconst1_4 = dconst1;
1886 SET_REAL_EXP (&dconst1_4, REAL_EXP (&dconst1_4) - 2);
1888 /* If the exponent isn't a constant, there's nothing of interest
1889 to be done. */
1890 if (TREE_CODE (arg1) != REAL_CST)
1891 return NULL_TREE;
1893 /* Don't perform the operation if flag_signaling_nans is on
1894 and the operand is a signaling NaN. */
1895 if (HONOR_SNANS (TYPE_MODE (TREE_TYPE (arg1)))
1896 && ((TREE_CODE (arg0) == REAL_CST
1897 && REAL_VALUE_ISSIGNALING_NAN (TREE_REAL_CST (arg0)))
1898 || REAL_VALUE_ISSIGNALING_NAN (TREE_REAL_CST (arg1))))
1899 return NULL_TREE;
1901 /* If the exponent is equivalent to an integer, expand to an optimal
1902 multiplication sequence when profitable. */
1903 c = TREE_REAL_CST (arg1);
1904 n = real_to_integer (&c);
1905 real_from_integer (&cint, VOIDmode, n, SIGNED);
1906 c_is_int = real_identical (&c, &cint);
1908 if (c_is_int
1909 && ((n >= -1 && n <= 2)
1910 || (flag_unsafe_math_optimizations
1911 && speed_p
1912 && powi_cost (n) <= POWI_MAX_MULTS)))
1913 return gimple_expand_builtin_powi (gsi, loc, arg0, n);
1915 /* Attempt various optimizations using sqrt and cbrt. */
1916 type = TREE_TYPE (arg0);
1917 mode = TYPE_MODE (type);
1918 sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT);
1920 /* Optimize pow(x,0.5) = sqrt(x). This replacement is always safe
1921 unless signed zeros must be maintained. pow(-0,0.5) = +0, while
1922 sqrt(-0) = -0. */
1923 if (sqrtfn
1924 && real_equal (&c, &dconsthalf)
1925 && !HONOR_SIGNED_ZEROS (mode))
1926 return build_and_insert_call (gsi, loc, sqrtfn, arg0);
1928 hw_sqrt_exists = optab_handler (sqrt_optab, mode) != CODE_FOR_nothing;
1930 /* Optimize pow(x,1./3.) = cbrt(x). This requires unsafe math
1931 optimizations since 1./3. is not exactly representable. If x
1932 is negative and finite, the correct value of pow(x,1./3.) is
1933 a NaN with the "invalid" exception raised, because the value
1934 of 1./3. actually has an even denominator. The correct value
1935 of cbrt(x) is a negative real value. */
1936 cbrtfn = mathfn_built_in (type, BUILT_IN_CBRT);
1937 dconst1_3 = real_value_truncate (mode, dconst_third ());
1939 if (flag_unsafe_math_optimizations
1940 && cbrtfn
1941 && (!HONOR_NANS (mode) || tree_expr_nonnegative_p (arg0))
1942 && real_equal (&c, &dconst1_3))
1943 return build_and_insert_call (gsi, loc, cbrtfn, arg0);
1945 /* Optimize pow(x,1./6.) = cbrt(sqrt(x)). Don't do this optimization
1946 if we don't have a hardware sqrt insn. */
1947 dconst1_6 = dconst1_3;
1948 SET_REAL_EXP (&dconst1_6, REAL_EXP (&dconst1_6) - 1);
1950 if (flag_unsafe_math_optimizations
1951 && sqrtfn
1952 && cbrtfn
1953 && (!HONOR_NANS (mode) || tree_expr_nonnegative_p (arg0))
1954 && speed_p
1955 && hw_sqrt_exists
1956 && real_equal (&c, &dconst1_6))
1958 /* sqrt(x) */
1959 sqrt_arg0 = build_and_insert_call (gsi, loc, sqrtfn, arg0);
1961 /* cbrt(sqrt(x)) */
1962 return build_and_insert_call (gsi, loc, cbrtfn, sqrt_arg0);
1966 /* Attempt to expand the POW as a product of square root chains.
1967    Expand the 0.25 case even when optimising for size.  */
1968 if (flag_unsafe_math_optimizations
1969 && sqrtfn
1970 && hw_sqrt_exists
1971 && (speed_p || real_equal (&c, &dconst1_4))
1972 && !HONOR_SIGNED_ZEROS (mode))
1974 unsigned int max_depth = speed_p
1975 ? PARAM_VALUE (PARAM_MAX_POW_SQRT_DEPTH)
1976 : 2;
1978 tree expand_with_sqrts
1979 = expand_pow_as_sqrts (gsi, loc, arg0, arg1, max_depth);
1981 if (expand_with_sqrts)
1982 return expand_with_sqrts;
1985 real_arithmetic (&c2, MULT_EXPR, &c, &dconst2);
1986 n = real_to_integer (&c2);
1987 real_from_integer (&cint, VOIDmode, n, SIGNED);
1988 c2_is_int = real_identical (&c2, &cint);
1990 /* Optimize pow(x,c), where 3c = n for some nonzero integer n, into
1992 powi(x, n/3) * powi(cbrt(x), n%3), n > 0;
1993 1.0 / (powi(x, abs(n)/3) * powi(cbrt(x), abs(n)%3)), n < 0.
1995 Do not calculate the first factor when n/3 = 0. As cbrt(x) is
1996 different from pow(x, 1./3.) due to rounding and behavior with
1997 negative x, we need to constrain this transformation to unsafe
1998 math and positive x or finite math. */
1999 real_from_integer (&dconst3, VOIDmode, 3, SIGNED);
2000 real_arithmetic (&c2, MULT_EXPR, &c, &dconst3);
2001 real_round (&c2, mode, &c2);
2002 n = real_to_integer (&c2);
2003 real_from_integer (&cint, VOIDmode, n, SIGNED);
2004 real_arithmetic (&c2, RDIV_EXPR, &cint, &dconst3);
2005 real_convert (&c2, mode, &c2);
2007 if (flag_unsafe_math_optimizations
2008 && cbrtfn
2009 && (!HONOR_NANS (mode) || tree_expr_nonnegative_p (arg0))
2010 && real_identical (&c2, &c)
2011 && !c2_is_int
2012 && optimize_function_for_speed_p (cfun)
2013 && powi_cost (n / 3) <= POWI_MAX_MULTS)
2015 tree powi_x_ndiv3 = NULL_TREE;
2017 /* Attempt to fold powi(arg0, abs(n/3)) into multiplies. If not
2018 possible or profitable, give up. Skip the degenerate case when
2019 abs(n) < 3, where the result is always 1. */
2020 if (absu_hwi (n) >= 3)
2022 powi_x_ndiv3 = gimple_expand_builtin_powi (gsi, loc, arg0,
2023 abs_hwi (n / 3));
2024 if (!powi_x_ndiv3)
2025 return NULL_TREE;
2028 /* Calculate powi(cbrt(x), n%3). Don't use gimple_expand_builtin_powi
2029 as that creates an unnecessary variable. Instead, just produce
2030 either cbrt(x) or cbrt(x) * cbrt(x). */
2031 cbrt_x = build_and_insert_call (gsi, loc, cbrtfn, arg0);
2033 if (absu_hwi (n) % 3 == 1)
2034 powi_cbrt_x = cbrt_x;
2035 else
2036 powi_cbrt_x = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
2037 cbrt_x, cbrt_x);
2039 /* Multiply the two subexpressions, unless powi(x,abs(n)/3) = 1. */
2040 if (absu_hwi (n) < 3)
2041 result = powi_cbrt_x;
2042 else
2043 result = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
2044 powi_x_ndiv3, powi_cbrt_x);
2046 /* If n is negative, reciprocate the result. */
2047 if (n < 0)
2048 result = build_and_insert_binop (gsi, loc, "powroot", RDIV_EXPR,
2049 build_real (type, dconst1), result);
2051 return result;
2054 /* No optimizations succeeded. */
2055 return NULL_TREE;
2058 /* ARG is the argument to a cabs builtin call in GSI with location info
2059 LOC. Create a sequence of statements prior to GSI that calculates
2060 sqrt(R*R + I*I), where R and I are the real and imaginary components
2061 of ARG, respectively. Return an expression holding the result. */
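/* Schematically, for ARG = r + i*I the emitted sequence computes

     res = sqrt (r*r + i*i);

   This is only a valid replacement for cabs under
   -funsafe-math-optimizations, since it ignores the intermediate
   overflow/underflow handling that a correctly rounded cabs
   implementation must provide.  */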
2063 static tree
2064 gimple_expand_builtin_cabs (gimple_stmt_iterator *gsi, location_t loc, tree arg)
2066 tree real_part, imag_part, addend1, addend2, sum, result;
2067 tree type = TREE_TYPE (TREE_TYPE (arg));
2068 tree sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT);
2069 machine_mode mode = TYPE_MODE (type);
2071 if (!flag_unsafe_math_optimizations
2072 || !optimize_bb_for_speed_p (gimple_bb (gsi_stmt (*gsi)))
2073 || !sqrtfn
2074 || optab_handler (sqrt_optab, mode) == CODE_FOR_nothing)
2075 return NULL_TREE;
2077 real_part = build_and_insert_ref (gsi, loc, type, "cabs",
2078 REALPART_EXPR, arg);
2079 addend1 = build_and_insert_binop (gsi, loc, "cabs", MULT_EXPR,
2080 real_part, real_part);
2081 imag_part = build_and_insert_ref (gsi, loc, type, "cabs",
2082 IMAGPART_EXPR, arg);
2083 addend2 = build_and_insert_binop (gsi, loc, "cabs", MULT_EXPR,
2084 imag_part, imag_part);
2085 sum = build_and_insert_binop (gsi, loc, "cabs", PLUS_EXPR, addend1, addend2);
2086 result = build_and_insert_call (gsi, loc, sqrtfn, sum);
2088 return result;
2091 /* Go through all calls to sin, cos and cexpi and call execute_cse_sincos_1
2092 on the SSA_NAME argument of each of them. Also expand powi(x,n) into
2093 an optimal number of multiplies, when n is a constant. */
2095 namespace {
2097 const pass_data pass_data_cse_sincos =
2099 GIMPLE_PASS, /* type */
2100 "sincos", /* name */
2101 OPTGROUP_NONE, /* optinfo_flags */
2102 TV_TREE_SINCOS, /* tv_id */
2103 PROP_ssa, /* properties_required */
2104 PROP_gimple_opt_math, /* properties_provided */
2105 0, /* properties_destroyed */
2106 0, /* todo_flags_start */
2107 TODO_update_ssa, /* todo_flags_finish */
2110 class pass_cse_sincos : public gimple_opt_pass
2112 public:
2113 pass_cse_sincos (gcc::context *ctxt)
2114 : gimple_opt_pass (pass_data_cse_sincos, ctxt)
2117 /* opt_pass methods: */
2118 virtual bool gate (function *)
2120 /* We no longer require either sincos or cexp, since powi expansion
2121 piggybacks on this pass. */
2122 return optimize;
2125 virtual unsigned int execute (function *);
2127 }; // class pass_cse_sincos
2129 unsigned int
2130 pass_cse_sincos::execute (function *fun)
2132 basic_block bb;
2133 bool cfg_changed = false;
2135 calculate_dominance_info (CDI_DOMINATORS);
2136 memset (&sincos_stats, 0, sizeof (sincos_stats));
2138 FOR_EACH_BB_FN (bb, fun)
2140 gimple_stmt_iterator gsi;
2141 bool cleanup_eh = false;
2143 for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2145 gimple *stmt = gsi_stmt (gsi);
2147 /* Only the last stmt in a bb could throw, no need to call
2148 gimple_purge_dead_eh_edges if we change something in the middle
2149 of a basic block. */
2150 cleanup_eh = false;
2152 if (is_gimple_call (stmt)
2153 && gimple_call_lhs (stmt))
2155 tree arg, arg0, arg1, result;
2156 HOST_WIDE_INT n;
2157 location_t loc;
2159 switch (gimple_call_combined_fn (stmt))
2161 CASE_CFN_COS:
2162 CASE_CFN_SIN:
2163 CASE_CFN_CEXPI:
2164 /* Make sure we have either sincos or cexp. */
2165 if (!targetm.libc_has_function (function_c99_math_complex)
2166 && !targetm.libc_has_function (function_sincos))
2167 break;
2169 arg = gimple_call_arg (stmt, 0);
2170 if (TREE_CODE (arg) == SSA_NAME)
2171 cfg_changed |= execute_cse_sincos_1 (arg);
2172 break;
2174 CASE_CFN_POW:
2175 arg0 = gimple_call_arg (stmt, 0);
2176 arg1 = gimple_call_arg (stmt, 1);
2178 loc = gimple_location (stmt);
2179 result = gimple_expand_builtin_pow (&gsi, loc, arg0, arg1);
2181 if (result)
2183 tree lhs = gimple_get_lhs (stmt);
2184 gassign *new_stmt = gimple_build_assign (lhs, result);
2185 gimple_set_location (new_stmt, loc);
2186 unlink_stmt_vdef (stmt);
2187 gsi_replace (&gsi, new_stmt, true);
2188 cleanup_eh = true;
2189 if (gimple_vdef (stmt))
2190 release_ssa_name (gimple_vdef (stmt));
2192 break;
2194 CASE_CFN_POWI:
2195 arg0 = gimple_call_arg (stmt, 0);
2196 arg1 = gimple_call_arg (stmt, 1);
2197 loc = gimple_location (stmt);
2199 if (real_minus_onep (arg0))
2201 tree t0, t1, cond, one, minus_one;
2202 gassign *stmt;
2204 t0 = TREE_TYPE (arg0);
2205 t1 = TREE_TYPE (arg1);
2206 one = build_real (t0, dconst1);
2207 minus_one = build_real (t0, dconstm1);
2209 cond = make_temp_ssa_name (t1, NULL, "powi_cond");
2210 stmt = gimple_build_assign (cond, BIT_AND_EXPR,
2211 arg1, build_int_cst (t1, 1));
2212 gimple_set_location (stmt, loc);
2213 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
2215 result = make_temp_ssa_name (t0, NULL, "powi");
2216 stmt = gimple_build_assign (result, COND_EXPR, cond,
2217 minus_one, one);
2218 gimple_set_location (stmt, loc);
2219 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
2221 else
2223 if (!tree_fits_shwi_p (arg1))
2224 break;
2226 n = tree_to_shwi (arg1);
2227 result = gimple_expand_builtin_powi (&gsi, loc, arg0, n);
2230 if (result)
2232 tree lhs = gimple_get_lhs (stmt);
2233 gassign *new_stmt = gimple_build_assign (lhs, result);
2234 gimple_set_location (new_stmt, loc);
2235 unlink_stmt_vdef (stmt);
2236 gsi_replace (&gsi, new_stmt, true);
2237 cleanup_eh = true;
2238 if (gimple_vdef (stmt))
2239 release_ssa_name (gimple_vdef (stmt));
2241 break;
2243 CASE_CFN_CABS:
2244 arg0 = gimple_call_arg (stmt, 0);
2245 loc = gimple_location (stmt);
2246 result = gimple_expand_builtin_cabs (&gsi, loc, arg0);
2248 if (result)
2250 tree lhs = gimple_get_lhs (stmt);
2251 gassign *new_stmt = gimple_build_assign (lhs, result);
2252 gimple_set_location (new_stmt, loc);
2253 unlink_stmt_vdef (stmt);
2254 gsi_replace (&gsi, new_stmt, true);
2255 cleanup_eh = true;
2256 if (gimple_vdef (stmt))
2257 release_ssa_name (gimple_vdef (stmt));
2259 break;
2261 default:;
2265 if (cleanup_eh)
2266 cfg_changed |= gimple_purge_dead_eh_edges (bb);
2269 statistics_counter_event (fun, "sincos statements inserted",
2270 sincos_stats.inserted);
2272 return cfg_changed ? TODO_cleanup_cfg : 0;
2275 } // anon namespace
2277 gimple_opt_pass *
2278 make_pass_cse_sincos (gcc::context *ctxt)
2280 return new pass_cse_sincos (ctxt);
2283 /* Return true if stmt is a type conversion operation that can be stripped
2284 when used in a widening multiply operation. */
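/* For instance, in

     long long r = (long long) i * (long long) j;

   each cast produces a value with the same precision as the 64-bit
   result, so both casts are strippable: the widening multiply itself
   supplies the extension.  An intermediate extension such as
   "short -> int" feeding such a cast is likewise strippable as long
   as it widens and preserves signedness.  */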
2285 static bool
2286 widening_mult_conversion_strippable_p (tree result_type, gimple *stmt)
2288 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
2290 if (TREE_CODE (result_type) == INTEGER_TYPE)
2292 tree op_type;
2293 tree inner_op_type;
2295 if (!CONVERT_EXPR_CODE_P (rhs_code))
2296 return false;
2298 op_type = TREE_TYPE (gimple_assign_lhs (stmt));
2300 /* If the type of OP has the same precision as the result, then
2301 we can strip this conversion. The multiply operation will be
2302 selected to create the correct extension as a by-product. */
2303 if (TYPE_PRECISION (result_type) == TYPE_PRECISION (op_type))
2304 return true;
2306 /* We can also strip a conversion if it preserves the signed-ness of
2307 the operation and doesn't narrow the range. */
2308 inner_op_type = TREE_TYPE (gimple_assign_rhs1 (stmt));
2310 /* If the inner-most type is unsigned, then we can strip any
2311 intermediate widening operation. If it's signed, then the
2312 intermediate widening operation must also be signed. */
2313 if ((TYPE_UNSIGNED (inner_op_type)
2314 || TYPE_UNSIGNED (op_type) == TYPE_UNSIGNED (inner_op_type))
2315 && TYPE_PRECISION (op_type) > TYPE_PRECISION (inner_op_type))
2316 return true;
2318 return false;
2321 return rhs_code == FIXED_CONVERT_EXPR;
2324 /* Return true if RHS is a suitable operand for a widening multiplication,
2325 assuming a target type of TYPE.
2326 There are two cases:
2328 - RHS makes some value at least twice as wide. Store that value
2329 in *NEW_RHS_OUT if so, and store its type in *TYPE_OUT.
2331 - RHS is an integer constant. Store that value in *NEW_RHS_OUT if so,
2332 but leave *TYPE_OUT untouched. */
2334 static bool
2335 is_widening_mult_rhs_p (tree type, tree rhs, tree *type_out,
2336 tree *new_rhs_out)
2338 gimple *stmt;
2339 tree type1, rhs1;
2341 if (TREE_CODE (rhs) == SSA_NAME)
2343 stmt = SSA_NAME_DEF_STMT (rhs);
2344 if (is_gimple_assign (stmt))
2346 if (! widening_mult_conversion_strippable_p (type, stmt))
2347 rhs1 = rhs;
2348 else
2350 rhs1 = gimple_assign_rhs1 (stmt);
2352 if (TREE_CODE (rhs1) == INTEGER_CST)
2354 *new_rhs_out = rhs1;
2355 *type_out = NULL;
2356 return true;
2360 else
2361 rhs1 = rhs;
2363 type1 = TREE_TYPE (rhs1);
2365 if (TREE_CODE (type1) != TREE_CODE (type)
2366 || TYPE_PRECISION (type1) * 2 > TYPE_PRECISION (type))
2367 return false;
2369 *new_rhs_out = rhs1;
2370 *type_out = type1;
2371 return true;
2374 if (TREE_CODE (rhs) == INTEGER_CST)
2376 *new_rhs_out = rhs;
2377 *type_out = NULL;
2378 return true;
2381 return false;
2384 /* Return true if STMT performs a widening multiplication, assuming the
2385 output type is TYPE. If so, store the unwidened types of the operands
2386 in *TYPE1_OUT and *TYPE2_OUT respectively. Also fill *RHS1_OUT and
2387 *RHS2_OUT such that converting those operands to types *TYPE1_OUT
2388 and *TYPE2_OUT would give the operands of the multiplication. */
2390 static bool
2391 is_widening_mult_p (gimple *stmt,
2392 tree *type1_out, tree *rhs1_out,
2393 tree *type2_out, tree *rhs2_out)
2395 tree type = TREE_TYPE (gimple_assign_lhs (stmt));
2397 if (TREE_CODE (type) == INTEGER_TYPE)
2399 if (TYPE_OVERFLOW_TRAPS (type))
2400 return false;
2402 else if (TREE_CODE (type) != FIXED_POINT_TYPE)
2403 return false;
2405 if (!is_widening_mult_rhs_p (type, gimple_assign_rhs1 (stmt), type1_out,
2406 rhs1_out))
2407 return false;
2409 if (!is_widening_mult_rhs_p (type, gimple_assign_rhs2 (stmt), type2_out,
2410 rhs2_out))
2411 return false;
2413 if (*type1_out == NULL)
2415 if (*type2_out == NULL || !int_fits_type_p (*rhs1_out, *type2_out))
2416 return false;
2417 *type1_out = *type2_out;
2420 if (*type2_out == NULL)
2422 if (!int_fits_type_p (*rhs2_out, *type1_out))
2423 return false;
2424 *type2_out = *type1_out;
2427 /* Ensure that the larger of the two operands comes first. */
2428 if (TYPE_PRECISION (*type1_out) < TYPE_PRECISION (*type2_out))
2430 std::swap (*type1_out, *type2_out);
2431 std::swap (*rhs1_out, *rhs2_out);
2434 return true;
2437 /* Check to see if the CALL statement is an invocation of copysign
2438 with 1. being the first argument. */
2439 static bool
2440 is_copysign_call_with_1 (gimple *call)
2442 gcall *c = dyn_cast <gcall *> (call);
2443 if (! c)
2444 return false;
2446 enum combined_fn code = gimple_call_combined_fn (c);
2448 if (code == CFN_LAST)
2449 return false;
2451 if (builtin_fn_p (code))
2453 switch (as_builtin_fn (code))
2455 CASE_FLT_FN (BUILT_IN_COPYSIGN):
2456 CASE_FLT_FN_FLOATN_NX (BUILT_IN_COPYSIGN):
2457 return real_onep (gimple_call_arg (c, 0));
2458 default:
2459 return false;
2463 if (internal_fn_p (code))
2465 switch (as_internal_fn (code))
2467 case IFN_COPYSIGN:
2468 return real_onep (gimple_call_arg (c, 0));
2469 default:
2470 return false;
2474 return false;
2477 /* Try to expand the pattern x * copysign (1, y) into xorsign (x, y).
2478    This only happens when the xorsign optab is defined; if the
2479    pattern is not a xorsign pattern or if expansion fails, FALSE is
2480    returned, otherwise TRUE is returned.  */
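/* For illustration, at the source level the recognized shape is roughly

     double f (double x, double y)
     {
       return x * __builtin_copysign (1.0, y);
     }

   which transfers the sign bit of Y onto X and so can be implemented
   by a single XOR of the sign bit when the xorsign optab exists.  */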
2481 static bool
2482 convert_expand_mult_copysign (gimple *stmt, gimple_stmt_iterator *gsi)
2484 tree treeop0, treeop1, lhs, type;
2485 location_t loc = gimple_location (stmt);
2486 lhs = gimple_assign_lhs (stmt);
2487 treeop0 = gimple_assign_rhs1 (stmt);
2488 treeop1 = gimple_assign_rhs2 (stmt);
2489 type = TREE_TYPE (lhs);
2490 machine_mode mode = TYPE_MODE (type);
2492 if (HONOR_SNANS (type))
2493 return false;
2495 if (TREE_CODE (treeop0) == SSA_NAME && TREE_CODE (treeop1) == SSA_NAME)
2497 gimple *call0 = SSA_NAME_DEF_STMT (treeop0);
2498 if (!has_single_use (treeop0) || !is_copysign_call_with_1 (call0))
2500 call0 = SSA_NAME_DEF_STMT (treeop1);
2501 if (!has_single_use (treeop1) || !is_copysign_call_with_1 (call0))
2502 return false;
2504 treeop1 = treeop0;
2506 if (optab_handler (xorsign_optab, mode) == CODE_FOR_nothing)
2507 return false;
2509 gcall *c = as_a<gcall*> (call0);
2510 treeop0 = gimple_call_arg (c, 1);
2512 gcall *call_stmt
2513 = gimple_build_call_internal (IFN_XORSIGN, 2, treeop1, treeop0);
2514 gimple_set_lhs (call_stmt, lhs);
2515 gimple_set_location (call_stmt, loc);
2516 gsi_replace (gsi, call_stmt, true);
2517 return true;
2520 return false;
2523 /* Process a single gimple statement STMT, which has a MULT_EXPR as
2524 its rhs, and try to convert it into a WIDEN_MULT_EXPR. The return
2525 value is true iff we converted the statement. */
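/* For example, on a 32-bit target

     long long f (int a, int b)
     {
       return (long long) a * b;
     }

   is rewritten here to use WIDEN_MULT_EXPR, so that a single
   32x32->64 multiply pattern (e.g. mulsidi3) can be selected instead
   of a full 64-bit multiplication.  */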
2527 static bool
2528 convert_mult_to_widen (gimple *stmt, gimple_stmt_iterator *gsi)
2530 tree lhs, rhs1, rhs2, type, type1, type2;
2531 enum insn_code handler;
2532 scalar_int_mode to_mode, from_mode, actual_mode;
2533 optab op;
2534 int actual_precision;
2535 location_t loc = gimple_location (stmt);
2536 bool from_unsigned1, from_unsigned2;
2538 lhs = gimple_assign_lhs (stmt);
2539 type = TREE_TYPE (lhs);
2540 if (TREE_CODE (type) != INTEGER_TYPE)
2541 return false;
2543 if (!is_widening_mult_p (stmt, &type1, &rhs1, &type2, &rhs2))
2544 return false;
2546 to_mode = SCALAR_INT_TYPE_MODE (type);
2547 from_mode = SCALAR_INT_TYPE_MODE (type1);
2548 if (to_mode == from_mode)
2549 return false;
2551 from_unsigned1 = TYPE_UNSIGNED (type1);
2552 from_unsigned2 = TYPE_UNSIGNED (type2);
2554 if (from_unsigned1 && from_unsigned2)
2555 op = umul_widen_optab;
2556 else if (!from_unsigned1 && !from_unsigned2)
2557 op = smul_widen_optab;
2558 else
2559 op = usmul_widen_optab;
2561 handler = find_widening_optab_handler_and_mode (op, to_mode, from_mode,
2562 &actual_mode);
2564 if (handler == CODE_FOR_nothing)
2566 if (op != smul_widen_optab)
2568 /* We can use a signed multiply with unsigned types as long as
2569 there is a wider mode to use, or it is the smaller of the two
2570 types that is unsigned. Note that type1 >= type2, always. */
2571 if ((TYPE_UNSIGNED (type1)
2572 && TYPE_PRECISION (type1) == GET_MODE_PRECISION (from_mode))
2573 || (TYPE_UNSIGNED (type2)
2574 && TYPE_PRECISION (type2) == GET_MODE_PRECISION (from_mode)))
2576 if (!GET_MODE_WIDER_MODE (from_mode).exists (&from_mode)
2577 || GET_MODE_SIZE (to_mode) <= GET_MODE_SIZE (from_mode))
2578 return false;
2581 op = smul_widen_optab;
2582 handler = find_widening_optab_handler_and_mode (op, to_mode,
2583 from_mode,
2584 &actual_mode);
2586 if (handler == CODE_FOR_nothing)
2587 return false;
2589 from_unsigned1 = from_unsigned2 = false;
2591 else
2592 return false;
2595   /* Ensure that the inputs to the handler are in the correct precision
2596 for the opcode. This will be the full mode size. */
2597 actual_precision = GET_MODE_PRECISION (actual_mode);
2598 if (2 * actual_precision > TYPE_PRECISION (type))
2599 return false;
2600 if (actual_precision != TYPE_PRECISION (type1)
2601 || from_unsigned1 != TYPE_UNSIGNED (type1))
2602 rhs1 = build_and_insert_cast (gsi, loc,
2603 build_nonstandard_integer_type
2604 (actual_precision, from_unsigned1), rhs1);
2605 if (actual_precision != TYPE_PRECISION (type2)
2606 || from_unsigned2 != TYPE_UNSIGNED (type2))
2607 rhs2 = build_and_insert_cast (gsi, loc,
2608 build_nonstandard_integer_type
2609 (actual_precision, from_unsigned2), rhs2);
2611 /* Handle constants. */
2612 if (TREE_CODE (rhs1) == INTEGER_CST)
2613 rhs1 = fold_convert (type1, rhs1);
2614 if (TREE_CODE (rhs2) == INTEGER_CST)
2615 rhs2 = fold_convert (type2, rhs2);
2617 gimple_assign_set_rhs1 (stmt, rhs1);
2618 gimple_assign_set_rhs2 (stmt, rhs2);
2619 gimple_assign_set_rhs_code (stmt, WIDEN_MULT_EXPR);
2620 update_stmt (stmt);
2621 widen_mul_stats.widen_mults_inserted++;
2622 return true;
2625 /* Process a single gimple statement STMT, which is found at the
2626    iterator GSI and has either a PLUS_EXPR or a MINUS_EXPR as its
2627 rhs (given by CODE), and try to convert it into a
2628 WIDEN_MULT_PLUS_EXPR or a WIDEN_MULT_MINUS_EXPR. The return value
2629 is true iff we converted the statement. */
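/* Schematically, this turns

     tmp = (long long) a * (long long) b;
     res = acc + tmp;

   into res = WIDEN_MULT_PLUS_EXPR <a, b, acc> when the target provides
   a widening multiply-accumulate pattern (maddMN4/umaddMN4).  */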
2631 static bool
2632 convert_plusminus_to_widen (gimple_stmt_iterator *gsi, gimple *stmt,
2633 enum tree_code code)
2635 gimple *rhs1_stmt = NULL, *rhs2_stmt = NULL;
2636 gimple *conv1_stmt = NULL, *conv2_stmt = NULL, *conv_stmt;
2637 tree type, type1, type2, optype;
2638 tree lhs, rhs1, rhs2, mult_rhs1, mult_rhs2, add_rhs;
2639 enum tree_code rhs1_code = ERROR_MARK, rhs2_code = ERROR_MARK;
2640 optab this_optab;
2641 enum tree_code wmult_code;
2642 enum insn_code handler;
2643 scalar_mode to_mode, from_mode, actual_mode;
2644 location_t loc = gimple_location (stmt);
2645 int actual_precision;
2646 bool from_unsigned1, from_unsigned2;
2648 lhs = gimple_assign_lhs (stmt);
2649 type = TREE_TYPE (lhs);
2650 if (TREE_CODE (type) != INTEGER_TYPE
2651 && TREE_CODE (type) != FIXED_POINT_TYPE)
2652 return false;
2654 if (code == MINUS_EXPR)
2655 wmult_code = WIDEN_MULT_MINUS_EXPR;
2656 else
2657 wmult_code = WIDEN_MULT_PLUS_EXPR;
2659 rhs1 = gimple_assign_rhs1 (stmt);
2660 rhs2 = gimple_assign_rhs2 (stmt);
2662 if (TREE_CODE (rhs1) == SSA_NAME)
2664 rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
2665 if (is_gimple_assign (rhs1_stmt))
2666 rhs1_code = gimple_assign_rhs_code (rhs1_stmt);
2669 if (TREE_CODE (rhs2) == SSA_NAME)
2671 rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
2672 if (is_gimple_assign (rhs2_stmt))
2673 rhs2_code = gimple_assign_rhs_code (rhs2_stmt);
2676 /* Allow for one conversion statement between the multiply
2677      and addition/subtraction statement.  If there is more than
2678      one conversion then we assume it would invalidate this
2679      transformation.  If that's not the case then they should have
2680 been folded before now. */
2681 if (CONVERT_EXPR_CODE_P (rhs1_code))
2683 conv1_stmt = rhs1_stmt;
2684 rhs1 = gimple_assign_rhs1 (rhs1_stmt);
2685 if (TREE_CODE (rhs1) == SSA_NAME)
2687 rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
2688 if (is_gimple_assign (rhs1_stmt))
2689 rhs1_code = gimple_assign_rhs_code (rhs1_stmt);
2691 else
2692 return false;
2694 if (CONVERT_EXPR_CODE_P (rhs2_code))
2696 conv2_stmt = rhs2_stmt;
2697 rhs2 = gimple_assign_rhs1 (rhs2_stmt);
2698 if (TREE_CODE (rhs2) == SSA_NAME)
2700 rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
2701 if (is_gimple_assign (rhs2_stmt))
2702 rhs2_code = gimple_assign_rhs_code (rhs2_stmt);
2704 else
2705 return false;
2708 /* If code is WIDEN_MULT_EXPR then it would seem unnecessary to call
2709      is_widening_mult_p, but we still need the rhs values it returns.
2711 It might also appear that it would be sufficient to use the existing
2712 operands of the widening multiply, but that would limit the choice of
2713 multiply-and-accumulate instructions.
2715      If the widened-multiplication result has more than one use, it is
2716 probably wiser not to do the conversion. */
2717 if (code == PLUS_EXPR
2718 && (rhs1_code == MULT_EXPR || rhs1_code == WIDEN_MULT_EXPR))
2720 if (!has_single_use (rhs1)
2721 || !is_widening_mult_p (rhs1_stmt, &type1, &mult_rhs1,
2722 &type2, &mult_rhs2))
2723 return false;
2724 add_rhs = rhs2;
2725 conv_stmt = conv1_stmt;
2727 else if (rhs2_code == MULT_EXPR || rhs2_code == WIDEN_MULT_EXPR)
2729 if (!has_single_use (rhs2)
2730 || !is_widening_mult_p (rhs2_stmt, &type1, &mult_rhs1,
2731 &type2, &mult_rhs2))
2732 return false;
2733 add_rhs = rhs1;
2734 conv_stmt = conv2_stmt;
2736 else
2737 return false;
2739 to_mode = SCALAR_TYPE_MODE (type);
2740 from_mode = SCALAR_TYPE_MODE (type1);
2741 if (to_mode == from_mode)
2742 return false;
2744 from_unsigned1 = TYPE_UNSIGNED (type1);
2745 from_unsigned2 = TYPE_UNSIGNED (type2);
2746 optype = type1;
2748 /* There's no such thing as a mixed sign madd yet, so use a wider mode. */
2749 if (from_unsigned1 != from_unsigned2)
2751 if (!INTEGRAL_TYPE_P (type))
2752 return false;
2753 /* We can use a signed multiply with unsigned types as long as
2754 there is a wider mode to use, or it is the smaller of the two
2755 types that is unsigned. Note that type1 >= type2, always. */
2756 if ((from_unsigned1
2757 && TYPE_PRECISION (type1) == GET_MODE_PRECISION (from_mode))
2758 || (from_unsigned2
2759 && TYPE_PRECISION (type2) == GET_MODE_PRECISION (from_mode)))
2761 if (!GET_MODE_WIDER_MODE (from_mode).exists (&from_mode)
2762 || GET_MODE_SIZE (from_mode) >= GET_MODE_SIZE (to_mode))
2763 return false;
2766 from_unsigned1 = from_unsigned2 = false;
2767 optype = build_nonstandard_integer_type (GET_MODE_PRECISION (from_mode),
2768 false);
2771 /* If there was a conversion between the multiply and addition
2772 then we need to make sure it fits a multiply-and-accumulate.
2773      There should be a single mode change which does not change the
2774 value. */
2775 if (conv_stmt)
2777 /* We use the original, unmodified data types for this. */
2778 tree from_type = TREE_TYPE (gimple_assign_rhs1 (conv_stmt));
2779 tree to_type = TREE_TYPE (gimple_assign_lhs (conv_stmt));
2780 int data_size = TYPE_PRECISION (type1) + TYPE_PRECISION (type2);
2781 bool is_unsigned = TYPE_UNSIGNED (type1) && TYPE_UNSIGNED (type2);
2783 if (TYPE_PRECISION (from_type) > TYPE_PRECISION (to_type))
2785 /* Conversion is a truncate. */
2786 if (TYPE_PRECISION (to_type) < data_size)
2787 return false;
2789 else if (TYPE_PRECISION (from_type) < TYPE_PRECISION (to_type))
2791 /* Conversion is an extend. Check it's the right sort. */
2792 if (TYPE_UNSIGNED (from_type) != is_unsigned
2793 && !(is_unsigned && TYPE_PRECISION (from_type) > data_size))
2794 return false;
2796 /* else convert is a no-op for our purposes. */
2799 /* Verify that the machine can perform a widening multiply
2800 accumulate in this mode/signedness combination, otherwise
2801 this transformation is likely to pessimize code. */
2802 this_optab = optab_for_tree_code (wmult_code, optype, optab_default);
2803 handler = find_widening_optab_handler_and_mode (this_optab, to_mode,
2804 from_mode, &actual_mode);
2806 if (handler == CODE_FOR_nothing)
2807 return false;
2809   /* Ensure that the inputs to the handler are in the correct precision
2810 for the opcode. This will be the full mode size. */
2811 actual_precision = GET_MODE_PRECISION (actual_mode);
2812 if (actual_precision != TYPE_PRECISION (type1)
2813 || from_unsigned1 != TYPE_UNSIGNED (type1))
2814 mult_rhs1 = build_and_insert_cast (gsi, loc,
2815 build_nonstandard_integer_type
2816 (actual_precision, from_unsigned1),
2817 mult_rhs1);
2818 if (actual_precision != TYPE_PRECISION (type2)
2819 || from_unsigned2 != TYPE_UNSIGNED (type2))
2820 mult_rhs2 = build_and_insert_cast (gsi, loc,
2821 build_nonstandard_integer_type
2822 (actual_precision, from_unsigned2),
2823 mult_rhs2);
2825 if (!useless_type_conversion_p (type, TREE_TYPE (add_rhs)))
2826 add_rhs = build_and_insert_cast (gsi, loc, type, add_rhs);
2828 /* Handle constants. */
2829 if (TREE_CODE (mult_rhs1) == INTEGER_CST)
2830 mult_rhs1 = fold_convert (type1, mult_rhs1);
2831 if (TREE_CODE (mult_rhs2) == INTEGER_CST)
2832 mult_rhs2 = fold_convert (type2, mult_rhs2);
2834 gimple_assign_set_rhs_with_ops (gsi, wmult_code, mult_rhs1, mult_rhs2,
2835 add_rhs);
2836 update_stmt (gsi_stmt (*gsi));
2837 widen_mul_stats.maccs_inserted++;
2838 return true;
2841 /* Given a result MUL_RESULT which is a result of a multiplication of OP1 and
2842 OP2 and which we know is used in statements that can be, together with the
2843 multiplication, converted to FMAs, perform the transformation. */
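/* For instance, assuming a target supporting IFN_FMA, uses such as

     tmp = a * b;
     r1 = tmp + c;
     r2 = c - tmp;

   become r1 = .FMA (a, b, c) and (after fold_stmt propagates the
   negation) r2 = .FNMA (a, b, c), leaving the multiplication dead.  */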
2845 static void
2846 convert_mult_to_fma_1 (tree mul_result, tree op1, tree op2)
2848 tree type = TREE_TYPE (mul_result);
2849 gimple *use_stmt;
2850 imm_use_iterator imm_iter;
2851 gcall *fma_stmt;
2853 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, mul_result)
2855 gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
2856 tree addop, mulop1 = op1, result = mul_result;
2857 bool negate_p = false;
2858 gimple_seq seq = NULL;
2860 if (is_gimple_debug (use_stmt))
2861 continue;
2863 if (is_gimple_assign (use_stmt)
2864 && gimple_assign_rhs_code (use_stmt) == NEGATE_EXPR)
2866 result = gimple_assign_lhs (use_stmt);
2867 use_operand_p use_p;
2868 gimple *neguse_stmt;
2869 single_imm_use (gimple_assign_lhs (use_stmt), &use_p, &neguse_stmt);
2870 gsi_remove (&gsi, true);
2871 release_defs (use_stmt);
2873 use_stmt = neguse_stmt;
2874 gsi = gsi_for_stmt (use_stmt);
2875 negate_p = true;
2878 tree cond, else_value, ops[3];
2879 tree_code code;
2880 if (!can_interpret_as_conditional_op_p (use_stmt, &cond, &code,
2881 ops, &else_value))
2882 gcc_unreachable ();
2883 addop = ops[0] == result ? ops[1] : ops[0];
2885 if (code == MINUS_EXPR)
2887 if (ops[0] == result)
2888 /* a * b - c -> a * b + (-c) */
2889 addop = gimple_build (&seq, NEGATE_EXPR, type, addop);
2890 else
2891 /* a - b * c -> (-b) * c + a */
2892 negate_p = !negate_p;
2895 if (negate_p)
2896 mulop1 = gimple_build (&seq, NEGATE_EXPR, type, mulop1);
2898 if (seq)
2899 gsi_insert_seq_before (&gsi, seq, GSI_SAME_STMT);
2901 if (cond)
2902 fma_stmt = gimple_build_call_internal (IFN_COND_FMA, 5, cond, mulop1,
2903 op2, addop, else_value);
2904 else
2905 fma_stmt = gimple_build_call_internal (IFN_FMA, 3, mulop1, op2, addop);
2906 gimple_set_lhs (fma_stmt, gimple_get_lhs (use_stmt));
2907 gimple_call_set_nothrow (fma_stmt, !stmt_can_throw_internal (use_stmt));
2908 gsi_replace (&gsi, fma_stmt, true);
2909 /* Follow all SSA edges so that we generate FMS, FNMA and FNMS
2910 regardless of where the negation occurs. */
2911 if (fold_stmt (&gsi, follow_all_ssa_edges))
2912 update_stmt (gsi_stmt (gsi));
2914 if (dump_file && (dump_flags & TDF_DETAILS))
2916 fprintf (dump_file, "Generated FMA ");
2917 print_gimple_stmt (dump_file, gsi_stmt (gsi), 0, TDF_NONE);
2918 fprintf (dump_file, "\n");
2921 widen_mul_stats.fmas_inserted++;
2925 /* Data necessary to perform the actual transformation from a multiplication
2926    and an addition to an FMA once the decision has been taken that it should
2927    be done, and to then delete the multiplication statement from the function IL.  */
2929 struct fma_transformation_info
2931 gimple *mul_stmt;
2932 tree mul_result;
2933 tree op1;
2934 tree op2;
2937 /* Structure containing the current state of FMA deferring, i.e. whether we are
2938 deferring, whether to continue deferring, and all data necessary to come
2939 back and perform all deferred transformations. */
2941 class fma_deferring_state
2943 public:
2944 /* Class constructor. Pass true as PERFORM_DEFERRING in order to actually
2945 do any deferring. */
2947 fma_deferring_state (bool perform_deferring)
2948 : m_candidates (), m_mul_result_set (), m_initial_phi (NULL),
2949 m_last_result (NULL_TREE), m_deferring_p (perform_deferring) {}
2951   /* List of FMA candidates for which the transformation has been determined
2952      possible but which, at this point in the BB analysis, we do not consider
2953 beneficial. */
2954 auto_vec<fma_transformation_info, 8> m_candidates;
2956 /* Set of results of multiplication that are part of an already deferred FMA
2957 candidates. */
2958 hash_set<tree> m_mul_result_set;
2960   /* The PHI that supposedly feeds back the result of one FMA to another over a loop
2961 boundary. */
2962 gphi *m_initial_phi;
2964 /* Result of the last produced FMA candidate or NULL if there has not been
2965 one. */
2966 tree m_last_result;
2968 /* If true, deferring might still be profitable. If false, transform all
2969 candidates and no longer defer. */
2970 bool m_deferring_p;
2973 /* Transform all deferred FMA candidates and mark STATE as no longer
2974 deferring. */
2976 static void
2977 cancel_fma_deferring (fma_deferring_state *state)
2979 if (!state->m_deferring_p)
2980 return;
2982 for (unsigned i = 0; i < state->m_candidates.length (); i++)
2984 if (dump_file && (dump_flags & TDF_DETAILS))
2985 fprintf (dump_file, "Generating deferred FMA\n");
2987 const fma_transformation_info &fti = state->m_candidates[i];
2988 convert_mult_to_fma_1 (fti.mul_result, fti.op1, fti.op2);
2990 gimple_stmt_iterator gsi = gsi_for_stmt (fti.mul_stmt);
2991 gsi_remove (&gsi, true);
2992 release_defs (fti.mul_stmt);
2994 state->m_deferring_p = false;
2997 /* If OP is an SSA name defined by a PHI node, return the PHI statement.
2998 Otherwise return NULL. */
3000 static gphi *
3001 result_of_phi (tree op)
3003 if (TREE_CODE (op) != SSA_NAME)
3004 return NULL;
3006 return dyn_cast <gphi *> (SSA_NAME_DEF_STMT (op));
3009 /* After processing statements of a BB and recording STATE, return true if the
3010    initial phi is fed by the last FMA candidate result or one such result from
3011 previously processed BBs marked in LAST_RESULT_SET. */
3013 static bool
3014 last_fma_candidate_feeds_initial_phi (fma_deferring_state *state,
3015 hash_set<tree> *last_result_set)
3017 ssa_op_iter iter;
3018 use_operand_p use;
3019 FOR_EACH_PHI_ARG (use, state->m_initial_phi, iter, SSA_OP_USE)
3021 tree t = USE_FROM_PTR (use);
3022 if (t == state->m_last_result
3023 || last_result_set->contains (t))
3024 return true;
3027 return false;
3030 /* Combine the multiplication at MUL_STMT with operands MULOP1 and MULOP2
3031 with uses in additions and subtractions to form fused multiply-add
3032 operations. Returns true if successful and MUL_STMT should be removed.
3034 If STATE indicates that we are deferring FMA transformation, that means
3035 that we do not produce FMAs for basic blocks which look like:
3037 <bb 6>
3038 # accumulator_111 = PHI <0.0(5), accumulator_66(6)>
3039 _65 = _14 * _16;
3040 accumulator_66 = _65 + accumulator_111;
3042    or its unrolled version, i.e. with several FMA candidates that feed the result
3043 of one into the addend of another. Instead, we add them to a list in STATE
3044 and if we later discover an FMA candidate that is not part of such a chain,
3045 we go back and perform all deferred past candidates. */
3047 static bool
3048 convert_mult_to_fma (gimple *mul_stmt, tree op1, tree op2,
3049 fma_deferring_state *state)
3051 tree mul_result = gimple_get_lhs (mul_stmt);
3052 tree type = TREE_TYPE (mul_result);
3053 gimple *use_stmt, *neguse_stmt;
3054 use_operand_p use_p;
3055 imm_use_iterator imm_iter;
3057 if (FLOAT_TYPE_P (type)
3058 && flag_fp_contract_mode == FP_CONTRACT_OFF)
3059 return false;
3061 /* We don't want to do bitfield reduction ops. */
3062 if (INTEGRAL_TYPE_P (type)
3063 && (!type_has_mode_precision_p (type) || TYPE_OVERFLOW_TRAPS (type)))
3064 return false;
3066 /* If the target doesn't support it, don't generate it. We assume that
3067 if fma isn't available then fms, fnma or fnms are not either. */
3068 optimization_type opt_type = bb_optimization_type (gimple_bb (mul_stmt));
3069 if (!direct_internal_fn_supported_p (IFN_FMA, type, opt_type))
3070 return false;
3072 /* If the multiplication has zero uses, it is kept around probably because
3073 of -fnon-call-exceptions. Don't optimize it away in that case,
3074      it is DCE's job.  */
3075 if (has_zero_uses (mul_result))
3076 return false;
3078 bool check_defer
3079 = (state->m_deferring_p
3080 && (tree_to_shwi (TYPE_SIZE (type))
3081 <= PARAM_VALUE (PARAM_AVOID_FMA_MAX_BITS)));
3082 bool defer = check_defer;
3083 /* Make sure that the multiplication statement becomes dead after
3084      the transformation, i.e. that all uses are transformed to FMAs.
3085 This means we assume that an FMA operation has the same cost
3086 as an addition. */
3087 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, mul_result)
3089 tree result = mul_result;
3090 bool negate_p = false;
3092 use_stmt = USE_STMT (use_p);
3094 if (is_gimple_debug (use_stmt))
3095 continue;
3097       /* For now restrict these operations to single basic blocks.  In theory
3098 we would want to support sinking the multiplication in
3099 m = a*b;
3100 if ()
3101 ma = m + c;
3102 else
3103 d = m;
3104 to form a fma in the then block and sink the multiplication to the
3105 else block. */
3106 if (gimple_bb (use_stmt) != gimple_bb (mul_stmt))
3107 return false;
3109 /* A negate on the multiplication leads to FNMA. */
3110 if (is_gimple_assign (use_stmt)
3111 && gimple_assign_rhs_code (use_stmt) == NEGATE_EXPR)
3113 ssa_op_iter iter;
3114 use_operand_p usep;
3116 result = gimple_assign_lhs (use_stmt);
3118 /* Make sure the negate statement becomes dead with this
3119 single transformation. */
3120 if (!single_imm_use (gimple_assign_lhs (use_stmt),
3121 &use_p, &neguse_stmt))
3122 return false;
3124 /* Make sure the multiplication isn't also used on that stmt. */
3125 FOR_EACH_PHI_OR_STMT_USE (usep, neguse_stmt, iter, SSA_OP_USE)
3126 if (USE_FROM_PTR (usep) == mul_result)
3127 return false;
3129 /* Re-validate. */
3130 use_stmt = neguse_stmt;
3131 if (gimple_bb (use_stmt) != gimple_bb (mul_stmt))
3132 return false;
3134 negate_p = true;
3137 tree cond, else_value, ops[3];
3138 tree_code code;
3139 if (!can_interpret_as_conditional_op_p (use_stmt, &cond, &code, ops,
3140 &else_value))
3141 return false;
3143 switch (code)
3145 case MINUS_EXPR:
3146 if (ops[1] == result)
3147 negate_p = !negate_p;
3148 break;
3149 case PLUS_EXPR:
3150 break;
3151 default:
3152 /* FMA can only be formed from PLUS and MINUS. */
3153 return false;
3156 if (cond)
3158 if (cond == result || else_value == result)
3159 return false;
3160 if (!direct_internal_fn_supported_p (IFN_COND_FMA, type, opt_type))
3161 return false;
3164 /* If the subtrahend (OPS[1]) is computed by a MULT_EXPR that
3165 we'll visit later, we might be able to get a more profitable
3166 match with fnma.
3167 OTOH, if we don't, a negate / fma pair has likely lower latency
3168      than a mult / subtract pair.  */
3169 if (code == MINUS_EXPR
3170 && !negate_p
3171 && ops[0] == result
3172 && !direct_internal_fn_supported_p (IFN_FMS, type, opt_type)
3173 && direct_internal_fn_supported_p (IFN_FNMA, type, opt_type)
3174 && TREE_CODE (ops[1]) == SSA_NAME
3175 && has_single_use (ops[1]))
3177 gimple *stmt2 = SSA_NAME_DEF_STMT (ops[1]);
3178 if (is_gimple_assign (stmt2)
3179 && gimple_assign_rhs_code (stmt2) == MULT_EXPR)
3180 return false;
3183 /* We can't handle a * b + a * b. */
3184 if (ops[0] == ops[1])
3185 return false;
3186 /* If deferring, make sure we are not looking at an instruction that
3187 wouldn't have existed if we were not. */
3188 if (state->m_deferring_p
3189 && (state->m_mul_result_set.contains (ops[0])
3190 || state->m_mul_result_set.contains (ops[1])))
3191 return false;
3193 if (check_defer)
3195 tree use_lhs = gimple_get_lhs (use_stmt);
3196 if (state->m_last_result)
3198 if (ops[1] == state->m_last_result
3199 || ops[0] == state->m_last_result)
3200 defer = true;
3201 else
3202 defer = false;
3204 else
3206 gcc_checking_assert (!state->m_initial_phi);
3207 gphi *phi;
3208 if (ops[0] == result)
3209 phi = result_of_phi (ops[1]);
3210 else
3212 gcc_assert (ops[1] == result);
3213 phi = result_of_phi (ops[0]);
3216 if (phi)
3218 state->m_initial_phi = phi;
3219 defer = true;
3221 else
3222 defer = false;
3225 state->m_last_result = use_lhs;
3226 check_defer = false;
3228 else
3229 defer = false;
3231 /* While it is possible to validate whether or not the exact form that
3232 we've recognized is available in the backend, the assumption is that
3233 if the deferring logic above did not trigger, the transformation is
3234 never a loss. For instance, suppose the target only has the plain FMA
3235 pattern available. Consider a*b-c -> fma(a,b,-c): we've exchanged
3236 MUL+SUB for FMA+NEG, which is still two operations. Consider
3237 -(a*b)-c -> fma(-a,b,-c): we still have 3 operations, but in the FMA
3238 form the two NEGs are independent and could be run in parallel. */
3241 if (defer)
3243 fma_transformation_info fti;
3244 fti.mul_stmt = mul_stmt;
3245 fti.mul_result = mul_result;
3246 fti.op1 = op1;
3247 fti.op2 = op2;
3248 state->m_candidates.safe_push (fti);
3249 state->m_mul_result_set.add (mul_result);
3251 if (dump_file && (dump_flags & TDF_DETAILS))
3253 fprintf (dump_file, "Deferred generating FMA for multiplication ");
3254 print_gimple_stmt (dump_file, mul_stmt, 0, TDF_NONE);
3255 fprintf (dump_file, "\n");
3258 return false;
3260 else
3262 if (state->m_deferring_p)
3263 cancel_fma_deferring (state);
3264 convert_mult_to_fma_1 (mul_result, op1, op2);
3265 return true;
3270 /* Helper function of match_uaddsub_overflow. Return 1
3271 if USE_STMT is unsigned overflow check ovf != 0 for
3272 STMT, -1 if USE_STMT is unsigned overflow check ovf == 0
3273 and 0 otherwise. */
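/* For example, with unsigned operands and

     r = a + b;
     if (a > r) ...

   this returns 1 (an overflow test); "if (a <= r)" would return -1
   (a no-overflow test); any other comparison returns 0.  */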
3275 static int
3276 uaddsub_overflow_check_p (gimple *stmt, gimple *use_stmt)
3278 enum tree_code ccode = ERROR_MARK;
3279 tree crhs1 = NULL_TREE, crhs2 = NULL_TREE;
3280 if (gimple_code (use_stmt) == GIMPLE_COND)
3282 ccode = gimple_cond_code (use_stmt);
3283 crhs1 = gimple_cond_lhs (use_stmt);
3284 crhs2 = gimple_cond_rhs (use_stmt);
3286 else if (is_gimple_assign (use_stmt))
3288 if (gimple_assign_rhs_class (use_stmt) == GIMPLE_BINARY_RHS)
3290 ccode = gimple_assign_rhs_code (use_stmt);
3291 crhs1 = gimple_assign_rhs1 (use_stmt);
3292 crhs2 = gimple_assign_rhs2 (use_stmt);
3294 else if (gimple_assign_rhs_code (use_stmt) == COND_EXPR)
3296 tree cond = gimple_assign_rhs1 (use_stmt);
3297 if (COMPARISON_CLASS_P (cond))
3299 ccode = TREE_CODE (cond);
3300 crhs1 = TREE_OPERAND (cond, 0);
3301 crhs2 = TREE_OPERAND (cond, 1);
3303 else
3304 return 0;
3306 else
3307 return 0;
3309 else
3310 return 0;
3312 if (TREE_CODE_CLASS (ccode) != tcc_comparison)
3313 return 0;
3315 enum tree_code code = gimple_assign_rhs_code (stmt);
3316 tree lhs = gimple_assign_lhs (stmt);
3317 tree rhs1 = gimple_assign_rhs1 (stmt);
3318 tree rhs2 = gimple_assign_rhs2 (stmt);
3320 switch (ccode)
3322 case GT_EXPR:
3323 case LE_EXPR:
3324 /* r = a - b; r > a or r <= a
3325 r = a + b; a > r or a <= r or b > r or b <= r. */
3326 if ((code == MINUS_EXPR && crhs1 == lhs && crhs2 == rhs1)
3327 || (code == PLUS_EXPR && (crhs1 == rhs1 || crhs1 == rhs2)
3328 && crhs2 == lhs))
3329 return ccode == GT_EXPR ? 1 : -1;
3330 break;
3331 case LT_EXPR:
3332 case GE_EXPR:
3333 /* r = a - b; a < r or a >= r
3334 r = a + b; r < a or r >= a or r < b or r >= b. */
3335 if ((code == MINUS_EXPR && crhs1 == rhs1 && crhs2 == lhs)
3336 || (code == PLUS_EXPR && crhs1 == lhs
3337 && (crhs2 == rhs1 || crhs2 == rhs2)))
3338 return ccode == LT_EXPR ? 1 : -1;
3339 break;
3340 default:
3341 break;
3343 return 0;
3346 /* Recognize for unsigned x
3347 x = y - z;
3348 if (x > y)
3349 where there are other uses of x and replace it with
3350 _7 = SUB_OVERFLOW (y, z);
3351 x = REALPART_EXPR <_7>;
3352 _8 = IMAGPART_EXPR <_7>;
3353 if (_8)
3354 and similarly for addition. */
3356 static bool
3357 match_uaddsub_overflow (gimple_stmt_iterator *gsi, gimple *stmt,
3358 enum tree_code code)
3360 tree lhs = gimple_assign_lhs (stmt);
3361 tree type = TREE_TYPE (lhs);
3362 use_operand_p use_p;
3363 imm_use_iterator iter;
3364 bool use_seen = false;
3365 bool ovf_use_seen = false;
3366 gimple *use_stmt;
3368 gcc_checking_assert (code == PLUS_EXPR || code == MINUS_EXPR);
3369 if (!INTEGRAL_TYPE_P (type)
3370 || !TYPE_UNSIGNED (type)
3371 || has_zero_uses (lhs)
3372 || has_single_use (lhs)
3373 || optab_handler (code == PLUS_EXPR ? uaddv4_optab : usubv4_optab,
3374 TYPE_MODE (type)) == CODE_FOR_nothing)
3375 return false;
3377 FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
3379 use_stmt = USE_STMT (use_p);
3380 if (is_gimple_debug (use_stmt))
3381 continue;
3383 if (uaddsub_overflow_check_p (stmt, use_stmt))
3384 ovf_use_seen = true;
3385 else
3386 use_seen = true;
3387 if (ovf_use_seen && use_seen)
3388 break;
3391 if (!ovf_use_seen || !use_seen)
3392 return false;
3394 tree ctype = build_complex_type (type);
3395 tree rhs1 = gimple_assign_rhs1 (stmt);
3396 tree rhs2 = gimple_assign_rhs2 (stmt);
3397 gcall *g = gimple_build_call_internal (code == PLUS_EXPR
3398 ? IFN_ADD_OVERFLOW : IFN_SUB_OVERFLOW,
3399 2, rhs1, rhs2);
3400 tree ctmp = make_ssa_name (ctype);
3401 gimple_call_set_lhs (g, ctmp);
3402 gsi_insert_before (gsi, g, GSI_SAME_STMT);
3403 gassign *g2 = gimple_build_assign (lhs, REALPART_EXPR,
3404 build1 (REALPART_EXPR, type, ctmp));
3405 gsi_replace (gsi, g2, true);
3406 tree ovf = make_ssa_name (type);
3407 g2 = gimple_build_assign (ovf, IMAGPART_EXPR,
3408 build1 (IMAGPART_EXPR, type, ctmp));
3409 gsi_insert_after (gsi, g2, GSI_NEW_STMT);
3411 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
3413 if (is_gimple_debug (use_stmt))
3414 continue;
3416 int ovf_use = uaddsub_overflow_check_p (stmt, use_stmt);
3417 if (ovf_use == 0)
3418 continue;
3419 if (gimple_code (use_stmt) == GIMPLE_COND)
3421 gcond *cond_stmt = as_a <gcond *> (use_stmt);
3422 gimple_cond_set_lhs (cond_stmt, ovf);
3423 gimple_cond_set_rhs (cond_stmt, build_int_cst (type, 0));
3424 gimple_cond_set_code (cond_stmt, ovf_use == 1 ? NE_EXPR : EQ_EXPR);
3426 else
3428 gcc_checking_assert (is_gimple_assign (use_stmt));
3429 if (gimple_assign_rhs_class (use_stmt) == GIMPLE_BINARY_RHS)
3431 gimple_assign_set_rhs1 (use_stmt, ovf);
3432 gimple_assign_set_rhs2 (use_stmt, build_int_cst (type, 0));
3433 gimple_assign_set_rhs_code (use_stmt,
3434 ovf_use == 1 ? NE_EXPR : EQ_EXPR);
3436 else
3438 gcc_checking_assert (gimple_assign_rhs_code (use_stmt)
3439 == COND_EXPR);
3440 tree cond = build2 (ovf_use == 1 ? NE_EXPR : EQ_EXPR,
3441 boolean_type_node, ovf,
3442 build_int_cst (type, 0));
3443 gimple_assign_set_rhs1 (use_stmt, cond);
3446 update_stmt (use_stmt);
3448 return true;
3451 /* Return true if target has support for divmod. */
3453 static bool
3454 target_supports_divmod_p (optab divmod_optab, optab div_optab, machine_mode mode)
3456 /* If target supports hardware divmod insn, use it for divmod. */
3457 if (optab_handler (divmod_optab, mode) != CODE_FOR_nothing)
3458 return true;
3460 /* Check if libfunc for divmod is available. */
3461 rtx libfunc = optab_libfunc (divmod_optab, mode);
3462 if (libfunc != NULL_RTX)
3464 /* If optab_handler exists for div_optab, perhaps in a wider mode,
3465      we don't want to use the libfunc even if it exists for the given mode.  */
3466 machine_mode div_mode;
3467 FOR_EACH_MODE_FROM (div_mode, mode)
3468 if (optab_handler (div_optab, div_mode) != CODE_FOR_nothing)
3469 return false;
3471 return targetm.expand_divmod_libfunc != NULL;
3474 return false;
3477 /* Check if stmt is candidate for divmod transform. */
3479 static bool
3480 divmod_candidate_p (gassign *stmt)
3482 tree type = TREE_TYPE (gimple_assign_lhs (stmt));
3483 machine_mode mode = TYPE_MODE (type);
3484 optab divmod_optab, div_optab;
3486 if (TYPE_UNSIGNED (type))
3488 divmod_optab = udivmod_optab;
3489 div_optab = udiv_optab;
3491 else
3493 divmod_optab = sdivmod_optab;
3494 div_optab = sdiv_optab;
3497 tree op1 = gimple_assign_rhs1 (stmt);
3498 tree op2 = gimple_assign_rhs2 (stmt);
3500 /* Disable the transform if either is a constant, since division-by-constant
3501 may have specialized expansion. */
3502 if (CONSTANT_CLASS_P (op1) || CONSTANT_CLASS_P (op2))
3503 return false;
3505 /* Exclude the case where TYPE_OVERFLOW_TRAPS (type) as that should
3506 expand using the [su]divv optabs. */
3507 if (TYPE_OVERFLOW_TRAPS (type))
3508 return false;
3510 if (!target_supports_divmod_p (divmod_optab, div_optab, mode))
3511 return false;
3513 return true;
3516 /* This function looks for:
3517 t1 = a TRUNC_DIV_EXPR b;
3518 t2 = a TRUNC_MOD_EXPR b;
3519 and transforms it to the following sequence:
3520 complex_tmp = DIVMOD (a, b);
3521    t1 = REALPART_EXPR (complex_tmp);
3522    t2 = IMAGPART_EXPR (complex_tmp);
3523 For conditions enabling the transform see divmod_candidate_p().
3525 The pass has three parts:
3526 1) Find top_stmt which is trunc_div or trunc_mod stmt and dominates all
3527 other trunc_div_expr and trunc_mod_expr stmts.
3528 2) Add top_stmt and all trunc_div and trunc_mod stmts dominated by top_stmt
3529 to stmts vector.
3530 3) Insert DIVMOD call just before top_stmt and update entries in
3531       stmts vector to use return value of DIVMOD (REALPART_EXPR for div,
3532 IMAGPART_EXPR for mod). */
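/* For example, given

     void f (int a, int b, int *q, int *r)
     {
       *q = a / b;
       *r = a % b;
     }

   a single DIVMOD internal call computes both results, which on targets
   without a hardware divide insn becomes one divmod libcall instead of
   separate division and modulus calls.  */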
3534 static bool
3535 convert_to_divmod (gassign *stmt)
3537 if (stmt_can_throw_internal (stmt)
3538 || !divmod_candidate_p (stmt))
3539 return false;
3541 tree op1 = gimple_assign_rhs1 (stmt);
3542 tree op2 = gimple_assign_rhs2 (stmt);
3544 imm_use_iterator use_iter;
3545 gimple *use_stmt;
3546 auto_vec<gimple *> stmts;
3548 gimple *top_stmt = stmt;
3549 basic_block top_bb = gimple_bb (stmt);
3551 /* Part 1: Try to set top_stmt to "topmost" stmt that dominates
3552      at least stmt and possibly other trunc_div/trunc_mod stmts
3553 having same operands as stmt. */
3555 FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, op1)
3557 if (is_gimple_assign (use_stmt)
3558 && (gimple_assign_rhs_code (use_stmt) == TRUNC_DIV_EXPR
3559 || gimple_assign_rhs_code (use_stmt) == TRUNC_MOD_EXPR)
3560 && operand_equal_p (op1, gimple_assign_rhs1 (use_stmt), 0)
3561 && operand_equal_p (op2, gimple_assign_rhs2 (use_stmt), 0))
3563 if (stmt_can_throw_internal (use_stmt))
3564 continue;
3566 basic_block bb = gimple_bb (use_stmt);
3568 if (bb == top_bb)
3570 if (gimple_uid (use_stmt) < gimple_uid (top_stmt))
3571 top_stmt = use_stmt;
3573 else if (dominated_by_p (CDI_DOMINATORS, top_bb, bb))
3575 top_bb = bb;
3576 top_stmt = use_stmt;
3581 tree top_op1 = gimple_assign_rhs1 (top_stmt);
3582 tree top_op2 = gimple_assign_rhs2 (top_stmt);
3584 stmts.safe_push (top_stmt);
3585 bool div_seen = (gimple_assign_rhs_code (top_stmt) == TRUNC_DIV_EXPR);
3587   /* Part 2: Add all trunc_div/trunc_mod statements dominated by top_bb
3588 to stmts vector. The 2nd loop will always add stmt to stmts vector, since
3589 gimple_bb (top_stmt) dominates gimple_bb (stmt), so the
3590      2nd loop ends up adding at least a single trunc_mod_expr stmt.  */
3592 FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, top_op1)
3594 if (is_gimple_assign (use_stmt)
3595 && (gimple_assign_rhs_code (use_stmt) == TRUNC_DIV_EXPR
3596 || gimple_assign_rhs_code (use_stmt) == TRUNC_MOD_EXPR)
3597 && operand_equal_p (top_op1, gimple_assign_rhs1 (use_stmt), 0)
3598 && operand_equal_p (top_op2, gimple_assign_rhs2 (use_stmt), 0))
3600 if (use_stmt == top_stmt
3601 || stmt_can_throw_internal (use_stmt)
3602 || !dominated_by_p (CDI_DOMINATORS, gimple_bb (use_stmt), top_bb))
3603 continue;
3605 stmts.safe_push (use_stmt);
3606 if (gimple_assign_rhs_code (use_stmt) == TRUNC_DIV_EXPR)
3607 div_seen = true;
3611 if (!div_seen)
3612 return false;
3614 /* Part 3: Create libcall to internal fn DIVMOD:
3615 divmod_tmp = DIVMOD (op1, op2). */
3617 gcall *call_stmt = gimple_build_call_internal (IFN_DIVMOD, 2, op1, op2);
3618 tree res = make_temp_ssa_name (build_complex_type (TREE_TYPE (op1)),
3619 call_stmt, "divmod_tmp");
3620 gimple_call_set_lhs (call_stmt, res);
3621 /* We rejected throwing statements above. */
3622 gimple_call_set_nothrow (call_stmt, true);
3624 /* Insert the call before top_stmt. */
3625 gimple_stmt_iterator top_stmt_gsi = gsi_for_stmt (top_stmt);
3626 gsi_insert_before (&top_stmt_gsi, call_stmt, GSI_SAME_STMT);
3628 widen_mul_stats.divmod_calls_inserted++;
3630 /* Update all statements in stmts vector:
3631 lhs = op1 TRUNC_DIV_EXPR op2 -> lhs = REALPART_EXPR<divmod_tmp>
3632 lhs = op1 TRUNC_MOD_EXPR op2 -> lhs = IMAGPART_EXPR<divmod_tmp>. */
3634 for (unsigned i = 0; stmts.iterate (i, &use_stmt); ++i)
3636 tree new_rhs;
3638 switch (gimple_assign_rhs_code (use_stmt))
3640 case TRUNC_DIV_EXPR:
3641 new_rhs = fold_build1 (REALPART_EXPR, TREE_TYPE (op1), res);
3642 break;
3644 case TRUNC_MOD_EXPR:
3645 new_rhs = fold_build1 (IMAGPART_EXPR, TREE_TYPE (op1), res);
3646 break;
3648 default:
3649 gcc_unreachable ();
3652 gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
3653 gimple_assign_set_rhs_from_tree (&gsi, new_rhs);
3654 update_stmt (use_stmt);
3657 return true;
3660 /* Find integer multiplications where the operands are extended from
3661 smaller types, and replace the MULT_EXPR with a WIDEN_MULT_EXPR
3662 where appropriate. */
3664 namespace {
3666 const pass_data pass_data_optimize_widening_mul =
3668 GIMPLE_PASS, /* type */
3669 "widening_mul", /* name */
3670 OPTGROUP_NONE, /* optinfo_flags */
3671 TV_TREE_WIDEN_MUL, /* tv_id */
3672 PROP_ssa, /* properties_required */
3673 0, /* properties_provided */
3674 0, /* properties_destroyed */
3675 0, /* todo_flags_start */
3676 TODO_update_ssa, /* todo_flags_finish */
3679 class pass_optimize_widening_mul : public gimple_opt_pass
3681 public:
3682 pass_optimize_widening_mul (gcc::context *ctxt)
3683 : gimple_opt_pass (pass_data_optimize_widening_mul, ctxt)
3686 /* opt_pass methods: */
3687 virtual bool gate (function *)
3689 return flag_expensive_optimizations && optimize;
3692 virtual unsigned int execute (function *);
3694 }; // class pass_optimize_widening_mul
3696 /* Walker class to perform the transformation in reverse dominance order. */
3698 class math_opts_dom_walker : public dom_walker
3700 public:
3701 /* Constructor, CFG_CHANGED is a pointer to a boolean flag that will be set
3702      if walking modifies the CFG.  */
3704 math_opts_dom_walker (bool *cfg_changed_p)
3705 : dom_walker (CDI_DOMINATORS), m_last_result_set (),
3706 m_cfg_changed_p (cfg_changed_p) {}
3708 /* The actual actions performed in the walk. */
3710 virtual void after_dom_children (basic_block);
3712 /* Set of results of chains of multiply and add statement combinations that
3713 were not transformed into FMAs because of active deferring. */
3714 hash_set<tree> m_last_result_set;
3716 /* Pointer to a flag of the user that needs to be set if CFG has been
3717 modified. */
3718 bool *m_cfg_changed_p;
3721 void
3722 math_opts_dom_walker::after_dom_children (basic_block bb)
3724 gimple_stmt_iterator gsi;
3726 fma_deferring_state fma_state (PARAM_VALUE (PARAM_AVOID_FMA_MAX_BITS) > 0);
3728 for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi);)
3730 gimple *stmt = gsi_stmt (gsi);
3731 enum tree_code code;
3733 if (is_gimple_assign (stmt))
3735 code = gimple_assign_rhs_code (stmt);
3736 switch (code)
3738 case MULT_EXPR:
3739 if (!convert_mult_to_widen (stmt, &gsi)
3740 && !convert_expand_mult_copysign (stmt, &gsi)
3741 && convert_mult_to_fma (stmt,
3742 gimple_assign_rhs1 (stmt),
3743 gimple_assign_rhs2 (stmt),
3744 &fma_state))
3746 gsi_remove (&gsi, true);
3747 release_defs (stmt);
3748 continue;
3750 break;
3752 case PLUS_EXPR:
3753 case MINUS_EXPR:
3754 if (!convert_plusminus_to_widen (&gsi, stmt, code))
3755 match_uaddsub_overflow (&gsi, stmt, code);
3756 break;
3758 case TRUNC_MOD_EXPR:
3759 convert_to_divmod (as_a<gassign *> (stmt));
3760 break;
3762 default:;
3765 else if (is_gimple_call (stmt))
3767 tree fndecl = gimple_call_fndecl (stmt);
3768 if (fndecl && gimple_call_builtin_p (stmt, BUILT_IN_NORMAL))
3770 switch (DECL_FUNCTION_CODE (fndecl))
3772 case BUILT_IN_POWF:
3773 case BUILT_IN_POW:
3774 case BUILT_IN_POWL:
3775 if (gimple_call_lhs (stmt)
3776 && TREE_CODE (gimple_call_arg (stmt, 1)) == REAL_CST
3777 && real_equal
3778 (&TREE_REAL_CST (gimple_call_arg (stmt, 1)),
3779 &dconst2)
3780 && convert_mult_to_fma (stmt,
3781 gimple_call_arg (stmt, 0),
3782 gimple_call_arg (stmt, 0),
3783 &fma_state))
3785 unlink_stmt_vdef (stmt);
3786 if (gsi_remove (&gsi, true)
3787 && gimple_purge_dead_eh_edges (bb))
3788 *m_cfg_changed_p = true;
3789 release_defs (stmt);
3790 continue;
3792 break;
3794 default:;
3797 else
3798 cancel_fma_deferring (&fma_state);
3800 gsi_next (&gsi);
3802 if (fma_state.m_deferring_p
3803 && fma_state.m_initial_phi)
3805 gcc_checking_assert (fma_state.m_last_result);
3806 if (!last_fma_candidate_feeds_initial_phi (&fma_state,
3807 &m_last_result_set))
3808 cancel_fma_deferring (&fma_state);
3809 else
3810 m_last_result_set.add (fma_state.m_last_result);
3815 unsigned int
3816 pass_optimize_widening_mul::execute (function *fun)
3818 bool cfg_changed = false;
3820 memset (&widen_mul_stats, 0, sizeof (widen_mul_stats));
3821 calculate_dominance_info (CDI_DOMINATORS);
3822 renumber_gimple_stmt_uids ();
3824 math_opts_dom_walker (&cfg_changed).walk (ENTRY_BLOCK_PTR_FOR_FN (cfun));
3826 statistics_counter_event (fun, "widening multiplications inserted",
3827 widen_mul_stats.widen_mults_inserted);
3828 statistics_counter_event (fun, "widening maccs inserted",
3829 widen_mul_stats.maccs_inserted);
3830 statistics_counter_event (fun, "fused multiply-adds inserted",
3831 widen_mul_stats.fmas_inserted);
3832 statistics_counter_event (fun, "divmod calls inserted",
3833 widen_mul_stats.divmod_calls_inserted);
3835 return cfg_changed ? TODO_cleanup_cfg : 0;
3838 } // anon namespace
3840 gimple_opt_pass *
3841 make_pass_optimize_widening_mul (gcc::context *ctxt)
3843 return new pass_optimize_widening_mul (ctxt);