/* Global, SSA-based optimizations using mathematical identities.
   Copyright (C) 2005-2018 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* Currently, the only mini-pass in this file tries to CSE reciprocal
   operations.  These are common in sequences such as this one:

	modulus = sqrt(x*x + y*y + z*z);
	x = x / modulus;
	y = y / modulus;
	z = z / modulus;

   that can be optimized to

	modulus = sqrt(x*x + y*y + z*z);
	rmodulus = 1.0 / modulus;
	x = x * rmodulus;
	y = y * rmodulus;
	z = z * rmodulus;

   We do this for loop invariant divisors, and with this pass whenever
   we notice that a division has the same divisor multiple times.

   Of course, like in PRE, we don't insert a division if a dominator
   already has one.  However, this cannot be done as an extension of
   PRE for several reasons.

   First of all, experiments found that the transformation is not
   always useful if there are only two divisions by the same divisor.
   This is probably because modern processors can pipeline the
   divisions; on older, in-order processors it should still be
   effective to optimize two divisions by the same number.  We make
   this a param, and it shall be called N in the remainder of this
   comment.

   Second, if trapping math is active, we have less freedom on where
   to insert divisions: we can only do so in basic blocks that already
   contain one.  (If divisions don't trap, instead, we can insert
   divisions elsewhere, which will be in blocks that are common dominators
   of those that have the division).

   We really don't want to compute the reciprocal unless a division will
   be found.  To do this, we won't insert the division in a basic block
   that has less than N divisions *post-dominating* it.

   The algorithm constructs a subset of the dominator tree, holding the
   blocks containing the divisions and the common dominators to them,
   and walks it twice.  The first walk is in post-order, and it annotates
   each block with the number of divisions that post-dominate it: this
   gives information on where divisions can be inserted profitably.
   The second walk is in pre-order, and it inserts divisions as explained
   above, and replaces divisions by multiplications.

   In the best case, the cost of the pass is O(n_statements).  In the
   worst case, the cost is due to creating the dominator tree subset,
   with a cost of O(n_basic_blocks ^ 2); however this can only happen
   for n_statements / n_basic_blocks statements.  So, the amortized cost
   of creating the dominator tree subset is O(n_basic_blocks) and the
   worst-case cost of the pass is O(n_statements * n_basic_blocks).

   More practically, the cost will be small because there are few
   divisions, and they tend to be in the same basic block, so insert_bb
   is called very few times.

   If we did this using domwalk.c, an efficient implementation would have
   to work on all the variables in a single pass, because we could not
   work on just a subset of the dominator tree, as we do now, and the
   cost would also be something like O(n_statements * n_basic_blocks).
   The data structures would be more complex in order to work on all the
   variables in a single pass.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "predict.h"
#include "alloc-pool.h"
#include "tree-pass.h"
#include "ssa.h"
#include "optabs-tree.h"
#include "gimple-pretty-print.h"
#include "alias.h"
#include "fold-const.h"
#include "gimple-fold.h"
#include "gimple-iterator.h"
#include "gimplify.h"
#include "gimplify-me.h"
#include "stor-layout.h"
#include "tree-cfg.h"
#include "tree-dfa.h"
#include "tree-ssa.h"
#include "builtins.h"
#include "params.h"
#include "internal-fn.h"
#include "case-cfn-macros.h"
#include "optabs-libfuncs.h"
#include "tree-eh.h"
#include "targhooks.h"
#include "domwalk.h"

/* This structure represents one basic block that either computes a
   division, or is a common dominator for basic blocks that compute a
   division.  */
struct occurrence {
  /* The basic block represented by this structure.  */
  basic_block bb;

  /* If non-NULL, the SSA_NAME holding the definition for a reciprocal
     inserted in BB.  */
  tree recip_def;

  /* If non-NULL, the SSA_NAME holding the definition for a squared
     reciprocal inserted in BB.  */
  tree square_recip_def;

  /* If non-NULL, the GIMPLE_ASSIGN for a reciprocal computation that
     was inserted in BB.  */
  gimple *recip_def_stmt;

  /* Pointer to a list of "struct occurrence"s for blocks dominated
     by BB.  */
  struct occurrence *children;

  /* Pointer to the next "struct occurrence" in the list of blocks
     sharing a common dominator.  */
  struct occurrence *next;

  /* The number of divisions that are in BB before compute_merit.  The
     number of divisions that are in BB or post-dominate it after
     compute_merit.  */
  int num_divisions;

  /* True if the basic block has a division, false if it is a common
     dominator for basic blocks that do.  If it is false and trapping
     math is active, BB is not a candidate for inserting a reciprocal.  */
  bool bb_has_division;
};

static struct
{
  /* Number of 1.0/X ops inserted.  */
  int rdivs_inserted;

  /* Number of 1.0/FUNC ops inserted.  */
  int rfuncs_inserted;
} reciprocal_stats;

static struct
{
  /* Number of cexpi calls inserted.  */
  int inserted;
} sincos_stats;

static struct
{
  /* Number of widening multiplication ops inserted.  */
  int widen_mults_inserted;

  /* Number of integer multiply-and-accumulate ops inserted.  */
  int maccs_inserted;

  /* Number of fp fused multiply-add ops inserted.  */
  int fmas_inserted;

  /* Number of divmod calls inserted.  */
  int divmod_calls_inserted;
} widen_mul_stats;

/* The instance of "struct occurrence" representing the highest
   interesting block in the dominator tree.  */
static struct occurrence *occ_head;

/* Allocation pool for getting instances of "struct occurrence".  */
static object_allocator<occurrence> *occ_pool;

/* Allocate and return a new struct occurrence for basic block BB,
   whose children list is headed by CHILDREN.  */
static struct occurrence *
occ_new (basic_block bb, struct occurrence *children)
{
  struct occurrence *occ;

  bb->aux = occ = occ_pool->allocate ();
  memset (occ, 0, sizeof (struct occurrence));

  occ->bb = bb;
  occ->children = children;
  return occ;
}

/* Insert NEW_OCC into our subset of the dominator tree.  P_HEAD points to a
   list of "struct occurrence"s, one per basic block, having IDOM as
   their common dominator.

   We try to insert NEW_OCC as deep as possible in the tree, and we also
   insert any other block that is a common dominator for BB and one
   block already in the tree.  */

static void
insert_bb (struct occurrence *new_occ, basic_block idom,
	   struct occurrence **p_head)
{
  struct occurrence *occ, **p_occ;

  for (p_occ = p_head; (occ = *p_occ) != NULL; )
    {
      basic_block bb = new_occ->bb, occ_bb = occ->bb;
      basic_block dom = nearest_common_dominator (CDI_DOMINATORS, occ_bb, bb);
      if (dom == bb)
	{
	  /* BB dominates OCC_BB.  OCC becomes NEW_OCC's child: remove OCC
	     from its list.  */
	  *p_occ = occ->next;
	  occ->next = new_occ->children;
	  new_occ->children = occ;

	  /* Try the next block (it may as well be dominated by BB).  */
	}
      else if (dom == occ_bb)
	{
	  /* OCC_BB dominates BB.  Tail recurse to look deeper.  */
	  insert_bb (new_occ, dom, &occ->children);
	  return;
	}
      else if (dom != idom)
	{
	  gcc_assert (!dom->aux);

	  /* There is a dominator between IDOM and BB, add it and make
	     two children out of NEW_OCC and OCC.  First, remove OCC from
	     its list.  */
	  *p_occ = occ->next;
	  new_occ->next = occ;
	  occ->next = NULL;

	  /* None of the previous blocks has DOM as a dominator: if we tail
	     recursed, we would reexamine them uselessly.  Just switch BB with
	     DOM, and go on looking for blocks dominated by DOM.  */
	  new_occ = occ_new (dom, new_occ);
	}
      else
	{
	  /* Nothing special, go on with the next element.  */
	  p_occ = &occ->next;
	}
    }

  /* No place was found as a child of IDOM.  Make BB a sibling of IDOM.  */
  new_occ->next = *p_head;
  *p_head = new_occ;
}

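/* For example (illustration only, block numbers are made up): suppose
   the list at *P_HEAD contains only BB6 and we are inserting NEW_OCC
   for BB7, where the nearest common dominator of BB6 and BB7 is BB3,
   distinct from IDOM and from both blocks.  The third case above
   fires: BB6 is unlinked, a fresh occurrence for BB3 is created with
   BB7 and BB6 as its children, and the scan then continues looking
   for further blocks dominated by BB3.  */
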
/* Register that we found a division in BB.
   IMPORTANCE is a measure of how much weighting to give
   that division.  Use IMPORTANCE = 2 to register a single
   division.  If the division is going to be found multiple
   times use 1 (as it is with squares).  */

static inline void
register_division_in (basic_block bb, int importance)
{
  struct occurrence *occ;

  occ = (struct occurrence *) bb->aux;
  if (!occ)
    {
      occ = occ_new (bb, NULL);
      insert_bb (occ, ENTRY_BLOCK_PTR_FOR_FN (cfun), &occ_head);
    }

  occ->bb_has_division = true;
  occ->num_divisions += importance;
}

/* Compute the number of divisions that postdominate each block in OCC and
   its children.  */

static void
compute_merit (struct occurrence *occ)
{
  struct occurrence *occ_child;
  basic_block dom = occ->bb;

  for (occ_child = occ->children; occ_child; occ_child = occ_child->next)
    {
      basic_block bb;
      if (occ_child->children)
	compute_merit (occ_child);

      if (flag_exceptions)
	bb = single_noncomplex_succ (dom);
      else
	bb = dom;

      if (dominated_by_p (CDI_POST_DOMINATORS, bb, occ_child->bb))
	occ->num_divisions += occ_child->num_divisions;
    }
}

/* Return whether USE_STMT is a floating-point division by DEF.  */
static inline bool
is_division_by (gimple *use_stmt, tree def)
{
  return is_gimple_assign (use_stmt)
	 && gimple_assign_rhs_code (use_stmt) == RDIV_EXPR
	 && gimple_assign_rhs2 (use_stmt) == def
	 /* Do not recognize x / x as valid division, as we are getting
	    confused later by replacing all immediate uses of x in such
	    a stmt.  */
	 && gimple_assign_rhs1 (use_stmt) != def;
}

/* Return TRUE if USE_STMT is a multiplication of DEF by A.  */
static inline bool
is_mult_by (gimple *use_stmt, tree def, tree a)
{
  if (gimple_code (use_stmt) == GIMPLE_ASSIGN
      && gimple_assign_rhs_code (use_stmt) == MULT_EXPR)
    {
      tree op0 = gimple_assign_rhs1 (use_stmt);
      tree op1 = gimple_assign_rhs2 (use_stmt);

      return (op0 == def && op1 == a)
	     || (op0 == a && op1 == def);
    }
  return false;
}

/* Return whether USE_STMT is DEF * DEF.  */
static inline bool
is_square_of (gimple *use_stmt, tree def)
{
  return is_mult_by (use_stmt, def, def);
}

/* Return whether USE_STMT is a floating-point division by
   DEF * DEF.  */
static inline bool
is_division_by_square (gimple *use_stmt, tree def)
{
  if (gimple_code (use_stmt) == GIMPLE_ASSIGN
      && gimple_assign_rhs_code (use_stmt) == RDIV_EXPR
      && gimple_assign_rhs1 (use_stmt) != gimple_assign_rhs2 (use_stmt))
    {
      tree denominator = gimple_assign_rhs2 (use_stmt);
      if (TREE_CODE (denominator) == SSA_NAME)
	return is_square_of (SSA_NAME_DEF_STMT (denominator), def);
    }
  return false;
}

/* Walk the subset of the dominator tree rooted at OCC, setting the
   RECIP_DEF field to a definition of 1.0 / DEF that can be used in
   the given basic block.  The field may be left NULL, of course,
   if it is not possible or profitable to do the optimization.

   DEF_GSI is an iterator pointing at the statement defining DEF.
   If RECIP_DEF is set, a dominator already has a computation that can
   be used.

   If SHOULD_INSERT_SQUARE_RECIP is set, then this also inserts
   the square of the reciprocal immediately after the definition
   of the reciprocal.  */

static void
insert_reciprocals (gimple_stmt_iterator *def_gsi, struct occurrence *occ,
		    tree def, tree recip_def, tree square_recip_def,
		    int should_insert_square_recip, int threshold)
{
  tree type;
  gassign *new_stmt, *new_square_stmt;
  gimple_stmt_iterator gsi;
  struct occurrence *occ_child;

  if (!recip_def
      && (occ->bb_has_division || !flag_trapping_math)
      /* Divide by two as all divisions are counted twice in
	 the costing loop.  */
      && occ->num_divisions / 2 >= threshold)
    {
      /* Make a variable with the replacement and substitute it.  */
      type = TREE_TYPE (def);
      recip_def = create_tmp_reg (type, "reciptmp");
      new_stmt = gimple_build_assign (recip_def, RDIV_EXPR,
				      build_one_cst (type), def);

      if (should_insert_square_recip)
	{
	  square_recip_def = create_tmp_reg (type, "powmult_reciptmp");
	  new_square_stmt = gimple_build_assign (square_recip_def, MULT_EXPR,
						 recip_def, recip_def);
	}

      if (occ->bb_has_division)
	{
	  /* Case 1: insert before an existing division.  */
	  gsi = gsi_after_labels (occ->bb);
	  while (!gsi_end_p (gsi)
		 && !is_division_by (gsi_stmt (gsi), def)
		 && !is_division_by_square (gsi_stmt (gsi), def))
	    gsi_next (&gsi);

	  gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
	  if (should_insert_square_recip)
	    gsi_insert_before (&gsi, new_square_stmt, GSI_SAME_STMT);
	}
      else if (def_gsi && occ->bb == def_gsi->bb)
	{
	  /* Case 2: insert right after the definition.  Note that this will
	     never happen if the definition statement can throw, because in
	     that case the sole successor of the statement's basic block will
	     dominate all the uses as well.  */
	  gsi_insert_after (def_gsi, new_stmt, GSI_NEW_STMT);
	  if (should_insert_square_recip)
	    gsi_insert_after (def_gsi, new_square_stmt, GSI_NEW_STMT);
	}
      else
	{
	  /* Case 3: insert in a basic block not containing defs/uses.  */
	  gsi = gsi_after_labels (occ->bb);
	  gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
	  if (should_insert_square_recip)
	    gsi_insert_before (&gsi, new_square_stmt, GSI_SAME_STMT);
	}

      reciprocal_stats.rdivs_inserted++;

      occ->recip_def_stmt = new_stmt;
    }

  occ->recip_def = recip_def;
  occ->square_recip_def = square_recip_def;
  for (occ_child = occ->children; occ_child; occ_child = occ_child->next)
    insert_reciprocals (def_gsi, occ_child, def, recip_def,
			square_recip_def, should_insert_square_recip,
			threshold);
}

/* Replace occurrences of expr / (x * x) with expr * ((1 / x) * (1 / x)).
   Take as argument the use for (x * x).  */
static inline void
replace_reciprocal_squares (use_operand_p use_p)
{
  gimple *use_stmt = USE_STMT (use_p);
  basic_block bb = gimple_bb (use_stmt);
  struct occurrence *occ = (struct occurrence *) bb->aux;

  if (optimize_bb_for_speed_p (bb) && occ->square_recip_def
      && occ->recip_def)
    {
      gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
      gimple_assign_set_rhs_code (use_stmt, MULT_EXPR);
      gimple_assign_set_rhs2 (use_stmt, occ->square_recip_def);
      SET_USE (use_p, occ->square_recip_def);
      fold_stmt_inplace (&gsi);
      update_stmt (use_stmt);
    }
}

/* Replace the division at USE_P with a multiplication by the reciprocal, if
   possible.  */

static inline void
replace_reciprocal (use_operand_p use_p)
{
  gimple *use_stmt = USE_STMT (use_p);
  basic_block bb = gimple_bb (use_stmt);
  struct occurrence *occ = (struct occurrence *) bb->aux;

  if (optimize_bb_for_speed_p (bb)
      && occ->recip_def && use_stmt != occ->recip_def_stmt)
    {
      gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
      gimple_assign_set_rhs_code (use_stmt, MULT_EXPR);
      SET_USE (use_p, occ->recip_def);
      fold_stmt_inplace (&gsi);
      update_stmt (use_stmt);
    }
}

/* Free OCC and return one more "struct occurrence" to be freed.  */

static struct occurrence *
free_bb (struct occurrence *occ)
{
  struct occurrence *child, *next;

  /* First get the two pointers hanging off OCC.  */
  next = occ->next;
  child = occ->children;
  occ->bb->aux = NULL;
  occ_pool->remove (occ);

  /* Now ensure that we don't recurse unless it is necessary.  */
  if (!child)
    return next;
  else
    {
      while (next)
	next = free_bb (next);

      return child;
    }
}

/* Transform sequences like
     t = sqrt (a)
     x = 1.0 / t;
     r1 = x * x;
     r2 = a * x;
   into:
     t = sqrt (a)
     r1 = 1.0 / a;
     r2 = t;
     x = r1 * r2;
   depending on the uses of x, r1, r2.  This removes one multiplication and
   allows the sqrt and division operations to execute in parallel.
   DEF_GSI is the gsi of the initial division by sqrt that defines
   DEF (x in the example above).  */

static void
optimize_recip_sqrt (gimple_stmt_iterator *def_gsi, tree def)
{
  gimple *use_stmt;
  imm_use_iterator use_iter;
  gimple *stmt = gsi_stmt (*def_gsi);
  tree x = def;
  tree orig_sqrt_ssa_name = gimple_assign_rhs2 (stmt);
  tree div_rhs1 = gimple_assign_rhs1 (stmt);

  if (TREE_CODE (orig_sqrt_ssa_name) != SSA_NAME
      || TREE_CODE (div_rhs1) != REAL_CST
      || !real_equal (&TREE_REAL_CST (div_rhs1), &dconst1))
    return;

  gcall *sqrt_stmt
    = dyn_cast <gcall *> (SSA_NAME_DEF_STMT (orig_sqrt_ssa_name));

  if (!sqrt_stmt || !gimple_call_lhs (sqrt_stmt))
    return;

  switch (gimple_call_combined_fn (sqrt_stmt))
    {
    CASE_CFN_SQRT:
    CASE_CFN_SQRT_FN:
      break;

    default:
      return;
    }
  tree a = gimple_call_arg (sqrt_stmt, 0);

  /* We have 'a' and 'x'.  Now analyze the uses of 'x'.  */

  /* Statements that use x in x * x.  */
  auto_vec<gimple *> sqr_stmts;
  /* Statements that use x in a * x.  */
  auto_vec<gimple *> mult_stmts;
  bool has_other_use = false;
  bool mult_on_main_path = false;

  FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, x)
    {
      if (is_gimple_debug (use_stmt))
	continue;
      if (is_square_of (use_stmt, x))
	{
	  sqr_stmts.safe_push (use_stmt);
	  if (gimple_bb (use_stmt) == gimple_bb (stmt))
	    mult_on_main_path = true;
	}
      else if (is_mult_by (use_stmt, x, a))
	{
	  mult_stmts.safe_push (use_stmt);
	  if (gimple_bb (use_stmt) == gimple_bb (stmt))
	    mult_on_main_path = true;
	}
      else
	has_other_use = true;
    }

  /* In the x * x and a * x cases we just rewire stmt operands or
     remove multiplications.  In the has_other_use case we introduce
     a multiplication so make sure we don't introduce a multiplication
     on a path where there was none.  */
  if (has_other_use && !mult_on_main_path)
    return;

  if (sqr_stmts.is_empty () && mult_stmts.is_empty ())
    return;

  /* If x = 1.0 / sqrt (a) has uses other than those optimized here we want
     to be able to compose it from the sqr and mult cases.  */
  if (has_other_use && (sqr_stmts.is_empty () || mult_stmts.is_empty ()))
    return;

  if (dump_file)
    {
      fprintf (dump_file, "Optimizing reciprocal sqrt multiplications of\n");
      print_gimple_stmt (dump_file, sqrt_stmt, 0, TDF_NONE);
      print_gimple_stmt (dump_file, stmt, 0, TDF_NONE);
      fprintf (dump_file, "\n");
    }

  bool delete_div = !has_other_use;
  tree sqr_ssa_name = NULL_TREE;
  if (!sqr_stmts.is_empty ())
    {
      /* r1 = x * x.  Transform the original
	   x = 1.0 / t
	 into
	   tmp1 = 1.0 / a
	   r1 = tmp1.  */

      sqr_ssa_name
	= make_temp_ssa_name (TREE_TYPE (a), NULL, "recip_sqrt_sqr");

      if (dump_file)
	{
	  fprintf (dump_file, "Replacing original division\n");
	  print_gimple_stmt (dump_file, stmt, 0, TDF_NONE);
	  fprintf (dump_file, "with new division\n");
	}
      stmt
	= gimple_build_assign (sqr_ssa_name, gimple_assign_rhs_code (stmt),
			       gimple_assign_rhs1 (stmt), a);
      gsi_insert_before (def_gsi, stmt, GSI_SAME_STMT);
      gsi_remove (def_gsi, true);
      *def_gsi = gsi_for_stmt (stmt);
      fold_stmt_inplace (def_gsi);
      update_stmt (stmt);

      if (dump_file)
	print_gimple_stmt (dump_file, stmt, 0, TDF_NONE);

      delete_div = false;
      gimple *sqr_stmt;
      unsigned int i;
      FOR_EACH_VEC_ELT (sqr_stmts, i, sqr_stmt)
	{
	  gimple_stmt_iterator gsi2 = gsi_for_stmt (sqr_stmt);
	  gimple_assign_set_rhs_from_tree (&gsi2, sqr_ssa_name);
	  update_stmt (sqr_stmt);
	}
    }
  if (!mult_stmts.is_empty ())
    {
      /* r2 = a * x.  Transform this into:
	 r2 = t (The original sqrt (a)).  */
      unsigned int i;
      gimple *mult_stmt = NULL;
      FOR_EACH_VEC_ELT (mult_stmts, i, mult_stmt)
	{
	  gimple_stmt_iterator gsi2 = gsi_for_stmt (mult_stmt);

	  if (dump_file)
	    {
	      fprintf (dump_file, "Replacing multiplication\n");
	      print_gimple_stmt (dump_file, mult_stmt, 0, TDF_NONE);
	      fprintf (dump_file, "with assignment\n");
	    }
	  gimple_assign_set_rhs_from_tree (&gsi2, orig_sqrt_ssa_name);
	  fold_stmt_inplace (&gsi2);
	  update_stmt (mult_stmt);
	  if (dump_file)
	    print_gimple_stmt (dump_file, mult_stmt, 0, TDF_NONE);
	}
    }

  if (has_other_use)
    {
      /* Using the two temporaries tmp1, tmp2 from above
	 the original x is now:
	   x = tmp1 * tmp2.  */
      gcc_assert (orig_sqrt_ssa_name);
      gcc_assert (sqr_ssa_name);

      gimple *new_stmt
	= gimple_build_assign (x, MULT_EXPR,
			       orig_sqrt_ssa_name, sqr_ssa_name);
      gsi_insert_after (def_gsi, new_stmt, GSI_NEW_STMT);
      update_stmt (stmt);
    }
  else if (delete_div)
    {
      /* Remove the original division.  */
      gimple_stmt_iterator gsi2 = gsi_for_stmt (stmt);
      gsi_remove (&gsi2, true);
      release_defs (stmt);
    }
  else
    release_ssa_name (x);
}

/* Look for floating-point divisions among DEF's uses, and try to
   replace them by multiplications with the reciprocal.  Add
   as many statements computing the reciprocal as needed.

   DEF must be a GIMPLE register of a floating-point type.  */

static void
execute_cse_reciprocals_1 (gimple_stmt_iterator *def_gsi, tree def)
{
  use_operand_p use_p, square_use_p;
  imm_use_iterator use_iter, square_use_iter;
  tree square_def;
  struct occurrence *occ;
  int count = 0;
  int threshold;
  int square_recip_count = 0;
  int sqrt_recip_count = 0;

  gcc_assert (FLOAT_TYPE_P (TREE_TYPE (def)) && TREE_CODE (def) == SSA_NAME);
  threshold = targetm.min_divisions_for_recip_mul (TYPE_MODE (TREE_TYPE (def)));

  /* If DEF is a square (x * x), count the number of divisions by x.
     If there are more divisions by x than by (DEF * DEF), prefer to optimize
     the reciprocal of x instead of DEF.  This improves cases like:
       def = x * x
       t0 = a / def
       t1 = b / def
       t2 = c / x
     Reciprocal optimization of x results in 1 division rather than 2 or 3.  */
  gimple *def_stmt = SSA_NAME_DEF_STMT (def);

  if (is_gimple_assign (def_stmt)
      && gimple_assign_rhs_code (def_stmt) == MULT_EXPR
      && TREE_CODE (gimple_assign_rhs1 (def_stmt)) == SSA_NAME
      && gimple_assign_rhs1 (def_stmt) == gimple_assign_rhs2 (def_stmt))
    {
      tree op0 = gimple_assign_rhs1 (def_stmt);

      FOR_EACH_IMM_USE_FAST (use_p, use_iter, op0)
	{
	  gimple *use_stmt = USE_STMT (use_p);
	  if (is_division_by (use_stmt, op0))
	    sqrt_recip_count++;
	}
    }

  FOR_EACH_IMM_USE_FAST (use_p, use_iter, def)
    {
      gimple *use_stmt = USE_STMT (use_p);
      if (is_division_by (use_stmt, def))
	{
	  register_division_in (gimple_bb (use_stmt), 2);
	  count++;
	}

      if (is_square_of (use_stmt, def))
	{
	  square_def = gimple_assign_lhs (use_stmt);
	  FOR_EACH_IMM_USE_FAST (square_use_p, square_use_iter, square_def)
	    {
	      gimple *square_use_stmt = USE_STMT (square_use_p);
	      if (is_division_by (square_use_stmt, square_def))
		{
		  /* This is executed twice for each division by a square.  */
		  register_division_in (gimple_bb (square_use_stmt), 1);
		  square_recip_count++;
		}
	    }
	}
    }

  /* Square reciprocals were counted twice above.  */
  square_recip_count /= 2;

  /* If it is more profitable to optimize 1 / x, don't optimize 1 / (x * x).
     Go to the cleanup below so the registered occurrences are freed and
     the bb->aux fields are cleared.  */
  if (sqrt_recip_count > square_recip_count)
    goto out;

  /* Do the expensive part only if we can hope to optimize something.  */
  if (count + square_recip_count >= threshold && count >= 1)
    {
      gimple *use_stmt;
      for (occ = occ_head; occ; occ = occ->next)
	{
	  compute_merit (occ);
	  insert_reciprocals (def_gsi, occ, def, NULL, NULL,
			      square_recip_count, threshold);
	}

      FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, def)
	{
	  if (is_division_by (use_stmt, def))
	    {
	      FOR_EACH_IMM_USE_ON_STMT (use_p, use_iter)
		replace_reciprocal (use_p);
	    }
	  else if (square_recip_count > 0 && is_square_of (use_stmt, def))
	    {
	      FOR_EACH_IMM_USE_ON_STMT (use_p, use_iter)
		{
		  /* Find all uses of the square that are divisions and
		     replace them by multiplications with the inverse.  */
		  imm_use_iterator square_iterator;
		  gimple *powmult_use_stmt = USE_STMT (use_p);
		  tree powmult_def_name = gimple_assign_lhs (powmult_use_stmt);

		  FOR_EACH_IMM_USE_STMT (powmult_use_stmt,
					 square_iterator, powmult_def_name)
		    FOR_EACH_IMM_USE_ON_STMT (square_use_p, square_iterator)
		      {
			gimple *powmult_use_stmt = USE_STMT (square_use_p);
			if (is_division_by (powmult_use_stmt,
					    powmult_def_name))
			  replace_reciprocal_squares (square_use_p);
		      }
		}
	    }
	}
    }

out:
  for (occ = occ_head; occ; )
    occ = free_bb (occ);

  occ_head = NULL;
}

/* Return an internal function that implements the reciprocal of CALL,
   or IFN_LAST if there is no such function that the target supports.  */

internal_fn
internal_fn_reciprocal (gcall *call)
{
  internal_fn ifn;

  switch (gimple_call_combined_fn (call))
    {
    CASE_CFN_SQRT:
    CASE_CFN_SQRT_FN:
      ifn = IFN_RSQRT;
      break;

    default:
      return IFN_LAST;
    }

  tree_pair types = direct_internal_fn_types (ifn, call);
  if (!direct_internal_fn_supported_p (ifn, types, OPTIMIZE_FOR_SPEED))
    return IFN_LAST;

  return ifn;
}

/* Go through all the floating-point SSA_NAMEs, and call
   execute_cse_reciprocals_1 on each of them.  */
namespace {

const pass_data pass_data_cse_reciprocals =
{
  GIMPLE_PASS, /* type */
  "recip", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_RECIP, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};

class pass_cse_reciprocals : public gimple_opt_pass
{
public:
  pass_cse_reciprocals (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_cse_reciprocals, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return optimize && flag_reciprocal_math; }
  virtual unsigned int execute (function *);

}; // class pass_cse_reciprocals

unsigned int
pass_cse_reciprocals::execute (function *fun)
{
  basic_block bb;
  tree arg;

  occ_pool = new object_allocator<occurrence> ("dominators for recip");

  memset (&reciprocal_stats, 0, sizeof (reciprocal_stats));
  calculate_dominance_info (CDI_DOMINATORS);
  calculate_dominance_info (CDI_POST_DOMINATORS);

  if (flag_checking)
    FOR_EACH_BB_FN (bb, fun)
      gcc_assert (!bb->aux);

  for (arg = DECL_ARGUMENTS (fun->decl); arg; arg = DECL_CHAIN (arg))
    if (FLOAT_TYPE_P (TREE_TYPE (arg))
	&& is_gimple_reg (arg))
      {
	tree name = ssa_default_def (fun, arg);
	if (name)
	  execute_cse_reciprocals_1 (NULL, name);
      }

  FOR_EACH_BB_FN (bb, fun)
    {
      tree def;

      for (gphi_iterator gsi = gsi_start_phis (bb); !gsi_end_p (gsi);
	   gsi_next (&gsi))
	{
	  gphi *phi = gsi.phi ();
	  def = PHI_RESULT (phi);
	  if (! virtual_operand_p (def)
	      && FLOAT_TYPE_P (TREE_TYPE (def)))
	    execute_cse_reciprocals_1 (NULL, def);
	}

      for (gimple_stmt_iterator gsi = gsi_after_labels (bb); !gsi_end_p (gsi);
	   gsi_next (&gsi))
	{
	  gimple *stmt = gsi_stmt (gsi);

	  if (gimple_has_lhs (stmt)
	      && (def = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_DEF)) != NULL
	      && FLOAT_TYPE_P (TREE_TYPE (def))
	      && TREE_CODE (def) == SSA_NAME)
	    {
	      execute_cse_reciprocals_1 (&gsi, def);
	      stmt = gsi_stmt (gsi);
	      if (flag_unsafe_math_optimizations
		  && is_gimple_assign (stmt)
		  && gimple_assign_lhs (stmt) == def
		  && !stmt_can_throw_internal (cfun, stmt)
		  && gimple_assign_rhs_code (stmt) == RDIV_EXPR)
		optimize_recip_sqrt (&gsi, def);
	    }
	}

      if (optimize_bb_for_size_p (bb))
	continue;

      /* Scan for a/func(b) and convert it to reciprocal a*rfunc(b).  */
      for (gimple_stmt_iterator gsi = gsi_after_labels (bb); !gsi_end_p (gsi);
	   gsi_next (&gsi))
	{
	  gimple *stmt = gsi_stmt (gsi);

	  if (is_gimple_assign (stmt)
	      && gimple_assign_rhs_code (stmt) == RDIV_EXPR)
	    {
	      tree arg1 = gimple_assign_rhs2 (stmt);
	      gimple *stmt1;

	      if (TREE_CODE (arg1) != SSA_NAME)
		continue;

	      stmt1 = SSA_NAME_DEF_STMT (arg1);

	      if (is_gimple_call (stmt1)
		  && gimple_call_lhs (stmt1))
		{
		  bool fail;
		  imm_use_iterator ui;
		  use_operand_p use_p;
		  tree fndecl = NULL_TREE;

		  gcall *call = as_a <gcall *> (stmt1);
		  internal_fn ifn = internal_fn_reciprocal (call);
		  if (ifn == IFN_LAST)
		    {
		      fndecl = gimple_call_fndecl (call);
		      if (!fndecl
			  || !fndecl_built_in_p (fndecl, BUILT_IN_MD))
			continue;
		      fndecl = targetm.builtin_reciprocal (fndecl);
		      if (!fndecl)
			continue;
		    }

		  /* Check that all uses of the SSA name are divisions,
		     otherwise replacing the defining statement will do
		     the wrong thing.  */
		  fail = false;
		  FOR_EACH_IMM_USE_FAST (use_p, ui, arg1)
		    {
		      gimple *stmt2 = USE_STMT (use_p);
		      if (is_gimple_debug (stmt2))
			continue;
		      if (!is_gimple_assign (stmt2)
			  || gimple_assign_rhs_code (stmt2) != RDIV_EXPR
			  || gimple_assign_rhs1 (stmt2) == arg1
			  || gimple_assign_rhs2 (stmt2) != arg1)
			{
			  fail = true;
			  break;
			}
		    }
		  if (fail)
		    continue;

		  gimple_replace_ssa_lhs (call, arg1);
		  if (gimple_call_internal_p (call) != (ifn != IFN_LAST))
		    {
		      auto_vec<tree, 4> args;
		      for (unsigned int i = 0;
			   i < gimple_call_num_args (call); i++)
			args.safe_push (gimple_call_arg (call, i));
		      gcall *stmt2;
		      if (ifn == IFN_LAST)
			stmt2 = gimple_build_call_vec (fndecl, args);
		      else
			stmt2 = gimple_build_call_internal_vec (ifn, args);
		      gimple_call_set_lhs (stmt2, arg1);
		      if (gimple_vdef (call))
			{
			  gimple_set_vdef (stmt2, gimple_vdef (call));
			  SSA_NAME_DEF_STMT (gimple_vdef (stmt2)) = stmt2;
			}
		      gimple_call_set_nothrow (stmt2,
					       gimple_call_nothrow_p (call));
		      gimple_set_vuse (stmt2, gimple_vuse (call));
		      gimple_stmt_iterator gsi2 = gsi_for_stmt (call);
		      gsi_replace (&gsi2, stmt2, true);
		    }
		  else
		    {
		      if (ifn == IFN_LAST)
			gimple_call_set_fndecl (call, fndecl);
		      else
			gimple_call_set_internal_fn (call, ifn);
		      update_stmt (call);
		    }
		  reciprocal_stats.rfuncs_inserted++;

		  FOR_EACH_IMM_USE_STMT (stmt, ui, arg1)
		    {
		      gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
		      gimple_assign_set_rhs_code (stmt, MULT_EXPR);
		      fold_stmt_inplace (&gsi);
		      update_stmt (stmt);
		    }
		}
	    }
	}
    }

  statistics_counter_event (fun, "reciprocal divs inserted",
			    reciprocal_stats.rdivs_inserted);
  statistics_counter_event (fun, "reciprocal functions inserted",
			    reciprocal_stats.rfuncs_inserted);

  free_dominance_info (CDI_DOMINATORS);
  free_dominance_info (CDI_POST_DOMINATORS);
  delete occ_pool;
  return 0;
}

} // anon namespace

gimple_opt_pass *
make_pass_cse_reciprocals (gcc::context *ctxt)
{
  return new pass_cse_reciprocals (ctxt);
}

/* Records an occurrence at statement USE_STMT in the vector of
   statements STMTS if it is dominated by *TOP_BB or dominates it or
   this basic block is not yet initialized.  Returns true if the
   occurrence was pushed on the vector.  Adjusts *TOP_BB to be the
   basic block dominating all statements in the vector.  */

static bool
maybe_record_sincos (vec<gimple *> *stmts,
		     basic_block *top_bb, gimple *use_stmt)
{
  basic_block use_bb = gimple_bb (use_stmt);
  if (*top_bb
      && (*top_bb == use_bb
	  || dominated_by_p (CDI_DOMINATORS, use_bb, *top_bb)))
    stmts->safe_push (use_stmt);
  else if (!*top_bb
	   || dominated_by_p (CDI_DOMINATORS, *top_bb, use_bb))
    {
      stmts->safe_push (use_stmt);
      *top_bb = use_bb;
    }
  else
    return false;

  return true;
}

/* Look for sin, cos and cexpi calls with the same argument NAME and
   create a single call to cexpi CSEing the result in this case.
   We first walk over all immediate uses of the argument collecting
   statements that we can CSE in a vector and in a second pass replace
   the statement rhs with a REALPART or IMAGPART expression on the
   result of the cexpi call we insert before the use statement that
   dominates all other candidates.  */

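/* For example (a sketch, not literal pass output):

       t1 = sin (a);
       t2 = cos (a);

   becomes

       res = cexpi (a);
       t1 = IMAGPART_EXPR <res>;
       t2 = REALPART_EXPR <res>;

   so a single cexpi call (typically implemented via sincos) computes
   both values.  */
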
static bool
execute_cse_sincos_1 (tree name)
{
  gimple_stmt_iterator gsi;
  imm_use_iterator use_iter;
  tree fndecl, res, type;
  gimple *def_stmt, *use_stmt, *stmt;
  int seen_cos = 0, seen_sin = 0, seen_cexpi = 0;
  auto_vec<gimple *> stmts;
  basic_block top_bb = NULL;
  int i;
  bool cfg_changed = false;

  type = TREE_TYPE (name);
  FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, name)
    {
      if (gimple_code (use_stmt) != GIMPLE_CALL
	  || !gimple_call_lhs (use_stmt))
	continue;

      switch (gimple_call_combined_fn (use_stmt))
	{
	CASE_CFN_COS:
	  seen_cos |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
	  break;

	CASE_CFN_SIN:
	  seen_sin |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
	  break;

	CASE_CFN_CEXPI:
	  seen_cexpi |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
	  break;

	default:;
	}
    }

  if (seen_cos + seen_sin + seen_cexpi <= 1)
    return false;

  /* Simply insert cexpi at the beginning of top_bb but not earlier than
     the name def statement.  */
  fndecl = mathfn_built_in (type, BUILT_IN_CEXPI);
  if (!fndecl)
    return false;
  stmt = gimple_build_call (fndecl, 1, name);
  res = make_temp_ssa_name (TREE_TYPE (TREE_TYPE (fndecl)), stmt, "sincostmp");
  gimple_call_set_lhs (stmt, res);

  def_stmt = SSA_NAME_DEF_STMT (name);
  if (!SSA_NAME_IS_DEFAULT_DEF (name)
      && gimple_code (def_stmt) != GIMPLE_PHI
      && gimple_bb (def_stmt) == top_bb)
    {
      gsi = gsi_for_stmt (def_stmt);
      gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
    }
  else
    {
      gsi = gsi_after_labels (top_bb);
      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
    }
  sincos_stats.inserted++;

  /* And adjust the recorded old call sites.  */
  for (i = 0; stmts.iterate (i, &use_stmt); ++i)
    {
      tree rhs = NULL;

      switch (gimple_call_combined_fn (use_stmt))
	{
	CASE_CFN_COS:
	  rhs = fold_build1 (REALPART_EXPR, type, res);
	  break;

	CASE_CFN_SIN:
	  rhs = fold_build1 (IMAGPART_EXPR, type, res);
	  break;

	CASE_CFN_CEXPI:
	  rhs = res;
	  break;

	default:
	  gcc_unreachable ();
	}

      /* Replace call with a copy.  */
      stmt = gimple_build_assign (gimple_call_lhs (use_stmt), rhs);

      gsi = gsi_for_stmt (use_stmt);
      gsi_replace (&gsi, stmt, true);
      if (gimple_purge_dead_eh_edges (gimple_bb (stmt)))
	cfg_changed = true;
    }

  return cfg_changed;
}

/* To evaluate powi(x,n), the floating point value x raised to the
   constant integer exponent n, we use a hybrid algorithm that
   combines the "window method" with look-up tables.  For an
   introduction to exponentiation algorithms and "addition chains",
   see section 4.6.3, "Evaluation of Powers" of Donald E. Knuth,
   "Seminumerical Algorithms", Vol. 2, "The Art of Computer Programming",
   3rd Edition, 1998, and Daniel M. Gordon, "A Survey of Fast Exponentiation
   Methods", Journal of Algorithms, Vol. 27, pp. 129-146, 1998.  */

/* Provide a default value for POWI_MAX_MULTS, the maximum number of
   multiplications to inline before calling the system library's pow
   function.  powi(x,n) requires at worst 2*bits(n)-2 multiplications,
   so this default never requires calling pow, powf or powl.  */

#ifndef POWI_MAX_MULTS
#define POWI_MAX_MULTS  (2*HOST_BITS_PER_WIDE_INT-2)
#endif

/* The size of the "optimal power tree" lookup table.  All
   exponents less than this value are simply looked up in the
   powi_table below.  This threshold is also used to size the
   cache of pseudo registers that hold intermediate results.  */
#define POWI_TABLE_SIZE 256

/* The size, in bits of the window, used in the "window method"
   exponentiation algorithm.  This is equivalent to a radix of
   (1<<POWI_WINDOW_SIZE) in the corresponding "m-ary method".  */
#define POWI_WINDOW_SIZE 3

/* The following table is an efficient representation of an
   "optimal power tree".  For each value, i, the corresponding
   value, j, in the table states that an optimal evaluation
   sequence for calculating pow(x,i) can be found by evaluating
   pow(x,j)*pow(x,i-j).  An optimal power tree for the first
   100 integers is given in Knuth's "Seminumerical algorithms".  */

static const unsigned char powi_table[POWI_TABLE_SIZE] =
  {
      0,   1,   1,   2,   2,   3,   3,   4,  /*   0 -   7 */
      4,   6,   5,   6,   6,  10,   7,   9,  /*   8 -  15 */
      8,  16,   9,  16,  10,  12,  11,  13,  /*  16 -  23 */
     12,  17,  13,  18,  14,  24,  15,  26,  /*  24 -  31 */
     16,  17,  17,  19,  18,  33,  19,  26,  /*  32 -  39 */
     20,  25,  21,  40,  22,  27,  23,  44,  /*  40 -  47 */
     24,  32,  25,  34,  26,  29,  27,  44,  /*  48 -  55 */
     28,  31,  29,  34,  30,  60,  31,  36,  /*  56 -  63 */
     32,  64,  33,  34,  34,  46,  35,  37,  /*  64 -  71 */
     36,  65,  37,  50,  38,  48,  39,  69,  /*  72 -  79 */
     40,  49,  41,  43,  42,  51,  43,  58,  /*  80 -  87 */
     44,  64,  45,  47,  46,  59,  47,  76,  /*  88 -  95 */
     48,  65,  49,  66,  50,  67,  51,  66,  /*  96 - 103 */
     52,  70,  53,  74,  54, 104,  55,  74,  /* 104 - 111 */
     56,  64,  57,  69,  58,  78,  59,  68,  /* 112 - 119 */
     60,  61,  61,  80,  62,  75,  63,  68,  /* 120 - 127 */
     64,  65,  65, 128,  66, 129,  67,  90,  /* 128 - 135 */
     68,  73,  69, 131,  70,  94,  71,  88,  /* 136 - 143 */
     72, 128,  73,  98,  74, 132,  75, 121,  /* 144 - 151 */
     76, 102,  77, 124,  78, 132,  79, 106,  /* 152 - 159 */
     80,  97,  81, 160,  82,  99,  83, 134,  /* 160 - 167 */
     84,  86,  85,  95,  86, 160,  87, 100,  /* 168 - 175 */
     88, 113,  89,  98,  90, 107,  91, 122,  /* 176 - 183 */
     92, 111,  93, 102,  94, 126,  95, 150,  /* 184 - 191 */
     96, 128,  97, 130,  98, 133,  99, 195,  /* 192 - 199 */
    100, 128, 101, 123, 102, 164, 103, 138,  /* 200 - 207 */
    104, 145, 105, 146, 106, 109, 107, 149,  /* 208 - 215 */
    108, 200, 109, 146, 110, 170, 111, 157,  /* 216 - 223 */
    112, 128, 113, 130, 114, 182, 115, 132,  /* 224 - 231 */
    116, 200, 117, 132, 118, 158, 119, 206,  /* 232 - 239 */
    120, 240, 121, 162, 122, 147, 123, 152,  /* 240 - 247 */
    124, 166, 125, 214, 126, 138, 127, 153,  /* 248 - 255 */
  };

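/* For example (illustration only): powi_table[5] == 3 states that x**5
   is best computed as x**3 * x**2.  Recursively, x**2 = x * x and
   x**3 = x**2 * x, giving the addition chain

       t1 = x * x;   t2 = t1 * x;   t3 = t1 * t2;

   i.e. three multiplications in total.  */
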
/* Return the number of multiplications required to calculate
   powi(x,n) where n is less than POWI_TABLE_SIZE.  This is a
   subroutine of powi_cost.  CACHE is an array indicating
   which exponents have already been calculated.  */

static int
powi_lookup_cost (unsigned HOST_WIDE_INT n, bool *cache)
{
  /* If we've already calculated this exponent, then this evaluation
     doesn't require any additional multiplications.  */
  if (cache[n])
    return 0;

  cache[n] = true;
  return powi_lookup_cost (n - powi_table[n], cache)
	 + powi_lookup_cost (powi_table[n], cache) + 1;
}

/* Return the number of multiplications required to calculate
   powi(x,n) for an arbitrary x, given the exponent N.  This
   function needs to be kept in sync with powi_as_mults below.  */

static int
powi_cost (HOST_WIDE_INT n)
{
  bool cache[POWI_TABLE_SIZE];
  unsigned HOST_WIDE_INT digit;
  unsigned HOST_WIDE_INT val;
  int result;

  if (n == 0)
    return 0;

  /* Ignore the reciprocal when calculating the cost.  */
  val = (n < 0) ? -n : n;

  /* Initialize the exponent cache.  */
  memset (cache, 0, POWI_TABLE_SIZE * sizeof (bool));
  cache[1] = true;

  result = 0;

  while (val >= POWI_TABLE_SIZE)
    {
      if (val & 1)
	{
	  digit = val & ((1 << POWI_WINDOW_SIZE) - 1);
	  result += powi_lookup_cost (digit, cache)
		    + POWI_WINDOW_SIZE + 1;
	  val >>= POWI_WINDOW_SIZE;
	}
      else
	{
	  val >>= 1;
	  result++;
	}
    }

  return result + powi_lookup_cost (val, cache);
}

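/* For example (illustration only): powi_cost (259) == 10.  Since
   259 == 32 * 8 + 3, the window method first pays
   powi_lookup_cost (3) == 2 multiplications for x**3, plus
   POWI_WINDOW_SIZE + 1 == 4 for the three squarings and the one
   multiply that fold the low bits in, and finally
   powi_lookup_cost (32) == 4 to build x**32 from the already-cached
   x**2.  */
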
/* Recursive subroutine of powi_as_mults.  This function takes the
   array, CACHE, of already calculated exponents and an exponent N and
   returns a tree that corresponds to CACHE[1]**N, with type TYPE.  */

static tree
powi_as_mults_1 (gimple_stmt_iterator *gsi, location_t loc, tree type,
		 HOST_WIDE_INT n, tree *cache)
{
  tree op0, op1, ssa_target;
  unsigned HOST_WIDE_INT digit;
  gassign *mult_stmt;

  if (n < POWI_TABLE_SIZE && cache[n])
    return cache[n];

  ssa_target = make_temp_ssa_name (type, NULL, "powmult");

  if (n < POWI_TABLE_SIZE)
    {
      cache[n] = ssa_target;
      op0 = powi_as_mults_1 (gsi, loc, type, n - powi_table[n], cache);
      op1 = powi_as_mults_1 (gsi, loc, type, powi_table[n], cache);
    }
  else if (n & 1)
    {
      digit = n & ((1 << POWI_WINDOW_SIZE) - 1);
      op0 = powi_as_mults_1 (gsi, loc, type, n - digit, cache);
      op1 = powi_as_mults_1 (gsi, loc, type, digit, cache);
    }
  else
    {
      op0 = powi_as_mults_1 (gsi, loc, type, n >> 1, cache);
      op1 = op0;
    }

  mult_stmt = gimple_build_assign (ssa_target, MULT_EXPR, op0, op1);
  gimple_set_location (mult_stmt, loc);
  gsi_insert_before (gsi, mult_stmt, GSI_SAME_STMT);

  return ssa_target;
}

/* Convert ARG0**N to a tree of multiplications of ARG0 with itself.
   This function needs to be kept in sync with powi_cost above.  */

static tree
powi_as_mults (gimple_stmt_iterator *gsi, location_t loc,
	       tree arg0, HOST_WIDE_INT n)
{
  tree cache[POWI_TABLE_SIZE], result, type = TREE_TYPE (arg0);
  gassign *div_stmt;
  tree target;

  if (n == 0)
    return build_real (type, dconst1);

  memset (cache, 0, sizeof (cache));
  cache[1] = arg0;

  result = powi_as_mults_1 (gsi, loc, type, (n < 0) ? -n : n, cache);
  if (n >= 0)
    return result;

  /* If the original exponent was negative, reciprocate the result.  */
  target = make_temp_ssa_name (type, NULL, "powmult");
  div_stmt = gimple_build_assign (target, RDIV_EXPR,
				  build_real (type, dconst1), result);
  gimple_set_location (div_stmt, loc);
  gsi_insert_before (gsi, div_stmt, GSI_SAME_STMT);

  return target;
}

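/* For example (a sketch of the emitted GIMPLE): powi_as_mults for
   ARG0 == x and N == -2 produces

       powmult_1 = x * x;
       powmult_2 = 1.0 / powmult_1;

   and returns powmult_2: the sign of the exponent is handled by a
   single reciprocal at the end.  */
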
/* ARG0 and N are the two arguments to a powi builtin in GSI with
   location info LOC.  If the arguments are appropriate, create an
   equivalent sequence of statements prior to GSI using an optimal
   number of multiplications, and return an expression holding the
   result.  */

static tree
gimple_expand_builtin_powi (gimple_stmt_iterator *gsi, location_t loc,
			    tree arg0, HOST_WIDE_INT n)
{
  /* Avoid largest negative number.  */
  if (n != -n
      && ((n >= -1 && n <= 2)
	  || (optimize_function_for_speed_p (cfun)
	      && powi_cost (n) <= POWI_MAX_MULTS)))
    return powi_as_mults (gsi, loc, arg0, n);

  return NULL_TREE;
}

/* Build a gimple call statement that calls FN with argument ARG.
   Set the lhs of the call statement to a fresh SSA name.  Insert the
   statement prior to GSI's current position, and return the fresh
   SSA name.  */

static tree
build_and_insert_call (gimple_stmt_iterator *gsi, location_t loc,
		       tree fn, tree arg)
{
  gcall *call_stmt;
  tree ssa_target;

  call_stmt = gimple_build_call (fn, 1, arg);
  ssa_target = make_temp_ssa_name (TREE_TYPE (arg), NULL, "powroot");
  gimple_set_lhs (call_stmt, ssa_target);
  gimple_set_location (call_stmt, loc);
  gsi_insert_before (gsi, call_stmt, GSI_SAME_STMT);

  return ssa_target;
}

/* Build a gimple binary operation with the given CODE and arguments
   ARG0, ARG1, assigning the result to a new SSA name based on NAME.
   Insert the statement prior to GSI's current position, and return
   the fresh SSA name.  */

static tree
build_and_insert_binop (gimple_stmt_iterator *gsi, location_t loc,
			const char *name, enum tree_code code,
			tree arg0, tree arg1)
{
  tree result = make_temp_ssa_name (TREE_TYPE (arg0), NULL, name);
  gassign *stmt = gimple_build_assign (result, code, arg0, arg1);
  gimple_set_location (stmt, loc);
  gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
  return result;
}

/* Build a gimple reference operation with the given CODE and argument
   ARG0, assigning the result to a new SSA name of TYPE with NAME.
   Insert the statement prior to GSI's current position, and return
   the fresh SSA name.  */

static inline tree
build_and_insert_ref (gimple_stmt_iterator *gsi, location_t loc, tree type,
		      const char *name, enum tree_code code, tree arg0)
{
  tree result = make_temp_ssa_name (type, NULL, name);
  gimple *stmt = gimple_build_assign (result, build1 (code, type, arg0));
  gimple_set_location (stmt, loc);
  gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
  return result;
}

/* Build a gimple assignment to cast VAL to TYPE.  Insert the statement
   prior to GSI's current position, and return the fresh SSA name.  */

static tree
build_and_insert_cast (gimple_stmt_iterator *gsi, location_t loc,
		       tree type, tree val)
{
  tree result = make_ssa_name (type);
  gassign *stmt = gimple_build_assign (result, NOP_EXPR, val);
  gimple_set_location (stmt, loc);
  gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
  return result;
}

struct pow_synth_sqrt_info
{
  bool *factors;
  unsigned int deepest;
  unsigned int num_mults;
};

/* Return true iff the real value C can be represented as a
   sum of powers of 0.5 up to N.  That is:
   C == SUM<i from 1..N> (a[i]*(0.5**i)) where a[i] is either 0 or 1.
   Record in INFO the various parameters of the synthesis algorithm such
   as the factors a[i], the maximum 0.5 power and the number of
   multiplications that will be required.  */

bool
representable_as_half_series_p (REAL_VALUE_TYPE c, unsigned n,
				struct pow_synth_sqrt_info *info)
{
  REAL_VALUE_TYPE factor = dconsthalf;
  REAL_VALUE_TYPE remainder = c;

  info->deepest = 0;
  info->num_mults = 0;
  memset (info->factors, 0, n * sizeof (bool));

  for (unsigned i = 0; i < n; i++)
    {
      REAL_VALUE_TYPE res;

      /* If something inexact happened bail out now.  */
      if (real_arithmetic (&res, MINUS_EXPR, &remainder, &factor))
	return false;

      /* We have hit zero.  The number is representable as a sum
	 of powers of 0.5.  */
      if (real_equal (&res, &dconst0))
	{
	  info->factors[i] = true;
	  info->deepest = i + 1;
	  return true;
	}
      else if (!REAL_VALUE_NEGATIVE (res))
	{
	  remainder = res;
	  info->factors[i] = true;
	  info->num_mults++;
	}
      else
	info->factors[i] = false;

      real_arithmetic (&factor, MULT_EXPR, &factor, &dconsthalf);
    }
  return false;
}

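/* For example (illustration only): C == 0.625 == 0.5 + 0.125, so with
   N >= 3 the loop sets factors[0] = true, skips factors[1], and hits
   zero at factors[2], recording DEEPEST == 3 and NUM_MULTS == 1.
   pow (x, 0.625) can then be synthesized as
   sqrt (x) * sqrt (sqrt (sqrt (x))): two sqrt chains joined by one
   multiplication.  */
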
/* Return the tree corresponding to FN being applied
   to ARG N times at GSI and LOC.
   Look up previous results from CACHE if need be.
   cache[0] should contain just plain ARG i.e. FN applied to ARG 0 times.  */

static tree
get_fn_chain (tree arg, unsigned int n, gimple_stmt_iterator *gsi,
	      tree fn, location_t loc, tree *cache)
{
  tree res = cache[n];
  if (!res)
    {
      tree prev = get_fn_chain (arg, n - 1, gsi, fn, loc, cache);
      res = build_and_insert_call (gsi, loc, fn, prev);
      cache[n] = res;
    }

  return res;
}

/* Print to STREAM the repeated application of function FNAME to ARG
   N times.  So, for FNAME = "foo", ARG = "x", N = 2 it would print:
   "foo (foo (x))".  */

static void
print_nested_fn (FILE* stream, const char *fname, const char* arg,
		 unsigned int n)
{
  if (n == 0)
    fprintf (stream, "%s", arg);
  else
    {
      fprintf (stream, "%s (", fname);
      print_nested_fn (stream, fname, arg, n - 1);
      fprintf (stream, ")");
    }
}

/* Print to STREAM the fractional sequence of sqrt chains
   applied to ARG, described by INFO.  Used for the dump file.  */

static void
dump_fractional_sqrt_sequence (FILE *stream, const char *arg,
			       struct pow_synth_sqrt_info *info)
{
  for (unsigned int i = 0; i < info->deepest; i++)
    {
      bool is_set = info->factors[i];
      if (is_set)
	{
	  print_nested_fn (stream, "sqrt", arg, i + 1);
	  if (i != info->deepest - 1)
	    fprintf (stream, " * ");
	}
    }
}

/* Print to STREAM a representation of raising ARG to an integer
   power N.  Used for the dump file.  */

static void
dump_integer_part (FILE *stream, const char* arg, HOST_WIDE_INT n)
{
  if (n > 1)
    fprintf (stream, "powi (%s, " HOST_WIDE_INT_PRINT_DEC ")", arg, n);
  else if (n == 1)
    fprintf (stream, "%s", arg);
}

/* Attempt to synthesize a POW[F] (ARG0, ARG1) call using chains of
   square roots.  Place at GSI and LOC.  Limit the maximum depth
   of the sqrt chains to MAX_DEPTH.  Return the tree holding the
   result of the expanded sequence or NULL_TREE if the expansion failed.

   This routine assumes that ARG1 is a real number with a fractional part
   (the integer exponent case will have been handled earlier in
   gimple_expand_builtin_pow).

   For ARG1 > 0.0:
   * For ARG1 composed of a whole part WHOLE_PART and a fractional part
     FRAC_PART i.e. WHOLE_PART == floor (ARG1) and
     FRAC_PART == ARG1 - WHOLE_PART:
     Produce POWI (ARG0, WHOLE_PART) * POW (ARG0, FRAC_PART) where
     POW (ARG0, FRAC_PART) is expanded as a product of square root chains
     if it can be expressed as such, that is if FRAC_PART satisfies:
     FRAC_PART == <SUM from i = 1 until MAX_DEPTH> (a[i] * (0.5**i))
     where integer a[i] is either 0 or 1.

     Example:
     POW (x, 3.625) == POWI (x, 3) * POW (x, 0.625)
       --> POWI (x, 3) * SQRT (x) * SQRT (SQRT (SQRT (x)))

   For ARG1 < 0.0 there are two approaches:
   * (A) Expand to 1.0 / POW (ARG0, -ARG1) where POW (ARG0, -ARG1)
	 is calculated as above.

     Example:
     POW (x, -5.625) == 1.0 / POW (x, 5.625)
       --> 1.0 / (POWI (x, 5) * SQRT (x) * SQRT (SQRT (SQRT (x))))

   * (B) : WHOLE_PART := - ceil (abs (ARG1))
	   FRAC_PART := ARG1 - WHOLE_PART
     and expand to POW (x, FRAC_PART) / POWI (x, WHOLE_PART).
     Example:
     POW (x, -5.875) == POW (x, 0.125) / POWI (X, 6)
       --> SQRT (SQRT (SQRT (x))) / (POWI (x, 6))

   For ARG1 < 0.0 we choose between (A) and (B) depending on
   how many multiplications we'd have to do.
   So, for the example in (B): POW (x, -5.875), if we were to
   follow algorithm (A) we would produce:
   1.0 / (POWI (x, 5) * SQRT (x) * SQRT (SQRT (x)) * SQRT (SQRT (SQRT (x))))
   which contains more multiplications than approach (B).

   Hopefully, this approach will eliminate potentially expensive POW library
   calls when unsafe floating point math is enabled and allow the compiler to
   further optimise the multiplies, square roots and divides produced by this
   function.  */

static tree
expand_pow_as_sqrts (gimple_stmt_iterator *gsi, location_t loc,
		     tree arg0, tree arg1, HOST_WIDE_INT max_depth)
{
  tree type = TREE_TYPE (arg0);
  machine_mode mode = TYPE_MODE (type);
  tree sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT);
  bool one_over = true;

  if (!sqrtfn)
    return NULL_TREE;

  if (TREE_CODE (arg1) != REAL_CST)
    return NULL_TREE;

  REAL_VALUE_TYPE exp_init = TREE_REAL_CST (arg1);

  gcc_assert (max_depth > 0);
  tree *cache = XALLOCAVEC (tree, max_depth + 1);

  struct pow_synth_sqrt_info synth_info;
  synth_info.factors = XALLOCAVEC (bool, max_depth + 1);
  synth_info.deepest = 0;
  synth_info.num_mults = 0;

  bool neg_exp = REAL_VALUE_NEGATIVE (exp_init);
  REAL_VALUE_TYPE exp = real_value_abs (&exp_init);

  /* The whole and fractional parts of exp.  */
  REAL_VALUE_TYPE whole_part;
  REAL_VALUE_TYPE frac_part;

  real_floor (&whole_part, mode, &exp);
  real_arithmetic (&frac_part, MINUS_EXPR, &exp, &whole_part);

  REAL_VALUE_TYPE ceil_whole = dconst0;
  REAL_VALUE_TYPE ceil_fract = dconst0;

  if (neg_exp)
    {
      real_ceil (&ceil_whole, mode, &exp);
      real_arithmetic (&ceil_fract, MINUS_EXPR, &ceil_whole, &exp);
    }

  if (!representable_as_half_series_p (frac_part, max_depth, &synth_info))
    return NULL_TREE;

  /* Check whether it's more profitable to not use 1.0 / ...  */
  if (neg_exp)
    {
      struct pow_synth_sqrt_info alt_synth_info;
      alt_synth_info.factors = XALLOCAVEC (bool, max_depth + 1);
      alt_synth_info.deepest = 0;
      alt_synth_info.num_mults = 0;

      if (representable_as_half_series_p (ceil_fract, max_depth,
					  &alt_synth_info)
	  && alt_synth_info.deepest <= synth_info.deepest
	  && alt_synth_info.num_mults < synth_info.num_mults)
	{
	  whole_part = ceil_whole;
	  frac_part = ceil_fract;
	  synth_info.deepest = alt_synth_info.deepest;
	  synth_info.num_mults = alt_synth_info.num_mults;
	  memcpy (synth_info.factors, alt_synth_info.factors,
		  (max_depth + 1) * sizeof (bool));
	  one_over = false;
	}
    }

  HOST_WIDE_INT n = real_to_integer (&whole_part);
  REAL_VALUE_TYPE cint;
  real_from_integer (&cint, VOIDmode, n, SIGNED);

  if (!real_identical (&whole_part, &cint))
    return NULL_TREE;

  if (powi_cost (n) + synth_info.num_mults > POWI_MAX_MULTS)
    return NULL_TREE;

  memset (cache, 0, (max_depth + 1) * sizeof (tree));

  tree integer_res = n == 0 ? build_real (type, dconst1) : arg0;

  /* Calculate the integer part of the exponent.  */
  if (n > 1)
    {
      integer_res = gimple_expand_builtin_powi (gsi, loc, arg0, n);
      if (!integer_res)
	return NULL_TREE;
    }

  if (dump_file)
    {
      char string[64];

      real_to_decimal (string, &exp_init, sizeof (string), 0, 1);
      fprintf (dump_file, "synthesizing pow (x, %s) as:\n", string);

      if (neg_exp)
	{
	  if (one_over)
	    {
	      fprintf (dump_file, "1.0 / (");
	      dump_integer_part (dump_file, "x", n);
	      if (n > 0)
		fprintf (dump_file, " * ");
	      dump_fractional_sqrt_sequence (dump_file, "x", &synth_info);
	      fprintf (dump_file, ")");
	    }
	  else
	    {
	      dump_fractional_sqrt_sequence (dump_file, "x", &synth_info);
	      fprintf (dump_file, " / (");
	      dump_integer_part (dump_file, "x", n);
	      fprintf (dump_file, ")");
	    }
	}
      else
	{
	  dump_fractional_sqrt_sequence (dump_file, "x", &synth_info);
	  if (n > 0)
	    fprintf (dump_file, " * ");
	  dump_integer_part (dump_file, "x", n);
	}

      fprintf (dump_file, "\ndeepest sqrt chain: %d\n", synth_info.deepest);
    }

  tree fract_res = NULL_TREE;
  cache[0] = arg0;

  /* Calculate the fractional part of the exponent.  */
  for (unsigned i = 0; i < synth_info.deepest; i++)
    {
      if (synth_info.factors[i])
	{
	  tree sqrt_chain = get_fn_chain (arg0, i + 1, gsi, sqrtfn, loc, cache);

	  if (!fract_res)
	    fract_res = sqrt_chain;
	  else
	    fract_res = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
						fract_res, sqrt_chain);
	}
    }

  tree res = NULL_TREE;

  if (neg_exp)
    {
      if (one_over)
	{
	  if (n > 0)
	    res = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
					  fract_res, integer_res);
	  else
	    res = fract_res;

	  res = build_and_insert_binop (gsi, loc, "powrootrecip", RDIV_EXPR,
					build_real (type, dconst1), res);
	}
      else
	{
	  res = build_and_insert_binop (gsi, loc, "powroot", RDIV_EXPR,
					fract_res, integer_res);
	}
    }
  else
    res = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
				  fract_res, integer_res);
  return res;
}

1875 /* ARG0 and ARG1 are the two arguments to a pow builtin call in GSI
1876 with location info LOC. If possible, create an equivalent and
1877 less expensive sequence of statements prior to GSI, and return an
1878 expession holding the result. */
1880 static tree
1881 gimple_expand_builtin_pow (gimple_stmt_iterator *gsi, location_t loc,
1882 tree arg0, tree arg1)
1884 REAL_VALUE_TYPE c, cint, dconst1_3, dconst1_4, dconst1_6;
1885 REAL_VALUE_TYPE c2, dconst3;
1886 HOST_WIDE_INT n;
1887 tree type, sqrtfn, cbrtfn, sqrt_arg0, result, cbrt_x, powi_cbrt_x;
1888 machine_mode mode;
1889 bool speed_p = optimize_bb_for_speed_p (gsi_bb (*gsi));
1890 bool hw_sqrt_exists, c_is_int, c2_is_int;
1892 dconst1_4 = dconst1;
1893 SET_REAL_EXP (&dconst1_4, REAL_EXP (&dconst1_4) - 2);
1895 /* If the exponent isn't a constant, there's nothing of interest
1896 to be done. */
1897 if (TREE_CODE (arg1) != REAL_CST)
1898 return NULL_TREE;
1900 /* Don't perform the operation if flag_signaling_nans is on
1901 and the operand is a signaling NaN. */
1902 if (HONOR_SNANS (TYPE_MODE (TREE_TYPE (arg1)))
1903 && ((TREE_CODE (arg0) == REAL_CST
1904 && REAL_VALUE_ISSIGNALING_NAN (TREE_REAL_CST (arg0)))
1905 || REAL_VALUE_ISSIGNALING_NAN (TREE_REAL_CST (arg1))))
1906 return NULL_TREE;
1908 /* If the exponent is equivalent to an integer, expand to an optimal
1909 multiplication sequence when profitable. */
1910 c = TREE_REAL_CST (arg1);
1911 n = real_to_integer (&c);
1912 real_from_integer (&cint, VOIDmode, n, SIGNED);
1913 c_is_int = real_identical (&c, &cint);
1915 if (c_is_int
1916 && ((n >= -1 && n <= 2)
1917 || (flag_unsafe_math_optimizations
1918 && speed_p
1919 && powi_cost (n) <= POWI_MAX_MULTS)))
1920 return gimple_expand_builtin_powi (gsi, loc, arg0, n);
1922 /* Attempt various optimizations using sqrt and cbrt. */
1923 type = TREE_TYPE (arg0);
1924 mode = TYPE_MODE (type);
1925 sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT);
1927 /* Optimize pow(x,0.5) = sqrt(x). This replacement is always safe
1928 unless signed zeros must be maintained. pow(-0,0.5) = +0, while
1929 sqrt(-0) = -0. */
1930 if (sqrtfn
1931 && real_equal (&c, &dconsthalf)
1932 && !HONOR_SIGNED_ZEROS (mode))
1933 return build_and_insert_call (gsi, loc, sqrtfn, arg0);
1935 hw_sqrt_exists = optab_handler (sqrt_optab, mode) != CODE_FOR_nothing;
1937 /* Optimize pow(x,1./3.) = cbrt(x). This requires unsafe math
1938 optimizations since 1./3. is not exactly representable. If x
1939 is negative and finite, the correct value of pow(x,1./3.) is
1940 a NaN with the "invalid" exception raised, because the value
1941 of 1./3. actually has an even denominator. The correct value
1942 of cbrt(x) is a negative real value. */
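/* Illustration (added sketch, not from the original sources): with
   x = -8.0, cbrt (-8.0) yields -2.0, whereas a correct pow (-8.0, 1./3.)
   yields NaN and raises "invalid", because the stored constant 1./3. is
   a dyadic rational with an even denominator rather than exactly one
   third.  That difference is why the replacement is gated on
   flag_unsafe_math_optimizations and a nonnegative (or NaN-free) ARG0.  */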
1943 cbrtfn = mathfn_built_in (type, BUILT_IN_CBRT);
1944 dconst1_3 = real_value_truncate (mode, dconst_third ());
1946 if (flag_unsafe_math_optimizations
1947 && cbrtfn
1948 && (!HONOR_NANS (mode) || tree_expr_nonnegative_p (arg0))
1949 && real_equal (&c, &dconst1_3))
1950 return build_and_insert_call (gsi, loc, cbrtfn, arg0);
1952 /* Optimize pow(x,1./6.) = cbrt(sqrt(x)). Don't do this optimization
1953 if we don't have a hardware sqrt insn. */
1954 dconst1_6 = dconst1_3;
1955 SET_REAL_EXP (&dconst1_6, REAL_EXP (&dconst1_6) - 1);
1957 if (flag_unsafe_math_optimizations
1958 && sqrtfn
1959 && cbrtfn
1960 && (!HONOR_NANS (mode) || tree_expr_nonnegative_p (arg0))
1961 && speed_p
1962 && hw_sqrt_exists
1963 && real_equal (&c, &dconst1_6))
1965 /* sqrt(x) */
1966 sqrt_arg0 = build_and_insert_call (gsi, loc, sqrtfn, arg0);
1968 /* cbrt(sqrt(x)) */
1969 return build_and_insert_call (gsi, loc, cbrtfn, sqrt_arg0);
1973 /* Attempt to expand the POW as a product of square root chains.
1974 Expand the 0.25 case even when optimizing for size. */
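/* Hedged examples of the sqrt-chain expansion: pow (x, 0.75) becomes
   sqrt (x) * sqrt (sqrt (x)) and pow (x, 1.25) becomes
   x * sqrt (sqrt (x)), i.e. one factor per set bit in the binary
   expansion of the fractional part of the exponent, limited to
   MAX_DEPTH levels of sqrt.  */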
1975 if (flag_unsafe_math_optimizations
1976 && sqrtfn
1977 && hw_sqrt_exists
1978 && (speed_p || real_equal (&c, &dconst1_4))
1979 && !HONOR_SIGNED_ZEROS (mode))
1981 unsigned int max_depth = speed_p
1982 ? PARAM_VALUE (PARAM_MAX_POW_SQRT_DEPTH)
1983 : 2;
1985 tree expand_with_sqrts
1986 = expand_pow_as_sqrts (gsi, loc, arg0, arg1, max_depth);
1988 if (expand_with_sqrts)
1989 return expand_with_sqrts;
1992 real_arithmetic (&c2, MULT_EXPR, &c, &dconst2);
1993 n = real_to_integer (&c2);
1994 real_from_integer (&cint, VOIDmode, n, SIGNED);
1995 c2_is_int = real_identical (&c2, &cint);
1997 /* Optimize pow(x,c), where 3c = n for some nonzero integer n, into
1999 powi(x, n/3) * powi(cbrt(x), n%3), n > 0;
2000 1.0 / (powi(x, abs(n)/3) * powi(cbrt(x), abs(n)%3)), n < 0.
2002 Do not calculate the first factor when n/3 = 0. As cbrt(x) is
2003 different from pow(x, 1./3.) due to rounding and behavior with
2004 negative x, we need to constrain this transformation to unsafe
2005 math and positive x or finite math. */
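/* A worked example (illustrative only): for c = 5./3. we get n = 5, so
   the result is synthesized as powi (x, 5/3 = 1) * powi (cbrt (x),
   5%3 = 2), i.e. x * cbrt (x) * cbrt (x).  For c = -5./3. the same
   product is built from abs(n) and the final result is reciprocated.  */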
2006 real_from_integer (&dconst3, VOIDmode, 3, SIGNED);
2007 real_arithmetic (&c2, MULT_EXPR, &c, &dconst3);
2008 real_round (&c2, mode, &c2);
2009 n = real_to_integer (&c2);
2010 real_from_integer (&cint, VOIDmode, n, SIGNED);
2011 real_arithmetic (&c2, RDIV_EXPR, &cint, &dconst3);
2012 real_convert (&c2, mode, &c2);
2014 if (flag_unsafe_math_optimizations
2015 && cbrtfn
2016 && (!HONOR_NANS (mode) || tree_expr_nonnegative_p (arg0))
2017 && real_identical (&c2, &c)
2018 && !c2_is_int
2019 && optimize_function_for_speed_p (cfun)
2020 && powi_cost (n / 3) <= POWI_MAX_MULTS)
2022 tree powi_x_ndiv3 = NULL_TREE;
2024 /* Attempt to fold powi(arg0, abs(n/3)) into multiplies. If not
2025 possible or profitable, give up. Skip the degenerate case when
2026 abs(n) < 3, where the result is always 1. */
2027 if (absu_hwi (n) >= 3)
2029 powi_x_ndiv3 = gimple_expand_builtin_powi (gsi, loc, arg0,
2030 abs_hwi (n / 3));
2031 if (!powi_x_ndiv3)
2032 return NULL_TREE;
2035 /* Calculate powi(cbrt(x), n%3). Don't use gimple_expand_builtin_powi
2036 as that creates an unnecessary variable. Instead, just produce
2037 either cbrt(x) or cbrt(x) * cbrt(x). */
2038 cbrt_x = build_and_insert_call (gsi, loc, cbrtfn, arg0);
2040 if (absu_hwi (n) % 3 == 1)
2041 powi_cbrt_x = cbrt_x;
2042 else
2043 powi_cbrt_x = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
2044 cbrt_x, cbrt_x);
2046 /* Multiply the two subexpressions, unless powi(x,abs(n)/3) = 1. */
2047 if (absu_hwi (n) < 3)
2048 result = powi_cbrt_x;
2049 else
2050 result = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
2051 powi_x_ndiv3, powi_cbrt_x);
2053 /* If n is negative, reciprocate the result. */
2054 if (n < 0)
2055 result = build_and_insert_binop (gsi, loc, "powroot", RDIV_EXPR,
2056 build_real (type, dconst1), result);
2058 return result;
2061 /* No optimizations succeeded. */
2062 return NULL_TREE;
2065 /* ARG is the argument to a cabs builtin call in GSI with location info
2066 LOC. Create a sequence of statements prior to GSI that calculates
2067 sqrt(R*R + I*I), where R and I are the real and imaginary components
2068 of ARG, respectively. Return an expression holding the result. */
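/* Illustrative lowering (names hypothetical): 'r = cabs (z)' with
   z = x + y*i becomes
     re = REALPART_EXPR <z>;
     im = IMAGPART_EXPR <z>;
     r  = sqrt (re*re + im*im);
   This is guarded by flag_unsafe_math_optimizations because the naive
   formula can overflow in the squares where cabs itself would not.  */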
2070 static tree
2071 gimple_expand_builtin_cabs (gimple_stmt_iterator *gsi, location_t loc, tree arg)
2073 tree real_part, imag_part, addend1, addend2, sum, result;
2074 tree type = TREE_TYPE (TREE_TYPE (arg));
2075 tree sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT);
2076 machine_mode mode = TYPE_MODE (type);
2078 if (!flag_unsafe_math_optimizations
2079 || !optimize_bb_for_speed_p (gimple_bb (gsi_stmt (*gsi)))
2080 || !sqrtfn
2081 || optab_handler (sqrt_optab, mode) == CODE_FOR_nothing)
2082 return NULL_TREE;
2084 real_part = build_and_insert_ref (gsi, loc, type, "cabs",
2085 REALPART_EXPR, arg);
2086 addend1 = build_and_insert_binop (gsi, loc, "cabs", MULT_EXPR,
2087 real_part, real_part);
2088 imag_part = build_and_insert_ref (gsi, loc, type, "cabs",
2089 IMAGPART_EXPR, arg);
2090 addend2 = build_and_insert_binop (gsi, loc, "cabs", MULT_EXPR,
2091 imag_part, imag_part);
2092 sum = build_and_insert_binop (gsi, loc, "cabs", PLUS_EXPR, addend1, addend2);
2093 result = build_and_insert_call (gsi, loc, sqrtfn, sum);
2095 return result;
2098 /* Go through all calls to sin, cos and cexpi and call execute_cse_sincos_1
2099 on the SSA_NAME argument of each of them. Also expand powi(x,n) into
2100 an optimal number of multiplies, when n is a constant. */
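/* A hedged example of the CSE performed here: the pair
     s = sin (a);
     c = cos (a);
   is rewritten, when sincos or cexpi is available, to
     t = cexpi (a);
     s = IMAGPART_EXPR <t>;
     c = REALPART_EXPR <t>;
   so the common argument reduction is done only once.  */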
2102 namespace {
2104 const pass_data pass_data_cse_sincos =
2106 GIMPLE_PASS, /* type */
2107 "sincos", /* name */
2108 OPTGROUP_NONE, /* optinfo_flags */
2109 TV_TREE_SINCOS, /* tv_id */
2110 PROP_ssa, /* properties_required */
2111 PROP_gimple_opt_math, /* properties_provided */
2112 0, /* properties_destroyed */
2113 0, /* todo_flags_start */
2114 TODO_update_ssa, /* todo_flags_finish */
2117 class pass_cse_sincos : public gimple_opt_pass
2119 public:
2120 pass_cse_sincos (gcc::context *ctxt)
2121 : gimple_opt_pass (pass_data_cse_sincos, ctxt)
2124 /* opt_pass methods: */
2125 virtual bool gate (function *)
2127 /* We no longer require either sincos or cexp, since powi expansion
2128 piggybacks on this pass. */
2129 return optimize;
2132 virtual unsigned int execute (function *);
2134 }; // class pass_cse_sincos
2136 unsigned int
2137 pass_cse_sincos::execute (function *fun)
2139 basic_block bb;
2140 bool cfg_changed = false;
2142 calculate_dominance_info (CDI_DOMINATORS);
2143 memset (&sincos_stats, 0, sizeof (sincos_stats));
2145 FOR_EACH_BB_FN (bb, fun)
2147 gimple_stmt_iterator gsi;
2148 bool cleanup_eh = false;
2150 for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2152 gimple *stmt = gsi_stmt (gsi);
2154 /* Only the last stmt in a bb could throw; no need to call
2155 gimple_purge_dead_eh_edges if we change something in the middle
2156 of a basic block. */
2157 cleanup_eh = false;
2159 if (is_gimple_call (stmt)
2160 && gimple_call_lhs (stmt))
2162 tree arg, arg0, arg1, result;
2163 HOST_WIDE_INT n;
2164 location_t loc;
2166 switch (gimple_call_combined_fn (stmt))
2168 CASE_CFN_COS:
2169 CASE_CFN_SIN:
2170 CASE_CFN_CEXPI:
2171 /* Make sure we have either sincos or cexp. */
2172 if (!targetm.libc_has_function (function_c99_math_complex)
2173 && !targetm.libc_has_function (function_sincos))
2174 break;
2176 arg = gimple_call_arg (stmt, 0);
2177 if (TREE_CODE (arg) == SSA_NAME)
2178 cfg_changed |= execute_cse_sincos_1 (arg);
2179 break;
2181 CASE_CFN_POW:
2182 arg0 = gimple_call_arg (stmt, 0);
2183 arg1 = gimple_call_arg (stmt, 1);
2185 loc = gimple_location (stmt);
2186 result = gimple_expand_builtin_pow (&gsi, loc, arg0, arg1);
2188 if (result)
2190 tree lhs = gimple_get_lhs (stmt);
2191 gassign *new_stmt = gimple_build_assign (lhs, result);
2192 gimple_set_location (new_stmt, loc);
2193 unlink_stmt_vdef (stmt);
2194 gsi_replace (&gsi, new_stmt, true);
2195 cleanup_eh = true;
2196 if (gimple_vdef (stmt))
2197 release_ssa_name (gimple_vdef (stmt));
2199 break;
2201 CASE_CFN_POWI:
2202 arg0 = gimple_call_arg (stmt, 0);
2203 arg1 = gimple_call_arg (stmt, 1);
2204 loc = gimple_location (stmt);
2206 if (real_minus_onep (arg0))
2208 tree t0, t1, cond, one, minus_one;
2209 gassign *stmt;
2211 t0 = TREE_TYPE (arg0);
2212 t1 = TREE_TYPE (arg1);
2213 one = build_real (t0, dconst1);
2214 minus_one = build_real (t0, dconstm1);
2216 cond = make_temp_ssa_name (t1, NULL, "powi_cond");
2217 stmt = gimple_build_assign (cond, BIT_AND_EXPR,
2218 arg1, build_int_cst (t1, 1));
2219 gimple_set_location (stmt, loc);
2220 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
2222 result = make_temp_ssa_name (t0, NULL, "powi");
2223 stmt = gimple_build_assign (result, COND_EXPR, cond,
2224 minus_one, one);
2225 gimple_set_location (stmt, loc);
2226 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
2228 else
2230 if (!tree_fits_shwi_p (arg1))
2231 break;
2233 n = tree_to_shwi (arg1);
2234 result = gimple_expand_builtin_powi (&gsi, loc, arg0, n);
2237 if (result)
2239 tree lhs = gimple_get_lhs (stmt);
2240 gassign *new_stmt = gimple_build_assign (lhs, result);
2241 gimple_set_location (new_stmt, loc);
2242 unlink_stmt_vdef (stmt);
2243 gsi_replace (&gsi, new_stmt, true);
2244 cleanup_eh = true;
2245 if (gimple_vdef (stmt))
2246 release_ssa_name (gimple_vdef (stmt));
2248 break;
2250 CASE_CFN_CABS:
2251 arg0 = gimple_call_arg (stmt, 0);
2252 loc = gimple_location (stmt);
2253 result = gimple_expand_builtin_cabs (&gsi, loc, arg0);
2255 if (result)
2257 tree lhs = gimple_get_lhs (stmt);
2258 gassign *new_stmt = gimple_build_assign (lhs, result);
2259 gimple_set_location (new_stmt, loc);
2260 unlink_stmt_vdef (stmt);
2261 gsi_replace (&gsi, new_stmt, true);
2262 cleanup_eh = true;
2263 if (gimple_vdef (stmt))
2264 release_ssa_name (gimple_vdef (stmt));
2266 break;
2268 default:;
2272 if (cleanup_eh)
2273 cfg_changed |= gimple_purge_dead_eh_edges (bb);
2276 statistics_counter_event (fun, "sincos statements inserted",
2277 sincos_stats.inserted);
2279 return cfg_changed ? TODO_cleanup_cfg : 0;
2282 } // anon namespace
2284 gimple_opt_pass *
2285 make_pass_cse_sincos (gcc::context *ctxt)
2287 return new pass_cse_sincos (ctxt);
2290 /* Return true if stmt is a type conversion operation that can be stripped
2291 when used in a widening multiply operation. */
2292 static bool
2293 widening_mult_conversion_strippable_p (tree result_type, gimple *stmt)
2295 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
2297 if (TREE_CODE (result_type) == INTEGER_TYPE)
2299 tree op_type;
2300 tree inner_op_type;
2302 if (!CONVERT_EXPR_CODE_P (rhs_code))
2303 return false;
2305 op_type = TREE_TYPE (gimple_assign_lhs (stmt));
2307 /* If the type of OP has the same precision as the result, then
2308 we can strip this conversion. The multiply operation will be
2309 selected to create the correct extension as a by-product. */
2310 if (TYPE_PRECISION (result_type) == TYPE_PRECISION (op_type))
2311 return true;
2313 /* We can also strip a conversion if it preserves the signedness of
2314 the operation and doesn't narrow the range. */
2315 inner_op_type = TREE_TYPE (gimple_assign_rhs1 (stmt));
2317 /* If the inner-most type is unsigned, then we can strip any
2318 intermediate widening operation. If it's signed, then the
2319 intermediate widening operation must also be signed. */
2320 if ((TYPE_UNSIGNED (inner_op_type)
2321 || TYPE_UNSIGNED (op_type) == TYPE_UNSIGNED (inner_op_type))
2322 && TYPE_PRECISION (op_type) > TYPE_PRECISION (inner_op_type))
2323 return true;
2325 return false;
2328 return rhs_code == FIXED_CONVERT_EXPR;
2331 /* Return true if RHS is a suitable operand for a widening multiplication,
2332 assuming a target type of TYPE.
2333 There are two cases:
2335 - RHS makes some value at least twice as wide. Store that value
2336 in *NEW_RHS_OUT if so, and store its type in *TYPE_OUT.
2338 - RHS is an integer constant. Store that value in *NEW_RHS_OUT if so,
2339 but leave *TYPE_OUT untouched. */
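/* Hypothetical example: with TYPE int64_t and RHS defined by
     rhs = (int64_t) var;   // var of type int32_t
   the conversion is strippable, so *NEW_RHS_OUT = var and *TYPE_OUT is
   int32_t.  With RHS an INTEGER_CST such as 1000, *NEW_RHS_OUT is the
   constant itself and *TYPE_OUT is set to NULL.  */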
2341 static bool
2342 is_widening_mult_rhs_p (tree type, tree rhs, tree *type_out,
2343 tree *new_rhs_out)
2345 gimple *stmt;
2346 tree type1, rhs1;
2348 if (TREE_CODE (rhs) == SSA_NAME)
2350 stmt = SSA_NAME_DEF_STMT (rhs);
2351 if (is_gimple_assign (stmt))
2353 if (! widening_mult_conversion_strippable_p (type, stmt))
2354 rhs1 = rhs;
2355 else
2357 rhs1 = gimple_assign_rhs1 (stmt);
2359 if (TREE_CODE (rhs1) == INTEGER_CST)
2361 *new_rhs_out = rhs1;
2362 *type_out = NULL;
2363 return true;
2367 else
2368 rhs1 = rhs;
2370 type1 = TREE_TYPE (rhs1);
2372 if (TREE_CODE (type1) != TREE_CODE (type)
2373 || TYPE_PRECISION (type1) * 2 > TYPE_PRECISION (type))
2374 return false;
2376 *new_rhs_out = rhs1;
2377 *type_out = type1;
2378 return true;
2381 if (TREE_CODE (rhs) == INTEGER_CST)
2383 *new_rhs_out = rhs;
2384 *type_out = NULL;
2385 return true;
2388 return false;
2391 /* Return true if STMT performs a widening multiplication, assuming the
2392 output type is TYPE. If so, store the unwidened types of the operands
2393 in *TYPE1_OUT and *TYPE2_OUT respectively. Also fill *RHS1_OUT and
2394 *RHS2_OUT such that converting those operands to types *TYPE1_OUT
2395 and *TYPE2_OUT would give the operands of the multiplication. */
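/* Hypothetical example: for
     prod = (int64_t) a * (int64_t) b;   // a, b of type int32_t
   with TYPE int64_t this returns true, with *TYPE1_OUT and *TYPE2_OUT
   both int32_t and *RHS1_OUT and *RHS2_OUT the unconverted A and B,
   making the statement a candidate for WIDEN_MULT_EXPR.  */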
2397 static bool
2398 is_widening_mult_p (gimple *stmt,
2399 tree *type1_out, tree *rhs1_out,
2400 tree *type2_out, tree *rhs2_out)
2402 tree type = TREE_TYPE (gimple_assign_lhs (stmt));
2404 if (TREE_CODE (type) == INTEGER_TYPE)
2406 if (TYPE_OVERFLOW_TRAPS (type))
2407 return false;
2409 else if (TREE_CODE (type) != FIXED_POINT_TYPE)
2410 return false;
2412 if (!is_widening_mult_rhs_p (type, gimple_assign_rhs1 (stmt), type1_out,
2413 rhs1_out))
2414 return false;
2416 if (!is_widening_mult_rhs_p (type, gimple_assign_rhs2 (stmt), type2_out,
2417 rhs2_out))
2418 return false;
2420 if (*type1_out == NULL)
2422 if (*type2_out == NULL || !int_fits_type_p (*rhs1_out, *type2_out))
2423 return false;
2424 *type1_out = *type2_out;
2427 if (*type2_out == NULL)
2429 if (!int_fits_type_p (*rhs2_out, *type1_out))
2430 return false;
2431 *type2_out = *type1_out;
2434 /* Ensure that the larger of the two operands comes first. */
2435 if (TYPE_PRECISION (*type1_out) < TYPE_PRECISION (*type2_out))
2437 std::swap (*type1_out, *type2_out);
2438 std::swap (*rhs1_out, *rhs2_out);
2441 return true;
2444 /* Check to see if the CALL statement is an invocation of copysign
2445 with 1. being the first argument. */
2446 static bool
2447 is_copysign_call_with_1 (gimple *call)
2449 gcall *c = dyn_cast <gcall *> (call);
2450 if (! c)
2451 return false;
2453 enum combined_fn code = gimple_call_combined_fn (c);
2455 if (code == CFN_LAST)
2456 return false;
2458 if (builtin_fn_p (code))
2460 switch (as_builtin_fn (code))
2462 CASE_FLT_FN (BUILT_IN_COPYSIGN):
2463 CASE_FLT_FN_FLOATN_NX (BUILT_IN_COPYSIGN):
2464 return real_onep (gimple_call_arg (c, 0));
2465 default:
2466 return false;
2470 if (internal_fn_p (code))
2472 switch (as_internal_fn (code))
2474 case IFN_COPYSIGN:
2475 return real_onep (gimple_call_arg (c, 0));
2476 default:
2477 return false;
2481 return false;
2484 /* Try to expand the pattern x * copysign (1, y) into xorsign (x, y).
2485 This only happens when the xorsign optab is defined. If the
2486 pattern is not a xorsign pattern or if expansion fails, FALSE is
2487 returned; otherwise TRUE is returned. */
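/* Hedged example of the match:
     t = copysign (1.0, y);
     r = x * t;
   is replaced, when the xorsign optab exists for the mode, with
     r = .XORSIGN (x, y);
   which merely XORs the sign bit of Y into X instead of multiplying.  */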
2488 static bool
2489 convert_expand_mult_copysign (gimple *stmt, gimple_stmt_iterator *gsi)
2491 tree treeop0, treeop1, lhs, type;
2492 location_t loc = gimple_location (stmt);
2493 lhs = gimple_assign_lhs (stmt);
2494 treeop0 = gimple_assign_rhs1 (stmt);
2495 treeop1 = gimple_assign_rhs2 (stmt);
2496 type = TREE_TYPE (lhs);
2497 machine_mode mode = TYPE_MODE (type);
2499 if (HONOR_SNANS (type))
2500 return false;
2502 if (TREE_CODE (treeop0) == SSA_NAME && TREE_CODE (treeop1) == SSA_NAME)
2504 gimple *call0 = SSA_NAME_DEF_STMT (treeop0);
2505 if (!has_single_use (treeop0) || !is_copysign_call_with_1 (call0))
2507 call0 = SSA_NAME_DEF_STMT (treeop1);
2508 if (!has_single_use (treeop1) || !is_copysign_call_with_1 (call0))
2509 return false;
2511 treeop1 = treeop0;
2513 if (optab_handler (xorsign_optab, mode) == CODE_FOR_nothing)
2514 return false;
2516 gcall *c = as_a<gcall*> (call0);
2517 treeop0 = gimple_call_arg (c, 1);
2519 gcall *call_stmt
2520 = gimple_build_call_internal (IFN_XORSIGN, 2, treeop1, treeop0);
2521 gimple_set_lhs (call_stmt, lhs);
2522 gimple_set_location (call_stmt, loc);
2523 gsi_replace (gsi, call_stmt, true);
2524 return true;
2527 return false;
2530 /* Process a single gimple statement STMT, which has a MULT_EXPR as
2531 its rhs, and try to convert it into a WIDEN_MULT_EXPR. The return
2532 value is true iff we converted the statement. */
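/* Illustrative before/after (SSA names hypothetical):
     _1 = (long) a_2;   // a_2, b_3 of type int
     _4 = (long) b_3;
     r_5 = _1 * _4;
   becomes
     r_5 = a_2 w* b_3;  // WIDEN_MULT_EXPR
   provided the target advertises a suitable [us]mul_widen optab.  */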
2534 static bool
2535 convert_mult_to_widen (gimple *stmt, gimple_stmt_iterator *gsi)
2537 tree lhs, rhs1, rhs2, type, type1, type2;
2538 enum insn_code handler;
2539 scalar_int_mode to_mode, from_mode, actual_mode;
2540 optab op;
2541 int actual_precision;
2542 location_t loc = gimple_location (stmt);
2543 bool from_unsigned1, from_unsigned2;
2545 lhs = gimple_assign_lhs (stmt);
2546 type = TREE_TYPE (lhs);
2547 if (TREE_CODE (type) != INTEGER_TYPE)
2548 return false;
2550 if (!is_widening_mult_p (stmt, &type1, &rhs1, &type2, &rhs2))
2551 return false;
2553 to_mode = SCALAR_INT_TYPE_MODE (type);
2554 from_mode = SCALAR_INT_TYPE_MODE (type1);
2555 if (to_mode == from_mode)
2556 return false;
2558 from_unsigned1 = TYPE_UNSIGNED (type1);
2559 from_unsigned2 = TYPE_UNSIGNED (type2);
2561 if (from_unsigned1 && from_unsigned2)
2562 op = umul_widen_optab;
2563 else if (!from_unsigned1 && !from_unsigned2)
2564 op = smul_widen_optab;
2565 else
2566 op = usmul_widen_optab;
2568 handler = find_widening_optab_handler_and_mode (op, to_mode, from_mode,
2569 &actual_mode);
2571 if (handler == CODE_FOR_nothing)
2573 if (op != smul_widen_optab)
2575 /* We can use a signed multiply with unsigned types as long as
2576 there is a wider mode to use, or it is the smaller of the two
2577 types that is unsigned. Note that type1 >= type2, always. */
2578 if ((TYPE_UNSIGNED (type1)
2579 && TYPE_PRECISION (type1) == GET_MODE_PRECISION (from_mode))
2580 || (TYPE_UNSIGNED (type2)
2581 && TYPE_PRECISION (type2) == GET_MODE_PRECISION (from_mode)))
2583 if (!GET_MODE_WIDER_MODE (from_mode).exists (&from_mode)
2584 || GET_MODE_SIZE (to_mode) <= GET_MODE_SIZE (from_mode))
2585 return false;
2588 op = smul_widen_optab;
2589 handler = find_widening_optab_handler_and_mode (op, to_mode,
2590 from_mode,
2591 &actual_mode);
2593 if (handler == CODE_FOR_nothing)
2594 return false;
2596 from_unsigned1 = from_unsigned2 = false;
2598 else
2599 return false;
2602 /* Ensure that the inputs to the handler are in the correct precision
2603 for the opcode. This will be the full mode size. */
2604 actual_precision = GET_MODE_PRECISION (actual_mode);
2605 if (2 * actual_precision > TYPE_PRECISION (type))
2606 return false;
2607 if (actual_precision != TYPE_PRECISION (type1)
2608 || from_unsigned1 != TYPE_UNSIGNED (type1))
2609 rhs1 = build_and_insert_cast (gsi, loc,
2610 build_nonstandard_integer_type
2611 (actual_precision, from_unsigned1), rhs1);
2612 if (actual_precision != TYPE_PRECISION (type2)
2613 || from_unsigned2 != TYPE_UNSIGNED (type2))
2614 rhs2 = build_and_insert_cast (gsi, loc,
2615 build_nonstandard_integer_type
2616 (actual_precision, from_unsigned2), rhs2);
2618 /* Handle constants. */
2619 if (TREE_CODE (rhs1) == INTEGER_CST)
2620 rhs1 = fold_convert (type1, rhs1);
2621 if (TREE_CODE (rhs2) == INTEGER_CST)
2622 rhs2 = fold_convert (type2, rhs2);
2624 gimple_assign_set_rhs1 (stmt, rhs1);
2625 gimple_assign_set_rhs2 (stmt, rhs2);
2626 gimple_assign_set_rhs_code (stmt, WIDEN_MULT_EXPR);
2627 update_stmt (stmt);
2628 widen_mul_stats.widen_mults_inserted++;
2629 return true;
2632 /* Process a single gimple statement STMT, which is found at the
2633 iterator GSI and has either a PLUS_EXPR or a MINUS_EXPR as its
2634 rhs (given by CODE), and try to convert it into a
2635 WIDEN_MULT_PLUS_EXPR or a WIDEN_MULT_MINUS_EXPR. The return value
2636 is true iff we converted the statement. */
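/* Illustrative before/after (SSA names hypothetical):
     _1 = a_2 w* b_3;
     r_4 = _1 + acc_5;
   becomes
     r_4 = WIDEN_MULT_PLUS_EXPR <a_2, b_3, acc_5>;
   when the target provides a widening multiply-accumulate pattern.  */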
2638 static bool
2639 convert_plusminus_to_widen (gimple_stmt_iterator *gsi, gimple *stmt,
2640 enum tree_code code)
2642 gimple *rhs1_stmt = NULL, *rhs2_stmt = NULL;
2643 gimple *conv1_stmt = NULL, *conv2_stmt = NULL, *conv_stmt;
2644 tree type, type1, type2, optype;
2645 tree lhs, rhs1, rhs2, mult_rhs1, mult_rhs2, add_rhs;
2646 enum tree_code rhs1_code = ERROR_MARK, rhs2_code = ERROR_MARK;
2647 optab this_optab;
2648 enum tree_code wmult_code;
2649 enum insn_code handler;
2650 scalar_mode to_mode, from_mode, actual_mode;
2651 location_t loc = gimple_location (stmt);
2652 int actual_precision;
2653 bool from_unsigned1, from_unsigned2;
2655 lhs = gimple_assign_lhs (stmt);
2656 type = TREE_TYPE (lhs);
2657 if (TREE_CODE (type) != INTEGER_TYPE
2658 && TREE_CODE (type) != FIXED_POINT_TYPE)
2659 return false;
2661 if (code == MINUS_EXPR)
2662 wmult_code = WIDEN_MULT_MINUS_EXPR;
2663 else
2664 wmult_code = WIDEN_MULT_PLUS_EXPR;
2666 rhs1 = gimple_assign_rhs1 (stmt);
2667 rhs2 = gimple_assign_rhs2 (stmt);
2669 if (TREE_CODE (rhs1) == SSA_NAME)
2671 rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
2672 if (is_gimple_assign (rhs1_stmt))
2673 rhs1_code = gimple_assign_rhs_code (rhs1_stmt);
2676 if (TREE_CODE (rhs2) == SSA_NAME)
2678 rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
2679 if (is_gimple_assign (rhs2_stmt))
2680 rhs2_code = gimple_assign_rhs_code (rhs2_stmt);
2683 /* Allow for one conversion statement between the multiply
2684 and addition/subtraction statement. If there is more than
2685 one conversion then we assume it would invalidate this
2686 transformation. If that's not the case then they should have
2687 been folded before now. */
2688 if (CONVERT_EXPR_CODE_P (rhs1_code))
2690 conv1_stmt = rhs1_stmt;
2691 rhs1 = gimple_assign_rhs1 (rhs1_stmt);
2692 if (TREE_CODE (rhs1) == SSA_NAME)
2694 rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
2695 if (is_gimple_assign (rhs1_stmt))
2696 rhs1_code = gimple_assign_rhs_code (rhs1_stmt);
2698 else
2699 return false;
2701 if (CONVERT_EXPR_CODE_P (rhs2_code))
2703 conv2_stmt = rhs2_stmt;
2704 rhs2 = gimple_assign_rhs1 (rhs2_stmt);
2705 if (TREE_CODE (rhs2) == SSA_NAME)
2707 rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
2708 if (is_gimple_assign (rhs2_stmt))
2709 rhs2_code = gimple_assign_rhs_code (rhs2_stmt);
2711 else
2712 return false;
2715 /* If code is WIDEN_MULT_EXPR then it would seem unnecessary to call
2716 is_widening_mult_p, but we still need the rhs values it returns.
2718 It might also appear that it would be sufficient to use the existing
2719 operands of the widening multiply, but that would limit the choice of
2720 multiply-and-accumulate instructions.
2722 If the widened-multiplication result has more than one use, it is
2723 probably wiser not to do the conversion. */
2724 if (code == PLUS_EXPR
2725 && (rhs1_code == MULT_EXPR || rhs1_code == WIDEN_MULT_EXPR))
2727 if (!has_single_use (rhs1)
2728 || !is_widening_mult_p (rhs1_stmt, &type1, &mult_rhs1,
2729 &type2, &mult_rhs2))
2730 return false;
2731 add_rhs = rhs2;
2732 conv_stmt = conv1_stmt;
2734 else if (rhs2_code == MULT_EXPR || rhs2_code == WIDEN_MULT_EXPR)
2736 if (!has_single_use (rhs2)
2737 || !is_widening_mult_p (rhs2_stmt, &type1, &mult_rhs1,
2738 &type2, &mult_rhs2))
2739 return false;
2740 add_rhs = rhs1;
2741 conv_stmt = conv2_stmt;
2743 else
2744 return false;
2746 to_mode = SCALAR_TYPE_MODE (type);
2747 from_mode = SCALAR_TYPE_MODE (type1);
2748 if (to_mode == from_mode)
2749 return false;
2751 from_unsigned1 = TYPE_UNSIGNED (type1);
2752 from_unsigned2 = TYPE_UNSIGNED (type2);
2753 optype = type1;
2755 /* There's no such thing as a mixed sign madd yet, so use a wider mode. */
2756 if (from_unsigned1 != from_unsigned2)
2758 if (!INTEGRAL_TYPE_P (type))
2759 return false;
2760 /* We can use a signed multiply with unsigned types as long as
2761 there is a wider mode to use, or it is the smaller of the two
2762 types that is unsigned. Note that type1 >= type2, always. */
2763 if ((from_unsigned1
2764 && TYPE_PRECISION (type1) == GET_MODE_PRECISION (from_mode))
2765 || (from_unsigned2
2766 && TYPE_PRECISION (type2) == GET_MODE_PRECISION (from_mode)))
2768 if (!GET_MODE_WIDER_MODE (from_mode).exists (&from_mode)
2769 || GET_MODE_SIZE (from_mode) >= GET_MODE_SIZE (to_mode))
2770 return false;
2773 from_unsigned1 = from_unsigned2 = false;
2774 optype = build_nonstandard_integer_type (GET_MODE_PRECISION (from_mode),
2775 false);
2778 /* If there was a conversion between the multiply and addition
2779 then we need to make sure it fits a multiply-and-accumulate.
2780 There should be a single mode change which does not change the
2781 value. */
2782 if (conv_stmt)
2784 /* We use the original, unmodified data types for this. */
2785 tree from_type = TREE_TYPE (gimple_assign_rhs1 (conv_stmt));
2786 tree to_type = TREE_TYPE (gimple_assign_lhs (conv_stmt));
2787 int data_size = TYPE_PRECISION (type1) + TYPE_PRECISION (type2);
2788 bool is_unsigned = TYPE_UNSIGNED (type1) && TYPE_UNSIGNED (type2);
2790 if (TYPE_PRECISION (from_type) > TYPE_PRECISION (to_type))
2792 /* Conversion is a truncate. */
2793 if (TYPE_PRECISION (to_type) < data_size)
2794 return false;
2796 else if (TYPE_PRECISION (from_type) < TYPE_PRECISION (to_type))
2798 /* Conversion is an extend. Check it's the right sort. */
2799 if (TYPE_UNSIGNED (from_type) != is_unsigned
2800 && !(is_unsigned && TYPE_PRECISION (from_type) > data_size))
2801 return false;
2803 /* else convert is a no-op for our purposes. */
2806 /* Verify that the machine can perform a widening multiply
2807 accumulate in this mode/signedness combination, otherwise
2808 this transformation is likely to pessimize code. */
2809 this_optab = optab_for_tree_code (wmult_code, optype, optab_default);
2810 handler = find_widening_optab_handler_and_mode (this_optab, to_mode,
2811 from_mode, &actual_mode);
2813 if (handler == CODE_FOR_nothing)
2814 return false;
2816 /* Ensure that the inputs to the handler are in the correct precision
2817 for the opcode. This will be the full mode size. */
2818 actual_precision = GET_MODE_PRECISION (actual_mode);
2819 if (actual_precision != TYPE_PRECISION (type1)
2820 || from_unsigned1 != TYPE_UNSIGNED (type1))
2821 mult_rhs1 = build_and_insert_cast (gsi, loc,
2822 build_nonstandard_integer_type
2823 (actual_precision, from_unsigned1),
2824 mult_rhs1);
2825 if (actual_precision != TYPE_PRECISION (type2)
2826 || from_unsigned2 != TYPE_UNSIGNED (type2))
2827 mult_rhs2 = build_and_insert_cast (gsi, loc,
2828 build_nonstandard_integer_type
2829 (actual_precision, from_unsigned2),
2830 mult_rhs2);
2832 if (!useless_type_conversion_p (type, TREE_TYPE (add_rhs)))
2833 add_rhs = build_and_insert_cast (gsi, loc, type, add_rhs);
2835 /* Handle constants. */
2836 if (TREE_CODE (mult_rhs1) == INTEGER_CST)
2837 mult_rhs1 = fold_convert (type1, mult_rhs1);
2838 if (TREE_CODE (mult_rhs2) == INTEGER_CST)
2839 mult_rhs2 = fold_convert (type2, mult_rhs2);
2841 gimple_assign_set_rhs_with_ops (gsi, wmult_code, mult_rhs1, mult_rhs2,
2842 add_rhs);
2843 update_stmt (gsi_stmt (*gsi));
2844 widen_mul_stats.maccs_inserted++;
2845 return true;
2848 /* Given a result MUL_RESULT which is a result of a multiplication of OP1 and
2849 OP2 and which we know is used in statements that can be, together with the
2850 multiplication, converted to FMAs, perform the transformation. */
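/* Hedged example of the rewrite: given MUL_RESULT _5 from
     _5 = a_1 * b_2;
   a use '_6 = _5 + c_3' becomes '_6 = .FMA (a_1, b_2, c_3)', and
   '_6 = c_3 - _5' becomes '_6 = .FMA (-a_1, b_2, c_3)', which the
   folding below can turn into FNMA; FMS and FNMS arise similarly.  */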
2852 static void
2853 convert_mult_to_fma_1 (tree mul_result, tree op1, tree op2)
2855 tree type = TREE_TYPE (mul_result);
2856 gimple *use_stmt;
2857 imm_use_iterator imm_iter;
2858 gcall *fma_stmt;
2860 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, mul_result)
2862 gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
2863 tree addop, mulop1 = op1, result = mul_result;
2864 bool negate_p = false;
2865 gimple_seq seq = NULL;
2867 if (is_gimple_debug (use_stmt))
2868 continue;
2870 if (is_gimple_assign (use_stmt)
2871 && gimple_assign_rhs_code (use_stmt) == NEGATE_EXPR)
2873 result = gimple_assign_lhs (use_stmt);
2874 use_operand_p use_p;
2875 gimple *neguse_stmt;
2876 single_imm_use (gimple_assign_lhs (use_stmt), &use_p, &neguse_stmt);
2877 gsi_remove (&gsi, true);
2878 release_defs (use_stmt);
2880 use_stmt = neguse_stmt;
2881 gsi = gsi_for_stmt (use_stmt);
2882 negate_p = true;
2885 tree cond, else_value, ops[3];
2886 tree_code code;
2887 if (!can_interpret_as_conditional_op_p (use_stmt, &cond, &code,
2888 ops, &else_value))
2889 gcc_unreachable ();
2890 addop = ops[0] == result ? ops[1] : ops[0];
2892 if (code == MINUS_EXPR)
2894 if (ops[0] == result)
2895 /* a * b - c -> a * b + (-c) */
2896 addop = gimple_build (&seq, NEGATE_EXPR, type, addop);
2897 else
2898 /* a - b * c -> (-b) * c + a */
2899 negate_p = !negate_p;
2902 if (negate_p)
2903 mulop1 = gimple_build (&seq, NEGATE_EXPR, type, mulop1);
2905 if (seq)
2906 gsi_insert_seq_before (&gsi, seq, GSI_SAME_STMT);
2908 if (cond)
2909 fma_stmt = gimple_build_call_internal (IFN_COND_FMA, 5, cond, mulop1,
2910 op2, addop, else_value);
2911 else
2912 fma_stmt = gimple_build_call_internal (IFN_FMA, 3, mulop1, op2, addop);
2913 gimple_set_lhs (fma_stmt, gimple_get_lhs (use_stmt));
2914 gimple_call_set_nothrow (fma_stmt, !stmt_can_throw_internal (cfun,
2915 use_stmt));
2916 gsi_replace (&gsi, fma_stmt, true);
2917 /* Follow all SSA edges so that we generate FMS, FNMA and FNMS
2918 regardless of where the negation occurs. */
2919 if (fold_stmt (&gsi, follow_all_ssa_edges))
2920 update_stmt (gsi_stmt (gsi));
2922 if (dump_file && (dump_flags & TDF_DETAILS))
2924 fprintf (dump_file, "Generated FMA ");
2925 print_gimple_stmt (dump_file, gsi_stmt (gsi), 0, TDF_NONE);
2926 fprintf (dump_file, "\n");
2929 widen_mul_stats.fmas_inserted++;
2933 /* Data necessary to perform the actual transformation from a multiplication
2934 and an addition to an FMA once the decision to do so has been taken, and
2935 to then delete the multiplication statement from the function IL. */
2937 struct fma_transformation_info
2939 gimple *mul_stmt;
2940 tree mul_result;
2941 tree op1;
2942 tree op2;
2945 /* Structure containing the current state of FMA deferring, i.e. whether we are
2946 deferring, whether to continue deferring, and all data necessary to come
2947 back and perform all deferred transformations. */
2949 class fma_deferring_state
2951 public:
2952 /* Class constructor. Pass true as PERFORM_DEFERRING in order to actually
2953 do any deferring. */
2955 fma_deferring_state (bool perform_deferring)
2956 : m_candidates (), m_mul_result_set (), m_initial_phi (NULL),
2957 m_last_result (NULL_TREE), m_deferring_p (perform_deferring) {}
2959 /* List of FMA candidates for which the transformation has been determined
2960 to be possible but which, at this point in the BB analysis, we do not
2961 yet consider beneficial. */
2962 auto_vec<fma_transformation_info, 8> m_candidates;
2964 /* Set of results of multiplications that are part of already deferred FMA
2965 candidates. */
2966 hash_set<tree> m_mul_result_set;
2968 /* The PHI that supposedly feeds back the result of one FMA to another over
2969 a loop boundary. */
2970 gphi *m_initial_phi;
2972 /* Result of the last produced FMA candidate or NULL if there has not been
2973 one. */
2974 tree m_last_result;
2976 /* If true, deferring might still be profitable. If false, transform all
2977 candidates and no longer defer. */
2978 bool m_deferring_p;
2981 /* Transform all deferred FMA candidates and mark STATE as no longer
2982 deferring. */
2984 static void
2985 cancel_fma_deferring (fma_deferring_state *state)
2987 if (!state->m_deferring_p)
2988 return;
2990 for (unsigned i = 0; i < state->m_candidates.length (); i++)
2992 if (dump_file && (dump_flags & TDF_DETAILS))
2993 fprintf (dump_file, "Generating deferred FMA\n");
2995 const fma_transformation_info &fti = state->m_candidates[i];
2996 convert_mult_to_fma_1 (fti.mul_result, fti.op1, fti.op2);
2998 gimple_stmt_iterator gsi = gsi_for_stmt (fti.mul_stmt);
2999 gsi_remove (&gsi, true);
3000 release_defs (fti.mul_stmt);
3002 state->m_deferring_p = false;
3005 /* If OP is an SSA name defined by a PHI node, return the PHI statement.
3006 Otherwise return NULL. */
3008 static gphi *
3009 result_of_phi (tree op)
3011 if (TREE_CODE (op) != SSA_NAME)
3012 return NULL;
3014 return dyn_cast <gphi *> (SSA_NAME_DEF_STMT (op));
3017 /* After processing statements of a BB and recording STATE, return true if the
3018 initial phi is fed by the last FMA candidate result or one such result from
3019 previously processed BBs marked in LAST_RESULT_SET. */
3021 static bool
3022 last_fma_candidate_feeds_initial_phi (fma_deferring_state *state,
3023 hash_set<tree> *last_result_set)
3025 ssa_op_iter iter;
3026 use_operand_p use;
3027 FOR_EACH_PHI_ARG (use, state->m_initial_phi, iter, SSA_OP_USE)
3029 tree t = USE_FROM_PTR (use);
3030 if (t == state->m_last_result
3031 || last_result_set->contains (t))
3032 return true;
3035 return false;
3038 /* Combine the multiplication at MUL_STMT with operands MULOP1 and MULOP2
3039 with uses in additions and subtractions to form fused multiply-add
3040 operations. Returns true if successful and MUL_STMT should be removed.
3042 If STATE indicates that we are deferring FMA transformation, that means
3043 that we do not produce FMAs for basic blocks which look like:
3045 <bb 6>
3046 # accumulator_111 = PHI <0.0(5), accumulator_66(6)>
3047 _65 = _14 * _16;
3048 accumulator_66 = _65 + accumulator_111;
3050 or its unrolled version, i.e. with several FMA candidates that feed the result
3051 of one into the addend of another. Instead, we add them to a list in STATE
3052 and if we later discover an FMA candidate that is not part of such a chain,
3053 we go back and perform all deferred past candidates. */
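/* For instance (hypothetical SSA names), a twice-unrolled version:
     _65 = _14 * _16;
     acc_66 = _65 + acc_111;
     _75 = _24 * _26;
     acc_76 = _75 + acc_66;
   defers both candidates because each feeds the addend of the next.
   If the chain is later found not to feed the initial PHI back over the
   loop boundary, the deferred candidates are transformed after all.  */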
3055 static bool
3056 convert_mult_to_fma (gimple *mul_stmt, tree op1, tree op2,
3057 fma_deferring_state *state)
3059 tree mul_result = gimple_get_lhs (mul_stmt);
3060 tree type = TREE_TYPE (mul_result);
3061 gimple *use_stmt, *neguse_stmt;
3062 use_operand_p use_p;
3063 imm_use_iterator imm_iter;
3065 if (FLOAT_TYPE_P (type)
3066 && flag_fp_contract_mode == FP_CONTRACT_OFF)
3067 return false;
3069 /* We don't want to do bitfield reduction ops. */
3070 if (INTEGRAL_TYPE_P (type)
3071 && (!type_has_mode_precision_p (type) || TYPE_OVERFLOW_TRAPS (type)))
3072 return false;
3074 /* If the target doesn't support it, don't generate it. We assume that
3075 if fma isn't available then fms, fnma or fnms are not either. */
3076 optimization_type opt_type = bb_optimization_type (gimple_bb (mul_stmt));
3077 if (!direct_internal_fn_supported_p (IFN_FMA, type, opt_type))
3078 return false;
3080 /* If the multiplication has zero uses, it is kept around probably because
3081 of -fnon-call-exceptions. Don't optimize it away in that case,
3082 that is DCE's job. */
3083 if (has_zero_uses (mul_result))
3084 return false;
3086 bool check_defer
3087 = (state->m_deferring_p
3088 && (tree_to_shwi (TYPE_SIZE (type))
3089 <= PARAM_VALUE (PARAM_AVOID_FMA_MAX_BITS)));
3090 bool defer = check_defer;
3091 /* Make sure that the multiplication statement becomes dead after
3092 the transformation, thus that all uses are transformed to FMAs.
3093 This means we assume that an FMA operation has the same cost
3094 as an addition. */
3095 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, mul_result)
3097 tree result = mul_result;
3098 bool negate_p = false;
3100 use_stmt = USE_STMT (use_p);
3102 if (is_gimple_debug (use_stmt))
3103 continue;
3105 /* For now restrict these operations to a single basic block. In theory
3106 we would want to support sinking the multiplication in
3107 m = a*b;
3108 if ()
3109 ma = m + c;
3110 else
3111 d = m;
3112 to form a fma in the then block and sink the multiplication to the
3113 else block. */
3114 if (gimple_bb (use_stmt) != gimple_bb (mul_stmt))
3115 return false;
3117 /* A negate on the multiplication leads to FNMA. */
3118 if (is_gimple_assign (use_stmt)
3119 && gimple_assign_rhs_code (use_stmt) == NEGATE_EXPR)
3121 ssa_op_iter iter;
3122 use_operand_p usep;
3124 result = gimple_assign_lhs (use_stmt);
3126 /* Make sure the negate statement becomes dead with this
3127 single transformation. */
3128 if (!single_imm_use (gimple_assign_lhs (use_stmt),
3129 &use_p, &neguse_stmt))
3130 return false;
3132 /* Make sure the multiplication isn't also used on that stmt. */
3133 FOR_EACH_PHI_OR_STMT_USE (usep, neguse_stmt, iter, SSA_OP_USE)
3134 if (USE_FROM_PTR (usep) == mul_result)
3135 return false;
3137 /* Re-validate. */
3138 use_stmt = neguse_stmt;
3139 if (gimple_bb (use_stmt) != gimple_bb (mul_stmt))
3140 return false;
3142 negate_p = true;
3145 tree cond, else_value, ops[3];
3146 tree_code code;
3147 if (!can_interpret_as_conditional_op_p (use_stmt, &cond, &code, ops,
3148 &else_value))
3149 return false;
3151 switch (code)
3153 case MINUS_EXPR:
3154 if (ops[1] == result)
3155 negate_p = !negate_p;
3156 break;
3157 case PLUS_EXPR:
3158 break;
3159 default:
3160 /* FMA can only be formed from PLUS and MINUS. */
3161 return false;
3164 if (cond)
3166 if (cond == result || else_value == result)
3167 return false;
3168 if (!direct_internal_fn_supported_p (IFN_COND_FMA, type, opt_type))
3169 return false;
3172 /* If the subtrahend (OPS[1]) is computed by a MULT_EXPR that
3173 we'll visit later, we might be able to get a more profitable
3174 match with fnma.
3175 OTOH, if we don't, a negate / fma pair has likely lower latency
3176 than a mult / subtract pair. */
3177 if (code == MINUS_EXPR
3178 && !negate_p
3179 && ops[0] == result
3180 && !direct_internal_fn_supported_p (IFN_FMS, type, opt_type)
3181 && direct_internal_fn_supported_p (IFN_FNMA, type, opt_type)
3182 && TREE_CODE (ops[1]) == SSA_NAME
3183 && has_single_use (ops[1]))
3185 gimple *stmt2 = SSA_NAME_DEF_STMT (ops[1]);
3186 if (is_gimple_assign (stmt2)
3187 && gimple_assign_rhs_code (stmt2) == MULT_EXPR)
3188 return false;
3191 /* We can't handle a * b + a * b. */
3192 if (ops[0] == ops[1])
3193 return false;
3194 /* If deferring, make sure we are not looking at an instruction that
3195 wouldn't have existed if we were not. */
3196 if (state->m_deferring_p
3197 && (state->m_mul_result_set.contains (ops[0])
3198 || state->m_mul_result_set.contains (ops[1])))
3199 return false;
3201 if (check_defer)
3203 tree use_lhs = gimple_get_lhs (use_stmt);
3204 if (state->m_last_result)
3206 if (ops[1] == state->m_last_result
3207 || ops[0] == state->m_last_result)
3208 defer = true;
3209 else
3210 defer = false;
3212 else
3214 gcc_checking_assert (!state->m_initial_phi);
3215 gphi *phi;
3216 if (ops[0] == result)
3217 phi = result_of_phi (ops[1]);
3218 else
3220 gcc_assert (ops[1] == result);
3221 phi = result_of_phi (ops[0]);
3224 if (phi)
3226 state->m_initial_phi = phi;
3227 defer = true;
3229 else
3230 defer = false;
3233 state->m_last_result = use_lhs;
3234 check_defer = false;
3236 else
3237 defer = false;
3239 /* While it is possible to validate whether or not the exact form that
3240 we've recognized is available in the backend, the assumption is that
3241 if the deferring logic above did not trigger, the transformation is
3242 never a loss. For instance, suppose the target only has the plain FMA
3243 pattern available. Consider a*b-c -> fma(a,b,-c): we've exchanged
3244 MUL+SUB for FMA+NEG, which is still two operations. Consider
3245 -(a*b)-c -> fma(-a,b,-c): we still have 3 operations, but in the FMA
3246 form the two NEGs are independent and could be run in parallel. */
3249 if (defer)
3251 fma_transformation_info fti;
3252 fti.mul_stmt = mul_stmt;
3253 fti.mul_result = mul_result;
3254 fti.op1 = op1;
3255 fti.op2 = op2;
3256 state->m_candidates.safe_push (fti);
3257 state->m_mul_result_set.add (mul_result);
3259 if (dump_file && (dump_flags & TDF_DETAILS))
3261 fprintf (dump_file, "Deferred generating FMA for multiplication ");
3262 print_gimple_stmt (dump_file, mul_stmt, 0, TDF_NONE);
3263 fprintf (dump_file, "\n");
3266 return false;
3268 else
3270 if (state->m_deferring_p)
3271 cancel_fma_deferring (state);
3272 convert_mult_to_fma_1 (mul_result, op1, op2);
3273 return true;
3278 /* Helper function of match_uaddsub_overflow. Return 1
3279 if USE_STMT is unsigned overflow check ovf != 0 for
3280 STMT, -1 if USE_STMT is unsigned overflow check ovf == 0
3281 and 0 otherwise. */
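/* Hypothetical examples for STMT 'r_3 = a_1 + b_2':
   USE_STMT 'if (a_1 > r_3)' yields 1 (an unsigned sum wrapped iff it is
   smaller than an addend), 'if (a_1 <= r_3)' yields -1 (the negated
   check), and a comparison matching neither form, e.g.
   'if (r_3 > b_2)', yields 0.  */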
3283 static int
3284 uaddsub_overflow_check_p (gimple *stmt, gimple *use_stmt)
3286 enum tree_code ccode = ERROR_MARK;
3287 tree crhs1 = NULL_TREE, crhs2 = NULL_TREE;
3288 if (gimple_code (use_stmt) == GIMPLE_COND)
3290 ccode = gimple_cond_code (use_stmt);
3291 crhs1 = gimple_cond_lhs (use_stmt);
3292 crhs2 = gimple_cond_rhs (use_stmt);
3294 else if (is_gimple_assign (use_stmt))
3296 if (gimple_assign_rhs_class (use_stmt) == GIMPLE_BINARY_RHS)
3298 ccode = gimple_assign_rhs_code (use_stmt);
3299 crhs1 = gimple_assign_rhs1 (use_stmt);
3300 crhs2 = gimple_assign_rhs2 (use_stmt);
3302 else if (gimple_assign_rhs_code (use_stmt) == COND_EXPR)
3304 tree cond = gimple_assign_rhs1 (use_stmt);
3305 if (COMPARISON_CLASS_P (cond))
3307 ccode = TREE_CODE (cond);
3308 crhs1 = TREE_OPERAND (cond, 0);
3309 crhs2 = TREE_OPERAND (cond, 1);
3311 else
3312 return 0;
3314 else
3315 return 0;
3317 else
3318 return 0;
3320 if (TREE_CODE_CLASS (ccode) != tcc_comparison)
3321 return 0;
3323 enum tree_code code = gimple_assign_rhs_code (stmt);
3324 tree lhs = gimple_assign_lhs (stmt);
3325 tree rhs1 = gimple_assign_rhs1 (stmt);
3326 tree rhs2 = gimple_assign_rhs2 (stmt);
3328 switch (ccode)
3330 case GT_EXPR:
3331 case LE_EXPR:
3332 /* r = a - b; r > a or r <= a
3333 r = a + b; a > r or a <= r or b > r or b <= r. */
3334 if ((code == MINUS_EXPR && crhs1 == lhs && crhs2 == rhs1)
3335 || (code == PLUS_EXPR && (crhs1 == rhs1 || crhs1 == rhs2)
3336 && crhs2 == lhs))
3337 return ccode == GT_EXPR ? 1 : -1;
3338 break;
3339 case LT_EXPR:
3340 case GE_EXPR:
3341 /* r = a - b; a < r or a >= r
3342 r = a + b; r < a or r >= a or r < b or r >= b. */
3343 if ((code == MINUS_EXPR && crhs1 == rhs1 && crhs2 == lhs)
3344 || (code == PLUS_EXPR && crhs1 == lhs
3345 && (crhs2 == rhs1 || crhs2 == rhs2)))
3346 return ccode == LT_EXPR ? 1 : -1;
3347 break;
3348 default:
3349 break;
3351 return 0;
3354 /* Recognize for unsigned x
3355 x = y - z;
3356 if (x > y)
3357 where there are other uses of x and replace it with
3358 _7 = SUB_OVERFLOW (y, z);
3359 x = REALPART_EXPR <_7>;
3360 _8 = IMAGPART_EXPR <_7>;
3361 if (_8)
3362 and similarly for addition. */
3364 static bool
3365 match_uaddsub_overflow (gimple_stmt_iterator *gsi, gimple *stmt,
3366 enum tree_code code)
3368 tree lhs = gimple_assign_lhs (stmt);
3369 tree type = TREE_TYPE (lhs);
3370 use_operand_p use_p;
3371 imm_use_iterator iter;
3372 bool use_seen = false;
3373 bool ovf_use_seen = false;
3374 gimple *use_stmt;
3376 gcc_checking_assert (code == PLUS_EXPR || code == MINUS_EXPR);
3377 if (!INTEGRAL_TYPE_P (type)
3378 || !TYPE_UNSIGNED (type)
3379 || has_zero_uses (lhs)
3380 || has_single_use (lhs)
3381 || optab_handler (code == PLUS_EXPR ? uaddv4_optab : usubv4_optab,
3382 TYPE_MODE (type)) == CODE_FOR_nothing)
3383 return false;
3385 FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
3387 use_stmt = USE_STMT (use_p);
3388 if (is_gimple_debug (use_stmt))
3389 continue;
3391 if (uaddsub_overflow_check_p (stmt, use_stmt))
3392 ovf_use_seen = true;
3393 else
3394 use_seen = true;
3395 if (ovf_use_seen && use_seen)
3396 break;
3399 if (!ovf_use_seen || !use_seen)
3400 return false;
3402 tree ctype = build_complex_type (type);
3403 tree rhs1 = gimple_assign_rhs1 (stmt);
3404 tree rhs2 = gimple_assign_rhs2 (stmt);
3405 gcall *g = gimple_build_call_internal (code == PLUS_EXPR
3406 ? IFN_ADD_OVERFLOW : IFN_SUB_OVERFLOW,
3407 2, rhs1, rhs2);
3408 tree ctmp = make_ssa_name (ctype);
3409 gimple_call_set_lhs (g, ctmp);
3410 gsi_insert_before (gsi, g, GSI_SAME_STMT);
3411 gassign *g2 = gimple_build_assign (lhs, REALPART_EXPR,
3412 build1 (REALPART_EXPR, type, ctmp));
3413 gsi_replace (gsi, g2, true);
3414 tree ovf = make_ssa_name (type);
3415 g2 = gimple_build_assign (ovf, IMAGPART_EXPR,
3416 build1 (IMAGPART_EXPR, type, ctmp));
3417 gsi_insert_after (gsi, g2, GSI_NEW_STMT);
3419 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
3421 if (is_gimple_debug (use_stmt))
3422 continue;
3424 int ovf_use = uaddsub_overflow_check_p (stmt, use_stmt);
3425 if (ovf_use == 0)
3426 continue;
3427 if (gimple_code (use_stmt) == GIMPLE_COND)
3429 gcond *cond_stmt = as_a <gcond *> (use_stmt);
3430 gimple_cond_set_lhs (cond_stmt, ovf);
3431 gimple_cond_set_rhs (cond_stmt, build_int_cst (type, 0));
3432 gimple_cond_set_code (cond_stmt, ovf_use == 1 ? NE_EXPR : EQ_EXPR);
3434 else
3436 gcc_checking_assert (is_gimple_assign (use_stmt));
3437 if (gimple_assign_rhs_class (use_stmt) == GIMPLE_BINARY_RHS)
3439 gimple_assign_set_rhs1 (use_stmt, ovf);
3440 gimple_assign_set_rhs2 (use_stmt, build_int_cst (type, 0));
3441 gimple_assign_set_rhs_code (use_stmt,
3442 ovf_use == 1 ? NE_EXPR : EQ_EXPR);
3444 else
3446 gcc_checking_assert (gimple_assign_rhs_code (use_stmt)
3447 == COND_EXPR);
3448 tree cond = build2 (ovf_use == 1 ? NE_EXPR : EQ_EXPR,
3449 boolean_type_node, ovf,
3450 build_int_cst (type, 0));
3451 gimple_assign_set_rhs1 (use_stmt, cond);
3454 update_stmt (use_stmt);
3456 return true;
3459 /* Return true if target has support for divmod. */
3461 static bool
3462 target_supports_divmod_p (optab divmod_optab, optab div_optab, machine_mode mode)
3464 /* If target supports hardware divmod insn, use it for divmod. */
3465 if (optab_handler (divmod_optab, mode) != CODE_FOR_nothing)
3466 return true;
3468 /* Check if libfunc for divmod is available. */
3469 rtx libfunc = optab_libfunc (divmod_optab, mode);
3470 if (libfunc != NULL_RTX)
3472 /* If optab_handler exists for div_optab, perhaps in a wider mode,
3473 we don't want to use the libfunc even if it exists for the given mode. */
3474 machine_mode div_mode;
3475 FOR_EACH_MODE_FROM (div_mode, mode)
3476 if (optab_handler (div_optab, div_mode) != CODE_FOR_nothing)
3477 return false;
3479 return targetm.expand_divmod_libfunc != NULL;
3482 return false;
3485 /* Check if stmt is candidate for divmod transform. */
3487 static bool
3488 divmod_candidate_p (gassign *stmt)
3490 tree type = TREE_TYPE (gimple_assign_lhs (stmt));
3491 machine_mode mode = TYPE_MODE (type);
3492 optab divmod_optab, div_optab;
3494 if (TYPE_UNSIGNED (type))
3496 divmod_optab = udivmod_optab;
3497 div_optab = udiv_optab;
3499 else
3501 divmod_optab = sdivmod_optab;
3502 div_optab = sdiv_optab;
3505 tree op1 = gimple_assign_rhs1 (stmt);
3506 tree op2 = gimple_assign_rhs2 (stmt);
3508 /* Disable the transform if either is a constant, since division-by-constant
3509 may have specialized expansion. */
3510 if (CONSTANT_CLASS_P (op1) || CONSTANT_CLASS_P (op2))
3511 return false;
3513 /* Exclude the case where TYPE_OVERFLOW_TRAPS (type) as that should
3514 expand using the [su]divv optabs. */
3515 if (TYPE_OVERFLOW_TRAPS (type))
3516 return false;
3518 if (!target_supports_divmod_p (divmod_optab, div_optab, mode))
3519 return false;
3521 return true;
3524 /* This function looks for:
3525 t1 = a TRUNC_DIV_EXPR b;
3526 t2 = a TRUNC_MOD_EXPR b;
3527 and transforms it to the following sequence:
3528 complex_tmp = DIVMOD (a, b);
3529 t1 = REALPART_EXPR(complex_tmp);
3530 t2 = IMAGPART_EXPR(complex_tmp);
3531 For conditions enabling the transform see divmod_candidate_p().
3533 The pass has three parts:
3534 1) Find top_stmt which is trunc_div or trunc_mod stmt and dominates all
3535 other trunc_div_expr and trunc_mod_expr stmts.
3536 2) Add top_stmt and all trunc_div and trunc_mod stmts dominated by top_stmt
3537 to stmts vector.
3538 3) Insert DIVMOD call just before top_stmt and update entries in
3539 stmts vector to use the return value of DIVMOD (REALPART_EXPR for div,
3540 IMAGPART_EXPR for mod). */
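/* Hedged source-level example: for
     q = a / b;
     ...
     r = a % b;
   a single 'divmod_tmp = DIVMOD (a, b)' is inserted before the
   dominating statement and both uses become REALPART_EXPR/IMAGPART_EXPR
   reads of divmod_tmp, so the division work is done once.  */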
3542 static bool
3543 convert_to_divmod (gassign *stmt)
3545 if (stmt_can_throw_internal (cfun, stmt)
3546 || !divmod_candidate_p (stmt))
3547 return false;
3549 tree op1 = gimple_assign_rhs1 (stmt);
3550 tree op2 = gimple_assign_rhs2 (stmt);
3552 imm_use_iterator use_iter;
3553 gimple *use_stmt;
3554 auto_vec<gimple *> stmts;
3556 gimple *top_stmt = stmt;
3557 basic_block top_bb = gimple_bb (stmt);
3559 /* Part 1: Try to set top_stmt to "topmost" stmt that dominates
3560 at least stmt and possibly other trunc_div/trunc_mod stmts
3561 having the same operands as stmt. */
3563 FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, op1)
3565 if (is_gimple_assign (use_stmt)
3566 && (gimple_assign_rhs_code (use_stmt) == TRUNC_DIV_EXPR
3567 || gimple_assign_rhs_code (use_stmt) == TRUNC_MOD_EXPR)
3568 && operand_equal_p (op1, gimple_assign_rhs1 (use_stmt), 0)
3569 && operand_equal_p (op2, gimple_assign_rhs2 (use_stmt), 0))
3571 if (stmt_can_throw_internal (cfun, use_stmt))
3572 continue;
3574 basic_block bb = gimple_bb (use_stmt);
3576 if (bb == top_bb)
3578 if (gimple_uid (use_stmt) < gimple_uid (top_stmt))
3579 top_stmt = use_stmt;
3581 else if (dominated_by_p (CDI_DOMINATORS, top_bb, bb))
3583 top_bb = bb;
3584 top_stmt = use_stmt;
3589 tree top_op1 = gimple_assign_rhs1 (top_stmt);
3590 tree top_op2 = gimple_assign_rhs2 (top_stmt);
3592 stmts.safe_push (top_stmt);
3593 bool div_seen = (gimple_assign_rhs_code (top_stmt) == TRUNC_DIV_EXPR);
3595 /* Part 2: Add all trunc_div/trunc_mod statements dominated by top_bb
3596 to stmts vector. The 2nd loop will always add stmt to stmts vector, since
3597 gimple_bb (top_stmt) dominates gimple_bb (stmt), so the
3598 2nd loop ends up adding at least a single trunc_mod_expr stmt. */
3600 FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, top_op1)
3602 if (is_gimple_assign (use_stmt)
3603 && (gimple_assign_rhs_code (use_stmt) == TRUNC_DIV_EXPR
3604 || gimple_assign_rhs_code (use_stmt) == TRUNC_MOD_EXPR)
3605 && operand_equal_p (top_op1, gimple_assign_rhs1 (use_stmt), 0)
3606 && operand_equal_p (top_op2, gimple_assign_rhs2 (use_stmt), 0))
3608 if (use_stmt == top_stmt
3609 || stmt_can_throw_internal (cfun, use_stmt)
3610 || !dominated_by_p (CDI_DOMINATORS, gimple_bb (use_stmt), top_bb))
3611 continue;
3613 stmts.safe_push (use_stmt);
3614 if (gimple_assign_rhs_code (use_stmt) == TRUNC_DIV_EXPR)
3615 div_seen = true;
3619 if (!div_seen)
3620 return false;
3622 /* Part 3: Create libcall to internal fn DIVMOD:
3623 divmod_tmp = DIVMOD (op1, op2). */
3625 gcall *call_stmt = gimple_build_call_internal (IFN_DIVMOD, 2, op1, op2);
3626 tree res = make_temp_ssa_name (build_complex_type (TREE_TYPE (op1)),
3627 call_stmt, "divmod_tmp");
3628 gimple_call_set_lhs (call_stmt, res);
3629 /* We rejected throwing statements above. */
3630 gimple_call_set_nothrow (call_stmt, true);
3632 /* Insert the call before top_stmt. */
3633 gimple_stmt_iterator top_stmt_gsi = gsi_for_stmt (top_stmt);
3634 gsi_insert_before (&top_stmt_gsi, call_stmt, GSI_SAME_STMT);
3636 widen_mul_stats.divmod_calls_inserted++;
3638 /* Update all statements in stmts vector:
3639 lhs = op1 TRUNC_DIV_EXPR op2 -> lhs = REALPART_EXPR<divmod_tmp>
3640 lhs = op1 TRUNC_MOD_EXPR op2 -> lhs = IMAGPART_EXPR<divmod_tmp>. */
3642 for (unsigned i = 0; stmts.iterate (i, &use_stmt); ++i)
3644 tree new_rhs;
3646 switch (gimple_assign_rhs_code (use_stmt))
3648 case TRUNC_DIV_EXPR:
3649 new_rhs = fold_build1 (REALPART_EXPR, TREE_TYPE (op1), res);
3650 break;
3652 case TRUNC_MOD_EXPR:
3653 new_rhs = fold_build1 (IMAGPART_EXPR, TREE_TYPE (op1), res);
3654 break;
3656 default:
3657 gcc_unreachable ();
3660 gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
3661 gimple_assign_set_rhs_from_tree (&gsi, new_rhs);
3662 update_stmt (use_stmt);
3665 return true;
3668 /* Find integer multiplications where the operands are extended from
3669 smaller types, and replace the MULT_EXPR with a WIDEN_MULT_EXPR
3670 where appropriate. */
3672 namespace {
3674 const pass_data pass_data_optimize_widening_mul =
3676 GIMPLE_PASS, /* type */
3677 "widening_mul", /* name */
3678 OPTGROUP_NONE, /* optinfo_flags */
3679 TV_TREE_WIDEN_MUL, /* tv_id */
3680 PROP_ssa, /* properties_required */
3681 0, /* properties_provided */
3682 0, /* properties_destroyed */
3683 0, /* todo_flags_start */
3684 TODO_update_ssa, /* todo_flags_finish */
3687 class pass_optimize_widening_mul : public gimple_opt_pass
3689 public:
3690 pass_optimize_widening_mul (gcc::context *ctxt)
3691 : gimple_opt_pass (pass_data_optimize_widening_mul, ctxt)
3694 /* opt_pass methods: */
3695 virtual bool gate (function *)
3697 return flag_expensive_optimizations && optimize;
3700 virtual unsigned int execute (function *);
3702 }; // class pass_optimize_widening_mul
3704 /* Walker class to perform the transformation in reverse dominance order. */
3706 class math_opts_dom_walker : public dom_walker
3708 public:
3709 /* Constructor, CFG_CHANGED is a pointer to a boolean flag that will be set
3710 if walking modifies the CFG. */
3712 math_opts_dom_walker (bool *cfg_changed_p)
3713 : dom_walker (CDI_DOMINATORS), m_last_result_set (),
3714 m_cfg_changed_p (cfg_changed_p) {}
3716 /* The actual actions performed in the walk. */
3718 virtual void after_dom_children (basic_block);
3720 /* Set of results of chains of multiply and add statement combinations that
3721 were not transformed into FMAs because of active deferring. */
3722 hash_set<tree> m_last_result_set;
3724 /* Pointer to a flag of the user that needs to be set if CFG has been
3725 modified. */
3726 bool *m_cfg_changed_p;
3729 void
3730 math_opts_dom_walker::after_dom_children (basic_block bb)
3732 gimple_stmt_iterator gsi;
3734 fma_deferring_state fma_state (PARAM_VALUE (PARAM_AVOID_FMA_MAX_BITS) > 0);
3736 for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi);)
3738 gimple *stmt = gsi_stmt (gsi);
3739 enum tree_code code;
3741 if (is_gimple_assign (stmt))
3743 code = gimple_assign_rhs_code (stmt);
3744 switch (code)
3746 case MULT_EXPR:
3747 if (!convert_mult_to_widen (stmt, &gsi)
3748 && !convert_expand_mult_copysign (stmt, &gsi)
3749 && convert_mult_to_fma (stmt,
3750 gimple_assign_rhs1 (stmt),
3751 gimple_assign_rhs2 (stmt),
3752 &fma_state))
3754 gsi_remove (&gsi, true);
3755 release_defs (stmt);
3756 continue;
3758 break;
3760 case PLUS_EXPR:
3761 case MINUS_EXPR:
3762 if (!convert_plusminus_to_widen (&gsi, stmt, code))
3763 match_uaddsub_overflow (&gsi, stmt, code);
3764 break;
3766 case TRUNC_MOD_EXPR:
3767 convert_to_divmod (as_a<gassign *> (stmt));
3768 break;
3770 default:;
3773 else if (is_gimple_call (stmt))
3775 tree fndecl = gimple_call_fndecl (stmt);
3776 if (fndecl && gimple_call_builtin_p (stmt, BUILT_IN_NORMAL))
3778 switch (DECL_FUNCTION_CODE (fndecl))
3780 case BUILT_IN_POWF:
3781 case BUILT_IN_POW:
3782 case BUILT_IN_POWL:
3783 if (gimple_call_lhs (stmt)
3784 && TREE_CODE (gimple_call_arg (stmt, 1)) == REAL_CST
3785 && real_equal
3786 (&TREE_REAL_CST (gimple_call_arg (stmt, 1)),
3787 &dconst2)
3788 && convert_mult_to_fma (stmt,
3789 gimple_call_arg (stmt, 0),
3790 gimple_call_arg (stmt, 0),
3791 &fma_state))
3793 unlink_stmt_vdef (stmt);
3794 if (gsi_remove (&gsi, true)
3795 && gimple_purge_dead_eh_edges (bb))
3796 *m_cfg_changed_p = true;
3797 release_defs (stmt);
3798 continue;
3800 break;
3802 default:;
3805 else
3806 cancel_fma_deferring (&fma_state);
3808 gsi_next (&gsi);
3810 if (fma_state.m_deferring_p
3811 && fma_state.m_initial_phi)
3813 gcc_checking_assert (fma_state.m_last_result);
3814 if (!last_fma_candidate_feeds_initial_phi (&fma_state,
3815 &m_last_result_set))
3816 cancel_fma_deferring (&fma_state);
3817 else
3818 m_last_result_set.add (fma_state.m_last_result);
3823 unsigned int
3824 pass_optimize_widening_mul::execute (function *fun)
3826 bool cfg_changed = false;
3828 memset (&widen_mul_stats, 0, sizeof (widen_mul_stats));
3829 calculate_dominance_info (CDI_DOMINATORS);
3830 renumber_gimple_stmt_uids ();
3832 math_opts_dom_walker (&cfg_changed).walk (ENTRY_BLOCK_PTR_FOR_FN (cfun));
3834 statistics_counter_event (fun, "widening multiplications inserted",
3835 widen_mul_stats.widen_mults_inserted);
3836 statistics_counter_event (fun, "widening maccs inserted",
3837 widen_mul_stats.maccs_inserted);
3838 statistics_counter_event (fun, "fused multiply-adds inserted",
3839 widen_mul_stats.fmas_inserted);
3840 statistics_counter_event (fun, "divmod calls inserted",
3841 widen_mul_stats.divmod_calls_inserted);
3843 return cfg_changed ? TODO_cleanup_cfg : 0;
3846 } // anon namespace
3848 gimple_opt_pass *
3849 make_pass_optimize_widening_mul (gcc::context *ctxt)
3851 return new pass_optimize_widening_mul (ctxt);