gcc/tree-ssa-math-opts.c
1 /* Global, SSA-based optimizations using mathematical identities.
2 Copyright (C) 2005-2018 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 3, or (at your option) any
9 later version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
20 /* Currently, the only mini-pass in this file tries to CSE reciprocal
21 operations. These are common in sequences such as this one:
23 modulus = sqrt(x*x + y*y + z*z);
24 x = x / modulus;
25 y = y / modulus;
26 z = z / modulus;
28 that can be optimized to
30 modulus = sqrt(x*x + y*y + z*z);
31 rmodulus = 1.0 / modulus;
32 x = x * rmodulus;
33 y = y * rmodulus;
34 z = z * rmodulus;
36 We do this for loop invariant divisors, and with this pass whenever
37 we notice that a division has the same divisor multiple times.
39 Of course, like in PRE, we don't insert a division if a dominator
40 already has one. However, this cannot be done as an extension of
41 PRE for several reasons.
43 First of all, with some experiments it was found out that the
44 transformation is not always useful if there are only two divisions
45 by the same divisor. This is probably because modern processors
46 can pipeline the divisions; on older, in-order processors it should
47 still be effective to optimize two divisions by the same number.
48 We make this a param, and it shall be called N in the remainder of
49 this comment.
51 Second, if trapping math is active, we have less freedom on where
52 to insert divisions: we can only do so in basic blocks that already
53 contain one. (If divisions don't trap, instead, we can insert
54 divisions elsewhere, which will be in blocks that are common dominators
55 of those that have the division).
57 We really don't want to compute the reciprocal unless a division will
58 be found. To do this, we won't insert the division in a basic block
59 that has less than N divisions *post-dominating* it.
61 The algorithm constructs a subset of the dominator tree, holding the
62 blocks containing the divisions and the common dominators to them,
63     and walks it twice.  The first walk is in post-order, and it annotates
64 each block with the number of divisions that post-dominate it: this
65 gives information on where divisions can be inserted profitably.
66 The second walk is in pre-order, and it inserts divisions as explained
67 above, and replaces divisions by multiplications.
69 In the best case, the cost of the pass is O(n_statements). In the
70 worst-case, the cost is due to creating the dominator tree subset,
71 with a cost of O(n_basic_blocks ^ 2); however this can only happen
72 for n_statements / n_basic_blocks statements. So, the amortized cost
73 of creating the dominator tree subset is O(n_basic_blocks) and the
74 worst-case cost of the pass is O(n_statements * n_basic_blocks).
76 More practically, the cost will be small because there are few
77 divisions, and they tend to be in the same basic block, so insert_bb
78 is called very few times.
80 If we did this using domwalk.c, an efficient implementation would have
81 to work on all the variables in a single pass, because we could not
82 work on just a subset of the dominator tree, as we do now, and the
83 cost would also be something like O(n_statements * n_basic_blocks).
84 The data structures would be more complex in order to work on all the
85 variables in a single pass. */
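/* A minimal illustrative sketch (not part of the pass itself) of the
   insertion rule above, assuming the target's min_divisions_for_recip_mul
   hook returns N = 2:

       bb2:  x = a / d;
       ...
       bb3:  y = b / d;

   If bb3 post-dominates bb2, then bb2 is credited with both divisions
   by D, the threshold is met, and a statement "recip = 1.0 / d" is
   inserted in bb2 just before its division; both divisions then become
   multiplications by recip.  With -ftrapping-math the reciprocal may
   only be placed in a block that already contains a division by D,
   which bb2 satisfies here.  */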
87 #include "config.h"
88 #include "system.h"
89 #include "coretypes.h"
90 #include "backend.h"
91 #include "target.h"
92 #include "rtl.h"
93 #include "tree.h"
94 #include "gimple.h"
95 #include "predict.h"
96 #include "alloc-pool.h"
97 #include "tree-pass.h"
98 #include "ssa.h"
99 #include "optabs-tree.h"
100 #include "gimple-pretty-print.h"
101 #include "alias.h"
102 #include "fold-const.h"
103 #include "gimple-fold.h"
104 #include "gimple-iterator.h"
105 #include "gimplify.h"
106 #include "gimplify-me.h"
107 #include "stor-layout.h"
108 #include "tree-cfg.h"
109 #include "tree-dfa.h"
110 #include "tree-ssa.h"
111 #include "builtins.h"
112 #include "params.h"
113 #include "internal-fn.h"
114 #include "case-cfn-macros.h"
115 #include "optabs-libfuncs.h"
116 #include "tree-eh.h"
117 #include "targhooks.h"
118 #include "domwalk.h"
120 /* This structure represents one basic block that either computes a
121    division, or is a common dominator for basic blocks that compute a
122 division. */
123 struct occurrence {
124 /* The basic block represented by this structure. */
125 basic_block bb;
127 /* If non-NULL, the SSA_NAME holding the definition for a reciprocal
128 inserted in BB. */
129 tree recip_def;
131 /* If non-NULL, the SSA_NAME holding the definition for a squared
132 reciprocal inserted in BB. */
133 tree square_recip_def;
135 /* If non-NULL, the GIMPLE_ASSIGN for a reciprocal computation that
136 was inserted in BB. */
137 gimple *recip_def_stmt;
139 /* Pointer to a list of "struct occurrence"s for blocks dominated
140 by BB. */
141 struct occurrence *children;
143 /* Pointer to the next "struct occurrence"s in the list of blocks
144 sharing a common dominator. */
145 struct occurrence *next;
147 /* The number of divisions that are in BB before compute_merit. The
148 number of divisions that are in BB or post-dominate it after
149 compute_merit. */
150 int num_divisions;
152 /* True if the basic block has a division, false if it is a common
153 dominator for basic blocks that do. If it is false and trapping
154 math is active, BB is not a candidate for inserting a reciprocal. */
155 bool bb_has_division;
158 static struct
160 /* Number of 1.0/X ops inserted. */
161 int rdivs_inserted;
163 /* Number of 1.0/FUNC ops inserted. */
164 int rfuncs_inserted;
165 } reciprocal_stats;
167 static struct
169 /* Number of cexpi calls inserted. */
170 int inserted;
171 } sincos_stats;
173 static struct
175 /* Number of widening multiplication ops inserted. */
176 int widen_mults_inserted;
178 /* Number of integer multiply-and-accumulate ops inserted. */
179 int maccs_inserted;
181 /* Number of fp fused multiply-add ops inserted. */
182 int fmas_inserted;
184 /* Number of divmod calls inserted. */
185 int divmod_calls_inserted;
186 } widen_mul_stats;
188 /* The instance of "struct occurrence" representing the highest
189 interesting block in the dominator tree. */
190 static struct occurrence *occ_head;
192 /* Allocation pool for getting instances of "struct occurrence". */
193 static object_allocator<occurrence> *occ_pool;
197 /* Allocate and return a new struct occurrence for basic block BB,
198 whose children list is headed by CHILDREN. */
199 static struct occurrence *
200 occ_new (basic_block bb, struct occurrence *children)
202 struct occurrence *occ;
204 bb->aux = occ = occ_pool->allocate ();
205 memset (occ, 0, sizeof (struct occurrence));
207 occ->bb = bb;
208 occ->children = children;
209 return occ;
213 /* Insert NEW_OCC into our subset of the dominator tree. P_HEAD points to a
214 list of "struct occurrence"s, one per basic block, having IDOM as
215 their common dominator.
217 We try to insert NEW_OCC as deep as possible in the tree, and we also
218 insert any other block that is a common dominator for BB and one
219 block already in the tree. */
221 static void
222 insert_bb (struct occurrence *new_occ, basic_block idom,
223 struct occurrence **p_head)
225 struct occurrence *occ, **p_occ;
227 for (p_occ = p_head; (occ = *p_occ) != NULL; )
229 basic_block bb = new_occ->bb, occ_bb = occ->bb;
230 basic_block dom = nearest_common_dominator (CDI_DOMINATORS, occ_bb, bb);
231 if (dom == bb)
233 /* BB dominates OCC_BB. OCC becomes NEW_OCC's child: remove OCC
234 from its list. */
235 *p_occ = occ->next;
236 occ->next = new_occ->children;
237 new_occ->children = occ;
239 /* Try the next block (it may as well be dominated by BB). */
242 else if (dom == occ_bb)
244 /* OCC_BB dominates BB. Tail recurse to look deeper. */
245 insert_bb (new_occ, dom, &occ->children);
246 return;
249 else if (dom != idom)
251 gcc_assert (!dom->aux);
253 /* There is a dominator between IDOM and BB, add it and make
254 two children out of NEW_OCC and OCC. First, remove OCC from
255 its list. */
256 *p_occ = occ->next;
257 new_occ->next = occ;
258 occ->next = NULL;
260 /* None of the previous blocks has DOM as a dominator: if we tail
261 recursed, we would reexamine them uselessly. Just switch BB with
262 DOM, and go on looking for blocks dominated by DOM. */
263 new_occ = occ_new (dom, new_occ);
266 else
268 /* Nothing special, go on with the next element. */
269 p_occ = &occ->next;
273 /* No place was found as a child of IDOM. Make BB a sibling of IDOM. */
274 new_occ->next = *p_head;
275 *p_head = new_occ;
278 /* Register that we found a division in BB.
279 IMPORTANCE is a measure of how much weighting to give
280 that division. Use IMPORTANCE = 2 to register a single
281 division. If the division is going to be found multiple
282 times use 1 (as it is with squares). */
284 static inline void
285 register_division_in (basic_block bb, int importance)
287 struct occurrence *occ;
289 occ = (struct occurrence *) bb->aux;
290 if (!occ)
292 occ = occ_new (bb, NULL);
293 insert_bb (occ, ENTRY_BLOCK_PTR_FOR_FN (cfun), &occ_head);
296 occ->bb_has_division = true;
297 occ->num_divisions += importance;
301 /* Compute the number of divisions that postdominate each block in OCC and
302 its children. */
304 static void
305 compute_merit (struct occurrence *occ)
307 struct occurrence *occ_child;
308 basic_block dom = occ->bb;
310 for (occ_child = occ->children; occ_child; occ_child = occ_child->next)
312 basic_block bb;
313 if (occ_child->children)
314 compute_merit (occ_child);
316 if (flag_exceptions)
317 bb = single_noncomplex_succ (dom);
318 else
319 bb = dom;
321 if (dominated_by_p (CDI_POST_DOMINATORS, bb, occ_child->bb))
322 occ->num_divisions += occ_child->num_divisions;
327 /* Return whether USE_STMT is a floating-point division by DEF. */
328 static inline bool
329 is_division_by (gimple *use_stmt, tree def)
331 return is_gimple_assign (use_stmt)
332 && gimple_assign_rhs_code (use_stmt) == RDIV_EXPR
333 && gimple_assign_rhs2 (use_stmt) == def
334 /* Do not recognize x / x as valid division, as we are getting
335 confused later by replacing all immediate uses x in such
336 a stmt. */
337 && gimple_assign_rhs1 (use_stmt) != def;
340 /* Return whether USE_STMT is DEF * DEF. */
341 static inline bool
342 is_square_of (gimple *use_stmt, tree def)
344 if (gimple_code (use_stmt) == GIMPLE_ASSIGN
345 && gimple_assign_rhs_code (use_stmt) == MULT_EXPR)
347 tree op0 = gimple_assign_rhs1 (use_stmt);
348 tree op1 = gimple_assign_rhs2 (use_stmt);
350 return op0 == op1 && op0 == def;
352 return 0;
355 /* Return whether USE_STMT is a floating-point division by
356 DEF * DEF. */
357 static inline bool
358 is_division_by_square (gimple *use_stmt, tree def)
360 if (gimple_code (use_stmt) == GIMPLE_ASSIGN
361 && gimple_assign_rhs_code (use_stmt) == RDIV_EXPR
362 && gimple_assign_rhs1 (use_stmt) != gimple_assign_rhs2 (use_stmt))
364 tree denominator = gimple_assign_rhs2 (use_stmt);
365 if (TREE_CODE (denominator) == SSA_NAME)
367 return is_square_of (SSA_NAME_DEF_STMT (denominator), def);
370 return 0;
373 /* Walk the subset of the dominator tree rooted at OCC, setting the
374 RECIP_DEF field to a definition of 1.0 / DEF that can be used in
375 the given basic block. The field may be left NULL, of course,
376 if it is not possible or profitable to do the optimization.
378 DEF_BSI is an iterator pointing at the statement defining DEF.
379 If RECIP_DEF is set, a dominator already has a computation that can
380 be used.
382 If should_insert_square_recip is set, then this also inserts
383 the square of the reciprocal immediately after the definition
384 of the reciprocal. */
386 static void
387 insert_reciprocals (gimple_stmt_iterator *def_gsi, struct occurrence *occ,
388 tree def, tree recip_def, tree square_recip_def,
389 int should_insert_square_recip, int threshold)
391 tree type;
392 gassign *new_stmt, *new_square_stmt;
393 gimple_stmt_iterator gsi;
394 struct occurrence *occ_child;
396 if (!recip_def
397 && (occ->bb_has_division || !flag_trapping_math)
398 /* Divide by two as all divisions are counted twice in
399 the costing loop. */
400 && occ->num_divisions / 2 >= threshold)
402 /* Make a variable with the replacement and substitute it. */
403 type = TREE_TYPE (def);
404 recip_def = create_tmp_reg (type, "reciptmp");
405 new_stmt = gimple_build_assign (recip_def, RDIV_EXPR,
406 build_one_cst (type), def);
408 if (should_insert_square_recip)
410 square_recip_def = create_tmp_reg (type, "powmult_reciptmp");
411 new_square_stmt = gimple_build_assign (square_recip_def, MULT_EXPR,
412 recip_def, recip_def);
415 if (occ->bb_has_division)
417 /* Case 1: insert before an existing division. */
418 gsi = gsi_after_labels (occ->bb);
419 while (!gsi_end_p (gsi)
420 && (!is_division_by (gsi_stmt (gsi), def))
421 && (!is_division_by_square (gsi_stmt (gsi), def)))
422 gsi_next (&gsi);
424 gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
426 else if (def_gsi && occ->bb == def_gsi->bb)
428 /* Case 2: insert right after the definition. Note that this will
429 never happen if the definition statement can throw, because in
430 that case the sole successor of the statement's basic block will
431 dominate all the uses as well. */
432 gsi = *def_gsi;
433 gsi_insert_after (def_gsi, new_stmt, GSI_NEW_STMT);
435 else
437 /* Case 3: insert in a basic block not containing defs/uses. */
438 gsi = gsi_after_labels (occ->bb);
439 gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
442   /* Regardless of which case the reciprocal was inserted in,
443 we insert the square immediately after the reciprocal. */
444 if (should_insert_square_recip)
445 gsi_insert_before (&gsi, new_square_stmt, GSI_SAME_STMT);
447 reciprocal_stats.rdivs_inserted++;
449 occ->recip_def_stmt = new_stmt;
452 occ->recip_def = recip_def;
453 occ->square_recip_def = square_recip_def;
454 for (occ_child = occ->children; occ_child; occ_child = occ_child->next)
455 insert_reciprocals (def_gsi, occ_child, def, recip_def,
456 square_recip_def, should_insert_square_recip,
457 threshold);
460 /* Replace occurrences of expr / (x * x) with expr * ((1 / x) * (1 / x)).
461 Take as argument the use for (x * x). */
462 static inline void
463 replace_reciprocal_squares (use_operand_p use_p)
465 gimple *use_stmt = USE_STMT (use_p);
466 basic_block bb = gimple_bb (use_stmt);
467 struct occurrence *occ = (struct occurrence *) bb->aux;
469 if (optimize_bb_for_speed_p (bb) && occ->square_recip_def
470 && occ->recip_def)
472 gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
473 gimple_assign_set_rhs_code (use_stmt, MULT_EXPR);
474 gimple_assign_set_rhs2 (use_stmt, occ->square_recip_def);
475 SET_USE (use_p, occ->square_recip_def);
476 fold_stmt_inplace (&gsi);
477 update_stmt (use_stmt);
482 /* Replace the division at USE_P with a multiplication by the reciprocal, if
483 possible. */
485 static inline void
486 replace_reciprocal (use_operand_p use_p)
488 gimple *use_stmt = USE_STMT (use_p);
489 basic_block bb = gimple_bb (use_stmt);
490 struct occurrence *occ = (struct occurrence *) bb->aux;
492 if (optimize_bb_for_speed_p (bb)
493 && occ->recip_def && use_stmt != occ->recip_def_stmt)
495 gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
496 gimple_assign_set_rhs_code (use_stmt, MULT_EXPR);
497 SET_USE (use_p, occ->recip_def);
498 fold_stmt_inplace (&gsi);
499 update_stmt (use_stmt);
504 /* Free OCC and return one more "struct occurrence" to be freed. */
506 static struct occurrence *
507 free_bb (struct occurrence *occ)
509 struct occurrence *child, *next;
511 /* First get the two pointers hanging off OCC. */
512 next = occ->next;
513 child = occ->children;
514 occ->bb->aux = NULL;
515 occ_pool->remove (occ);
517 /* Now ensure that we don't recurse unless it is necessary. */
518 if (!child)
519 return next;
520 else
522 while (next)
523 next = free_bb (next);
525 return child;
530 /* Look for floating-point divisions among DEF's uses, and try to
531 replace them by multiplications with the reciprocal. Add
532 as many statements computing the reciprocal as needed.
534 DEF must be a GIMPLE register of a floating-point type. */
536 static void
537 execute_cse_reciprocals_1 (gimple_stmt_iterator *def_gsi, tree def)
539 use_operand_p use_p, square_use_p;
540 imm_use_iterator use_iter, square_use_iter;
541 tree square_def;
542 struct occurrence *occ;
543 int count = 0;
544 int threshold;
545 int square_recip_count = 0;
546 int sqrt_recip_count = 0;
548 gcc_assert (FLOAT_TYPE_P (TREE_TYPE (def)) && TREE_CODE (def) == SSA_NAME);
549 threshold = targetm.min_divisions_for_recip_mul (TYPE_MODE (TREE_TYPE (def)));
551 /* If DEF is a square (x * x), count the number of divisions by x.
552 If there are more divisions by x than by (DEF * DEF), prefer to optimize
553 the reciprocal of x instead of DEF. This improves cases like:
554 def = x * x
555 t0 = a / def
556 t1 = b / def
557 t2 = c / x
558 Reciprocal optimization of x results in 1 division rather than 2 or 3. */
559 gimple *def_stmt = SSA_NAME_DEF_STMT (def);
561 if (is_gimple_assign (def_stmt)
562 && gimple_assign_rhs_code (def_stmt) == MULT_EXPR
563 && TREE_CODE (gimple_assign_rhs1 (def_stmt)) == SSA_NAME
564 && gimple_assign_rhs1 (def_stmt) == gimple_assign_rhs2 (def_stmt))
566 tree op0 = gimple_assign_rhs1 (def_stmt);
568 FOR_EACH_IMM_USE_FAST (use_p, use_iter, op0)
570 gimple *use_stmt = USE_STMT (use_p);
571 if (is_division_by (use_stmt, op0))
572 sqrt_recip_count++;
576 FOR_EACH_IMM_USE_FAST (use_p, use_iter, def)
578 gimple *use_stmt = USE_STMT (use_p);
579 if (is_division_by (use_stmt, def))
581 register_division_in (gimple_bb (use_stmt), 2);
582 count++;
585 if (is_square_of (use_stmt, def))
587 square_def = gimple_assign_lhs (use_stmt);
588 FOR_EACH_IMM_USE_FAST (square_use_p, square_use_iter, square_def)
590 gimple *square_use_stmt = USE_STMT (square_use_p);
591 if (is_division_by (square_use_stmt, square_def))
593 /* This is executed twice for each division by a square. */
594 register_division_in (gimple_bb (square_use_stmt), 1);
595 square_recip_count++;
601 /* Square reciprocals were counted twice above. */
602 square_recip_count /= 2;
604 /* If it is more profitable to optimize 1 / x, don't optimize 1 / (x * x). */
605 if (sqrt_recip_count > square_recip_count)
606 return;
608 /* Do the expensive part only if we can hope to optimize something. */
609 if (count + square_recip_count >= threshold && count >= 1)
611 gimple *use_stmt;
612 for (occ = occ_head; occ; occ = occ->next)
614 compute_merit (occ);
615 insert_reciprocals (def_gsi, occ, def, NULL, NULL,
616 square_recip_count, threshold);
619 FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, def)
621 if (is_division_by (use_stmt, def))
623 FOR_EACH_IMM_USE_ON_STMT (use_p, use_iter)
624 replace_reciprocal (use_p);
626 else if (square_recip_count > 0 && is_square_of (use_stmt, def))
628 FOR_EACH_IMM_USE_ON_STMT (use_p, use_iter)
630 /* Find all uses of the square that are divisions and
631              replace them by multiplications with the inverse.  */
632 imm_use_iterator square_iterator;
633 gimple *powmult_use_stmt = USE_STMT (use_p);
634 tree powmult_def_name = gimple_assign_lhs (powmult_use_stmt);
636 FOR_EACH_IMM_USE_STMT (powmult_use_stmt,
637 square_iterator, powmult_def_name)
638 FOR_EACH_IMM_USE_ON_STMT (square_use_p, square_iterator)
640 gimple *powmult_use_stmt = USE_STMT (square_use_p);
641 if (is_division_by (powmult_use_stmt, powmult_def_name))
642 replace_reciprocal_squares (square_use_p);
649 for (occ = occ_head; occ; )
650 occ = free_bb (occ);
652 occ_head = NULL;
655 /* Return an internal function that implements the reciprocal of CALL,
656 or IFN_LAST if there is no such function that the target supports. */
658 internal_fn
659 internal_fn_reciprocal (gcall *call)
661 internal_fn ifn;
663 switch (gimple_call_combined_fn (call))
665 CASE_CFN_SQRT:
666 CASE_CFN_SQRT_FN:
667 ifn = IFN_RSQRT;
668 break;
670 default:
671 return IFN_LAST;
674 tree_pair types = direct_internal_fn_types (ifn, call);
675 if (!direct_internal_fn_supported_p (ifn, types, OPTIMIZE_FOR_SPEED))
676 return IFN_LAST;
678 return ifn;
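/* A hypothetical GIMPLE sketch of what the a/func(b) handling in
   pass_cse_reciprocals::execute below does with this function, assuming
   the target supports IFN_RSQRT for the mode:

       t_1 = sqrt (b_2);
       x_3 = a_4 / t_1;

   becomes

       t_1 = IFN_RSQRT (b_2);
       x_3 = a_4 * t_1;

   provided that every non-debug use of t_1 is a division by t_1.  */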
681 /* Go through all the floating-point SSA_NAMEs, and call
682 execute_cse_reciprocals_1 on each of them. */
683 namespace {
685 const pass_data pass_data_cse_reciprocals =
687 GIMPLE_PASS, /* type */
688 "recip", /* name */
689 OPTGROUP_NONE, /* optinfo_flags */
690 TV_TREE_RECIP, /* tv_id */
691 PROP_ssa, /* properties_required */
692 0, /* properties_provided */
693 0, /* properties_destroyed */
694 0, /* todo_flags_start */
695 TODO_update_ssa, /* todo_flags_finish */
698 class pass_cse_reciprocals : public gimple_opt_pass
700 public:
701 pass_cse_reciprocals (gcc::context *ctxt)
702 : gimple_opt_pass (pass_data_cse_reciprocals, ctxt)
705 /* opt_pass methods: */
706 virtual bool gate (function *) { return optimize && flag_reciprocal_math; }
707 virtual unsigned int execute (function *);
709 }; // class pass_cse_reciprocals
711 unsigned int
712 pass_cse_reciprocals::execute (function *fun)
714 basic_block bb;
715 tree arg;
717 occ_pool = new object_allocator<occurrence> ("dominators for recip");
719 memset (&reciprocal_stats, 0, sizeof (reciprocal_stats));
720 calculate_dominance_info (CDI_DOMINATORS);
721 calculate_dominance_info (CDI_POST_DOMINATORS);
723 if (flag_checking)
724 FOR_EACH_BB_FN (bb, fun)
725 gcc_assert (!bb->aux);
727 for (arg = DECL_ARGUMENTS (fun->decl); arg; arg = DECL_CHAIN (arg))
728 if (FLOAT_TYPE_P (TREE_TYPE (arg))
729 && is_gimple_reg (arg))
731 tree name = ssa_default_def (fun, arg);
732 if (name)
733 execute_cse_reciprocals_1 (NULL, name);
736 FOR_EACH_BB_FN (bb, fun)
738 tree def;
740 for (gphi_iterator gsi = gsi_start_phis (bb); !gsi_end_p (gsi);
741 gsi_next (&gsi))
743 gphi *phi = gsi.phi ();
744 def = PHI_RESULT (phi);
745 if (! virtual_operand_p (def)
746 && FLOAT_TYPE_P (TREE_TYPE (def)))
747 execute_cse_reciprocals_1 (NULL, def);
750 for (gimple_stmt_iterator gsi = gsi_after_labels (bb); !gsi_end_p (gsi);
751 gsi_next (&gsi))
753 gimple *stmt = gsi_stmt (gsi);
755 if (gimple_has_lhs (stmt)
756 && (def = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_DEF)) != NULL
757 && FLOAT_TYPE_P (TREE_TYPE (def))
758 && TREE_CODE (def) == SSA_NAME)
759 execute_cse_reciprocals_1 (&gsi, def);
762 if (optimize_bb_for_size_p (bb))
763 continue;
765 /* Scan for a/func(b) and convert it to reciprocal a*rfunc(b). */
766 for (gimple_stmt_iterator gsi = gsi_after_labels (bb); !gsi_end_p (gsi);
767 gsi_next (&gsi))
769 gimple *stmt = gsi_stmt (gsi);
771 if (is_gimple_assign (stmt)
772 && gimple_assign_rhs_code (stmt) == RDIV_EXPR)
774 tree arg1 = gimple_assign_rhs2 (stmt);
775 gimple *stmt1;
777 if (TREE_CODE (arg1) != SSA_NAME)
778 continue;
780 stmt1 = SSA_NAME_DEF_STMT (arg1);
782 if (is_gimple_call (stmt1)
783 && gimple_call_lhs (stmt1))
785 bool fail;
786 imm_use_iterator ui;
787 use_operand_p use_p;
788 tree fndecl = NULL_TREE;
790 gcall *call = as_a <gcall *> (stmt1);
791 internal_fn ifn = internal_fn_reciprocal (call);
792 if (ifn == IFN_LAST)
794 fndecl = gimple_call_fndecl (call);
795 if (!fndecl
796 || DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_MD)
797 continue;
798 fndecl = targetm.builtin_reciprocal (fndecl);
799 if (!fndecl)
800 continue;
803 /* Check that all uses of the SSA name are divisions,
804 otherwise replacing the defining statement will do
805 the wrong thing. */
806 fail = false;
807 FOR_EACH_IMM_USE_FAST (use_p, ui, arg1)
809 gimple *stmt2 = USE_STMT (use_p);
810 if (is_gimple_debug (stmt2))
811 continue;
812 if (!is_gimple_assign (stmt2)
813 || gimple_assign_rhs_code (stmt2) != RDIV_EXPR
814 || gimple_assign_rhs1 (stmt2) == arg1
815 || gimple_assign_rhs2 (stmt2) != arg1)
817 fail = true;
818 break;
821 if (fail)
822 continue;
824 gimple_replace_ssa_lhs (call, arg1);
825 if (gimple_call_internal_p (call) != (ifn != IFN_LAST))
827 auto_vec<tree, 4> args;
828 for (unsigned int i = 0;
829 i < gimple_call_num_args (call); i++)
830 args.safe_push (gimple_call_arg (call, i));
831 gcall *stmt2;
832 if (ifn == IFN_LAST)
833 stmt2 = gimple_build_call_vec (fndecl, args);
834 else
835 stmt2 = gimple_build_call_internal_vec (ifn, args);
836 gimple_call_set_lhs (stmt2, arg1);
837 if (gimple_vdef (call))
839 gimple_set_vdef (stmt2, gimple_vdef (call));
840 SSA_NAME_DEF_STMT (gimple_vdef (stmt2)) = stmt2;
842 gimple_call_set_nothrow (stmt2,
843 gimple_call_nothrow_p (call));
844 gimple_set_vuse (stmt2, gimple_vuse (call));
845 gimple_stmt_iterator gsi2 = gsi_for_stmt (call);
846 gsi_replace (&gsi2, stmt2, true);
848 else
850 if (ifn == IFN_LAST)
851 gimple_call_set_fndecl (call, fndecl);
852 else
853 gimple_call_set_internal_fn (call, ifn);
854 update_stmt (call);
856 reciprocal_stats.rfuncs_inserted++;
858 FOR_EACH_IMM_USE_STMT (stmt, ui, arg1)
860 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
861 gimple_assign_set_rhs_code (stmt, MULT_EXPR);
862 fold_stmt_inplace (&gsi);
863 update_stmt (stmt);
870 statistics_counter_event (fun, "reciprocal divs inserted",
871 reciprocal_stats.rdivs_inserted);
872 statistics_counter_event (fun, "reciprocal functions inserted",
873 reciprocal_stats.rfuncs_inserted);
875 free_dominance_info (CDI_DOMINATORS);
876 free_dominance_info (CDI_POST_DOMINATORS);
877 delete occ_pool;
878 return 0;
881 } // anon namespace
883 gimple_opt_pass *
884 make_pass_cse_reciprocals (gcc::context *ctxt)
886 return new pass_cse_reciprocals (ctxt);
889 /* Records an occurrence at statement USE_STMT in the vector of trees
890    STMTS if it is dominated by *TOP_BB, or dominates it, or if *TOP_BB
891    is not yet initialized.  Returns true if the occurrence was pushed on
892 the vector. Adjusts *TOP_BB to be the basic block dominating all
893 statements in the vector. */
895 static bool
896 maybe_record_sincos (vec<gimple *> *stmts,
897 basic_block *top_bb, gimple *use_stmt)
899 basic_block use_bb = gimple_bb (use_stmt);
900 if (*top_bb
901 && (*top_bb == use_bb
902 || dominated_by_p (CDI_DOMINATORS, use_bb, *top_bb)))
903 stmts->safe_push (use_stmt);
904 else if (!*top_bb
905 || dominated_by_p (CDI_DOMINATORS, *top_bb, use_bb))
907 stmts->safe_push (use_stmt);
908 *top_bb = use_bb;
910 else
911 return false;
913 return true;
916 /* Look for sin, cos and cexpi calls with the same argument NAME and
917 create a single call to cexpi CSEing the result in this case.
918 We first walk over all immediate uses of the argument collecting
919 statements that we can CSE in a vector and in a second pass replace
920 the statement rhs with a REALPART or IMAGPART expression on the
921 result of the cexpi call we insert before the use statement that
922 dominates all other candidates. */
924 static bool
925 execute_cse_sincos_1 (tree name)
927 gimple_stmt_iterator gsi;
928 imm_use_iterator use_iter;
929 tree fndecl, res, type;
930 gimple *def_stmt, *use_stmt, *stmt;
931 int seen_cos = 0, seen_sin = 0, seen_cexpi = 0;
932 auto_vec<gimple *> stmts;
933 basic_block top_bb = NULL;
934 int i;
935 bool cfg_changed = false;
937 type = TREE_TYPE (name);
938 FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, name)
940 if (gimple_code (use_stmt) != GIMPLE_CALL
941 || !gimple_call_lhs (use_stmt))
942 continue;
944 switch (gimple_call_combined_fn (use_stmt))
946 CASE_CFN_COS:
947 seen_cos |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
948 break;
950 CASE_CFN_SIN:
951 seen_sin |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
952 break;
954 CASE_CFN_CEXPI:
955 seen_cexpi |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
956 break;
958 default:;
962 if (seen_cos + seen_sin + seen_cexpi <= 1)
963 return false;
965 /* Simply insert cexpi at the beginning of top_bb but not earlier than
966 the name def statement. */
967 fndecl = mathfn_built_in (type, BUILT_IN_CEXPI);
968 if (!fndecl)
969 return false;
970 stmt = gimple_build_call (fndecl, 1, name);
971 res = make_temp_ssa_name (TREE_TYPE (TREE_TYPE (fndecl)), stmt, "sincostmp");
972 gimple_call_set_lhs (stmt, res);
974 def_stmt = SSA_NAME_DEF_STMT (name);
975 if (!SSA_NAME_IS_DEFAULT_DEF (name)
976 && gimple_code (def_stmt) != GIMPLE_PHI
977 && gimple_bb (def_stmt) == top_bb)
979 gsi = gsi_for_stmt (def_stmt);
980 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
982 else
984 gsi = gsi_after_labels (top_bb);
985 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
987 sincos_stats.inserted++;
989 /* And adjust the recorded old call sites. */
990 for (i = 0; stmts.iterate (i, &use_stmt); ++i)
992 tree rhs = NULL;
994 switch (gimple_call_combined_fn (use_stmt))
996 CASE_CFN_COS:
997 rhs = fold_build1 (REALPART_EXPR, type, res);
998 break;
1000 CASE_CFN_SIN:
1001 rhs = fold_build1 (IMAGPART_EXPR, type, res);
1002 break;
1004 CASE_CFN_CEXPI:
1005 rhs = res;
1006 break;
1008 default:;
1009 gcc_unreachable ();
1012 /* Replace call with a copy. */
1013 stmt = gimple_build_assign (gimple_call_lhs (use_stmt), rhs);
1015 gsi = gsi_for_stmt (use_stmt);
1016 gsi_replace (&gsi, stmt, true);
1017 if (gimple_purge_dead_eh_edges (gimple_bb (stmt)))
1018 cfg_changed = true;
1021 return cfg_changed;
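/* A hypothetical GIMPLE sketch of the transformation above:

       s_1 = sin (x_2);
       c_3 = cos (x_2);

   becomes, after a single cexpi call is inserted at the dominating use,

       sincostmp_4 = cexpi (x_2);
       s_1 = IMAGPART_EXPR <sincostmp_4>;
       c_3 = REALPART_EXPR <sincostmp_4>;  */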
1024 /* To evaluate powi(x,n), the floating point value x raised to the
1025 constant integer exponent n, we use a hybrid algorithm that
1026 combines the "window method" with look-up tables. For an
1027 introduction to exponentiation algorithms and "addition chains",
1028 see section 4.6.3, "Evaluation of Powers" of Donald E. Knuth,
1029 "Seminumerical Algorithms", Vol. 2, "The Art of Computer Programming",
1030 3rd Edition, 1998, and Daniel M. Gordon, "A Survey of Fast Exponentiation
1031 Methods", Journal of Algorithms, Vol. 27, pp. 129-146, 1998. */
1033 /* Provide a default value for POWI_MAX_MULTS, the maximum number of
1034 multiplications to inline before calling the system library's pow
1035 function. powi(x,n) requires at worst 2*bits(n)-2 multiplications,
1036 so this default never requires calling pow, powf or powl. */
1038 #ifndef POWI_MAX_MULTS
1039 #define POWI_MAX_MULTS (2*HOST_BITS_PER_WIDE_INT-2)
1040 #endif
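/* For example, with a 64-bit HOST_WIDE_INT this default works out to
   2 * 64 - 2 = 126 multiplications.  */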
1042 /* The size of the "optimal power tree" lookup table. All
1043 exponents less than this value are simply looked up in the
1044 powi_table below. This threshold is also used to size the
1045 cache of pseudo registers that hold intermediate results. */
1046 #define POWI_TABLE_SIZE 256
1048 /* The size, in bits, of the window used in the "window method"
1049 exponentiation algorithm. This is equivalent to a radix of
1050 (1<<POWI_WINDOW_SIZE) in the corresponding "m-ary method". */
1051 #define POWI_WINDOW_SIZE 3
1053 /* The following table is an efficient representation of an
1054 "optimal power tree". For each value, i, the corresponding
1055    value, j, in the table states that an optimal evaluation
1056 sequence for calculating pow(x,i) can be found by evaluating
1057 pow(x,j)*pow(x,i-j). An optimal power tree for the first
1058    100 integers is given in Knuth's "Seminumerical Algorithms".  */
1060 static const unsigned char powi_table[POWI_TABLE_SIZE] =
1062 0, 1, 1, 2, 2, 3, 3, 4, /* 0 - 7 */
1063 4, 6, 5, 6, 6, 10, 7, 9, /* 8 - 15 */
1064 8, 16, 9, 16, 10, 12, 11, 13, /* 16 - 23 */
1065 12, 17, 13, 18, 14, 24, 15, 26, /* 24 - 31 */
1066 16, 17, 17, 19, 18, 33, 19, 26, /* 32 - 39 */
1067 20, 25, 21, 40, 22, 27, 23, 44, /* 40 - 47 */
1068 24, 32, 25, 34, 26, 29, 27, 44, /* 48 - 55 */
1069 28, 31, 29, 34, 30, 60, 31, 36, /* 56 - 63 */
1070 32, 64, 33, 34, 34, 46, 35, 37, /* 64 - 71 */
1071 36, 65, 37, 50, 38, 48, 39, 69, /* 72 - 79 */
1072 40, 49, 41, 43, 42, 51, 43, 58, /* 80 - 87 */
1073 44, 64, 45, 47, 46, 59, 47, 76, /* 88 - 95 */
1074 48, 65, 49, 66, 50, 67, 51, 66, /* 96 - 103 */
1075 52, 70, 53, 74, 54, 104, 55, 74, /* 104 - 111 */
1076 56, 64, 57, 69, 58, 78, 59, 68, /* 112 - 119 */
1077 60, 61, 61, 80, 62, 75, 63, 68, /* 120 - 127 */
1078 64, 65, 65, 128, 66, 129, 67, 90, /* 128 - 135 */
1079 68, 73, 69, 131, 70, 94, 71, 88, /* 136 - 143 */
1080 72, 128, 73, 98, 74, 132, 75, 121, /* 144 - 151 */
1081 76, 102, 77, 124, 78, 132, 79, 106, /* 152 - 159 */
1082 80, 97, 81, 160, 82, 99, 83, 134, /* 160 - 167 */
1083 84, 86, 85, 95, 86, 160, 87, 100, /* 168 - 175 */
1084 88, 113, 89, 98, 90, 107, 91, 122, /* 176 - 183 */
1085 92, 111, 93, 102, 94, 126, 95, 150, /* 184 - 191 */
1086 96, 128, 97, 130, 98, 133, 99, 195, /* 192 - 199 */
1087 100, 128, 101, 123, 102, 164, 103, 138, /* 200 - 207 */
1088 104, 145, 105, 146, 106, 109, 107, 149, /* 208 - 215 */
1089 108, 200, 109, 146, 110, 170, 111, 157, /* 216 - 223 */
1090 112, 128, 113, 130, 114, 182, 115, 132, /* 224 - 231 */
1091 116, 200, 117, 132, 118, 158, 119, 206, /* 232 - 239 */
1092 120, 240, 121, 162, 122, 147, 123, 152, /* 240 - 247 */
1093 124, 166, 125, 214, 126, 138, 127, 153, /* 248 - 255 */
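/* Worked example, read off the table above: powi_table[15] == 9, so
   x**15 is computed as x**9 * x**6.  Recursing, powi_table[9] == 6,
   powi_table[6] == 3, powi_table[3] == 2 and powi_table[2] == 1 give
   the chain x**2, x**3, x**6, x**9, x**15, i.e. five multiplications
   instead of the fourteen of the naive product.  */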
1097 /* Return the number of multiplications required to calculate
1098 powi(x,n) where n is less than POWI_TABLE_SIZE. This is a
1099 subroutine of powi_cost. CACHE is an array indicating
1100 which exponents have already been calculated. */
1102 static int
1103 powi_lookup_cost (unsigned HOST_WIDE_INT n, bool *cache)
1105 /* If we've already calculated this exponent, then this evaluation
1106 doesn't require any additional multiplications. */
1107 if (cache[n])
1108 return 0;
1110 cache[n] = true;
1111 return powi_lookup_cost (n - powi_table[n], cache)
1112 + powi_lookup_cost (powi_table[n], cache) + 1;
1115 /* Return the number of multiplications required to calculate
1116 powi(x,n) for an arbitrary x, given the exponent N. This
1117 function needs to be kept in sync with powi_as_mults below. */
1119 static int
1120 powi_cost (HOST_WIDE_INT n)
1122 bool cache[POWI_TABLE_SIZE];
1123 unsigned HOST_WIDE_INT digit;
1124 unsigned HOST_WIDE_INT val;
1125 int result;
1127 if (n == 0)
1128 return 0;
1130 /* Ignore the reciprocal when calculating the cost. */
1131 val = (n < 0) ? -n : n;
1133 /* Initialize the exponent cache. */
1134 memset (cache, 0, POWI_TABLE_SIZE * sizeof (bool));
1135 cache[1] = true;
1137 result = 0;
1139 while (val >= POWI_TABLE_SIZE)
1141 if (val & 1)
1143 digit = val & ((1 << POWI_WINDOW_SIZE) - 1);
1144 result += powi_lookup_cost (digit, cache)
1145 + POWI_WINDOW_SIZE + 1;
1146 val >>= POWI_WINDOW_SIZE;
1148 else
1150 val >>= 1;
1151 result++;
1155 return result + powi_lookup_cost (val, cache);
1158 /* Recursive subroutine of powi_as_mults. This function takes the
1159 array, CACHE, of already calculated exponents and an exponent N and
1160 returns a tree that corresponds to CACHE[1]**N, with type TYPE. */
1162 static tree
1163 powi_as_mults_1 (gimple_stmt_iterator *gsi, location_t loc, tree type,
1164 HOST_WIDE_INT n, tree *cache)
1166 tree op0, op1, ssa_target;
1167 unsigned HOST_WIDE_INT digit;
1168 gassign *mult_stmt;
1170 if (n < POWI_TABLE_SIZE && cache[n])
1171 return cache[n];
1173 ssa_target = make_temp_ssa_name (type, NULL, "powmult");
1175 if (n < POWI_TABLE_SIZE)
1177 cache[n] = ssa_target;
1178 op0 = powi_as_mults_1 (gsi, loc, type, n - powi_table[n], cache);
1179 op1 = powi_as_mults_1 (gsi, loc, type, powi_table[n], cache);
1181 else if (n & 1)
1183 digit = n & ((1 << POWI_WINDOW_SIZE) - 1);
1184 op0 = powi_as_mults_1 (gsi, loc, type, n - digit, cache);
1185 op1 = powi_as_mults_1 (gsi, loc, type, digit, cache);
1187 else
1189 op0 = powi_as_mults_1 (gsi, loc, type, n >> 1, cache);
1190 op1 = op0;
1193 mult_stmt = gimple_build_assign (ssa_target, MULT_EXPR, op0, op1);
1194 gimple_set_location (mult_stmt, loc);
1195 gsi_insert_before (gsi, mult_stmt, GSI_SAME_STMT);
1197 return ssa_target;
1200 /* Convert ARG0**N to a tree of multiplications of ARG0 with itself.
1201 This function needs to be kept in sync with powi_cost above. */
1203 static tree
1204 powi_as_mults (gimple_stmt_iterator *gsi, location_t loc,
1205 tree arg0, HOST_WIDE_INT n)
1207 tree cache[POWI_TABLE_SIZE], result, type = TREE_TYPE (arg0);
1208 gassign *div_stmt;
1209 tree target;
1211 if (n == 0)
1212 return build_real (type, dconst1);
1214 memset (cache, 0, sizeof (cache));
1215 cache[1] = arg0;
1217 result = powi_as_mults_1 (gsi, loc, type, (n < 0) ? -n : n, cache);
1218 if (n >= 0)
1219 return result;
1221 /* If the original exponent was negative, reciprocate the result. */
1222 target = make_temp_ssa_name (type, NULL, "powmult");
1223 div_stmt = gimple_build_assign (target, RDIV_EXPR,
1224 build_real (type, dconst1), result);
1225 gimple_set_location (div_stmt, loc);
1226 gsi_insert_before (gsi, div_stmt, GSI_SAME_STMT);
1228 return target;
1231 /* ARG0 and N are the two arguments to a powi builtin in GSI with
1232 location info LOC. If the arguments are appropriate, create an
1233 equivalent sequence of statements prior to GSI using an optimal
1234    number of multiplications, and return an expression holding the
1235 result. */
1237 static tree
1238 gimple_expand_builtin_powi (gimple_stmt_iterator *gsi, location_t loc,
1239 tree arg0, HOST_WIDE_INT n)
1241 /* Avoid largest negative number. */
1242 if (n != -n
1243 && ((n >= -1 && n <= 2)
1244 || (optimize_function_for_speed_p (cfun)
1245 && powi_cost (n) <= POWI_MAX_MULTS)))
1246 return powi_as_mults (gsi, loc, arg0, n);
1248 return NULL_TREE;
1251 /* Build a gimple call statement that calls FN with argument ARG.
1252 Set the lhs of the call statement to a fresh SSA name. Insert the
1253 statement prior to GSI's current position, and return the fresh
1254 SSA name. */
1256 static tree
1257 build_and_insert_call (gimple_stmt_iterator *gsi, location_t loc,
1258 tree fn, tree arg)
1260 gcall *call_stmt;
1261 tree ssa_target;
1263 call_stmt = gimple_build_call (fn, 1, arg);
1264 ssa_target = make_temp_ssa_name (TREE_TYPE (arg), NULL, "powroot");
1265 gimple_set_lhs (call_stmt, ssa_target);
1266 gimple_set_location (call_stmt, loc);
1267 gsi_insert_before (gsi, call_stmt, GSI_SAME_STMT);
1269 return ssa_target;
1272 /* Build a gimple binary operation with the given CODE and arguments
1273 ARG0, ARG1, assigning the result to a new SSA name for variable
1274 TARGET. Insert the statement prior to GSI's current position, and
1275    return the fresh SSA name.  */
1277 static tree
1278 build_and_insert_binop (gimple_stmt_iterator *gsi, location_t loc,
1279 const char *name, enum tree_code code,
1280 tree arg0, tree arg1)
1282 tree result = make_temp_ssa_name (TREE_TYPE (arg0), NULL, name);
1283 gassign *stmt = gimple_build_assign (result, code, arg0, arg1);
1284 gimple_set_location (stmt, loc);
1285 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
1286 return result;
1289 /* Build a gimple reference operation with the given CODE and argument
1290 ARG, assigning the result to a new SSA name of TYPE with NAME.
1291 Insert the statement prior to GSI's current position, and return
1292 the fresh SSA name. */
1294 static inline tree
1295 build_and_insert_ref (gimple_stmt_iterator *gsi, location_t loc, tree type,
1296 const char *name, enum tree_code code, tree arg0)
1298 tree result = make_temp_ssa_name (type, NULL, name);
1299 gimple *stmt = gimple_build_assign (result, build1 (code, type, arg0));
1300 gimple_set_location (stmt, loc);
1301 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
1302 return result;
1305 /* Build a gimple assignment to cast VAL to TYPE. Insert the statement
1306 prior to GSI's current position, and return the fresh SSA name. */
1308 static tree
1309 build_and_insert_cast (gimple_stmt_iterator *gsi, location_t loc,
1310 tree type, tree val)
1312 tree result = make_ssa_name (type);
1313 gassign *stmt = gimple_build_assign (result, NOP_EXPR, val);
1314 gimple_set_location (stmt, loc);
1315 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
1316 return result;
1319 struct pow_synth_sqrt_info
1321 bool *factors;
1322 unsigned int deepest;
1323 unsigned int num_mults;
1326 /* Return true iff the real value C can be represented as a
1327 sum of powers of 0.5 up to N. That is:
1328 C == SUM<i from 1..N> (a[i]*(0.5**i)) where a[i] is either 0 or 1.
1329 Record in INFO the various parameters of the synthesis algorithm such
1330 as the factors a[i], the maximum 0.5 power and the number of
1331 multiplications that will be required. */
1333 bool
1334 representable_as_half_series_p (REAL_VALUE_TYPE c, unsigned n,
1335 struct pow_synth_sqrt_info *info)
1337 REAL_VALUE_TYPE factor = dconsthalf;
1338 REAL_VALUE_TYPE remainder = c;
1340 info->deepest = 0;
1341 info->num_mults = 0;
1342 memset (info->factors, 0, n * sizeof (bool));
1344 for (unsigned i = 0; i < n; i++)
1346 REAL_VALUE_TYPE res;
1348 /* If something inexact happened bail out now. */
1349 if (real_arithmetic (&res, MINUS_EXPR, &remainder, &factor))
1350 return false;
1352 /* We have hit zero. The number is representable as a sum
1353 of powers of 0.5. */
1354 if (real_equal (&res, &dconst0))
1356 info->factors[i] = true;
1357 info->deepest = i + 1;
1358 return true;
1360 else if (!REAL_VALUE_NEGATIVE (res))
1362 remainder = res;
1363 info->factors[i] = true;
1364 info->num_mults++;
1366 else
1367 info->factors[i] = false;
1369 real_arithmetic (&factor, MULT_EXPR, &factor, &dconsthalf);
1371 return false;
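/* Worked example for the synthesis above: c = 0.375 = 0.25 + 0.125 is
   representable for n >= 3; the recorded factors are {0, 1, 1},
   deepest == 3 and num_mults == 1, corresponding to
   sqrt (sqrt (x)) * sqrt (sqrt (sqrt (x))).  */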
1374 /* Return the tree corresponding to FN being applied
1375 to ARG N times at GSI and LOC.
1376 Look up previous results from CACHE if need be.
1377 cache[0] should contain just plain ARG i.e. FN applied to ARG 0 times. */
1379 static tree
1380 get_fn_chain (tree arg, unsigned int n, gimple_stmt_iterator *gsi,
1381 tree fn, location_t loc, tree *cache)
1383 tree res = cache[n];
1384 if (!res)
1386 tree prev = get_fn_chain (arg, n - 1, gsi, fn, loc, cache);
1387 res = build_and_insert_call (gsi, loc, fn, prev);
1388 cache[n] = res;
1391 return res;
1394 /* Print to STREAM the repeated application of function FNAME to ARG
1395 N times. So, for FNAME = "foo", ARG = "x", N = 2 it would print:
1396 "foo (foo (x))". */
1398 static void
1399 print_nested_fn (FILE* stream, const char *fname, const char* arg,
1400 unsigned int n)
1402 if (n == 0)
1403 fprintf (stream, "%s", arg);
1404 else
1406 fprintf (stream, "%s (", fname);
1407 print_nested_fn (stream, fname, arg, n - 1);
1408 fprintf (stream, ")");
1412 /* Print to STREAM the fractional sequence of sqrt chains
1413 applied to ARG, described by INFO. Used for the dump file. */
1415 static void
1416 dump_fractional_sqrt_sequence (FILE *stream, const char *arg,
1417 struct pow_synth_sqrt_info *info)
1419 for (unsigned int i = 0; i < info->deepest; i++)
1421 bool is_set = info->factors[i];
1422 if (is_set)
1424 print_nested_fn (stream, "sqrt", arg, i + 1);
1425 if (i != info->deepest - 1)
1426 fprintf (stream, " * ");
1431 /* Print to STREAM a representation of raising ARG to an integer
1432 power N. Used for the dump file. */
1434 static void
1435 dump_integer_part (FILE *stream, const char* arg, HOST_WIDE_INT n)
1437 if (n > 1)
1438 fprintf (stream, "powi (%s, " HOST_WIDE_INT_PRINT_DEC ")", arg, n);
1439 else if (n == 1)
1440 fprintf (stream, "%s", arg);
1443 /* Attempt to synthesize a POW[F] (ARG0, ARG1) call using chains of
1444 square roots. Place at GSI and LOC. Limit the maximum depth
1445 of the sqrt chains to MAX_DEPTH. Return the tree holding the
1446 result of the expanded sequence or NULL_TREE if the expansion failed.
1448 This routine assumes that ARG1 is a real number with a fractional part
1449 (the integer exponent case will have been handled earlier in
1450 gimple_expand_builtin_pow).
1452 For ARG1 > 0.0:
1453 * For ARG1 composed of a whole part WHOLE_PART and a fractional part
1454 FRAC_PART i.e. WHOLE_PART == floor (ARG1) and
1455 FRAC_PART == ARG1 - WHOLE_PART:
1456 Produce POWI (ARG0, WHOLE_PART) * POW (ARG0, FRAC_PART) where
1457 POW (ARG0, FRAC_PART) is expanded as a product of square root chains
1458 if it can be expressed as such, that is if FRAC_PART satisfies:
1459 FRAC_PART == <SUM from i = 1 until MAX_DEPTH> (a[i] * (0.5**i))
1460 where integer a[i] is either 0 or 1.
1462 Example:
1463 POW (x, 3.625) == POWI (x, 3) * POW (x, 0.625)
1464 --> POWI (x, 3) * SQRT (x) * SQRT (SQRT (SQRT (x)))
1466 For ARG1 < 0.0 there are two approaches:
1467 * (A) Expand to 1.0 / POW (ARG0, -ARG1) where POW (ARG0, -ARG1)
1468 is calculated as above.
1470 Example:
1471 POW (x, -5.625) == 1.0 / POW (x, 5.625)
1472 --> 1.0 / (POWI (x, 5) * SQRT (x) * SQRT (SQRT (SQRT (x))))
1474 * (B) : WHOLE_PART := - ceil (abs (ARG1))
1475 FRAC_PART := ARG1 - WHOLE_PART
1476 and expand to POW (x, FRAC_PART) / POWI (x, WHOLE_PART).
1477 Example:
1478 POW (x, -5.875) == POW (x, 0.125) / POWI (X, 6)
1479 --> SQRT (SQRT (SQRT (x))) / (POWI (x, 6))
1481 For ARG1 < 0.0 we choose between (A) and (B) depending on
1482 how many multiplications we'd have to do.
1483 So, for the example in (B): POW (x, -5.875), if we were to
1484 follow algorithm (A) we would produce:
1485 1.0 / POWI (X, 5) * SQRT (X) * SQRT (SQRT (X)) * SQRT (SQRT (SQRT (X)))
1486 which contains more multiplications than approach (B).
1488 Hopefully, this approach will eliminate potentially expensive POW library
1489 calls when unsafe floating point math is enabled and allow the compiler to
1490 further optimise the multiplies, square roots and divides produced by this
1491 function. */
1493 static tree
1494 expand_pow_as_sqrts (gimple_stmt_iterator *gsi, location_t loc,
1495 tree arg0, tree arg1, HOST_WIDE_INT max_depth)
1497 tree type = TREE_TYPE (arg0);
1498 machine_mode mode = TYPE_MODE (type);
1499 tree sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT);
1500 bool one_over = true;
1502 if (!sqrtfn)
1503 return NULL_TREE;
1505 if (TREE_CODE (arg1) != REAL_CST)
1506 return NULL_TREE;
1508 REAL_VALUE_TYPE exp_init = TREE_REAL_CST (arg1);
1510 gcc_assert (max_depth > 0);
1511 tree *cache = XALLOCAVEC (tree, max_depth + 1);
1513 struct pow_synth_sqrt_info synth_info;
1514 synth_info.factors = XALLOCAVEC (bool, max_depth + 1);
1515 synth_info.deepest = 0;
1516 synth_info.num_mults = 0;
1518 bool neg_exp = REAL_VALUE_NEGATIVE (exp_init);
1519 REAL_VALUE_TYPE exp = real_value_abs (&exp_init);
1521 /* The whole and fractional parts of exp. */
1522 REAL_VALUE_TYPE whole_part;
1523 REAL_VALUE_TYPE frac_part;
1525 real_floor (&whole_part, mode, &exp);
1526 real_arithmetic (&frac_part, MINUS_EXPR, &exp, &whole_part);
1529 REAL_VALUE_TYPE ceil_whole = dconst0;
1530 REAL_VALUE_TYPE ceil_fract = dconst0;
1532 if (neg_exp)
1534 real_ceil (&ceil_whole, mode, &exp);
1535 real_arithmetic (&ceil_fract, MINUS_EXPR, &ceil_whole, &exp);
1538 if (!representable_as_half_series_p (frac_part, max_depth, &synth_info))
1539 return NULL_TREE;
1541 /* Check whether it's more profitable to not use 1.0 / ... */
1542 if (neg_exp)
1544 struct pow_synth_sqrt_info alt_synth_info;
1545 alt_synth_info.factors = XALLOCAVEC (bool, max_depth + 1);
1546 alt_synth_info.deepest = 0;
1547 alt_synth_info.num_mults = 0;
1549 if (representable_as_half_series_p (ceil_fract, max_depth,
1550 &alt_synth_info)
1551 && alt_synth_info.deepest <= synth_info.deepest
1552 && alt_synth_info.num_mults < synth_info.num_mults)
1554 whole_part = ceil_whole;
1555 frac_part = ceil_fract;
1556 synth_info.deepest = alt_synth_info.deepest;
1557 synth_info.num_mults = alt_synth_info.num_mults;
1558 memcpy (synth_info.factors, alt_synth_info.factors,
1559 (max_depth + 1) * sizeof (bool));
1560 one_over = false;
1564 HOST_WIDE_INT n = real_to_integer (&whole_part);
1565 REAL_VALUE_TYPE cint;
1566 real_from_integer (&cint, VOIDmode, n, SIGNED);
1568 if (!real_identical (&whole_part, &cint))
1569 return NULL_TREE;
1571 if (powi_cost (n) + synth_info.num_mults > POWI_MAX_MULTS)
1572 return NULL_TREE;
1574 memset (cache, 0, (max_depth + 1) * sizeof (tree));
1576 tree integer_res = n == 0 ? build_real (type, dconst1) : arg0;
1578 /* Calculate the integer part of the exponent. */
1579 if (n > 1)
1581 integer_res = gimple_expand_builtin_powi (gsi, loc, arg0, n);
1582 if (!integer_res)
1583 return NULL_TREE;
1586 if (dump_file)
1588 char string[64];
1590 real_to_decimal (string, &exp_init, sizeof (string), 0, 1);
1591 fprintf (dump_file, "synthesizing pow (x, %s) as:\n", string);
1593 if (neg_exp)
1595 if (one_over)
1597 fprintf (dump_file, "1.0 / (");
1598 dump_integer_part (dump_file, "x", n);
1599 if (n > 0)
1600 fprintf (dump_file, " * ");
1601 dump_fractional_sqrt_sequence (dump_file, "x", &synth_info);
1602 fprintf (dump_file, ")");
1604 else
1606 dump_fractional_sqrt_sequence (dump_file, "x", &synth_info);
1607 fprintf (dump_file, " / (");
1608 dump_integer_part (dump_file, "x", n);
1609 fprintf (dump_file, ")");
1612 else
1614 dump_fractional_sqrt_sequence (dump_file, "x", &synth_info);
1615 if (n > 0)
1616 fprintf (dump_file, " * ");
1617 dump_integer_part (dump_file, "x", n);
1620 fprintf (dump_file, "\ndeepest sqrt chain: %d\n", synth_info.deepest);
1624 tree fract_res = NULL_TREE;
1625 cache[0] = arg0;
1627 /* Calculate the fractional part of the exponent. */
1628 for (unsigned i = 0; i < synth_info.deepest; i++)
1630 if (synth_info.factors[i])
1632 tree sqrt_chain = get_fn_chain (arg0, i + 1, gsi, sqrtfn, loc, cache);
1634 if (!fract_res)
1635 fract_res = sqrt_chain;
1637 else
1638 fract_res = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
1639 fract_res, sqrt_chain);
1643 tree res = NULL_TREE;
1645 if (neg_exp)
1647 if (one_over)
1649 if (n > 0)
1650 res = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
1651 fract_res, integer_res);
1652 else
1653 res = fract_res;
1655 res = build_and_insert_binop (gsi, loc, "powrootrecip", RDIV_EXPR,
1656 build_real (type, dconst1), res);
1658 else
1660 res = build_and_insert_binop (gsi, loc, "powroot", RDIV_EXPR,
1661 fract_res, integer_res);
1664 else
1665 res = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
1666 fract_res, integer_res);
1667 return res;
1670 /* ARG0 and ARG1 are the two arguments to a pow builtin call in GSI
1671 with location info LOC. If possible, create an equivalent and
1672 less expensive sequence of statements prior to GSI, and return an
1673    expression holding the result.  */
1675 static tree
1676 gimple_expand_builtin_pow (gimple_stmt_iterator *gsi, location_t loc,
1677 tree arg0, tree arg1)
1679 REAL_VALUE_TYPE c, cint, dconst1_3, dconst1_4, dconst1_6;
1680 REAL_VALUE_TYPE c2, dconst3;
1681 HOST_WIDE_INT n;
1682 tree type, sqrtfn, cbrtfn, sqrt_arg0, result, cbrt_x, powi_cbrt_x;
1683 machine_mode mode;
1684 bool speed_p = optimize_bb_for_speed_p (gsi_bb (*gsi));
1685 bool hw_sqrt_exists, c_is_int, c2_is_int;
1687 dconst1_4 = dconst1;
1688 SET_REAL_EXP (&dconst1_4, REAL_EXP (&dconst1_4) - 2);
1690 /* If the exponent isn't a constant, there's nothing of interest
1691 to be done. */
1692 if (TREE_CODE (arg1) != REAL_CST)
1693 return NULL_TREE;
1695 /* Don't perform the operation if flag_signaling_nans is on
1696 and the operand is a signaling NaN. */
1697 if (HONOR_SNANS (TYPE_MODE (TREE_TYPE (arg1)))
1698 && ((TREE_CODE (arg0) == REAL_CST
1699 && REAL_VALUE_ISSIGNALING_NAN (TREE_REAL_CST (arg0)))
1700 || REAL_VALUE_ISSIGNALING_NAN (TREE_REAL_CST (arg1))))
1701 return NULL_TREE;
1703 /* If the exponent is equivalent to an integer, expand to an optimal
1704 multiplication sequence when profitable. */
1705 c = TREE_REAL_CST (arg1);
1706 n = real_to_integer (&c);
1707 real_from_integer (&cint, VOIDmode, n, SIGNED);
1708 c_is_int = real_identical (&c, &cint);
1710 if (c_is_int
1711 && ((n >= -1 && n <= 2)
1712 || (flag_unsafe_math_optimizations
1713 && speed_p
1714 && powi_cost (n) <= POWI_MAX_MULTS)))
1715 return gimple_expand_builtin_powi (gsi, loc, arg0, n);
1717 /* Attempt various optimizations using sqrt and cbrt. */
1718 type = TREE_TYPE (arg0);
1719 mode = TYPE_MODE (type);
1720 sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT);
1722 /* Optimize pow(x,0.5) = sqrt(x). This replacement is always safe
1723 unless signed zeros must be maintained. pow(-0,0.5) = +0, while
1724 sqrt(-0) = -0. */
1725 if (sqrtfn
1726 && real_equal (&c, &dconsthalf)
1727 && !HONOR_SIGNED_ZEROS (mode))
1728 return build_and_insert_call (gsi, loc, sqrtfn, arg0);
1730 hw_sqrt_exists = optab_handler (sqrt_optab, mode) != CODE_FOR_nothing;
1732 /* Optimize pow(x,1./3.) = cbrt(x). This requires unsafe math
1733 optimizations since 1./3. is not exactly representable. If x
1734 is negative and finite, the correct value of pow(x,1./3.) is
1735 a NaN with the "invalid" exception raised, because the value
1736 of 1./3. actually has an even denominator. The correct value
1737 of cbrt(x) is a negative real value. */
1738 cbrtfn = mathfn_built_in (type, BUILT_IN_CBRT);
1739 dconst1_3 = real_value_truncate (mode, dconst_third ());
1741 if (flag_unsafe_math_optimizations
1742 && cbrtfn
1743 && (!HONOR_NANS (mode) || tree_expr_nonnegative_p (arg0))
1744 && real_equal (&c, &dconst1_3))
1745 return build_and_insert_call (gsi, loc, cbrtfn, arg0);
1747 /* Optimize pow(x,1./6.) = cbrt(sqrt(x)). Don't do this optimization
1748 if we don't have a hardware sqrt insn. */
1749 dconst1_6 = dconst1_3;
1750 SET_REAL_EXP (&dconst1_6, REAL_EXP (&dconst1_6) - 1);
1752 if (flag_unsafe_math_optimizations
1753 && sqrtfn
1754 && cbrtfn
1755 && (!HONOR_NANS (mode) || tree_expr_nonnegative_p (arg0))
1756 && speed_p
1757 && hw_sqrt_exists
1758 && real_equal (&c, &dconst1_6))
1760 /* sqrt(x) */
1761 sqrt_arg0 = build_and_insert_call (gsi, loc, sqrtfn, arg0);
1763 /* cbrt(sqrt(x)) */
1764 return build_and_insert_call (gsi, loc, cbrtfn, sqrt_arg0);
1768 /* Attempt to expand the POW as a product of square root chains.
1769      Expand the 0.25 case even when optimising for size.  */
1770 if (flag_unsafe_math_optimizations
1771 && sqrtfn
1772 && hw_sqrt_exists
1773 && (speed_p || real_equal (&c, &dconst1_4))
1774 && !HONOR_SIGNED_ZEROS (mode))
1776 unsigned int max_depth = speed_p
1777 ? PARAM_VALUE (PARAM_MAX_POW_SQRT_DEPTH)
1778 : 2;
1780 tree expand_with_sqrts
1781 = expand_pow_as_sqrts (gsi, loc, arg0, arg1, max_depth);
1783 if (expand_with_sqrts)
1784 return expand_with_sqrts;
1787 real_arithmetic (&c2, MULT_EXPR, &c, &dconst2);
1788 n = real_to_integer (&c2);
1789 real_from_integer (&cint, VOIDmode, n, SIGNED);
1790 c2_is_int = real_identical (&c2, &cint);
1792 /* Optimize pow(x,c), where 3c = n for some nonzero integer n, into
1794 powi(x, n/3) * powi(cbrt(x), n%3), n > 0;
1795 1.0 / (powi(x, abs(n)/3) * powi(cbrt(x), abs(n)%3)), n < 0.
1797 Do not calculate the first factor when n/3 = 0. As cbrt(x) is
1798 different from pow(x, 1./3.) due to rounding and behavior with
1799 negative x, we need to constrain this transformation to unsafe
1800 math and positive x or finite math. */
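  /* For example (hypothetical input), pow (x, 7./3.) gives n = 7 and is
     expanded as powi (x, 2) * cbrt (x); pow (x, -7./3.) becomes
     1.0 / (powi (x, 2) * cbrt (x)).  */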
1801 real_from_integer (&dconst3, VOIDmode, 3, SIGNED);
1802 real_arithmetic (&c2, MULT_EXPR, &c, &dconst3);
1803 real_round (&c2, mode, &c2);
1804 n = real_to_integer (&c2);
1805 real_from_integer (&cint, VOIDmode, n, SIGNED);
1806 real_arithmetic (&c2, RDIV_EXPR, &cint, &dconst3);
1807 real_convert (&c2, mode, &c2);
1809 if (flag_unsafe_math_optimizations
1810 && cbrtfn
1811 && (!HONOR_NANS (mode) || tree_expr_nonnegative_p (arg0))
1812 && real_identical (&c2, &c)
1813 && !c2_is_int
1814 && optimize_function_for_speed_p (cfun)
1815 && powi_cost (n / 3) <= POWI_MAX_MULTS)
1817 tree powi_x_ndiv3 = NULL_TREE;
1819 /* Attempt to fold powi(arg0, abs(n/3)) into multiplies. If not
1820 possible or profitable, give up. Skip the degenerate case when
1821 abs(n) < 3, where the result is always 1. */
1822 if (absu_hwi (n) >= 3)
1824 powi_x_ndiv3 = gimple_expand_builtin_powi (gsi, loc, arg0,
1825 abs_hwi (n / 3));
1826 if (!powi_x_ndiv3)
1827 return NULL_TREE;
1830 /* Calculate powi(cbrt(x), n%3). Don't use gimple_expand_builtin_powi
1831 as that creates an unnecessary variable. Instead, just produce
1832 either cbrt(x) or cbrt(x) * cbrt(x). */
1833 cbrt_x = build_and_insert_call (gsi, loc, cbrtfn, arg0);
1835 if (absu_hwi (n) % 3 == 1)
1836 powi_cbrt_x = cbrt_x;
1837 else
1838 powi_cbrt_x = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
1839 cbrt_x, cbrt_x);
1841 /* Multiply the two subexpressions, unless powi(x,abs(n)/3) = 1. */
1842 if (absu_hwi (n) < 3)
1843 result = powi_cbrt_x;
1844 else
1845 result = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
1846 powi_x_ndiv3, powi_cbrt_x);
1848 /* If n is negative, reciprocate the result. */
1849 if (n < 0)
1850 result = build_and_insert_binop (gsi, loc, "powroot", RDIV_EXPR,
1851 build_real (type, dconst1), result);
1853 return result;
1856 /* No optimizations succeeded. */
1857 return NULL_TREE;
1860 /* ARG is the argument to a cabs builtin call in GSI with location info
1861 LOC. Create a sequence of statements prior to GSI that calculates
1862 sqrt(R*R + I*I), where R and I are the real and imaginary components
1863 of ARG, respectively. Return an expression holding the result. */
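/* For example (illustration): for ARG = 3.0 + 4.0i the emitted sequence
   computes sqrt (3.0*3.0 + 4.0*4.0) = 5.0 using two component refs, two
   multiplies, one addition and one sqrt call.  */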
1865 static tree
1866 gimple_expand_builtin_cabs (gimple_stmt_iterator *gsi, location_t loc, tree arg)
1868 tree real_part, imag_part, addend1, addend2, sum, result;
1869 tree type = TREE_TYPE (TREE_TYPE (arg));
1870 tree sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT);
1871 machine_mode mode = TYPE_MODE (type);
1873 if (!flag_unsafe_math_optimizations
1874 || !optimize_bb_for_speed_p (gimple_bb (gsi_stmt (*gsi)))
1875 || !sqrtfn
1876 || optab_handler (sqrt_optab, mode) == CODE_FOR_nothing)
1877 return NULL_TREE;
1879 real_part = build_and_insert_ref (gsi, loc, type, "cabs",
1880 REALPART_EXPR, arg);
1881 addend1 = build_and_insert_binop (gsi, loc, "cabs", MULT_EXPR,
1882 real_part, real_part);
1883 imag_part = build_and_insert_ref (gsi, loc, type, "cabs",
1884 IMAGPART_EXPR, arg);
1885 addend2 = build_and_insert_binop (gsi, loc, "cabs", MULT_EXPR,
1886 imag_part, imag_part);
1887 sum = build_and_insert_binop (gsi, loc, "cabs", PLUS_EXPR, addend1, addend2);
1888 result = build_and_insert_call (gsi, loc, sqrtfn, sum);
1890 return result;
1893 /* Go through all calls to sin, cos and cexpi and call execute_cse_sincos_1
1894 on the SSA_NAME argument of each of them. Also expand powi(x,n) into
1895 an optimal number of multiplies, when n is a constant. */
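/* For example (illustration): powi (x, 5) is expanded to roughly
     t1 = x * x;  t2 = t1 * t1;  result = t2 * x;
   i.e. three multiplies instead of a library call.  */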
1897 namespace {
1899 const pass_data pass_data_cse_sincos =
1901 GIMPLE_PASS, /* type */
1902 "sincos", /* name */
1903 OPTGROUP_NONE, /* optinfo_flags */
1904 TV_TREE_SINCOS, /* tv_id */
1905 PROP_ssa, /* properties_required */
1906 PROP_gimple_opt_math, /* properties_provided */
1907 0, /* properties_destroyed */
1908 0, /* todo_flags_start */
1909 TODO_update_ssa, /* todo_flags_finish */
1912 class pass_cse_sincos : public gimple_opt_pass
1914 public:
1915 pass_cse_sincos (gcc::context *ctxt)
1916 : gimple_opt_pass (pass_data_cse_sincos, ctxt)
1919 /* opt_pass methods: */
1920 virtual bool gate (function *)
1922 /* We no longer require either sincos or cexp, since powi expansion
1923 piggybacks on this pass. */
1924 return optimize;
1927 virtual unsigned int execute (function *);
1929 }; // class pass_cse_sincos
1931 unsigned int
1932 pass_cse_sincos::execute (function *fun)
1934 basic_block bb;
1935 bool cfg_changed = false;
1937 calculate_dominance_info (CDI_DOMINATORS);
1938 memset (&sincos_stats, 0, sizeof (sincos_stats));
1940 FOR_EACH_BB_FN (bb, fun)
1942 gimple_stmt_iterator gsi;
1943 bool cleanup_eh = false;
1945 for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1947 gimple *stmt = gsi_stmt (gsi);
1949 /* Only the last stmt in a bb could throw, no need to call
1950 gimple_purge_dead_eh_edges if we change something in the middle
1951 of a basic block. */
1952 cleanup_eh = false;
1954 if (is_gimple_call (stmt)
1955 && gimple_call_lhs (stmt))
1957 tree arg, arg0, arg1, result;
1958 HOST_WIDE_INT n;
1959 location_t loc;
1961 switch (gimple_call_combined_fn (stmt))
1963 CASE_CFN_COS:
1964 CASE_CFN_SIN:
1965 CASE_CFN_CEXPI:
1966 /* Make sure we have either sincos or cexp. */
1967 if (!targetm.libc_has_function (function_c99_math_complex)
1968 && !targetm.libc_has_function (function_sincos))
1969 break;
1971 arg = gimple_call_arg (stmt, 0);
1972 if (TREE_CODE (arg) == SSA_NAME)
1973 cfg_changed |= execute_cse_sincos_1 (arg);
1974 break;
1976 CASE_CFN_POW:
1977 arg0 = gimple_call_arg (stmt, 0);
1978 arg1 = gimple_call_arg (stmt, 1);
1980 loc = gimple_location (stmt);
1981 result = gimple_expand_builtin_pow (&gsi, loc, arg0, arg1);
1983 if (result)
1985 tree lhs = gimple_get_lhs (stmt);
1986 gassign *new_stmt = gimple_build_assign (lhs, result);
1987 gimple_set_location (new_stmt, loc);
1988 unlink_stmt_vdef (stmt);
1989 gsi_replace (&gsi, new_stmt, true);
1990 cleanup_eh = true;
1991 if (gimple_vdef (stmt))
1992 release_ssa_name (gimple_vdef (stmt));
1994 break;
1996 CASE_CFN_POWI:
1997 arg0 = gimple_call_arg (stmt, 0);
1998 arg1 = gimple_call_arg (stmt, 1);
1999 loc = gimple_location (stmt);
2001 if (real_minus_onep (arg0))
2003 tree t0, t1, cond, one, minus_one;
2004 gassign *stmt;
2006 t0 = TREE_TYPE (arg0);
2007 t1 = TREE_TYPE (arg1);
2008 one = build_real (t0, dconst1);
2009 minus_one = build_real (t0, dconstm1);
2011 cond = make_temp_ssa_name (t1, NULL, "powi_cond");
2012 stmt = gimple_build_assign (cond, BIT_AND_EXPR,
2013 arg1, build_int_cst (t1, 1));
2014 gimple_set_location (stmt, loc);
2015 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
2017 result = make_temp_ssa_name (t0, NULL, "powi");
2018 stmt = gimple_build_assign (result, COND_EXPR, cond,
2019 minus_one, one);
2020 gimple_set_location (stmt, loc);
2021 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
2023 else
2025 if (!tree_fits_shwi_p (arg1))
2026 break;
2028 n = tree_to_shwi (arg1);
2029 result = gimple_expand_builtin_powi (&gsi, loc, arg0, n);
2032 if (result)
2034 tree lhs = gimple_get_lhs (stmt);
2035 gassign *new_stmt = gimple_build_assign (lhs, result);
2036 gimple_set_location (new_stmt, loc);
2037 unlink_stmt_vdef (stmt);
2038 gsi_replace (&gsi, new_stmt, true);
2039 cleanup_eh = true;
2040 if (gimple_vdef (stmt))
2041 release_ssa_name (gimple_vdef (stmt));
2043 break;
2045 CASE_CFN_CABS:
2046 arg0 = gimple_call_arg (stmt, 0);
2047 loc = gimple_location (stmt);
2048 result = gimple_expand_builtin_cabs (&gsi, loc, arg0);
2050 if (result)
2052 tree lhs = gimple_get_lhs (stmt);
2053 gassign *new_stmt = gimple_build_assign (lhs, result);
2054 gimple_set_location (new_stmt, loc);
2055 unlink_stmt_vdef (stmt);
2056 gsi_replace (&gsi, new_stmt, true);
2057 cleanup_eh = true;
2058 if (gimple_vdef (stmt))
2059 release_ssa_name (gimple_vdef (stmt));
2061 break;
2063 default:;
2067 if (cleanup_eh)
2068 cfg_changed |= gimple_purge_dead_eh_edges (bb);
2071 statistics_counter_event (fun, "sincos statements inserted",
2072 sincos_stats.inserted);
2074 return cfg_changed ? TODO_cleanup_cfg : 0;
2077 } // anon namespace
2079 gimple_opt_pass *
2080 make_pass_cse_sincos (gcc::context *ctxt)
2082 return new pass_cse_sincos (ctxt);
2085 /* Return true if stmt is a type conversion operation that can be stripped
2086 when used in a widening multiply operation. */
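/* For example (illustration, 16-bit short, 32-bit int, 64-bit result type):
   the conversion  i_2 = (int) s_1  of a signed short s_1 is strippable,
   since it only widens and preserves signedness.  */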
2087 static bool
2088 widening_mult_conversion_strippable_p (tree result_type, gimple *stmt)
2090 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
2092 if (TREE_CODE (result_type) == INTEGER_TYPE)
2094 tree op_type;
2095 tree inner_op_type;
2097 if (!CONVERT_EXPR_CODE_P (rhs_code))
2098 return false;
2100 op_type = TREE_TYPE (gimple_assign_lhs (stmt));
2102 /* If the type of OP has the same precision as the result, then
2103 we can strip this conversion. The multiply operation will be
2104 selected to create the correct extension as a by-product. */
2105 if (TYPE_PRECISION (result_type) == TYPE_PRECISION (op_type))
2106 return true;
2108 /* We can also strip a conversion if it preserves the signed-ness of
2109 the operation and doesn't narrow the range. */
2110 inner_op_type = TREE_TYPE (gimple_assign_rhs1 (stmt));
2112 /* If the inner-most type is unsigned, then we can strip any
2113 intermediate widening operation. If it's signed, then the
2114 intermediate widening operation must also be signed. */
2115 if ((TYPE_UNSIGNED (inner_op_type)
2116 || TYPE_UNSIGNED (op_type) == TYPE_UNSIGNED (inner_op_type))
2117 && TYPE_PRECISION (op_type) > TYPE_PRECISION (inner_op_type))
2118 return true;
2120 return false;
2123 return rhs_code == FIXED_CONVERT_EXPR;
2126 /* Return true if RHS is a suitable operand for a widening multiplication,
2127 assuming a target type of TYPE.
2128 There are two cases:
2130 - RHS makes some value at least twice as wide. Store that value
2131 in *NEW_RHS_OUT if so, and store its type in *TYPE_OUT.
2133 - RHS is an integer constant. Store that value in *NEW_RHS_OUT if so,
2134 but leave *TYPE_OUT untouched. */
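/* For example (illustration, 32-bit int, 64-bit TYPE): if RHS is r_2 defined
   by  r_2 = (long) i_1,  then *NEW_RHS_OUT = i_1 and *TYPE_OUT = int;
   if RHS is the constant 123, *NEW_RHS_OUT = 123 and *TYPE_OUT stays NULL,
   to be filled in later from the other operand.  */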
2136 static bool
2137 is_widening_mult_rhs_p (tree type, tree rhs, tree *type_out,
2138 tree *new_rhs_out)
2140 gimple *stmt;
2141 tree type1, rhs1;
2143 if (TREE_CODE (rhs) == SSA_NAME)
2145 stmt = SSA_NAME_DEF_STMT (rhs);
2146 if (is_gimple_assign (stmt))
2148 if (! widening_mult_conversion_strippable_p (type, stmt))
2149 rhs1 = rhs;
2150 else
2152 rhs1 = gimple_assign_rhs1 (stmt);
2154 if (TREE_CODE (rhs1) == INTEGER_CST)
2156 *new_rhs_out = rhs1;
2157 *type_out = NULL;
2158 return true;
2162 else
2163 rhs1 = rhs;
2165 type1 = TREE_TYPE (rhs1);
2167 if (TREE_CODE (type1) != TREE_CODE (type)
2168 || TYPE_PRECISION (type1) * 2 > TYPE_PRECISION (type))
2169 return false;
2171 *new_rhs_out = rhs1;
2172 *type_out = type1;
2173 return true;
2176 if (TREE_CODE (rhs) == INTEGER_CST)
2178 *new_rhs_out = rhs;
2179 *type_out = NULL;
2180 return true;
2183 return false;
2186 /* Return true if STMT performs a widening multiplication, assuming the
2187 output type is TYPE. If so, store the unwidened types of the operands
2188 in *TYPE1_OUT and *TYPE2_OUT respectively. Also fill *RHS1_OUT and
2189 *RHS2_OUT such that converting those operands to types *TYPE1_OUT
2190 and *TYPE2_OUT would give the operands of the multiplication. */
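/* For example (illustration): for a multiplication whose rhs1 is defined by
   t_2 = (long) i_1 and whose rhs2 is the constant 2, *TYPE2_OUT is copied
   from the int operand (2 fits in int), and the operands are swapped when
   necessary so that the wider type always comes first.  */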
2192 static bool
2193 is_widening_mult_p (gimple *stmt,
2194 tree *type1_out, tree *rhs1_out,
2195 tree *type2_out, tree *rhs2_out)
2197 tree type = TREE_TYPE (gimple_assign_lhs (stmt));
2199 if (TREE_CODE (type) == INTEGER_TYPE)
2201 if (TYPE_OVERFLOW_TRAPS (type))
2202 return false;
2204 else if (TREE_CODE (type) != FIXED_POINT_TYPE)
2205 return false;
2207 if (!is_widening_mult_rhs_p (type, gimple_assign_rhs1 (stmt), type1_out,
2208 rhs1_out))
2209 return false;
2211 if (!is_widening_mult_rhs_p (type, gimple_assign_rhs2 (stmt), type2_out,
2212 rhs2_out))
2213 return false;
2215 if (*type1_out == NULL)
2217 if (*type2_out == NULL || !int_fits_type_p (*rhs1_out, *type2_out))
2218 return false;
2219 *type1_out = *type2_out;
2222 if (*type2_out == NULL)
2224 if (!int_fits_type_p (*rhs2_out, *type1_out))
2225 return false;
2226 *type2_out = *type1_out;
2229 /* Ensure that the larger of the two operands comes first. */
2230 if (TYPE_PRECISION (*type1_out) < TYPE_PRECISION (*type2_out))
2232 std::swap (*type1_out, *type2_out);
2233 std::swap (*rhs1_out, *rhs2_out);
2236 return true;
2239 /* Check to see if the CALL statement is an invocation of copysign
2240 with 1. being the first argument. */
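/* For example (illustration): this returns true for
   t_1 = __builtin_copysign (1.0, y_2) or t_1 = .COPYSIGN (1.0, y_2),
   and false when the first argument is anything other than 1.  */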
2241 static bool
2242 is_copysign_call_with_1 (gimple *call)
2244 gcall *c = dyn_cast <gcall *> (call);
2245 if (! c)
2246 return false;
2248 enum combined_fn code = gimple_call_combined_fn (c);
2250 if (code == CFN_LAST)
2251 return false;
2253 if (builtin_fn_p (code))
2255 switch (as_builtin_fn (code))
2257 CASE_FLT_FN (BUILT_IN_COPYSIGN):
2258 CASE_FLT_FN_FLOATN_NX (BUILT_IN_COPYSIGN):
2259 return real_onep (gimple_call_arg (c, 0));
2260 default:
2261 return false;
2265 if (internal_fn_p (code))
2267 switch (as_internal_fn (code))
2269 case IFN_COPYSIGN:
2270 return real_onep (gimple_call_arg (c, 0));
2271 default:
2272 return false;
2276 return false;
2279 /* Try to expand the pattern x * copysign (1, y) into xorsign (x, y).
2280    This only happens when the xorsign optab is defined.  If the
2281    pattern is not a xorsign pattern or if expansion fails, FALSE is
2282    returned; otherwise TRUE is returned. */
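/* For example (illustration): assuming the xorsign optab is available and
   t_3 has a single use,
     t_3 = __builtin_copysign (1.0, y_2);
     z_4 = x_1 * t_3;
   is rewritten to  z_4 = .XORSIGN (x_1, y_2);  */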
2283 static bool
2284 convert_expand_mult_copysign (gimple *stmt, gimple_stmt_iterator *gsi)
2286 tree treeop0, treeop1, lhs, type;
2287 location_t loc = gimple_location (stmt);
2288 lhs = gimple_assign_lhs (stmt);
2289 treeop0 = gimple_assign_rhs1 (stmt);
2290 treeop1 = gimple_assign_rhs2 (stmt);
2291 type = TREE_TYPE (lhs);
2292 machine_mode mode = TYPE_MODE (type);
2294 if (HONOR_SNANS (type))
2295 return false;
2297 if (TREE_CODE (treeop0) == SSA_NAME && TREE_CODE (treeop1) == SSA_NAME)
2299 gimple *call0 = SSA_NAME_DEF_STMT (treeop0);
2300 if (!has_single_use (treeop0) || !is_copysign_call_with_1 (call0))
2302 call0 = SSA_NAME_DEF_STMT (treeop1);
2303 if (!has_single_use (treeop1) || !is_copysign_call_with_1 (call0))
2304 return false;
2306 treeop1 = treeop0;
2308 if (optab_handler (xorsign_optab, mode) == CODE_FOR_nothing)
2309 return false;
2311 gcall *c = as_a<gcall*> (call0);
2312 treeop0 = gimple_call_arg (c, 1);
2314 gcall *call_stmt
2315 = gimple_build_call_internal (IFN_XORSIGN, 2, treeop1, treeop0);
2316 gimple_set_lhs (call_stmt, lhs);
2317 gimple_set_location (call_stmt, loc);
2318 gsi_replace (gsi, call_stmt, true);
2319 return true;
2322 return false;
2325 /* Process a single gimple statement STMT, which has a MULT_EXPR as
2326 its rhs, and try to convert it into a WIDEN_MULT_EXPR. The return
2327 value is true iff we converted the statement. */
2329 static bool
2330 convert_mult_to_widen (gimple *stmt, gimple_stmt_iterator *gsi)
2332 tree lhs, rhs1, rhs2, type, type1, type2;
2333 enum insn_code handler;
2334 scalar_int_mode to_mode, from_mode, actual_mode;
2335 optab op;
2336 int actual_precision;
2337 location_t loc = gimple_location (stmt);
2338 bool from_unsigned1, from_unsigned2;
2340 lhs = gimple_assign_lhs (stmt);
2341 type = TREE_TYPE (lhs);
2342 if (TREE_CODE (type) != INTEGER_TYPE)
2343 return false;
2345 if (!is_widening_mult_p (stmt, &type1, &rhs1, &type2, &rhs2))
2346 return false;
2348 to_mode = SCALAR_INT_TYPE_MODE (type);
2349 from_mode = SCALAR_INT_TYPE_MODE (type1);
2350 if (to_mode == from_mode)
2351 return false;
2353 from_unsigned1 = TYPE_UNSIGNED (type1);
2354 from_unsigned2 = TYPE_UNSIGNED (type2);
2356 if (from_unsigned1 && from_unsigned2)
2357 op = umul_widen_optab;
2358 else if (!from_unsigned1 && !from_unsigned2)
2359 op = smul_widen_optab;
2360 else
2361 op = usmul_widen_optab;
2363 handler = find_widening_optab_handler_and_mode (op, to_mode, from_mode,
2364 &actual_mode);
2366 if (handler == CODE_FOR_nothing)
2368 if (op != smul_widen_optab)
2370 /* We can use a signed multiply with unsigned types as long as
2371 there is a wider mode to use, or it is the smaller of the two
2372 types that is unsigned. Note that type1 >= type2, always. */
2373 if ((TYPE_UNSIGNED (type1)
2374 && TYPE_PRECISION (type1) == GET_MODE_PRECISION (from_mode))
2375 || (TYPE_UNSIGNED (type2)
2376 && TYPE_PRECISION (type2) == GET_MODE_PRECISION (from_mode)))
2378 if (!GET_MODE_WIDER_MODE (from_mode).exists (&from_mode)
2379 || GET_MODE_SIZE (to_mode) <= GET_MODE_SIZE (from_mode))
2380 return false;
2383 op = smul_widen_optab;
2384 handler = find_widening_optab_handler_and_mode (op, to_mode,
2385 from_mode,
2386 &actual_mode);
2388 if (handler == CODE_FOR_nothing)
2389 return false;
2391 from_unsigned1 = from_unsigned2 = false;
2393 else
2394 return false;
2397   /* Ensure that the inputs to the handler are in the correct precision
2398 for the opcode. This will be the full mode size. */
2399 actual_precision = GET_MODE_PRECISION (actual_mode);
2400 if (2 * actual_precision > TYPE_PRECISION (type))
2401 return false;
2402 if (actual_precision != TYPE_PRECISION (type1)
2403 || from_unsigned1 != TYPE_UNSIGNED (type1))
2404 rhs1 = build_and_insert_cast (gsi, loc,
2405 build_nonstandard_integer_type
2406 (actual_precision, from_unsigned1), rhs1);
2407 if (actual_precision != TYPE_PRECISION (type2)
2408 || from_unsigned2 != TYPE_UNSIGNED (type2))
2409 rhs2 = build_and_insert_cast (gsi, loc,
2410 build_nonstandard_integer_type
2411 (actual_precision, from_unsigned2), rhs2);
2413 /* Handle constants. */
2414 if (TREE_CODE (rhs1) == INTEGER_CST)
2415 rhs1 = fold_convert (type1, rhs1);
2416 if (TREE_CODE (rhs2) == INTEGER_CST)
2417 rhs2 = fold_convert (type2, rhs2);
2419 gimple_assign_set_rhs1 (stmt, rhs1);
2420 gimple_assign_set_rhs2 (stmt, rhs2);
2421 gimple_assign_set_rhs_code (stmt, WIDEN_MULT_EXPR);
2422 update_stmt (stmt);
2423 widen_mul_stats.widen_mults_inserted++;
2424 return true;
2427 /* Process a single gimple statement STMT, which is found at the
2428    iterator GSI and has either a PLUS_EXPR or a MINUS_EXPR as its
2429 rhs (given by CODE), and try to convert it into a
2430 WIDEN_MULT_PLUS_EXPR or a WIDEN_MULT_MINUS_EXPR. The return value
2431 is true iff we converted the statement. */
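/* For example (illustration, int -> long): when the target provides a
   widening multiply-accumulate pattern,
     t_3 = a_1 w* b_2;
     s_5 = t_3 + acc_4;
   is rewritten to  s_5 = WIDEN_MULT_PLUS_EXPR <a_1, b_2, acc_4>;
   provided t_3 has a single use.  */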
2433 static bool
2434 convert_plusminus_to_widen (gimple_stmt_iterator *gsi, gimple *stmt,
2435 enum tree_code code)
2437 gimple *rhs1_stmt = NULL, *rhs2_stmt = NULL;
2438 gimple *conv1_stmt = NULL, *conv2_stmt = NULL, *conv_stmt;
2439 tree type, type1, type2, optype;
2440 tree lhs, rhs1, rhs2, mult_rhs1, mult_rhs2, add_rhs;
2441 enum tree_code rhs1_code = ERROR_MARK, rhs2_code = ERROR_MARK;
2442 optab this_optab;
2443 enum tree_code wmult_code;
2444 enum insn_code handler;
2445 scalar_mode to_mode, from_mode, actual_mode;
2446 location_t loc = gimple_location (stmt);
2447 int actual_precision;
2448 bool from_unsigned1, from_unsigned2;
2450 lhs = gimple_assign_lhs (stmt);
2451 type = TREE_TYPE (lhs);
2452 if (TREE_CODE (type) != INTEGER_TYPE
2453 && TREE_CODE (type) != FIXED_POINT_TYPE)
2454 return false;
2456 if (code == MINUS_EXPR)
2457 wmult_code = WIDEN_MULT_MINUS_EXPR;
2458 else
2459 wmult_code = WIDEN_MULT_PLUS_EXPR;
2461 rhs1 = gimple_assign_rhs1 (stmt);
2462 rhs2 = gimple_assign_rhs2 (stmt);
2464 if (TREE_CODE (rhs1) == SSA_NAME)
2466 rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
2467 if (is_gimple_assign (rhs1_stmt))
2468 rhs1_code = gimple_assign_rhs_code (rhs1_stmt);
2471 if (TREE_CODE (rhs2) == SSA_NAME)
2473 rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
2474 if (is_gimple_assign (rhs2_stmt))
2475 rhs2_code = gimple_assign_rhs_code (rhs2_stmt);
2478 /* Allow for one conversion statement between the multiply
2479    and addition/subtraction statement.  If there is more than one
2480    conversion then we assume the extra conversions would invalidate
2481    this transformation.  If that's not the case then they should have
2482 been folded before now. */
2483 if (CONVERT_EXPR_CODE_P (rhs1_code))
2485 conv1_stmt = rhs1_stmt;
2486 rhs1 = gimple_assign_rhs1 (rhs1_stmt);
2487 if (TREE_CODE (rhs1) == SSA_NAME)
2489 rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
2490 if (is_gimple_assign (rhs1_stmt))
2491 rhs1_code = gimple_assign_rhs_code (rhs1_stmt);
2493 else
2494 return false;
2496 if (CONVERT_EXPR_CODE_P (rhs2_code))
2498 conv2_stmt = rhs2_stmt;
2499 rhs2 = gimple_assign_rhs1 (rhs2_stmt);
2500 if (TREE_CODE (rhs2) == SSA_NAME)
2502 rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
2503 if (is_gimple_assign (rhs2_stmt))
2504 rhs2_code = gimple_assign_rhs_code (rhs2_stmt);
2506 else
2507 return false;
2510 /* If code is WIDEN_MULT_EXPR then it would seem unnecessary to call
2511    is_widening_mult_p, but we still need the rhs values it returns.
2513 It might also appear that it would be sufficient to use the existing
2514 operands of the widening multiply, but that would limit the choice of
2515 multiply-and-accumulate instructions.
2517    If the widened-multiplication result has more than one use, it is
2518 probably wiser not to do the conversion. */
2519 if (code == PLUS_EXPR
2520 && (rhs1_code == MULT_EXPR || rhs1_code == WIDEN_MULT_EXPR))
2522 if (!has_single_use (rhs1)
2523 || !is_widening_mult_p (rhs1_stmt, &type1, &mult_rhs1,
2524 &type2, &mult_rhs2))
2525 return false;
2526 add_rhs = rhs2;
2527 conv_stmt = conv1_stmt;
2529 else if (rhs2_code == MULT_EXPR || rhs2_code == WIDEN_MULT_EXPR)
2531 if (!has_single_use (rhs2)
2532 || !is_widening_mult_p (rhs2_stmt, &type1, &mult_rhs1,
2533 &type2, &mult_rhs2))
2534 return false;
2535 add_rhs = rhs1;
2536 conv_stmt = conv2_stmt;
2538 else
2539 return false;
2541 to_mode = SCALAR_TYPE_MODE (type);
2542 from_mode = SCALAR_TYPE_MODE (type1);
2543 if (to_mode == from_mode)
2544 return false;
2546 from_unsigned1 = TYPE_UNSIGNED (type1);
2547 from_unsigned2 = TYPE_UNSIGNED (type2);
2548 optype = type1;
2550 /* There's no such thing as a mixed sign madd yet, so use a wider mode. */
2551 if (from_unsigned1 != from_unsigned2)
2553 if (!INTEGRAL_TYPE_P (type))
2554 return false;
2555 /* We can use a signed multiply with unsigned types as long as
2556 there is a wider mode to use, or it is the smaller of the two
2557 types that is unsigned. Note that type1 >= type2, always. */
2558 if ((from_unsigned1
2559 && TYPE_PRECISION (type1) == GET_MODE_PRECISION (from_mode))
2560 || (from_unsigned2
2561 && TYPE_PRECISION (type2) == GET_MODE_PRECISION (from_mode)))
2563 if (!GET_MODE_WIDER_MODE (from_mode).exists (&from_mode)
2564 || GET_MODE_SIZE (from_mode) >= GET_MODE_SIZE (to_mode))
2565 return false;
2568 from_unsigned1 = from_unsigned2 = false;
2569 optype = build_nonstandard_integer_type (GET_MODE_PRECISION (from_mode),
2570 false);
2573 /* If there was a conversion between the multiply and addition
2574 then we need to make sure it fits a multiply-and-accumulate.
2575    There should be a single mode change which does not change the
2576 value. */
2577 if (conv_stmt)
2579 /* We use the original, unmodified data types for this. */
2580 tree from_type = TREE_TYPE (gimple_assign_rhs1 (conv_stmt));
2581 tree to_type = TREE_TYPE (gimple_assign_lhs (conv_stmt));
2582 int data_size = TYPE_PRECISION (type1) + TYPE_PRECISION (type2);
2583 bool is_unsigned = TYPE_UNSIGNED (type1) && TYPE_UNSIGNED (type2);
2585 if (TYPE_PRECISION (from_type) > TYPE_PRECISION (to_type))
2587 /* Conversion is a truncate. */
2588 if (TYPE_PRECISION (to_type) < data_size)
2589 return false;
2591 else if (TYPE_PRECISION (from_type) < TYPE_PRECISION (to_type))
2593 /* Conversion is an extend. Check it's the right sort. */
2594 if (TYPE_UNSIGNED (from_type) != is_unsigned
2595 && !(is_unsigned && TYPE_PRECISION (from_type) > data_size))
2596 return false;
2598 /* else convert is a no-op for our purposes. */
2601 /* Verify that the machine can perform a widening multiply
2602 accumulate in this mode/signedness combination, otherwise
2603 this transformation is likely to pessimize code. */
2604 this_optab = optab_for_tree_code (wmult_code, optype, optab_default);
2605 handler = find_widening_optab_handler_and_mode (this_optab, to_mode,
2606 from_mode, &actual_mode);
2608 if (handler == CODE_FOR_nothing)
2609 return false;
2611   /* Ensure that the inputs to the handler are in the correct precision
2612 for the opcode. This will be the full mode size. */
2613 actual_precision = GET_MODE_PRECISION (actual_mode);
2614 if (actual_precision != TYPE_PRECISION (type1)
2615 || from_unsigned1 != TYPE_UNSIGNED (type1))
2616 mult_rhs1 = build_and_insert_cast (gsi, loc,
2617 build_nonstandard_integer_type
2618 (actual_precision, from_unsigned1),
2619 mult_rhs1);
2620 if (actual_precision != TYPE_PRECISION (type2)
2621 || from_unsigned2 != TYPE_UNSIGNED (type2))
2622 mult_rhs2 = build_and_insert_cast (gsi, loc,
2623 build_nonstandard_integer_type
2624 (actual_precision, from_unsigned2),
2625 mult_rhs2);
2627 if (!useless_type_conversion_p (type, TREE_TYPE (add_rhs)))
2628 add_rhs = build_and_insert_cast (gsi, loc, type, add_rhs);
2630 /* Handle constants. */
2631 if (TREE_CODE (mult_rhs1) == INTEGER_CST)
2632 mult_rhs1 = fold_convert (type1, mult_rhs1);
2633 if (TREE_CODE (mult_rhs2) == INTEGER_CST)
2634 mult_rhs2 = fold_convert (type2, mult_rhs2);
2636 gimple_assign_set_rhs_with_ops (gsi, wmult_code, mult_rhs1, mult_rhs2,
2637 add_rhs);
2638 update_stmt (gsi_stmt (*gsi));
2639 widen_mul_stats.maccs_inserted++;
2640 return true;
2643 /* Given a result MUL_RESULT which is a result of a multiplication of OP1 and
2644 OP2 and which we know is used in statements that can be, together with the
2645 multiplication, converted to FMAs, perform the transformation. */
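/* For example (illustration): given  tmp_3 = a_1 * b_2  with uses
     x_5 = tmp_3 + c_4;   and   y_7 = d_6 - tmp_3;
   the uses become  x_5 = .FMA (a_1, b_2, c_4)  and a negated-operand form
   that may fold to  y_7 = .FNMA (a_1, b_2, d_6).  */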
2647 static void
2648 convert_mult_to_fma_1 (tree mul_result, tree op1, tree op2)
2650 tree type = TREE_TYPE (mul_result);
2651 gimple *use_stmt;
2652 imm_use_iterator imm_iter;
2653 gcall *fma_stmt;
2655 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, mul_result)
2657 gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
2658 enum tree_code use_code;
2659 tree addop, mulop1 = op1, result = mul_result;
2660 bool negate_p = false;
2661 gimple_seq seq = NULL;
2663 if (is_gimple_debug (use_stmt))
2664 continue;
2666 use_code = gimple_assign_rhs_code (use_stmt);
2667 if (use_code == NEGATE_EXPR)
2669 result = gimple_assign_lhs (use_stmt);
2670 use_operand_p use_p;
2671 gimple *neguse_stmt;
2672 single_imm_use (gimple_assign_lhs (use_stmt), &use_p, &neguse_stmt);
2673 gsi_remove (&gsi, true);
2674 release_defs (use_stmt);
2676 use_stmt = neguse_stmt;
2677 gsi = gsi_for_stmt (use_stmt);
2678 use_code = gimple_assign_rhs_code (use_stmt);
2679 negate_p = true;
2682 if (gimple_assign_rhs1 (use_stmt) == result)
2684 addop = gimple_assign_rhs2 (use_stmt);
2685 /* a * b - c -> a * b + (-c) */
2686 if (gimple_assign_rhs_code (use_stmt) == MINUS_EXPR)
2687 addop = gimple_build (&seq, NEGATE_EXPR, type, addop);
2689 else
2691 addop = gimple_assign_rhs1 (use_stmt);
2692 /* a - b * c -> (-b) * c + a */
2693 if (gimple_assign_rhs_code (use_stmt) == MINUS_EXPR)
2694 negate_p = !negate_p;
2697 if (negate_p)
2698 mulop1 = gimple_build (&seq, NEGATE_EXPR, type, mulop1);
2700 if (seq)
2701 gsi_insert_seq_before (&gsi, seq, GSI_SAME_STMT);
2702 fma_stmt = gimple_build_call_internal (IFN_FMA, 3, mulop1, op2, addop);
2703 gimple_call_set_lhs (fma_stmt, gimple_assign_lhs (use_stmt));
2704 gimple_call_set_nothrow (fma_stmt, !stmt_can_throw_internal (use_stmt));
2705 gsi_replace (&gsi, fma_stmt, true);
2706 /* Follow all SSA edges so that we generate FMS, FNMA and FNMS
2707 regardless of where the negation occurs. */
2708 if (fold_stmt (&gsi, follow_all_ssa_edges))
2709 update_stmt (gsi_stmt (gsi));
2711 if (dump_file && (dump_flags & TDF_DETAILS))
2713 fprintf (dump_file, "Generated FMA ");
2714 print_gimple_stmt (dump_file, gsi_stmt (gsi), 0, 0);
2715 fprintf (dump_file, "\n");
2718 widen_mul_stats.fmas_inserted++;
2722 /* Data necessary to perform the actual transformation from a multiplication
2723    and an addition to an FMA, once the decision to do so has been taken, and
2724    to then delete the multiplication statement from the function IL. */
2726 struct fma_transformation_info
2728 gimple *mul_stmt;
2729 tree mul_result;
2730 tree op1;
2731 tree op2;
2734 /* Structure containing the current state of FMA deferring, i.e. whether we are
2735 deferring, whether to continue deferring, and all data necessary to come
2736 back and perform all deferred transformations. */
2738 class fma_deferring_state
2740 public:
2741 /* Class constructor. Pass true as PERFORM_DEFERRING in order to actually
2742 do any deferring. */
2744 fma_deferring_state (bool perform_deferring)
2745 : m_candidates (), m_mul_result_set (), m_initial_phi (NULL),
2746 m_last_result (NULL_TREE), m_deferring_p (perform_deferring) {}
2748    /* List of FMA candidates for which the transformation has been determined
2749       to be possible, but which, at this point in the BB analysis, we do not
2750       consider beneficial. */
2751 auto_vec<fma_transformation_info, 8> m_candidates;
2753 /* Set of results of multiplication that are part of an already deferred FMA
2754 candidates. */
2755 hash_set<tree> m_mul_result_set;
2757 /* The PHI that supposedly feeds back result of a FMA to another over loop
2758 boundary. */
2759 gphi *m_initial_phi;
2761 /* Result of the last produced FMA candidate or NULL if there has not been
2762 one. */
2763 tree m_last_result;
2765 /* If true, deferring might still be profitable. If false, transform all
2766 candidates and no longer defer. */
2767 bool m_deferring_p;
2770 /* Transform all deferred FMA candidates and mark STATE as no longer
2771 deferring. */
2773 static void
2774 cancel_fma_deferring (fma_deferring_state *state)
2776 if (!state->m_deferring_p)
2777 return;
2779 for (unsigned i = 0; i < state->m_candidates.length (); i++)
2781 if (dump_file && (dump_flags & TDF_DETAILS))
2782 fprintf (dump_file, "Generating deferred FMA\n");
2784 const fma_transformation_info &fti = state->m_candidates[i];
2785 convert_mult_to_fma_1 (fti.mul_result, fti.op1, fti.op2);
2787 gimple_stmt_iterator gsi = gsi_for_stmt (fti.mul_stmt);
2788 gsi_remove (&gsi, true);
2789 release_defs (fti.mul_stmt);
2791 state->m_deferring_p = false;
2794 /* If OP is an SSA name defined by a PHI node, return the PHI statement.
2795 Otherwise return NULL. */
2797 static gphi *
2798 result_of_phi (tree op)
2800 if (TREE_CODE (op) != SSA_NAME)
2801 return NULL;
2803 return dyn_cast <gphi *> (SSA_NAME_DEF_STMT (op));
2806 /* After processing statements of a BB and recording STATE, return true if the
2807    initial phi is fed by the last FMA candidate result or one such result from
2808 previously processed BBs marked in LAST_RESULT_SET. */
2810 static bool
2811 last_fma_candidate_feeds_initial_phi (fma_deferring_state *state,
2812 hash_set<tree> *last_result_set)
2814 ssa_op_iter iter;
2815 use_operand_p use;
2816 FOR_EACH_PHI_ARG (use, state->m_initial_phi, iter, SSA_OP_USE)
2818 tree t = USE_FROM_PTR (use);
2819 if (t == state->m_last_result
2820 || last_result_set->contains (t))
2821 return true;
2824 return false;
2827 /* Combine the multiplication at MUL_STMT with operands MULOP1 and MULOP2
2828 with uses in additions and subtractions to form fused multiply-add
2829 operations. Returns true if successful and MUL_STMT should be removed.
2831 If STATE indicates that we are deferring FMA transformation, that means
2832 that we do not produce FMAs for basic blocks which look like:
2834 <bb 6>
2835 # accumulator_111 = PHI <0.0(5), accumulator_66(6)>
2836 _65 = _14 * _16;
2837 accumulator_66 = _65 + accumulator_111;
2839    or its unrolled version, i.e. with several FMA candidates that feed the result
2840 of one into the addend of another. Instead, we add them to a list in STATE
2841 and if we later discover an FMA candidate that is not part of such a chain,
2842 we go back and perform all deferred past candidates. */
2844 static bool
2845 convert_mult_to_fma (gimple *mul_stmt, tree op1, tree op2,
2846 fma_deferring_state *state)
2848 tree mul_result = gimple_get_lhs (mul_stmt);
2849 tree type = TREE_TYPE (mul_result);
2850 gimple *use_stmt, *neguse_stmt;
2851 use_operand_p use_p;
2852 imm_use_iterator imm_iter;
2854 if (FLOAT_TYPE_P (type)
2855 && flag_fp_contract_mode == FP_CONTRACT_OFF)
2856 return false;
2858 /* We don't want to do bitfield reduction ops. */
2859 if (INTEGRAL_TYPE_P (type)
2860 && (!type_has_mode_precision_p (type) || TYPE_OVERFLOW_TRAPS (type)))
2861 return false;
2863 /* If the target doesn't support it, don't generate it. We assume that
2864 if fma isn't available then fms, fnma or fnms are not either. */
2865 optimization_type opt_type = bb_optimization_type (gimple_bb (mul_stmt));
2866 if (!direct_internal_fn_supported_p (IFN_FMA, type, opt_type))
2867 return false;
2869 /* If the multiplication has zero uses, it is kept around probably because
2870 of -fnon-call-exceptions. Don't optimize it away in that case,
2871    that is DCE's job. */
2872 if (has_zero_uses (mul_result))
2873 return false;
2875 bool check_defer
2876 = (state->m_deferring_p
2877 && (tree_to_shwi (TYPE_SIZE (type))
2878 <= PARAM_VALUE (PARAM_AVOID_FMA_MAX_BITS)));
2879 bool defer = check_defer;
2880 /* Make sure that the multiplication statement becomes dead after
2881 the transformation, thus that all uses are transformed to FMAs.
2882 This means we assume that an FMA operation has the same cost
2883 as an addition. */
2884 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, mul_result)
2886 enum tree_code use_code;
2887 tree result = mul_result;
2888 bool negate_p = false;
2890 use_stmt = USE_STMT (use_p);
2892 if (is_gimple_debug (use_stmt))
2893 continue;
2895      /* For now restrict this operation to single basic blocks.  In theory
2896 we would want to support sinking the multiplication in
2897 m = a*b;
2898 if ()
2899 ma = m + c;
2900 else
2901 d = m;
2902 to form a fma in the then block and sink the multiplication to the
2903 else block. */
2904 if (gimple_bb (use_stmt) != gimple_bb (mul_stmt))
2905 return false;
2907 if (!is_gimple_assign (use_stmt))
2908 return false;
2910 use_code = gimple_assign_rhs_code (use_stmt);
2912 /* A negate on the multiplication leads to FNMA. */
2913 if (use_code == NEGATE_EXPR)
2915 ssa_op_iter iter;
2916 use_operand_p usep;
2918 result = gimple_assign_lhs (use_stmt);
2920 /* Make sure the negate statement becomes dead with this
2921 single transformation. */
2922 if (!single_imm_use (gimple_assign_lhs (use_stmt),
2923 &use_p, &neguse_stmt))
2924 return false;
2926 /* Make sure the multiplication isn't also used on that stmt. */
2927 FOR_EACH_PHI_OR_STMT_USE (usep, neguse_stmt, iter, SSA_OP_USE)
2928 if (USE_FROM_PTR (usep) == mul_result)
2929 return false;
2931 /* Re-validate. */
2932 use_stmt = neguse_stmt;
2933 if (gimple_bb (use_stmt) != gimple_bb (mul_stmt))
2934 return false;
2935 if (!is_gimple_assign (use_stmt))
2936 return false;
2938 use_code = gimple_assign_rhs_code (use_stmt);
2939 negate_p = true;
2942 switch (use_code)
2944 case MINUS_EXPR:
2945 if (gimple_assign_rhs2 (use_stmt) == result)
2946 negate_p = !negate_p;
2947 break;
2948 case PLUS_EXPR:
2949 break;
2950 default:
2951 /* FMA can only be formed from PLUS and MINUS. */
2952 return false;
2955 /* If the subtrahend (gimple_assign_rhs2 (use_stmt)) is computed
2956 by a MULT_EXPR that we'll visit later, we might be able to
2957 get a more profitable match with fnma.
2958 OTOH, if we don't, a negate / fma pair has likely lower latency
2959    than a mult / subtract pair. */
2960 if (use_code == MINUS_EXPR && !negate_p
2961 && gimple_assign_rhs1 (use_stmt) == result
2962 && !direct_internal_fn_supported_p (IFN_FMS, type, opt_type)
2963 && direct_internal_fn_supported_p (IFN_FNMA, type, opt_type))
2965 tree rhs2 = gimple_assign_rhs2 (use_stmt);
2967 if (TREE_CODE (rhs2) == SSA_NAME)
2969 gimple *stmt2 = SSA_NAME_DEF_STMT (rhs2);
2970 if (has_single_use (rhs2)
2971 && is_gimple_assign (stmt2)
2972 && gimple_assign_rhs_code (stmt2) == MULT_EXPR)
2973 return false;
2977 tree use_rhs1 = gimple_assign_rhs1 (use_stmt);
2978 tree use_rhs2 = gimple_assign_rhs2 (use_stmt);
2979 /* We can't handle a * b + a * b. */
2980 if (use_rhs1 == use_rhs2)
2981 return false;
2982 /* If deferring, make sure we are not looking at an instruction that
2983 wouldn't have existed if we were not. */
2984 if (state->m_deferring_p
2985 && (state->m_mul_result_set.contains (use_rhs1)
2986 || state->m_mul_result_set.contains (use_rhs2)))
2987 return false;
2989 if (check_defer)
2991 tree use_lhs = gimple_assign_lhs (use_stmt);
2992 if (state->m_last_result)
2994 if (use_rhs2 == state->m_last_result
2995 || use_rhs1 == state->m_last_result)
2996 defer = true;
2997 else
2998 defer = false;
3000 else
3002 gcc_checking_assert (!state->m_initial_phi);
3003 gphi *phi;
3004 if (use_rhs1 == result)
3005 phi = result_of_phi (use_rhs2);
3006 else
3008 gcc_assert (use_rhs2 == result);
3009 phi = result_of_phi (use_rhs1);
3012 if (phi)
3014 state->m_initial_phi = phi;
3015 defer = true;
3017 else
3018 defer = false;
3021 state->m_last_result = use_lhs;
3022 check_defer = false;
3024 else
3025 defer = false;
3027 /* While it is possible to validate whether or not the exact form that
3028 we've recognized is available in the backend, the assumption is that
3029 if the deferring logic above did not trigger, the transformation is
3030 never a loss. For instance, suppose the target only has the plain FMA
3031 pattern available. Consider a*b-c -> fma(a,b,-c): we've exchanged
3032 MUL+SUB for FMA+NEG, which is still two operations. Consider
3033 -(a*b)-c -> fma(-a,b,-c): we still have 3 operations, but in the FMA
3034 form the two NEGs are independent and could be run in parallel. */
3037 if (defer)
3039 fma_transformation_info fti;
3040 fti.mul_stmt = mul_stmt;
3041 fti.mul_result = mul_result;
3042 fti.op1 = op1;
3043 fti.op2 = op2;
3044 state->m_candidates.safe_push (fti);
3045 state->m_mul_result_set.add (mul_result);
3047 if (dump_file && (dump_flags & TDF_DETAILS))
3049 fprintf (dump_file, "Deferred generating FMA for multiplication ");
3050 print_gimple_stmt (dump_file, mul_stmt, 0, 0);
3051 fprintf (dump_file, "\n");
3054 return false;
3056 else
3058 if (state->m_deferring_p)
3059 cancel_fma_deferring (state);
3060 convert_mult_to_fma_1 (mul_result, op1, op2);
3061 return true;
3066 /* Helper function of match_uaddsub_overflow. Return 1
3067 if USE_STMT is unsigned overflow check ovf != 0 for
3068 STMT, -1 if USE_STMT is unsigned overflow check ovf == 0
3069 and 0 otherwise. */
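/* For example (illustration): with STMT  r_3 = a_1 - b_2,  a USE_STMT of
   the form  if (r_3 > a_1)  yields 1, if (r_3 <= a_1)  yields -1, and any
   unrelated use yields 0.  */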
3071 static int
3072 uaddsub_overflow_check_p (gimple *stmt, gimple *use_stmt)
3074 enum tree_code ccode = ERROR_MARK;
3075 tree crhs1 = NULL_TREE, crhs2 = NULL_TREE;
3076 if (gimple_code (use_stmt) == GIMPLE_COND)
3078 ccode = gimple_cond_code (use_stmt);
3079 crhs1 = gimple_cond_lhs (use_stmt);
3080 crhs2 = gimple_cond_rhs (use_stmt);
3082 else if (is_gimple_assign (use_stmt))
3084 if (gimple_assign_rhs_class (use_stmt) == GIMPLE_BINARY_RHS)
3086 ccode = gimple_assign_rhs_code (use_stmt);
3087 crhs1 = gimple_assign_rhs1 (use_stmt);
3088 crhs2 = gimple_assign_rhs2 (use_stmt);
3090 else if (gimple_assign_rhs_code (use_stmt) == COND_EXPR)
3092 tree cond = gimple_assign_rhs1 (use_stmt);
3093 if (COMPARISON_CLASS_P (cond))
3095 ccode = TREE_CODE (cond);
3096 crhs1 = TREE_OPERAND (cond, 0);
3097 crhs2 = TREE_OPERAND (cond, 1);
3099 else
3100 return 0;
3102 else
3103 return 0;
3105 else
3106 return 0;
3108 if (TREE_CODE_CLASS (ccode) != tcc_comparison)
3109 return 0;
3111 enum tree_code code = gimple_assign_rhs_code (stmt);
3112 tree lhs = gimple_assign_lhs (stmt);
3113 tree rhs1 = gimple_assign_rhs1 (stmt);
3114 tree rhs2 = gimple_assign_rhs2 (stmt);
3116 switch (ccode)
3118 case GT_EXPR:
3119 case LE_EXPR:
3120 /* r = a - b; r > a or r <= a
3121 r = a + b; a > r or a <= r or b > r or b <= r. */
3122 if ((code == MINUS_EXPR && crhs1 == lhs && crhs2 == rhs1)
3123 || (code == PLUS_EXPR && (crhs1 == rhs1 || crhs1 == rhs2)
3124 && crhs2 == lhs))
3125 return ccode == GT_EXPR ? 1 : -1;
3126 break;
3127 case LT_EXPR:
3128 case GE_EXPR:
3129 /* r = a - b; a < r or a >= r
3130 r = a + b; r < a or r >= a or r < b or r >= b. */
3131 if ((code == MINUS_EXPR && crhs1 == rhs1 && crhs2 == lhs)
3132 || (code == PLUS_EXPR && crhs1 == lhs
3133 && (crhs2 == rhs1 || crhs2 == rhs2)))
3134 return ccode == LT_EXPR ? 1 : -1;
3135 break;
3136 default:
3137 break;
3139 return 0;
3142 /* Recognize for unsigned x
3143 x = y - z;
3144 if (x > y)
3145 where there are other uses of x and replace it with
3146 _7 = SUB_OVERFLOW (y, z);
3147 x = REALPART_EXPR <_7>;
3148 _8 = IMAGPART_EXPR <_7>;
3149 if (_8)
3150 and similarly for addition. */
3152 static bool
3153 match_uaddsub_overflow (gimple_stmt_iterator *gsi, gimple *stmt,
3154 enum tree_code code)
3156 tree lhs = gimple_assign_lhs (stmt);
3157 tree type = TREE_TYPE (lhs);
3158 use_operand_p use_p;
3159 imm_use_iterator iter;
3160 bool use_seen = false;
3161 bool ovf_use_seen = false;
3162 gimple *use_stmt;
3164 gcc_checking_assert (code == PLUS_EXPR || code == MINUS_EXPR);
3165 if (!INTEGRAL_TYPE_P (type)
3166 || !TYPE_UNSIGNED (type)
3167 || has_zero_uses (lhs)
3168 || has_single_use (lhs)
3169 || optab_handler (code == PLUS_EXPR ? uaddv4_optab : usubv4_optab,
3170 TYPE_MODE (type)) == CODE_FOR_nothing)
3171 return false;
3173 FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
3175 use_stmt = USE_STMT (use_p);
3176 if (is_gimple_debug (use_stmt))
3177 continue;
3179 if (uaddsub_overflow_check_p (stmt, use_stmt))
3180 ovf_use_seen = true;
3181 else
3182 use_seen = true;
3183 if (ovf_use_seen && use_seen)
3184 break;
3187 if (!ovf_use_seen || !use_seen)
3188 return false;
3190 tree ctype = build_complex_type (type);
3191 tree rhs1 = gimple_assign_rhs1 (stmt);
3192 tree rhs2 = gimple_assign_rhs2 (stmt);
3193 gcall *g = gimple_build_call_internal (code == PLUS_EXPR
3194 ? IFN_ADD_OVERFLOW : IFN_SUB_OVERFLOW,
3195 2, rhs1, rhs2);
3196 tree ctmp = make_ssa_name (ctype);
3197 gimple_call_set_lhs (g, ctmp);
3198 gsi_insert_before (gsi, g, GSI_SAME_STMT);
3199 gassign *g2 = gimple_build_assign (lhs, REALPART_EXPR,
3200 build1 (REALPART_EXPR, type, ctmp));
3201 gsi_replace (gsi, g2, true);
3202 tree ovf = make_ssa_name (type);
3203 g2 = gimple_build_assign (ovf, IMAGPART_EXPR,
3204 build1 (IMAGPART_EXPR, type, ctmp));
3205 gsi_insert_after (gsi, g2, GSI_NEW_STMT);
3207 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
3209 if (is_gimple_debug (use_stmt))
3210 continue;
3212 int ovf_use = uaddsub_overflow_check_p (stmt, use_stmt);
3213 if (ovf_use == 0)
3214 continue;
3215 if (gimple_code (use_stmt) == GIMPLE_COND)
3217 gcond *cond_stmt = as_a <gcond *> (use_stmt);
3218 gimple_cond_set_lhs (cond_stmt, ovf);
3219 gimple_cond_set_rhs (cond_stmt, build_int_cst (type, 0));
3220 gimple_cond_set_code (cond_stmt, ovf_use == 1 ? NE_EXPR : EQ_EXPR);
3222 else
3224 gcc_checking_assert (is_gimple_assign (use_stmt));
3225 if (gimple_assign_rhs_class (use_stmt) == GIMPLE_BINARY_RHS)
3227 gimple_assign_set_rhs1 (use_stmt, ovf);
3228 gimple_assign_set_rhs2 (use_stmt, build_int_cst (type, 0));
3229 gimple_assign_set_rhs_code (use_stmt,
3230 ovf_use == 1 ? NE_EXPR : EQ_EXPR);
3232 else
3234 gcc_checking_assert (gimple_assign_rhs_code (use_stmt)
3235 == COND_EXPR);
3236 tree cond = build2 (ovf_use == 1 ? NE_EXPR : EQ_EXPR,
3237 boolean_type_node, ovf,
3238 build_int_cst (type, 0));
3239 gimple_assign_set_rhs1 (use_stmt, cond);
3242 update_stmt (use_stmt);
3244 return true;
3247 /* Return true if target has support for divmod. */
3249 static bool
3250 target_supports_divmod_p (optab divmod_optab, optab div_optab, machine_mode mode)
3252 /* If target supports hardware divmod insn, use it for divmod. */
3253 if (optab_handler (divmod_optab, mode) != CODE_FOR_nothing)
3254 return true;
3256 /* Check if libfunc for divmod is available. */
3257 rtx libfunc = optab_libfunc (divmod_optab, mode);
3258 if (libfunc != NULL_RTX)
3260 /* If optab_handler exists for div_optab, perhaps in a wider mode,
3261    we don't want to use the libfunc even if it exists for the given mode. */
3262 machine_mode div_mode;
3263 FOR_EACH_MODE_FROM (div_mode, mode)
3264 if (optab_handler (div_optab, div_mode) != CODE_FOR_nothing)
3265 return false;
3267 return targetm.expand_divmod_libfunc != NULL;
3270 return false;
3273 /* Check if stmt is candidate for divmod transform. */
3275 static bool
3276 divmod_candidate_p (gassign *stmt)
3278 tree type = TREE_TYPE (gimple_assign_lhs (stmt));
3279 machine_mode mode = TYPE_MODE (type);
3280 optab divmod_optab, div_optab;
3282 if (TYPE_UNSIGNED (type))
3284 divmod_optab = udivmod_optab;
3285 div_optab = udiv_optab;
3287 else
3289 divmod_optab = sdivmod_optab;
3290 div_optab = sdiv_optab;
3293 tree op1 = gimple_assign_rhs1 (stmt);
3294 tree op2 = gimple_assign_rhs2 (stmt);
3296 /* Disable the transform if either is a constant, since division-by-constant
3297 may have specialized expansion. */
3298 if (CONSTANT_CLASS_P (op1) || CONSTANT_CLASS_P (op2))
3299 return false;
3301 /* Exclude the case where TYPE_OVERFLOW_TRAPS (type) as that should
3302 expand using the [su]divv optabs. */
3303 if (TYPE_OVERFLOW_TRAPS (type))
3304 return false;
3306 if (!target_supports_divmod_p (divmod_optab, div_optab, mode))
3307 return false;
3309 return true;
3312 /* This function looks for:
3313 t1 = a TRUNC_DIV_EXPR b;
3314 t2 = a TRUNC_MOD_EXPR b;
3315 and transforms it to the following sequence:
3316 complex_tmp = DIVMOD (a, b);
3317    t1 = REALPART_EXPR(complex_tmp);
3318    t2 = IMAGPART_EXPR(complex_tmp);
3319 For conditions enabling the transform see divmod_candidate_p().
3321 The pass has three parts:
3322 1) Find top_stmt which is trunc_div or trunc_mod stmt and dominates all
3323 other trunc_div_expr and trunc_mod_expr stmts.
3324 2) Add top_stmt and all trunc_div and trunc_mod stmts dominated by top_stmt
3325 to stmts vector.
3326 3) Insert DIVMOD call just before top_stmt and update entries in
3327    stmts vector to use the return value of DIVMOD (REALPART_EXPR for div,
3328 IMAGPART_EXPR for mod). */
3330 static bool
3331 convert_to_divmod (gassign *stmt)
3333 if (stmt_can_throw_internal (stmt)
3334 || !divmod_candidate_p (stmt))
3335 return false;
3337 tree op1 = gimple_assign_rhs1 (stmt);
3338 tree op2 = gimple_assign_rhs2 (stmt);
3340 imm_use_iterator use_iter;
3341 gimple *use_stmt;
3342 auto_vec<gimple *> stmts;
3344 gimple *top_stmt = stmt;
3345 basic_block top_bb = gimple_bb (stmt);
3347 /* Part 1: Try to set top_stmt to "topmost" stmt that dominates
3348 at-least stmt and possibly other trunc_div/trunc_mod stmts
3349 having same operands as stmt. */
3351 FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, op1)
3353 if (is_gimple_assign (use_stmt)
3354 && (gimple_assign_rhs_code (use_stmt) == TRUNC_DIV_EXPR
3355 || gimple_assign_rhs_code (use_stmt) == TRUNC_MOD_EXPR)
3356 && operand_equal_p (op1, gimple_assign_rhs1 (use_stmt), 0)
3357 && operand_equal_p (op2, gimple_assign_rhs2 (use_stmt), 0))
3359 if (stmt_can_throw_internal (use_stmt))
3360 continue;
3362 basic_block bb = gimple_bb (use_stmt);
3364 if (bb == top_bb)
3366 if (gimple_uid (use_stmt) < gimple_uid (top_stmt))
3367 top_stmt = use_stmt;
3369 else if (dominated_by_p (CDI_DOMINATORS, top_bb, bb))
3371 top_bb = bb;
3372 top_stmt = use_stmt;
3377 tree top_op1 = gimple_assign_rhs1 (top_stmt);
3378 tree top_op2 = gimple_assign_rhs2 (top_stmt);
3380 stmts.safe_push (top_stmt);
3381 bool div_seen = (gimple_assign_rhs_code (top_stmt) == TRUNC_DIV_EXPR);
3383   /* Part 2: Add all trunc_div/trunc_mod statements dominated by top_bb
3384 to stmts vector. The 2nd loop will always add stmt to stmts vector, since
3385 gimple_bb (top_stmt) dominates gimple_bb (stmt), so the
3386    2nd loop ends up adding at least one trunc_mod_expr stmt. */
3388 FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, top_op1)
3390 if (is_gimple_assign (use_stmt)
3391 && (gimple_assign_rhs_code (use_stmt) == TRUNC_DIV_EXPR
3392 || gimple_assign_rhs_code (use_stmt) == TRUNC_MOD_EXPR)
3393 && operand_equal_p (top_op1, gimple_assign_rhs1 (use_stmt), 0)
3394 && operand_equal_p (top_op2, gimple_assign_rhs2 (use_stmt), 0))
3396 if (use_stmt == top_stmt
3397 || stmt_can_throw_internal (use_stmt)
3398 || !dominated_by_p (CDI_DOMINATORS, gimple_bb (use_stmt), top_bb))
3399 continue;
3401 stmts.safe_push (use_stmt);
3402 if (gimple_assign_rhs_code (use_stmt) == TRUNC_DIV_EXPR)
3403 div_seen = true;
3407 if (!div_seen)
3408 return false;
3410 /* Part 3: Create libcall to internal fn DIVMOD:
3411 divmod_tmp = DIVMOD (op1, op2). */
3413 gcall *call_stmt = gimple_build_call_internal (IFN_DIVMOD, 2, op1, op2);
3414 tree res = make_temp_ssa_name (build_complex_type (TREE_TYPE (op1)),
3415 call_stmt, "divmod_tmp");
3416 gimple_call_set_lhs (call_stmt, res);
3417 /* We rejected throwing statements above. */
3418 gimple_call_set_nothrow (call_stmt, true);
3420 /* Insert the call before top_stmt. */
3421 gimple_stmt_iterator top_stmt_gsi = gsi_for_stmt (top_stmt);
3422 gsi_insert_before (&top_stmt_gsi, call_stmt, GSI_SAME_STMT);
3424 widen_mul_stats.divmod_calls_inserted++;
3426 /* Update all statements in stmts vector:
3427 lhs = op1 TRUNC_DIV_EXPR op2 -> lhs = REALPART_EXPR<divmod_tmp>
3428 lhs = op1 TRUNC_MOD_EXPR op2 -> lhs = IMAGPART_EXPR<divmod_tmp>. */
3430 for (unsigned i = 0; stmts.iterate (i, &use_stmt); ++i)
3432 tree new_rhs;
3434 switch (gimple_assign_rhs_code (use_stmt))
3436 case TRUNC_DIV_EXPR:
3437 new_rhs = fold_build1 (REALPART_EXPR, TREE_TYPE (op1), res);
3438 break;
3440 case TRUNC_MOD_EXPR:
3441 new_rhs = fold_build1 (IMAGPART_EXPR, TREE_TYPE (op1), res);
3442 break;
3444 default:
3445 gcc_unreachable ();
3448 gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
3449 gimple_assign_set_rhs_from_tree (&gsi, new_rhs);
3450 update_stmt (use_stmt);
3453 return true;
3456 /* Find integer multiplications where the operands are extended from
3457 smaller types, and replace the MULT_EXPR with a WIDEN_MULT_EXPR
3458 where appropriate. */
3460 namespace {
3462 const pass_data pass_data_optimize_widening_mul =
3464 GIMPLE_PASS, /* type */
3465 "widening_mul", /* name */
3466 OPTGROUP_NONE, /* optinfo_flags */
3467 TV_TREE_WIDEN_MUL, /* tv_id */
3468 PROP_ssa, /* properties_required */
3469 0, /* properties_provided */
3470 0, /* properties_destroyed */
3471 0, /* todo_flags_start */
3472 TODO_update_ssa, /* todo_flags_finish */
3475 class pass_optimize_widening_mul : public gimple_opt_pass
3477 public:
3478 pass_optimize_widening_mul (gcc::context *ctxt)
3479 : gimple_opt_pass (pass_data_optimize_widening_mul, ctxt)
3482 /* opt_pass methods: */
3483 virtual bool gate (function *)
3485 return flag_expensive_optimizations && optimize;
3488 virtual unsigned int execute (function *);
3490 }; // class pass_optimize_widening_mul
3492 /* Walker class to perform the transformation in reverse dominance order. */
3494 class math_opts_dom_walker : public dom_walker
3496 public:
3497 /* Constructor, CFG_CHANGED is a pointer to a boolean flag that will be set
3498       if walking modifies the CFG. */
3500 math_opts_dom_walker (bool *cfg_changed_p)
3501 : dom_walker (CDI_DOMINATORS), m_last_result_set (),
3502 m_cfg_changed_p (cfg_changed_p) {}
3504 /* The actual actions performed in the walk. */
3506 virtual void after_dom_children (basic_block);
3508 /* Set of results of chains of multiply and add statement combinations that
3509 were not transformed into FMAs because of active deferring. */
3510 hash_set<tree> m_last_result_set;
3512 /* Pointer to a flag of the user that needs to be set if CFG has been
3513 modified. */
3514 bool *m_cfg_changed_p;
3517 void
3518 math_opts_dom_walker::after_dom_children (basic_block bb)
3520 gimple_stmt_iterator gsi;
3522 fma_deferring_state fma_state (PARAM_VALUE (PARAM_AVOID_FMA_MAX_BITS) > 0);
3524 for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi);)
3526 gimple *stmt = gsi_stmt (gsi);
3527 enum tree_code code;
3529 if (is_gimple_assign (stmt))
3531 code = gimple_assign_rhs_code (stmt);
3532 switch (code)
3534 case MULT_EXPR:
3535 if (!convert_mult_to_widen (stmt, &gsi)
3536 && !convert_expand_mult_copysign (stmt, &gsi)
3537 && convert_mult_to_fma (stmt,
3538 gimple_assign_rhs1 (stmt),
3539 gimple_assign_rhs2 (stmt),
3540 &fma_state))
3542 gsi_remove (&gsi, true);
3543 release_defs (stmt);
3544 continue;
3546 break;
3548 case PLUS_EXPR:
3549 case MINUS_EXPR:
3550 if (!convert_plusminus_to_widen (&gsi, stmt, code))
3551 match_uaddsub_overflow (&gsi, stmt, code);
3552 break;
3554 case TRUNC_MOD_EXPR:
3555 convert_to_divmod (as_a<gassign *> (stmt));
3556 break;
3558 default:;
3561 else if (is_gimple_call (stmt))
3563 tree fndecl = gimple_call_fndecl (stmt);
3564 if (fndecl && gimple_call_builtin_p (stmt, BUILT_IN_NORMAL))
3566 switch (DECL_FUNCTION_CODE (fndecl))
3568 case BUILT_IN_POWF:
3569 case BUILT_IN_POW:
3570 case BUILT_IN_POWL:
3571 if (gimple_call_lhs (stmt)
3572 && TREE_CODE (gimple_call_arg (stmt, 1)) == REAL_CST
3573 && real_equal
3574 (&TREE_REAL_CST (gimple_call_arg (stmt, 1)),
3575 &dconst2)
3576 && convert_mult_to_fma (stmt,
3577 gimple_call_arg (stmt, 0),
3578 gimple_call_arg (stmt, 0),
3579 &fma_state))
3581 unlink_stmt_vdef (stmt);
3582 if (gsi_remove (&gsi, true)
3583 && gimple_purge_dead_eh_edges (bb))
3584 *m_cfg_changed_p = true;
3585 release_defs (stmt);
3586 continue;
3588 break;
3590 default:;
3593 else
3594 cancel_fma_deferring (&fma_state);
3596 gsi_next (&gsi);
3598 if (fma_state.m_deferring_p
3599 && fma_state.m_initial_phi)
3601 gcc_checking_assert (fma_state.m_last_result);
3602 if (!last_fma_candidate_feeds_initial_phi (&fma_state,
3603 &m_last_result_set))
3604 cancel_fma_deferring (&fma_state);
3605 else
3606 m_last_result_set.add (fma_state.m_last_result);
3611 unsigned int
3612 pass_optimize_widening_mul::execute (function *fun)
3614 bool cfg_changed = false;
3616 memset (&widen_mul_stats, 0, sizeof (widen_mul_stats));
3617 calculate_dominance_info (CDI_DOMINATORS);
3618 renumber_gimple_stmt_uids ();
3620 math_opts_dom_walker (&cfg_changed).walk (ENTRY_BLOCK_PTR_FOR_FN (cfun));
3622 statistics_counter_event (fun, "widening multiplications inserted",
3623 widen_mul_stats.widen_mults_inserted);
3624 statistics_counter_event (fun, "widening maccs inserted",
3625 widen_mul_stats.maccs_inserted);
3626 statistics_counter_event (fun, "fused multiply-adds inserted",
3627 widen_mul_stats.fmas_inserted);
3628 statistics_counter_event (fun, "divmod calls inserted",
3629 widen_mul_stats.divmod_calls_inserted);
3631 return cfg_changed ? TODO_cleanup_cfg : 0;
3634 } // anon namespace
3636 gimple_opt_pass *
3637 make_pass_optimize_widening_mul (gcc::context *ctxt)
3639 return new pass_optimize_widening_mul (ctxt);