/* Global, SSA-based optimizations using mathematical identities.
   Copyright (C) 2005-2016 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* Currently, the only mini-pass in this file tries to CSE reciprocal
   operations.  These are common in sequences such as this one:

	modulus = sqrt(x*x + y*y + z*z);
	x = x / modulus;
	y = y / modulus;
	z = z / modulus;

   that can be optimized to

	modulus = sqrt(x*x + y*y + z*z);
	rmodulus = 1.0 / modulus;
	x = x * rmodulus;
	y = y * rmodulus;
	z = z * rmodulus;

   We do this for loop invariant divisors, and with this pass whenever
   we notice that a division has the same divisor multiple times.

   Of course, like in PRE, we don't insert a division if a dominator
   already has one.  However, this cannot be done as an extension of
   PRE for several reasons.

   First of all, with some experiments it was found out that the
   transformation is not always useful if there are only two divisions
   by the same divisor.  This is probably because modern processors
   can pipeline the divisions; on older, in-order processors it should
   still be effective to optimize two divisions by the same number.
   We make this a param, and it shall be called N in the remainder of
   this comment.

   Second, if trapping math is active, we have less freedom on where
   to insert divisions: we can only do so in basic blocks that already
   contain one.  (If divisions don't trap, instead, we can insert
   divisions elsewhere, which will be in blocks that are common dominators
   of those that have the division).

   We really don't want to compute the reciprocal unless a division will
   be found.  To do this, we won't insert the division in a basic block
   that has fewer than N divisions *post-dominating* it.

   The algorithm constructs a subset of the dominator tree, holding the
   blocks containing the divisions and the common dominators to them,
   and walks it twice.  The first walk is in post-order, and it annotates
   each block with the number of divisions that post-dominate it: this
   gives information on where divisions can be inserted profitably.
   The second walk is in pre-order, and it inserts divisions as explained
   above, and replaces divisions by multiplications.

   In the best case, the cost of the pass is O(n_statements).  In the
   worst-case, the cost is due to creating the dominator tree subset,
   with a cost of O(n_basic_blocks ^ 2); however this can only happen
   for n_statements / n_basic_blocks statements.  So, the amortized cost
   of creating the dominator tree subset is O(n_basic_blocks) and the
   worst-case cost of the pass is O(n_statements * n_basic_blocks).

   More practically, the cost will be small because there are few
   divisions, and they tend to be in the same basic block, so insert_bb
   is called very few times.

   If we did this using domwalk.c, an efficient implementation would have
   to work on all the variables in a single pass, because we could not
   work on just a subset of the dominator tree, as we do now, and the
   cost would also be something like O(n_statements * n_basic_blocks).
   The data structures would be more complex in order to work on all the
   variables in a single pass.  */
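
/* As a small illustrative sketch (a hypothetical straight-line CFG, with
   N == 2): if block B contains x = a / d and its successor C, which
   post-dominates B, contains y = b / d, the subset consists of B with
   child C.  The post-order walk annotates B with two post-dominating
   divisions, so the pre-order walk inserts recip = 1.0 / d in B and
   rewrites both divisions into multiplications by recip.  */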

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "predict.h"
#include "alloc-pool.h"
#include "tree-pass.h"
#include "ssa.h"
#include "optabs-tree.h"
#include "gimple-pretty-print.h"
#include "alias.h"
#include "fold-const.h"
#include "gimple-fold.h"
#include "gimple-iterator.h"
#include "gimplify.h"
#include "gimplify-me.h"
#include "stor-layout.h"
#include "tree-cfg.h"
#include "tree-dfa.h"
#include "tree-ssa.h"
#include "builtins.h"
#include "params.h"
#include "internal-fn.h"
#include "case-cfn-macros.h"

/* This structure represents one basic block that either computes a
   division, or is a common dominator for basic blocks that compute a
   division.  */
struct occurrence {
  /* The basic block represented by this structure.  */
  basic_block bb;

  /* If non-NULL, the SSA_NAME holding the definition for a reciprocal
     inserted in BB.  */
  tree recip_def;

  /* If non-NULL, the GIMPLE_ASSIGN for a reciprocal computation that
     was inserted in BB.  */
  gimple *recip_def_stmt;

  /* Pointer to a list of "struct occurrence"s for blocks dominated
     by BB.  */
  struct occurrence *children;

  /* Pointer to the next "struct occurrence" in the list of blocks
     sharing a common dominator.  */
  struct occurrence *next;

  /* The number of divisions that are in BB before compute_merit.  The
     number of divisions that are in BB or post-dominate it after
     compute_merit.  */
  int num_divisions;

  /* True if the basic block has a division, false if it is a common
     dominator for basic blocks that do.  If it is false and trapping
     math is active, BB is not a candidate for inserting a reciprocal.  */
  bool bb_has_division;
};

static struct
{
  /* Number of 1.0/X ops inserted.  */
  int rdivs_inserted;

  /* Number of 1.0/FUNC ops inserted.  */
  int rfuncs_inserted;
} reciprocal_stats;

static struct
{
  /* Number of cexpi calls inserted.  */
  int inserted;
} sincos_stats;

static struct
{
  /* Number of hand-written 16-bit nop / bswaps found.  */
  int found_16bit;

  /* Number of hand-written 32-bit nop / bswaps found.  */
  int found_32bit;

  /* Number of hand-written 64-bit nop / bswaps found.  */
  int found_64bit;
} nop_stats, bswap_stats;

static struct
{
  /* Number of widening multiplication ops inserted.  */
  int widen_mults_inserted;

  /* Number of integer multiply-and-accumulate ops inserted.  */
  int maccs_inserted;

  /* Number of fp fused multiply-add ops inserted.  */
  int fmas_inserted;
} widen_mul_stats;

/* The instance of "struct occurrence" representing the highest
   interesting block in the dominator tree.  */
static struct occurrence *occ_head;

/* Allocation pool for getting instances of "struct occurrence".  */
static object_allocator<occurrence> *occ_pool;

/* Allocate and return a new struct occurrence for basic block BB, and
   whose children list is headed by CHILDREN.  */
static struct occurrence *
occ_new (basic_block bb, struct occurrence *children)
{
  struct occurrence *occ;

  bb->aux = occ = occ_pool->allocate ();
  memset (occ, 0, sizeof (struct occurrence));

  occ->bb = bb;
  occ->children = children;
  return occ;
}

/* Insert NEW_OCC into our subset of the dominator tree.  P_HEAD points to a
   list of "struct occurrence"s, one per basic block, having IDOM as
   their common dominator.

   We try to insert NEW_OCC as deep as possible in the tree, and we also
   insert any other block that is a common dominator for BB and one
   block already in the tree.  */

static void
insert_bb (struct occurrence *new_occ, basic_block idom,
	   struct occurrence **p_head)
{
  struct occurrence *occ, **p_occ;

  for (p_occ = p_head; (occ = *p_occ) != NULL; )
    {
      basic_block bb = new_occ->bb, occ_bb = occ->bb;
      basic_block dom = nearest_common_dominator (CDI_DOMINATORS, occ_bb, bb);
      if (dom == bb)
	{
	  /* BB dominates OCC_BB.  OCC becomes NEW_OCC's child: remove OCC
	     from its list.  */
	  *p_occ = occ->next;
	  occ->next = new_occ->children;
	  new_occ->children = occ;

	  /* Try the next block (it may as well be dominated by BB).  */
	}

      else if (dom == occ_bb)
	{
	  /* OCC_BB dominates BB.  Tail recurse to look deeper.  */
	  insert_bb (new_occ, dom, &occ->children);
	  return;
	}

      else if (dom != idom)
	{
	  gcc_assert (!dom->aux);

	  /* There is a dominator between IDOM and BB, add it and make
	     two children out of NEW_OCC and OCC.  First, remove OCC from
	     its list.  */
	  *p_occ = occ->next;
	  new_occ->next = occ;
	  occ->next = NULL;

	  /* None of the previous blocks has DOM as a dominator: if we tail
	     recursed, we would reexamine them uselessly.  Just switch BB with
	     DOM, and go on looking for blocks dominated by DOM.  */
	  new_occ = occ_new (dom, new_occ);
	}

      else
	{
	  /* Nothing special, go on with the next element.  */
	  p_occ = &occ->next;
	}
    }

  /* No place was found as a child of IDOM.  Make BB a sibling of IDOM.  */
  new_occ->next = *p_head;
  *p_head = new_occ;
}
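
/* A hypothetical illustration: starting from an empty subset, adding a
   block B containing a division makes it a child of the function entry.
   Adding a second block C whose nearest common dominator with B is some
   block A (with A distinct from the entry, B and C) then triggers the
   "dom != idom" case above, so an occurrence for A is created on the fly
   with B and C as its children.  */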

/* Register that we found a division in BB.  */

static inline void
register_division_in (basic_block bb)
{
  struct occurrence *occ;

  occ = (struct occurrence *) bb->aux;
  if (!occ)
    {
      occ = occ_new (bb, NULL);
      insert_bb (occ, ENTRY_BLOCK_PTR_FOR_FN (cfun), &occ_head);
    }

  occ->bb_has_division = true;
  occ->num_divisions++;
}

/* Compute the number of divisions that postdominate each block in OCC and
   its children.  */

static void
compute_merit (struct occurrence *occ)
{
  struct occurrence *occ_child;
  basic_block dom = occ->bb;

  for (occ_child = occ->children; occ_child; occ_child = occ_child->next)
    {
      basic_block bb;
      if (occ_child->children)
	compute_merit (occ_child);

      if (flag_exceptions)
	bb = single_noncomplex_succ (dom);
      else
	bb = dom;

      if (dominated_by_p (CDI_POST_DOMINATORS, bb, occ_child->bb))
	occ->num_divisions += occ_child->num_divisions;
    }
}

/* Return whether USE_STMT is a floating-point division by DEF.  */
static inline bool
is_division_by (gimple *use_stmt, tree def)
{
  return is_gimple_assign (use_stmt)
	 && gimple_assign_rhs_code (use_stmt) == RDIV_EXPR
	 && gimple_assign_rhs2 (use_stmt) == def
	 /* Do not recognize x / x as a valid division, as we would get
	    confused later by replacing all immediate uses of x in such
	    a stmt.  */
	 && gimple_assign_rhs1 (use_stmt) != def;
}

/* Walk the subset of the dominator tree rooted at OCC, setting the
   RECIP_DEF field to a definition of 1.0 / DEF that can be used in
   the given basic block.  The field may be left NULL, of course,
   if it is not possible or profitable to do the optimization.

   DEF_GSI is an iterator pointing at the statement defining DEF.
   If RECIP_DEF is set, a dominator already has a computation that can
   be used.  */

static void
insert_reciprocals (gimple_stmt_iterator *def_gsi, struct occurrence *occ,
		    tree def, tree recip_def, int threshold)
{
  tree type;
  gassign *new_stmt;
  gimple_stmt_iterator gsi;
  struct occurrence *occ_child;

  if (!recip_def
      && (occ->bb_has_division || !flag_trapping_math)
      && occ->num_divisions >= threshold)
    {
      /* Make a variable with the replacement and substitute it.  */
      type = TREE_TYPE (def);
      recip_def = create_tmp_reg (type, "reciptmp");
      new_stmt = gimple_build_assign (recip_def, RDIV_EXPR,
				      build_one_cst (type), def);

      if (occ->bb_has_division)
	{
	  /* Case 1: insert before an existing division.  */
	  gsi = gsi_after_labels (occ->bb);
	  while (!gsi_end_p (gsi) && !is_division_by (gsi_stmt (gsi), def))
	    gsi_next (&gsi);

	  gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
	}
      else if (def_gsi && occ->bb == def_gsi->bb)
	{
	  /* Case 2: insert right after the definition.  Note that this will
	     never happen if the definition statement can throw, because in
	     that case the sole successor of the statement's basic block will
	     dominate all the uses as well.  */
	  gsi_insert_after (def_gsi, new_stmt, GSI_NEW_STMT);
	}
      else
	{
	  /* Case 3: insert in a basic block not containing defs/uses.  */
	  gsi = gsi_after_labels (occ->bb);
	  gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
	}

      reciprocal_stats.rdivs_inserted++;

      occ->recip_def_stmt = new_stmt;
    }

  occ->recip_def = recip_def;
  for (occ_child = occ->children; occ_child; occ_child = occ_child->next)
    insert_reciprocals (def_gsi, occ_child, def, recip_def, threshold);
}

/* Replace the division at USE_P with a multiplication by the reciprocal, if
   possible.  */

static inline void
replace_reciprocal (use_operand_p use_p)
{
  gimple *use_stmt = USE_STMT (use_p);
  basic_block bb = gimple_bb (use_stmt);
  struct occurrence *occ = (struct occurrence *) bb->aux;

  if (optimize_bb_for_speed_p (bb)
      && occ->recip_def && use_stmt != occ->recip_def_stmt)
    {
      gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
      gimple_assign_set_rhs_code (use_stmt, MULT_EXPR);
      SET_USE (use_p, occ->recip_def);
      fold_stmt_inplace (&gsi);
      update_stmt (use_stmt);
    }
}

/* Free OCC and return one more "struct occurrence" to be freed.  */

static struct occurrence *
free_bb (struct occurrence *occ)
{
  struct occurrence *child, *next;

  /* First get the two pointers hanging off OCC.  */
  next = occ->next;
  child = occ->children;
  occ->bb->aux = NULL;
  occ_pool->remove (occ);

  /* Now ensure that we don't recurse unless it is necessary.  */
  if (!child)
    return next;
  else
    {
      while (next)
	next = free_bb (next);

      return child;
    }
}

/* Look for floating-point divisions among DEF's uses, and try to
   replace them by multiplications with the reciprocal.  Add
   as many statements computing the reciprocal as needed.

   DEF must be a GIMPLE register of a floating-point type.  */

static void
execute_cse_reciprocals_1 (gimple_stmt_iterator *def_gsi, tree def)
{
  use_operand_p use_p;
  imm_use_iterator use_iter;
  struct occurrence *occ;
  int count = 0, threshold;

  gcc_assert (FLOAT_TYPE_P (TREE_TYPE (def)) && is_gimple_reg (def));

  FOR_EACH_IMM_USE_FAST (use_p, use_iter, def)
    {
      gimple *use_stmt = USE_STMT (use_p);
      if (is_division_by (use_stmt, def))
	{
	  register_division_in (gimple_bb (use_stmt));
	  count++;
	}
    }

  /* Do the expensive part only if we can hope to optimize something.  */
  threshold = targetm.min_divisions_for_recip_mul (TYPE_MODE (TREE_TYPE (def)));
  if (count >= threshold)
    {
      gimple *use_stmt;
      for (occ = occ_head; occ; occ = occ->next)
	{
	  compute_merit (occ);
	  insert_reciprocals (def_gsi, occ, def, NULL, threshold);
	}

      FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, def)
	{
	  if (is_division_by (use_stmt, def))
	    {
	      FOR_EACH_IMM_USE_ON_STMT (use_p, use_iter)
		replace_reciprocal (use_p);
	    }
	}
    }

  for (occ = occ_head; occ; )
    occ = free_bb (occ);

  occ_head = NULL;
}

/* Return an internal function that implements the reciprocal of CALL,
   or IFN_LAST if there is no such function that the target supports.  */

internal_fn
internal_fn_reciprocal (gcall *call)
{
  internal_fn ifn;

  switch (gimple_call_combined_fn (call))
    {
    CASE_CFN_SQRT:
      ifn = IFN_RSQRT;
      break;

    default:
      return IFN_LAST;
    }

  tree_pair types = direct_internal_fn_types (ifn, call);
  if (!direct_internal_fn_supported_p (ifn, types, OPTIMIZE_FOR_SPEED))
    return IFN_LAST;

  return ifn;
}

/* Go through all the floating-point SSA_NAMEs, and call
   execute_cse_reciprocals_1 on each of them.  */
namespace {

const pass_data pass_data_cse_reciprocals =
{
  GIMPLE_PASS, /* type */
  "recip", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};

class pass_cse_reciprocals : public gimple_opt_pass
{
public:
  pass_cse_reciprocals (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_cse_reciprocals, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return optimize && flag_reciprocal_math; }
  virtual unsigned int execute (function *);

}; // class pass_cse_reciprocals

unsigned int
pass_cse_reciprocals::execute (function *fun)
{
  basic_block bb;
  tree arg;

  occ_pool = new object_allocator<occurrence> ("dominators for recip");

  memset (&reciprocal_stats, 0, sizeof (reciprocal_stats));
  calculate_dominance_info (CDI_DOMINATORS);
  calculate_dominance_info (CDI_POST_DOMINATORS);

  if (flag_checking)
    FOR_EACH_BB_FN (bb, fun)
      gcc_assert (!bb->aux);

  for (arg = DECL_ARGUMENTS (fun->decl); arg; arg = DECL_CHAIN (arg))
    if (FLOAT_TYPE_P (TREE_TYPE (arg))
	&& is_gimple_reg (arg))
      {
	tree name = ssa_default_def (fun, arg);
	if (name)
	  execute_cse_reciprocals_1 (NULL, name);
      }

  FOR_EACH_BB_FN (bb, fun)
    {
      tree def;

      for (gphi_iterator gsi = gsi_start_phis (bb); !gsi_end_p (gsi);
	   gsi_next (&gsi))
	{
	  gphi *phi = gsi.phi ();
	  def = PHI_RESULT (phi);
	  if (! virtual_operand_p (def)
	      && FLOAT_TYPE_P (TREE_TYPE (def)))
	    execute_cse_reciprocals_1 (NULL, def);
	}

      for (gimple_stmt_iterator gsi = gsi_after_labels (bb); !gsi_end_p (gsi);
	   gsi_next (&gsi))
	{
	  gimple *stmt = gsi_stmt (gsi);

	  if (gimple_has_lhs (stmt)
	      && (def = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_DEF)) != NULL
	      && FLOAT_TYPE_P (TREE_TYPE (def))
	      && TREE_CODE (def) == SSA_NAME)
	    execute_cse_reciprocals_1 (&gsi, def);
	}

      if (optimize_bb_for_size_p (bb))
	continue;

      /* Scan for a/func(b) and convert it to reciprocal a*rfunc(b).  */
      for (gimple_stmt_iterator gsi = gsi_after_labels (bb); !gsi_end_p (gsi);
	   gsi_next (&gsi))
	{
	  gimple *stmt = gsi_stmt (gsi);

	  if (is_gimple_assign (stmt)
	      && gimple_assign_rhs_code (stmt) == RDIV_EXPR)
	    {
	      tree arg1 = gimple_assign_rhs2 (stmt);
	      gimple *stmt1;

	      if (TREE_CODE (arg1) != SSA_NAME)
		continue;

	      stmt1 = SSA_NAME_DEF_STMT (arg1);

	      if (is_gimple_call (stmt1)
		  && gimple_call_lhs (stmt1))
		{
		  bool fail;
		  imm_use_iterator ui;
		  use_operand_p use_p;
		  tree fndecl = NULL_TREE;

		  gcall *call = as_a <gcall *> (stmt1);
		  internal_fn ifn = internal_fn_reciprocal (call);
		  if (ifn == IFN_LAST)
		    {
		      fndecl = gimple_call_fndecl (call);
		      if (!fndecl
			  || DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_MD)
			continue;
		      fndecl = targetm.builtin_reciprocal (fndecl);
		      if (!fndecl)
			continue;
		    }

		  /* Check that all uses of the SSA name are divisions,
		     otherwise replacing the defining statement will do
		     the wrong thing.  */
		  fail = false;
		  FOR_EACH_IMM_USE_FAST (use_p, ui, arg1)
		    {
		      gimple *stmt2 = USE_STMT (use_p);
		      if (is_gimple_debug (stmt2))
			continue;
		      if (!is_gimple_assign (stmt2)
			  || gimple_assign_rhs_code (stmt2) != RDIV_EXPR
			  || gimple_assign_rhs1 (stmt2) == arg1
			  || gimple_assign_rhs2 (stmt2) != arg1)
			{
			  fail = true;
			  break;
			}
		    }
		  if (fail)
		    continue;

		  gimple_replace_ssa_lhs (call, arg1);
		  if (gimple_call_internal_p (call) != (ifn != IFN_LAST))
		    {
		      auto_vec<tree, 4> args;
		      for (unsigned int i = 0;
			   i < gimple_call_num_args (call); i++)
			args.safe_push (gimple_call_arg (call, i));
		      gcall *stmt2;
		      if (ifn == IFN_LAST)
			stmt2 = gimple_build_call_vec (fndecl, args);
		      else
			stmt2 = gimple_build_call_internal_vec (ifn, args);
		      gimple_call_set_lhs (stmt2, arg1);
		      if (gimple_vdef (call))
			{
			  gimple_set_vdef (stmt2, gimple_vdef (call));
			  SSA_NAME_DEF_STMT (gimple_vdef (stmt2)) = stmt2;
			}
		      gimple_set_vuse (stmt2, gimple_vuse (call));
		      gimple_stmt_iterator gsi2 = gsi_for_stmt (call);
		      gsi_replace (&gsi2, stmt2, true);
		    }
		  else
		    {
		      if (ifn == IFN_LAST)
			gimple_call_set_fndecl (call, fndecl);
		      else
			gimple_call_set_internal_fn (call, ifn);
		      update_stmt (call);
		    }
		  reciprocal_stats.rfuncs_inserted++;

		  FOR_EACH_IMM_USE_STMT (stmt, ui, arg1)
		    {
		      gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
		      gimple_assign_set_rhs_code (stmt, MULT_EXPR);
		      fold_stmt_inplace (&gsi);
		      update_stmt (stmt);
		    }
		}
	    }
	}
    }

  statistics_counter_event (fun, "reciprocal divs inserted",
			    reciprocal_stats.rdivs_inserted);
  statistics_counter_event (fun, "reciprocal functions inserted",
			    reciprocal_stats.rfuncs_inserted);

  free_dominance_info (CDI_DOMINATORS);
  free_dominance_info (CDI_POST_DOMINATORS);
  delete occ_pool;
  return 0;
}

} // anon namespace

gimple_opt_pass *
make_pass_cse_reciprocals (gcc::context *ctxt)
{
  return new pass_cse_reciprocals (ctxt);
}

/* Records an occurrence at statement USE_STMT in the vector of statements
   STMTS if it is dominated by *TOP_BB, dominates it, or *TOP_BB is not yet
   initialized.  Returns true if the occurrence was pushed on
   the vector.  Adjusts *TOP_BB to be the basic block dominating all
   statements in the vector.  */

static bool
maybe_record_sincos (vec<gimple *> *stmts,
		     basic_block *top_bb, gimple *use_stmt)
{
  basic_block use_bb = gimple_bb (use_stmt);
  if (*top_bb
      && (*top_bb == use_bb
	  || dominated_by_p (CDI_DOMINATORS, use_bb, *top_bb)))
    stmts->safe_push (use_stmt);
  else if (!*top_bb
	   || dominated_by_p (CDI_DOMINATORS, *top_bb, use_bb))
    {
      stmts->safe_push (use_stmt);
      *top_bb = use_bb;
    }
  else
    return false;

  return true;
}
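
/* For instance, if the calls recorded so far are all dominated by block
   B2 (so *TOP_BB == B2) and USE_STMT sits in a block B3 that neither
   dominates nor is dominated by B2 (say, the other arm of an if), the
   occurrence cannot be recorded and false is returned.  */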

/* Look for sin, cos and cexpi calls with the same argument NAME, and
   CSE the results by creating a single call to cexpi.
   We first walk over all immediate uses of the argument collecting
   statements that we can CSE in a vector, and in a second pass replace
   the statement rhs with a REALPART or IMAGPART expression on the
   result of the cexpi call we insert before the use statement that
   dominates all other candidates.  */

static bool
execute_cse_sincos_1 (tree name)
{
  gimple_stmt_iterator gsi;
  imm_use_iterator use_iter;
  tree fndecl, res, type;
  gimple *def_stmt, *use_stmt, *stmt;
  int seen_cos = 0, seen_sin = 0, seen_cexpi = 0;
  auto_vec<gimple *> stmts;
  basic_block top_bb = NULL;
  int i;
  bool cfg_changed = false;

  type = TREE_TYPE (name);
  FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, name)
    {
      if (gimple_code (use_stmt) != GIMPLE_CALL
	  || !gimple_call_lhs (use_stmt))
	continue;

      switch (gimple_call_combined_fn (use_stmt))
	{
	CASE_CFN_COS:
	  seen_cos |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
	  break;

	CASE_CFN_SIN:
	  seen_sin |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
	  break;

	CASE_CFN_CEXPI:
	  seen_cexpi |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
	  break;

	default:;
	}
    }

  if (seen_cos + seen_sin + seen_cexpi <= 1)
    return false;

  /* Simply insert cexpi at the beginning of top_bb but not earlier than
     the name def statement.  */
  fndecl = mathfn_built_in (type, BUILT_IN_CEXPI);
  if (!fndecl)
    return false;
  stmt = gimple_build_call (fndecl, 1, name);
  res = make_temp_ssa_name (TREE_TYPE (TREE_TYPE (fndecl)), stmt, "sincostmp");
  gimple_call_set_lhs (stmt, res);

  def_stmt = SSA_NAME_DEF_STMT (name);
  if (!SSA_NAME_IS_DEFAULT_DEF (name)
      && gimple_code (def_stmt) != GIMPLE_PHI
      && gimple_bb (def_stmt) == top_bb)
    {
      gsi = gsi_for_stmt (def_stmt);
      gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
    }
  else
    {
      gsi = gsi_after_labels (top_bb);
      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
    }
  sincos_stats.inserted++;

  /* And adjust the recorded old call sites.  */
  for (i = 0; stmts.iterate (i, &use_stmt); ++i)
    {
      tree rhs = NULL;

      switch (gimple_call_combined_fn (use_stmt))
	{
	CASE_CFN_COS:
	  rhs = fold_build1 (REALPART_EXPR, type, res);
	  break;

	CASE_CFN_SIN:
	  rhs = fold_build1 (IMAGPART_EXPR, type, res);
	  break;

	CASE_CFN_CEXPI:
	  rhs = res;
	  break;

	default:;
	  gcc_unreachable ();
	}

      /* Replace call with a copy.  */
      stmt = gimple_build_assign (gimple_call_lhs (use_stmt), rhs);

      gsi = gsi_for_stmt (use_stmt);
      gsi_replace (&gsi, stmt, true);
      if (gimple_purge_dead_eh_edges (gimple_bb (stmt)))
	cfg_changed = true;
    }

  return cfg_changed;
}
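
/* An illustrative sketch of the transformation (assuming a cexpi
   implementation is available in the runtime library):

     s = sin (a);              t = cexpi (a);
     c = cos (a);      -->     s = IMAGPART_EXPR <t>;
			       c = REALPART_EXPR <t>;  */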

/* To evaluate powi(x,n), the floating point value x raised to the
   constant integer exponent n, we use a hybrid algorithm that
   combines the "window method" with look-up tables.  For an
   introduction to exponentiation algorithms and "addition chains",
   see section 4.6.3, "Evaluation of Powers" of Donald E. Knuth,
   "Seminumerical Algorithms", Vol. 2, "The Art of Computer Programming",
   3rd Edition, 1998, and Daniel M. Gordon, "A Survey of Fast Exponentiation
   Methods", Journal of Algorithms, Vol. 27, pp. 129-146, 1998.  */

/* Provide a default value for POWI_MAX_MULTS, the maximum number of
   multiplications to inline before calling the system library's pow
   function.  powi(x,n) requires at worst 2*bits(n)-2 multiplications,
   so this default never requires calling pow, powf or powl.  */

#ifndef POWI_MAX_MULTS
#define POWI_MAX_MULTS  (2*HOST_BITS_PER_WIDE_INT-2)
#endif

/* The size of the "optimal power tree" lookup table.  All
   exponents less than this value are simply looked up in the
   powi_table below.  This threshold is also used to size the
   cache of pseudo registers that hold intermediate results.  */
#define POWI_TABLE_SIZE 256

/* The size, in bits, of the window used in the "window method"
   exponentiation algorithm.  This is equivalent to a radix of
   (1<<POWI_WINDOW_SIZE) in the corresponding "m-ary method".  */
#define POWI_WINDOW_SIZE 3

/* The following table is an efficient representation of an
   "optimal power tree".  For each value, i, the corresponding
   value, j, in the table states that an optimal evaluation
   sequence for calculating pow(x,i) can be found by evaluating
   pow(x,j)*pow(x,i-j).  An optimal power tree for the first
   100 integers is given in Knuth's "Seminumerical algorithms".  */

static const unsigned char powi_table[POWI_TABLE_SIZE] =
  {
      0,   1,   1,   2,   2,   3,   3,   4,  /*   0 -   7 */
      4,   6,   5,   6,   6,  10,   7,   9,  /*   8 -  15 */
      8,  16,   9,  16,  10,  12,  11,  13,  /*  16 -  23 */
     12,  17,  13,  18,  14,  24,  15,  26,  /*  24 -  31 */
     16,  17,  17,  19,  18,  33,  19,  26,  /*  32 -  39 */
     20,  25,  21,  40,  22,  27,  23,  44,  /*  40 -  47 */
     24,  32,  25,  34,  26,  29,  27,  44,  /*  48 -  55 */
     28,  31,  29,  34,  30,  60,  31,  36,  /*  56 -  63 */
     32,  64,  33,  34,  34,  46,  35,  37,  /*  64 -  71 */
     36,  65,  37,  50,  38,  48,  39,  69,  /*  72 -  79 */
     40,  49,  41,  43,  42,  51,  43,  58,  /*  80 -  87 */
     44,  64,  45,  47,  46,  59,  47,  76,  /*  88 -  95 */
     48,  65,  49,  66,  50,  67,  51,  66,  /*  96 - 103 */
     52,  70,  53,  74,  54, 104,  55,  74,  /* 104 - 111 */
     56,  64,  57,  69,  58,  78,  59,  68,  /* 112 - 119 */
     60,  61,  61,  80,  62,  75,  63,  68,  /* 120 - 127 */
     64,  65,  65, 128,  66, 129,  67,  90,  /* 128 - 135 */
     68,  73,  69, 131,  70,  94,  71,  88,  /* 136 - 143 */
     72, 128,  73,  98,  74, 132,  75, 121,  /* 144 - 151 */
     76, 102,  77, 124,  78, 132,  79, 106,  /* 152 - 159 */
     80,  97,  81, 160,  82,  99,  83, 134,  /* 160 - 167 */
     84,  86,  85,  95,  86, 160,  87, 100,  /* 168 - 175 */
     88, 113,  89,  98,  90, 107,  91, 122,  /* 176 - 183 */
     92, 111,  93, 102,  94, 126,  95, 150,  /* 184 - 191 */
     96, 128,  97, 130,  98, 133,  99, 195,  /* 192 - 199 */
    100, 128, 101, 123, 102, 164, 103, 138,  /* 200 - 207 */
    104, 145, 105, 146, 106, 109, 107, 149,  /* 208 - 215 */
    108, 200, 109, 146, 110, 170, 111, 157,  /* 216 - 223 */
    112, 128, 113, 130, 114, 182, 115, 132,  /* 224 - 231 */
    116, 200, 117, 132, 118, 158, 119, 206,  /* 232 - 239 */
    120, 240, 121, 162, 122, 147, 123, 152,  /* 240 - 247 */
    124, 166, 125, 214, 126, 138, 127, 153,  /* 248 - 255 */
  };
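
/* For example, powi_table[5] == 3, so x**5 is evaluated as
   x**3 * x**2; recursively, x**3 = x**2 * x and x**2 = x * x,
   for a total of three multiplications.  */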

/* Return the number of multiplications required to calculate
   powi(x,n) where n is less than POWI_TABLE_SIZE.  This is a
   subroutine of powi_cost.  CACHE is an array indicating
   which exponents have already been calculated.  */

static int
powi_lookup_cost (unsigned HOST_WIDE_INT n, bool *cache)
{
  /* If we've already calculated this exponent, then this evaluation
     doesn't require any additional multiplications.  */
  if (cache[n])
    return 0;

  cache[n] = true;
  return powi_lookup_cost (n - powi_table[n], cache)
	 + powi_lookup_cost (powi_table[n], cache) + 1;
}

/* Return the number of multiplications required to calculate
   powi(x,n) for an arbitrary x, given the exponent N.  This
   function needs to be kept in sync with powi_as_mults below.  */

static int
powi_cost (HOST_WIDE_INT n)
{
  bool cache[POWI_TABLE_SIZE];
  unsigned HOST_WIDE_INT digit;
  unsigned HOST_WIDE_INT val;
  int result;

  if (n == 0)
    return 0;

  /* Ignore the reciprocal when calculating the cost.  */
  val = (n < 0) ? -n : n;

  /* Initialize the exponent cache.  */
  memset (cache, 0, POWI_TABLE_SIZE * sizeof (bool));
  cache[1] = true;

  result = 0;

  while (val >= POWI_TABLE_SIZE)
    {
      if (val & 1)
	{
	  digit = val & ((1 << POWI_WINDOW_SIZE) - 1);
	  result += powi_lookup_cost (digit, cache)
		    + POWI_WINDOW_SIZE + 1;
	  val >>= POWI_WINDOW_SIZE;
	}
      else
	{
	  val >>= 1;
	  result++;
	}
    }

  return result + powi_lookup_cost (val, cache);
}
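
/* Worked example: powi_cost (5) seeds cache[1], then
   powi_lookup_cost (5) recurses via powi_table[5] == 3 into exponents
   2 and 3, each of which costs one multiplication once its operands
   are cached, for a total cost of 3.  For exponents of at least
   POWI_TABLE_SIZE, the loop above first peels off either one zero bit
   (a single squaring) or a POWI_WINDOW_SIZE-bit odd digit per
   iteration before the final table lookup.  */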

/* Recursive subroutine of powi_as_mults.  This function takes the
   array, CACHE, of already calculated exponents and an exponent N and
   returns a tree that corresponds to CACHE[1]**N, with type TYPE.  */

static tree
powi_as_mults_1 (gimple_stmt_iterator *gsi, location_t loc, tree type,
		 HOST_WIDE_INT n, tree *cache)
{
  tree op0, op1, ssa_target;
  unsigned HOST_WIDE_INT digit;
  gassign *mult_stmt;

  if (n < POWI_TABLE_SIZE && cache[n])
    return cache[n];

  ssa_target = make_temp_ssa_name (type, NULL, "powmult");

  if (n < POWI_TABLE_SIZE)
    {
      cache[n] = ssa_target;
      op0 = powi_as_mults_1 (gsi, loc, type, n - powi_table[n], cache);
      op1 = powi_as_mults_1 (gsi, loc, type, powi_table[n], cache);
    }
  else if (n & 1)
    {
      digit = n & ((1 << POWI_WINDOW_SIZE) - 1);
      op0 = powi_as_mults_1 (gsi, loc, type, n - digit, cache);
      op1 = powi_as_mults_1 (gsi, loc, type, digit, cache);
    }
  else
    {
      op0 = powi_as_mults_1 (gsi, loc, type, n >> 1, cache);
      op1 = op0;
    }

  mult_stmt = gimple_build_assign (ssa_target, MULT_EXPR, op0, op1);
  gimple_set_location (mult_stmt, loc);
  gsi_insert_before (gsi, mult_stmt, GSI_SAME_STMT);

  return ssa_target;
}

/* Convert ARG0**N to a tree of multiplications of ARG0 with itself.
   This function needs to be kept in sync with powi_cost above.  */

static tree
powi_as_mults (gimple_stmt_iterator *gsi, location_t loc,
	       tree arg0, HOST_WIDE_INT n)
{
  tree cache[POWI_TABLE_SIZE], result, type = TREE_TYPE (arg0);
  gassign *div_stmt;
  tree target;

  if (n == 0)
    return build_real (type, dconst1);

  memset (cache, 0, sizeof (cache));
  cache[1] = arg0;

  result = powi_as_mults_1 (gsi, loc, type, (n < 0) ? -n : n, cache);
  if (n >= 0)
    return result;

  /* If the original exponent was negative, reciprocate the result.  */
  target = make_temp_ssa_name (type, NULL, "powmult");
  div_stmt = gimple_build_assign (target, RDIV_EXPR,
				  build_real (type, dconst1), result);
  gimple_set_location (div_stmt, loc);
  gsi_insert_before (gsi, div_stmt, GSI_SAME_STMT);

  return target;
}

/* ARG0 and N are the two arguments to a powi builtin in GSI with
   location info LOC.  If the arguments are appropriate, create an
   equivalent sequence of statements prior to GSI using an optimal
   number of multiplications, and return an expression holding the
   result.  */

static tree
gimple_expand_builtin_powi (gimple_stmt_iterator *gsi, location_t loc,
			    tree arg0, HOST_WIDE_INT n)
{
  /* Avoid largest negative number.  */
  if (n != -n
      && ((n >= -1 && n <= 2)
	  || (optimize_function_for_speed_p (cfun)
	      && powi_cost (n) <= POWI_MAX_MULTS)))
    return powi_as_mults (gsi, loc, arg0, n);

  return NULL_TREE;
}
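
/* A note on the n != -n guard above: in two's complement the most
   negative HOST_WIDE_INT is its own negation, so negating it in
   powi_cost or powi_as_mults would not produce the positive magnitude
   those functions expect.  Such an exponent is simply left to the
   library call.  */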

/* Build a gimple call statement that calls FN with argument ARG.
   Set the lhs of the call statement to a fresh SSA name.  Insert the
   statement prior to GSI's current position, and return the fresh
   SSA name.  */

static tree
build_and_insert_call (gimple_stmt_iterator *gsi, location_t loc,
		       tree fn, tree arg)
{
  gcall *call_stmt;
  tree ssa_target;

  call_stmt = gimple_build_call (fn, 1, arg);
  ssa_target = make_temp_ssa_name (TREE_TYPE (arg), NULL, "powroot");
  gimple_set_lhs (call_stmt, ssa_target);
  gimple_set_location (call_stmt, loc);
  gsi_insert_before (gsi, call_stmt, GSI_SAME_STMT);

  return ssa_target;
}

/* Build a gimple binary operation with the given CODE and arguments
   ARG0, ARG1, assigning the result to a new SSA name built from NAME.
   Insert the statement prior to GSI's current position, and
   return the fresh SSA name.  */

static tree
build_and_insert_binop (gimple_stmt_iterator *gsi, location_t loc,
			const char *name, enum tree_code code,
			tree arg0, tree arg1)
{
  tree result = make_temp_ssa_name (TREE_TYPE (arg0), NULL, name);
  gassign *stmt = gimple_build_assign (result, code, arg0, arg1);
  gimple_set_location (stmt, loc);
  gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
  return result;
}

/* Build a gimple reference operation with the given CODE and argument
   ARG, assigning the result to a new SSA name of TYPE with NAME.
   Insert the statement prior to GSI's current position, and return
   the fresh SSA name.  */

static inline tree
build_and_insert_ref (gimple_stmt_iterator *gsi, location_t loc, tree type,
		      const char *name, enum tree_code code, tree arg0)
{
  tree result = make_temp_ssa_name (type, NULL, name);
  gimple *stmt = gimple_build_assign (result, build1 (code, type, arg0));
  gimple_set_location (stmt, loc);
  gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
  return result;
}

/* Build a gimple assignment to cast VAL to TYPE.  Insert the statement
   prior to GSI's current position, and return the fresh SSA name.  */

static tree
build_and_insert_cast (gimple_stmt_iterator *gsi, location_t loc,
		       tree type, tree val)
{
  tree result = make_ssa_name (type);
  gassign *stmt = gimple_build_assign (result, NOP_EXPR, val);
  gimple_set_location (stmt, loc);
  gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
  return result;
}

struct pow_synth_sqrt_info
{
  bool *factors;
  unsigned int deepest;
  unsigned int num_mults;
};

/* Return true iff the real value C can be represented as a
   sum of powers of 0.5 up to N.  That is:
   C == SUM<i from 1..N> (a[i]*(0.5**i)) where a[i] is either 0 or 1.
   Record in INFO the various parameters of the synthesis algorithm such
   as the factors a[i], the maximum 0.5 power and the number of
   multiplications that will be required.  */

bool
representable_as_half_series_p (REAL_VALUE_TYPE c, unsigned n,
				struct pow_synth_sqrt_info *info)
{
  REAL_VALUE_TYPE factor = dconsthalf;
  REAL_VALUE_TYPE remainder = c;

  info->deepest = 0;
  info->num_mults = 0;
  memset (info->factors, 0, n * sizeof (bool));

  for (unsigned i = 0; i < n; i++)
    {
      REAL_VALUE_TYPE res;

      /* If something inexact happened bail out now.  */
      if (real_arithmetic (&res, MINUS_EXPR, &remainder, &factor))
	return false;

      /* We have hit zero.  The number is representable as a sum
	 of powers of 0.5.  */
      if (real_equal (&res, &dconst0))
	{
	  info->factors[i] = true;
	  info->deepest = i + 1;
	  return true;
	}
      else if (!REAL_VALUE_NEGATIVE (res))
	{
	  remainder = res;
	  info->factors[i] = true;
	  info->num_mults++;
	}
      else
	info->factors[i] = false;

      real_arithmetic (&factor, MULT_EXPR, &factor, &dconsthalf);
    }
  return false;
}
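
/* Worked example: for C == 0.625 and N >= 3 the loop peels off 0.5
   (factors[0] = true, remainder 0.125), skips 0.25 (factors[1] = false)
   and hits zero at 0.125 (factors[2] = true), so 0.625 == 0.5 + 0.125,
   deepest == 3 and num_mults == 1: pow (x, 0.625) can be synthesized as
   sqrt (x) * sqrt (sqrt (sqrt (x))), one multiplication.  */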

/* Return the tree corresponding to FN being applied
   to ARG N times at GSI and LOC.
   Look up previous results from CACHE if need be.
   cache[0] should contain just plain ARG i.e. FN applied to ARG 0 times.  */

static tree
get_fn_chain (tree arg, unsigned int n, gimple_stmt_iterator *gsi,
	      tree fn, location_t loc, tree *cache)
{
  tree res = cache[n];
  if (!res)
    {
      tree prev = get_fn_chain (arg, n - 1, gsi, fn, loc, cache);
      res = build_and_insert_call (gsi, loc, fn, prev);
      cache[n] = res;
    }

  return res;
}

/* Print to STREAM the repeated application of function FNAME to ARG
   N times.  So, for FNAME = "foo", ARG = "x", N = 2 it would print:
   "foo (foo (x))".  */

static void
print_nested_fn (FILE* stream, const char *fname, const char* arg,
		 unsigned int n)
{
  if (n == 0)
    fprintf (stream, "%s", arg);
  else
    {
      fprintf (stream, "%s (", fname);
      print_nested_fn (stream, fname, arg, n - 1);
      fprintf (stream, ")");
    }
}

/* Print to STREAM the fractional sequence of sqrt chains
   applied to ARG, described by INFO.  Used for the dump file.  */

static void
dump_fractional_sqrt_sequence (FILE *stream, const char *arg,
			       struct pow_synth_sqrt_info *info)
{
  for (unsigned int i = 0; i < info->deepest; i++)
    {
      bool is_set = info->factors[i];
      if (is_set)
	{
	  print_nested_fn (stream, "sqrt", arg, i + 1);
	  if (i != info->deepest - 1)
	    fprintf (stream, " * ");
	}
    }
}

/* Print to STREAM a representation of raising ARG to an integer
   power N.  Used for the dump file.  */

static void
dump_integer_part (FILE *stream, const char* arg, HOST_WIDE_INT n)
{
  if (n > 1)
    fprintf (stream, "powi (%s, " HOST_WIDE_INT_PRINT_DEC ")", arg, n);
  else if (n == 1)
    fprintf (stream, "%s", arg);
}

/* Attempt to synthesize a POW[F] (ARG0, ARG1) call using chains of
   square roots.  Place at GSI and LOC.  Limit the maximum depth
   of the sqrt chains to MAX_DEPTH.  Return the tree holding the
   result of the expanded sequence or NULL_TREE if the expansion failed.

   This routine assumes that ARG1 is a real number with a fractional part
   (the integer exponent case will have been handled earlier in
   gimple_expand_builtin_pow).

   For ARG1 > 0.0:
   * For ARG1 composed of a whole part WHOLE_PART and a fractional part
     FRAC_PART i.e. WHOLE_PART == floor (ARG1) and
		    FRAC_PART == ARG1 - WHOLE_PART:
     Produce POWI (ARG0, WHOLE_PART) * POW (ARG0, FRAC_PART) where
     POW (ARG0, FRAC_PART) is expanded as a product of square root chains
     if it can be expressed as such, that is if FRAC_PART satisfies:
     FRAC_PART == <SUM from i = 1 until MAX_DEPTH> (a[i] * (0.5**i))
     where integer a[i] is either 0 or 1.

     Example:
	POW (x, 3.625) == POWI (x, 3) * POW (x, 0.625)
	  --> POWI (x, 3) * SQRT (x) * SQRT (SQRT (SQRT (x)))

   For ARG1 < 0.0 there are two approaches:
   * (A) Expand to 1.0 / POW (ARG0, -ARG1) where POW (ARG0, -ARG1)
	 is calculated as above.

     Example:
	POW (x, -5.625) == 1.0 / POW (x, 5.625)
	  --> 1.0 / (POWI (x, 5) * SQRT (x) * SQRT (SQRT (SQRT (x))))

   * (B) : WHOLE_PART := - ceil (abs (ARG1))
	   FRAC_PART  := ARG1 - WHOLE_PART
     and expand to POW (x, FRAC_PART) / POWI (x, WHOLE_PART).
     Example:
	POW (x, -5.875) == POW (x, 0.125) / POWI (X, 6)
	  --> SQRT (SQRT (SQRT (x))) / (POWI (x, 6))

   For ARG1 < 0.0 we choose between (A) and (B) depending on
   how many multiplications we'd have to do.
   So, for the example in (B): POW (x, -5.875), if we were to
   follow algorithm (A) we would produce:
   1.0 / POWI (X, 5) * SQRT (X) * SQRT (SQRT (X)) * SQRT (SQRT (SQRT (X)))
   which contains more multiplications than approach (B).

   Hopefully, this approach will eliminate potentially expensive POW library
   calls when unsafe floating point math is enabled and allow the compiler to
   further optimise the multiplies, square roots and divides produced by this
   function.  */

static tree
expand_pow_as_sqrts (gimple_stmt_iterator *gsi, location_t loc,
		     tree arg0, tree arg1, HOST_WIDE_INT max_depth)
{
  tree type = TREE_TYPE (arg0);
  machine_mode mode = TYPE_MODE (type);
  tree sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT);
  bool one_over = true;

  if (!sqrtfn)
    return NULL_TREE;

  if (TREE_CODE (arg1) != REAL_CST)
    return NULL_TREE;

  REAL_VALUE_TYPE exp_init = TREE_REAL_CST (arg1);

  gcc_assert (max_depth > 0);
  tree *cache = XALLOCAVEC (tree, max_depth + 1);

  struct pow_synth_sqrt_info synth_info;
  synth_info.factors = XALLOCAVEC (bool, max_depth + 1);
  synth_info.deepest = 0;
  synth_info.num_mults = 0;

  bool neg_exp = REAL_VALUE_NEGATIVE (exp_init);
  REAL_VALUE_TYPE exp = real_value_abs (&exp_init);

  /* The whole and fractional parts of exp.  */
  REAL_VALUE_TYPE whole_part;
  REAL_VALUE_TYPE frac_part;

  real_floor (&whole_part, mode, &exp);
  real_arithmetic (&frac_part, MINUS_EXPR, &exp, &whole_part);

  REAL_VALUE_TYPE ceil_whole = dconst0;
  REAL_VALUE_TYPE ceil_fract = dconst0;

  if (neg_exp)
    {
      real_ceil (&ceil_whole, mode, &exp);
      real_arithmetic (&ceil_fract, MINUS_EXPR, &ceil_whole, &exp);
    }

  if (!representable_as_half_series_p (frac_part, max_depth, &synth_info))
    return NULL_TREE;

  /* Check whether it's more profitable to not use 1.0 / ...  */
  if (neg_exp)
    {
      struct pow_synth_sqrt_info alt_synth_info;
      alt_synth_info.factors = XALLOCAVEC (bool, max_depth + 1);
      alt_synth_info.deepest = 0;
      alt_synth_info.num_mults = 0;

      if (representable_as_half_series_p (ceil_fract, max_depth,
					  &alt_synth_info)
	  && alt_synth_info.deepest <= synth_info.deepest
	  && alt_synth_info.num_mults < synth_info.num_mults)
	{
	  whole_part = ceil_whole;
	  frac_part = ceil_fract;
	  synth_info.deepest = alt_synth_info.deepest;
	  synth_info.num_mults = alt_synth_info.num_mults;
	  memcpy (synth_info.factors, alt_synth_info.factors,
		  (max_depth + 1) * sizeof (bool));
	  one_over = false;
	}
    }

  HOST_WIDE_INT n = real_to_integer (&whole_part);
  REAL_VALUE_TYPE cint;
  real_from_integer (&cint, VOIDmode, n, SIGNED);

  if (!real_identical (&whole_part, &cint))
    return NULL_TREE;

  if (powi_cost (n) + synth_info.num_mults > POWI_MAX_MULTS)
    return NULL_TREE;

  memset (cache, 0, (max_depth + 1) * sizeof (tree));

  tree integer_res = n == 0 ? build_real (type, dconst1) : arg0;

  /* Calculate the integer part of the exponent.  */
  if (n > 1)
    {
      integer_res = gimple_expand_builtin_powi (gsi, loc, arg0, n);
      if (!integer_res)
	return NULL_TREE;
    }

  if (dump_file)
    {
      char string[64];

      real_to_decimal (string, &exp_init, sizeof (string), 0, 1);
      fprintf (dump_file, "synthesizing pow (x, %s) as:\n", string);

      if (neg_exp)
	{
	  if (one_over)
	    {
	      fprintf (dump_file, "1.0 / (");
	      dump_integer_part (dump_file, "x", n);
	      if (n > 0)
		fprintf (dump_file, " * ");
	      dump_fractional_sqrt_sequence (dump_file, "x", &synth_info);
	      fprintf (dump_file, ")");
	    }
	  else
	    {
	      dump_fractional_sqrt_sequence (dump_file, "x", &synth_info);
	      fprintf (dump_file, " / (");
	      dump_integer_part (dump_file, "x", n);
	      fprintf (dump_file, ")");
	    }
	}
      else
	{
	  dump_fractional_sqrt_sequence (dump_file, "x", &synth_info);
	  if (n > 0)
	    fprintf (dump_file, " * ");
	  dump_integer_part (dump_file, "x", n);
	}

      fprintf (dump_file, "\ndeepest sqrt chain: %d\n", synth_info.deepest);
    }

  tree fract_res = NULL_TREE;
  cache[0] = arg0;

  /* Calculate the fractional part of the exponent.  */
  for (unsigned i = 0; i < synth_info.deepest; i++)
    {
      if (synth_info.factors[i])
	{
	  tree sqrt_chain = get_fn_chain (arg0, i + 1, gsi, sqrtfn, loc, cache);

	  if (!fract_res)
	    fract_res = sqrt_chain;
	  else
	    fract_res = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
						fract_res, sqrt_chain);
	}
    }

  tree res = NULL_TREE;

  if (neg_exp)
    {
      if (one_over)
	{
	  if (n > 0)
	    res = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
					  fract_res, integer_res);
	  else
	    res = fract_res;

	  res = build_and_insert_binop (gsi, loc, "powrootrecip", RDIV_EXPR,
					build_real (type, dconst1), res);
	}
      else
	{
	  res = build_and_insert_binop (gsi, loc, "powroot", RDIV_EXPR,
					fract_res, integer_res);
	}
    }
  else
    res = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
				  fract_res, integer_res);
  return res;
}

/* ARG0 and ARG1 are the two arguments to a pow builtin call in GSI
   with location info LOC.  If possible, create an equivalent and
   less expensive sequence of statements prior to GSI, and return an
   expression holding the result.  */

static tree
gimple_expand_builtin_pow (gimple_stmt_iterator *gsi, location_t loc,
			   tree arg0, tree arg1)
{
  REAL_VALUE_TYPE c, cint, dconst1_3, dconst1_4, dconst1_6;
  REAL_VALUE_TYPE c2, dconst3;
  HOST_WIDE_INT n;
  tree type, sqrtfn, cbrtfn, sqrt_arg0, result, cbrt_x, powi_cbrt_x;
  machine_mode mode;
  bool speed_p = optimize_bb_for_speed_p (gsi_bb (*gsi));
  bool hw_sqrt_exists, c_is_int, c2_is_int;

  dconst1_4 = dconst1;
  SET_REAL_EXP (&dconst1_4, REAL_EXP (&dconst1_4) - 2);

  /* If the exponent isn't a constant, there's nothing of interest
     to be done.  */
  if (TREE_CODE (arg1) != REAL_CST)
    return NULL_TREE;

  /* Don't perform the operation if flag_signaling_nans is on
     and the operand is a signaling NaN.  */
  if (HONOR_SNANS (TYPE_MODE (TREE_TYPE (arg1)))
      && ((TREE_CODE (arg0) == REAL_CST
	   && REAL_VALUE_ISSIGNALING_NAN (TREE_REAL_CST (arg0)))
	  || REAL_VALUE_ISSIGNALING_NAN (TREE_REAL_CST (arg1))))
    return NULL_TREE;

  /* If the exponent is equivalent to an integer, expand to an optimal
     multiplication sequence when profitable.  */
  c = TREE_REAL_CST (arg1);
  n = real_to_integer (&c);
  real_from_integer (&cint, VOIDmode, n, SIGNED);
  c_is_int = real_identical (&c, &cint);

  if (c_is_int
      && ((n >= -1 && n <= 2)
	  || (flag_unsafe_math_optimizations
	      && speed_p
	      && powi_cost (n) <= POWI_MAX_MULTS)))
    return gimple_expand_builtin_powi (gsi, loc, arg0, n);

  /* Attempt various optimizations using sqrt and cbrt.  */
  type = TREE_TYPE (arg0);
  mode = TYPE_MODE (type);
  sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT);

  /* Optimize pow(x,0.5) = sqrt(x).  This replacement is always safe
     unless signed zeros must be maintained.  pow(-0,0.5) = +0, while
     sqrt(-0) = -0.  */
  if (sqrtfn
      && real_equal (&c, &dconsthalf)
      && !HONOR_SIGNED_ZEROS (mode))
    return build_and_insert_call (gsi, loc, sqrtfn, arg0);

  hw_sqrt_exists = optab_handler (sqrt_optab, mode) != CODE_FOR_nothing;

  /* Optimize pow(x,1./3.) = cbrt(x).  This requires unsafe math
     optimizations since 1./3. is not exactly representable.  If x
     is negative and finite, the correct value of pow(x,1./3.) is
     a NaN with the "invalid" exception raised, because the value
     of 1./3. actually has an even denominator.  The correct value
     of cbrt(x) is a negative real value.  */
  cbrtfn = mathfn_built_in (type, BUILT_IN_CBRT);
  dconst1_3 = real_value_truncate (mode, dconst_third ());

  if (flag_unsafe_math_optimizations
      && cbrtfn
      && (!HONOR_NANS (mode) || tree_expr_nonnegative_p (arg0))
      && real_equal (&c, &dconst1_3))
    return build_and_insert_call (gsi, loc, cbrtfn, arg0);

  /* Optimize pow(x,1./6.) = cbrt(sqrt(x)).  Don't do this optimization
     if we don't have a hardware sqrt insn.  */
  dconst1_6 = dconst1_3;
  SET_REAL_EXP (&dconst1_6, REAL_EXP (&dconst1_6) - 1);

  if (flag_unsafe_math_optimizations
      && sqrtfn
      && cbrtfn
      && (!HONOR_NANS (mode) || tree_expr_nonnegative_p (arg0))
      && speed_p
      && hw_sqrt_exists
      && real_equal (&c, &dconst1_6))
    {
      /* sqrt(x)  */
      sqrt_arg0 = build_and_insert_call (gsi, loc, sqrtfn, arg0);

      /* cbrt(sqrt(x))  */
      return build_and_insert_call (gsi, loc, cbrtfn, sqrt_arg0);
    }

  /* Attempt to expand the POW as a product of square root chains.
     Expand the 0.25 case even when optimising for size.  */
  if (flag_unsafe_math_optimizations
      && sqrtfn
      && hw_sqrt_exists
      && (speed_p || real_equal (&c, &dconst1_4))
      && !HONOR_SIGNED_ZEROS (mode))
    {
      unsigned int max_depth = speed_p
				? PARAM_VALUE (PARAM_MAX_POW_SQRT_DEPTH)
				: 2;

      tree expand_with_sqrts
	= expand_pow_as_sqrts (gsi, loc, arg0, arg1, max_depth);

      if (expand_with_sqrts)
	return expand_with_sqrts;
    }

  real_arithmetic (&c2, MULT_EXPR, &c, &dconst2);
  n = real_to_integer (&c2);
  real_from_integer (&cint, VOIDmode, n, SIGNED);
  c2_is_int = real_identical (&c2, &cint);

  /* Optimize pow(x,c), where 3c = n for some nonzero integer n, into

     powi(x, n/3) * powi(cbrt(x), n%3),                    n > 0;
     1.0 / (powi(x, abs(n)/3) * powi(cbrt(x), abs(n)%3)),  n < 0.

     Do not calculate the first factor when n/3 = 0.  As cbrt(x) is
     different from pow(x, 1./3.) due to rounding and behavior with
     negative x, we need to constrain this transformation to unsafe
     math and positive x or finite math.  */
  real_from_integer (&dconst3, VOIDmode, 3, SIGNED);
  real_arithmetic (&c2, MULT_EXPR, &c, &dconst3);
  real_round (&c2, mode, &c2);
  n = real_to_integer (&c2);
  real_from_integer (&cint, VOIDmode, n, SIGNED);
  real_arithmetic (&c2, RDIV_EXPR, &cint, &dconst3);
  real_convert (&c2, mode, &c2);

  if (flag_unsafe_math_optimizations
      && cbrtfn
      && (!HONOR_NANS (mode) || tree_expr_nonnegative_p (arg0))
      && real_identical (&c2, &c)
      && !c2_is_int
      && optimize_function_for_speed_p (cfun)
      && powi_cost (n / 3) <= POWI_MAX_MULTS)
    {
      tree powi_x_ndiv3 = NULL_TREE;

      /* Attempt to fold powi(arg0, abs(n/3)) into multiplies.  If not
	 possible or profitable, give up.  Skip the degenerate case when
	 abs(n) < 3, where the result is always 1.  */
      if (absu_hwi (n) >= 3)
	{
	  powi_x_ndiv3 = gimple_expand_builtin_powi (gsi, loc, arg0,
						     abs_hwi (n / 3));
	  if (!powi_x_ndiv3)
	    return NULL_TREE;
	}

      /* Calculate powi(cbrt(x), n%3).  Don't use gimple_expand_builtin_powi
	 as that creates an unnecessary variable.  Instead, just produce
	 either cbrt(x) or cbrt(x) * cbrt(x).  */
      cbrt_x = build_and_insert_call (gsi, loc, cbrtfn, arg0);

      if (absu_hwi (n) % 3 == 1)
	powi_cbrt_x = cbrt_x;
      else
	powi_cbrt_x = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
					      cbrt_x, cbrt_x);

      /* Multiply the two subexpressions, unless powi(x,abs(n)/3) = 1.  */
      if (absu_hwi (n) < 3)
	result = powi_cbrt_x;
      else
	result = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
					 powi_x_ndiv3, powi_cbrt_x);

      /* If n is negative, reciprocate the result.  */
      if (n < 0)
	result = build_and_insert_binop (gsi, loc, "powroot", RDIV_EXPR,
					 build_real (type, dconst1), result);

      return result;
    }

  /* No optimizations succeeded.  */
  return NULL_TREE;
}

/* ARG is the argument to a cabs builtin call in GSI with location info
   LOC.  Create a sequence of statements prior to GSI that calculates
   sqrt(R*R + I*I), where R and I are the real and imaginary components
   of ARG, respectively.  Return an expression holding the result.  */

static tree
gimple_expand_builtin_cabs (gimple_stmt_iterator *gsi, location_t loc, tree arg)
{
  tree real_part, imag_part, addend1, addend2, sum, result;
  tree type = TREE_TYPE (TREE_TYPE (arg));
  tree sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT);
  machine_mode mode = TYPE_MODE (type);

  if (!flag_unsafe_math_optimizations
      || !optimize_bb_for_speed_p (gimple_bb (gsi_stmt (*gsi)))
      || !sqrtfn
      || optab_handler (sqrt_optab, mode) == CODE_FOR_nothing)
    return NULL_TREE;

  real_part = build_and_insert_ref (gsi, loc, type, "cabs",
				    REALPART_EXPR, arg);
  addend1 = build_and_insert_binop (gsi, loc, "cabs", MULT_EXPR,
				    real_part, real_part);
  imag_part = build_and_insert_ref (gsi, loc, type, "cabs",
				    IMAGPART_EXPR, arg);
  addend2 = build_and_insert_binop (gsi, loc, "cabs", MULT_EXPR,
				    imag_part, imag_part);
  sum = build_and_insert_binop (gsi, loc, "cabs", PLUS_EXPR, addend1, addend2);
  result = build_and_insert_call (gsi, loc, sqrtfn, sum);

  return result;
}

/* Go through all calls to sin, cos and cexpi and call execute_cse_sincos_1
   on the SSA_NAME argument of each of them.  Also expand powi(x,n) into
   an optimal number of multiplies, when n is a constant.  */

namespace {

const pass_data pass_data_cse_sincos =
{
  GIMPLE_PASS, /* type */
  "sincos", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  PROP_ssa, /* properties_required */
  PROP_gimple_opt_math, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};

class pass_cse_sincos : public gimple_opt_pass
{
public:
  pass_cse_sincos (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_cse_sincos, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *)
    {
      /* We no longer require either sincos or cexp, since powi expansion
	 piggybacks on this pass.  */
      return optimize;
    }

  virtual unsigned int execute (function *);

}; // class pass_cse_sincos

unsigned int
pass_cse_sincos::execute (function *fun)
{
  basic_block bb;
  bool cfg_changed = false;

  calculate_dominance_info (CDI_DOMINATORS);
  memset (&sincos_stats, 0, sizeof (sincos_stats));

  FOR_EACH_BB_FN (bb, fun)
    {
      gimple_stmt_iterator gsi;
      bool cleanup_eh = false;

      for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple *stmt = gsi_stmt (gsi);

	  /* Only the last stmt in a bb could throw, no need to call
	     gimple_purge_dead_eh_edges if we change something in the middle
	     of a basic block.  */
	  cleanup_eh = false;

	  if (is_gimple_call (stmt)
	      && gimple_call_lhs (stmt))
	    {
	      tree arg, arg0, arg1, result;
	      HOST_WIDE_INT n;
	      location_t loc;

	      switch (gimple_call_combined_fn (stmt))
		{
		CASE_CFN_COS:
		CASE_CFN_SIN:
		CASE_CFN_CEXPI:
		  /* Make sure we have either sincos or cexp.  */
		  if (!targetm.libc_has_function (function_c99_math_complex)
		      && !targetm.libc_has_function (function_sincos))
		    break;

		  arg = gimple_call_arg (stmt, 0);
		  if (TREE_CODE (arg) == SSA_NAME)
		    cfg_changed |= execute_cse_sincos_1 (arg);
		  break;

		CASE_CFN_POW:
		  arg0 = gimple_call_arg (stmt, 0);
		  arg1 = gimple_call_arg (stmt, 1);

		  loc = gimple_location (stmt);
		  result = gimple_expand_builtin_pow (&gsi, loc, arg0, arg1);

		  if (result)
		    {
		      tree lhs = gimple_get_lhs (stmt);
		      gassign *new_stmt = gimple_build_assign (lhs, result);
		      gimple_set_location (new_stmt, loc);
		      unlink_stmt_vdef (stmt);
		      gsi_replace (&gsi, new_stmt, true);
		      cleanup_eh = true;
		      if (gimple_vdef (stmt))
			release_ssa_name (gimple_vdef (stmt));
		    }
		  break;

		CASE_CFN_POWI:
		  arg0 = gimple_call_arg (stmt, 0);
		  arg1 = gimple_call_arg (stmt, 1);
		  loc = gimple_location (stmt);

		  if (real_minus_onep (arg0))
		    {
		      tree t0, t1, cond, one, minus_one;
		      gassign *stmt;

		      t0 = TREE_TYPE (arg0);
		      t1 = TREE_TYPE (arg1);
		      one = build_real (t0, dconst1);
		      minus_one = build_real (t0, dconstm1);

		      cond = make_temp_ssa_name (t1, NULL, "powi_cond");
		      stmt = gimple_build_assign (cond, BIT_AND_EXPR,
						  arg1, build_int_cst (t1, 1));
		      gimple_set_location (stmt, loc);
		      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);

		      result = make_temp_ssa_name (t0, NULL, "powi");
		      stmt = gimple_build_assign (result, COND_EXPR, cond,
						  minus_one, one);
		      gimple_set_location (stmt, loc);
		      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
		    }
		  else
		    {
		      if (!tree_fits_shwi_p (arg1))
			break;

		      n = tree_to_shwi (arg1);
		      result = gimple_expand_builtin_powi (&gsi, loc, arg0, n);
		    }

		  if (result)
		    {
		      tree lhs = gimple_get_lhs (stmt);
		      gassign *new_stmt = gimple_build_assign (lhs, result);
		      gimple_set_location (new_stmt, loc);
		      unlink_stmt_vdef (stmt);
		      gsi_replace (&gsi, new_stmt, true);
		      cleanup_eh = true;
		      if (gimple_vdef (stmt))
			release_ssa_name (gimple_vdef (stmt));
		    }
		  break;

		CASE_CFN_CABS:
		  arg0 = gimple_call_arg (stmt, 0);
		  loc = gimple_location (stmt);
		  result = gimple_expand_builtin_cabs (&gsi, loc, arg0);

		  if (result)
		    {
		      tree lhs = gimple_get_lhs (stmt);
		      gassign *new_stmt = gimple_build_assign (lhs, result);
		      gimple_set_location (new_stmt, loc);
		      unlink_stmt_vdef (stmt);
		      gsi_replace (&gsi, new_stmt, true);
		      cleanup_eh = true;
		      if (gimple_vdef (stmt))
			release_ssa_name (gimple_vdef (stmt));
		    }
		  break;

		default:;
		}
	    }
	}
      if (cleanup_eh)
	cfg_changed |= gimple_purge_dead_eh_edges (bb);
    }

  statistics_counter_event (fun, "sincos statements inserted",
			    sincos_stats.inserted);

  return cfg_changed ? TODO_cleanup_cfg : 0;
}

} // anon namespace

gimple_opt_pass *
make_pass_cse_sincos (gcc::context *ctxt)
{
  return new pass_cse_sincos (ctxt);
}
1928 /* A symbolic number is used to detect byte permutation and selection
1929 patterns. Therefore the field N contains an artificial number
1930 consisting of octet sized markers:
1932 0 - target byte has the value 0
1933 FF - target byte has an unknown value (e.g. due to sign extension)
1934 1..size - marker value is the index of the source byte plus one.
1936 To detect permutations on memory sources (arrays and structures), a symbolic
1937 number is also associated with a base address (the array or structure the
1938 load is made from), an offset from that base address, and a range giving the
1939 difference between the highest and lowest accessed memory locations. The
1940 range is thus different from size, which reflects the size of the type of
1941 the current expression. Note that for a non-memory source, range holds the
1942 same value as size.
1944 For instance, for an array char a[], (short) a[0] | (short) a[3] would have
1945 a size of 2 but a range of 4 while (short) a[0] | ((short) a[0] << 1) would
1946 still have a size of 2 but this time a range of 1. */
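/* As an illustrative sketch (not part of the pass itself), the markers of
   a 4-byte value evolve as follows, assuming BITS_PER_MARKER == 8:

     n == 0x04030201   initial symbolic number of the source
     n == 0x01020304   after a recognized full byte swap
     n == 0x00000201   after masking with the constant 0xffff
     n == 0xffff0403   after an arithmetic right shift by 16 of a signed
                       source (the freed high bytes become unknown)

   Comparing the final N against CMPNOP and CMPXCHG below decides between
   a plain (possibly endian-converting) read and a byte swap. */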
1948 struct symbolic_number {
1949 uint64_t n;
1950 tree type;
1951 tree base_addr;
1952 tree offset;
1953 HOST_WIDE_INT bytepos;
1954 tree alias_set;
1955 tree vuse;
1956 unsigned HOST_WIDE_INT range;
1959 #define BITS_PER_MARKER 8
1960 #define MARKER_MASK ((1 << BITS_PER_MARKER) - 1)
1961 #define MARKER_BYTE_UNKNOWN MARKER_MASK
1962 #define HEAD_MARKER(n, size) \
1963 ((n) & ((uint64_t) MARKER_MASK << (((size) - 1) * BITS_PER_MARKER)))
1965 /* The number which the find_bswap_or_nop_1 result should match in
1966 order to have a nop. The number is masked according to the size of
1967 the symbolic number before using it. */
1968 #define CMPNOP (sizeof (int64_t) < 8 ? 0 : \
1969 (uint64_t)0x08070605 << 32 | 0x04030201)
1971 /* The number which the find_bswap_or_nop_1 result should match in
1972 order to have a byte swap. The number is masked according to the
1973 size of the symbolic number before using it. */
1974 #define CMPXCHG (sizeof (int64_t) < 8 ? 0 : \
1975 (uint64_t)0x01020304 << 32 | 0x05060708)
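/* As a worked example (illustrative only), a 2-byte symbolic number is
   compared against the adjusted constants

     CMPNOP  & 0xffff == 0x0201   identity (nop) pattern
     CMPXCHG >> 48    == 0x0102   byte-swap pattern

   and find_bswap_or_nop below performs exactly this masking and shifting
   before the comparison. */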
1977 /* Perform a SHIFT or ROTATE operation by COUNT bits on symbolic
1978 number N. Return false if the requested operation is not permitted
1979 on a symbolic number. */
1981 static inline bool
1982 do_shift_rotate (enum tree_code code,
1983 struct symbolic_number *n,
1984 int count)
1986 int i, size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
1987 unsigned head_marker;
1989 if (count % BITS_PER_UNIT != 0)
1990 return false;
1991 count = (count / BITS_PER_UNIT) * BITS_PER_MARKER;
1993 /* Zero out the extra bits of N in order to avoid them being shifted
1994 into the significant bits. */
1995 if (size < 64 / BITS_PER_MARKER)
1996 n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
1998 switch (code)
2000 case LSHIFT_EXPR:
2001 n->n <<= count;
2002 break;
2003 case RSHIFT_EXPR:
2004 head_marker = HEAD_MARKER (n->n, size);
2005 n->n >>= count;
2006 /* Arithmetic shift of signed type: result is dependent on the value. */
2007 if (!TYPE_UNSIGNED (n->type) && head_marker)
2008 for (i = 0; i < count / BITS_PER_MARKER; i++)
2009 n->n |= (uint64_t) MARKER_BYTE_UNKNOWN
2010 << ((size - 1 - i) * BITS_PER_MARKER);
2011 break;
2012 case LROTATE_EXPR:
2013 n->n = (n->n << count) | (n->n >> ((size * BITS_PER_MARKER) - count));
2014 break;
2015 case RROTATE_EXPR:
2016 n->n = (n->n >> count) | (n->n << ((size * BITS_PER_MARKER) - count));
2017 break;
2018 default:
2019 return false;
2021 /* Zero unused bits for size. */
2022 if (size < 64 / BITS_PER_MARKER)
2023 n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
2024 return true;
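/* A worked example for do_shift_rotate (illustrative only): starting from
   a signed 4-byte symbolic number with N->n == 0x04030201,

     LSHIFT_EXPR  by  8 bits yields n == 0x03020100
     RSHIFT_EXPR  by 16 bits yields n == 0xffff0403 (the freed high bytes
                  are unknown because of the arithmetic shift)
     LROTATE_EXPR by  8 bits yields n == 0x03020104

   A COUNT that is not a multiple of BITS_PER_UNIT is rejected, since the
   result would no longer be a byte permutation. */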
2027 /* Perform sanity checking for the symbolic number N and the gimple
2028 statement STMT. */
2030 static inline bool
2031 verify_symbolic_number_p (struct symbolic_number *n, gimple *stmt)
2033 tree lhs_type;
2035 lhs_type = gimple_expr_type (stmt);
2037 if (TREE_CODE (lhs_type) != INTEGER_TYPE)
2038 return false;
2040 if (TYPE_PRECISION (lhs_type) != TYPE_PRECISION (n->type))
2041 return false;
2043 return true;
2046 /* Initialize the symbolic number N for the bswap pass from the base element
2047 SRC manipulated by the bitwise OR expression. */
2049 static bool
2050 init_symbolic_number (struct symbolic_number *n, tree src)
2052 int size;
2054 n->base_addr = n->offset = n->alias_set = n->vuse = NULL_TREE;
2056 /* Set up the symbolic number N by setting each byte to a value between 1 and
2057 the byte size of SRC. The highest order byte is set to that size and the
2058 lowest order byte to 1. */
2059 n->type = TREE_TYPE (src);
2060 size = TYPE_PRECISION (n->type);
2061 if (size % BITS_PER_UNIT != 0)
2062 return false;
2063 size /= BITS_PER_UNIT;
2064 if (size > 64 / BITS_PER_MARKER)
2065 return false;
2066 n->range = size;
2067 n->n = CMPNOP;
2069 if (size < 64 / BITS_PER_MARKER)
2070 n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
2072 return true;
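/* For instance (an illustrative sketch), initializing from a 32-bit
   integer SRC yields

     n->range == 4 and n->n == CMPNOP & 0xffffffff == 0x04030201,

   i.e. byte I of the source is tagged with marker I + 1. */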
2075 /* Check whether STMT might be a byte swap or a nop from a memory source and
2076 return the answer. If so, REF is that memory source, and the base of the memory
2077 area accessed and the offset of the access from that base are recorded in N. */
2079 bool
2080 find_bswap_or_nop_load (gimple *stmt, tree ref, struct symbolic_number *n)
2082 /* The leaf node is an array or component ref. Memorize its base and
2083 offset from the base to compare to other such leaf nodes. */
2084 HOST_WIDE_INT bitsize, bitpos;
2085 machine_mode mode;
2086 int unsignedp, reversep, volatilep;
2087 tree offset, base_addr;
2089 /* Not prepared to handle PDP endian. */
2090 if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
2091 return false;
2093 if (!gimple_assign_load_p (stmt) || gimple_has_volatile_ops (stmt))
2094 return false;
2096 base_addr = get_inner_reference (ref, &bitsize, &bitpos, &offset, &mode,
2097 &unsignedp, &reversep, &volatilep, false);
2099 if (TREE_CODE (base_addr) == MEM_REF)
2101 offset_int bit_offset = 0;
2102 tree off = TREE_OPERAND (base_addr, 1);
2104 if (!integer_zerop (off))
2106 offset_int boff, coff = mem_ref_offset (base_addr);
2107 boff = wi::lshift (coff, LOG2_BITS_PER_UNIT);
2108 bit_offset += boff;
2111 base_addr = TREE_OPERAND (base_addr, 0);
2113 /* Avoid returning a negative bitpos as this may wreak havoc later. */
2114 if (wi::neg_p (bit_offset))
2116 offset_int mask = wi::mask <offset_int> (LOG2_BITS_PER_UNIT, false);
2117 offset_int tem = bit_offset.and_not (mask);
2118 /* TEM is the bitpos rounded to BITS_PER_UNIT towards -Inf.
2119 Subtract it from BIT_OFFSET and add it (scaled) to OFFSET. */
2120 bit_offset -= tem;
2121 tem = wi::arshift (tem, LOG2_BITS_PER_UNIT);
2122 if (offset)
2123 offset = size_binop (PLUS_EXPR, offset,
2124 wide_int_to_tree (sizetype, tem));
2125 else
2126 offset = wide_int_to_tree (sizetype, tem);
2129 bitpos += bit_offset.to_shwi ();
2132 if (bitpos % BITS_PER_UNIT)
2133 return false;
2134 if (bitsize % BITS_PER_UNIT)
2135 return false;
2136 if (reversep)
2137 return false;
2139 if (!init_symbolic_number (n, ref))
2140 return false;
2141 n->base_addr = base_addr;
2142 n->offset = offset;
2143 n->bytepos = bitpos / BITS_PER_UNIT;
2144 n->alias_set = reference_alias_ptr_type (ref);
2145 n->vuse = gimple_vuse (stmt);
2146 return true;
2149 /* Compute the symbolic number N representing the result of a bitwise OR
2150 of the two symbolic numbers N1 and N2, whose source statements are
2151 SOURCE_STMT1 and SOURCE_STMT2 respectively. */
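/* As an illustrative sketch, with P a char pointer on a little-endian
   target, merging the two halves of

     ((int) *(uint16_t *) (p + 2) << 16) | (int) *(uint16_t *) p

   gives a symbolic number of range 4 covering bytes p[0..3]: the byte
   marks of the load at p + 2 are incremented by 2 so that both halves
   are expressed relative to the common base P, yielding 0x04030201. */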
2153 static gimple *
2154 perform_symbolic_merge (gimple *source_stmt1, struct symbolic_number *n1,
2155 gimple *source_stmt2, struct symbolic_number *n2,
2156 struct symbolic_number *n)
2158 int i, size;
2159 uint64_t mask;
2160 gimple *source_stmt;
2161 struct symbolic_number *n_start;
2163 /* Sources are different; cancel bswap if they are not memory locations
2164 with the same base (array, structure, ...). */
2165 if (gimple_assign_rhs1 (source_stmt1) != gimple_assign_rhs1 (source_stmt2))
2167 uint64_t inc;
2168 HOST_WIDE_INT start_sub, end_sub, end1, end2, end;
2169 struct symbolic_number *toinc_n_ptr, *n_end;
2171 if (!n1->base_addr || !n2->base_addr
2172 || !operand_equal_p (n1->base_addr, n2->base_addr, 0))
2173 return NULL;
2175 if (!n1->offset != !n2->offset
2176 || (n1->offset && !operand_equal_p (n1->offset, n2->offset, 0)))
2177 return NULL;
2179 if (n1->bytepos < n2->bytepos)
2181 n_start = n1;
2182 start_sub = n2->bytepos - n1->bytepos;
2183 source_stmt = source_stmt1;
2185 else
2187 n_start = n2;
2188 start_sub = n1->bytepos - n2->bytepos;
2189 source_stmt = source_stmt2;
2192 /* Find the highest address at which a load is performed and
2193 compute related info. */
2194 end1 = n1->bytepos + (n1->range - 1);
2195 end2 = n2->bytepos + (n2->range - 1);
2196 if (end1 < end2)
2198 end = end2;
2199 end_sub = end2 - end1;
2201 else
2203 end = end1;
2204 end_sub = end1 - end2;
2206 n_end = (end2 > end1) ? n2 : n1;
2208 /* Find symbolic number whose lsb is the most significant. */
2209 if (BYTES_BIG_ENDIAN)
2210 toinc_n_ptr = (n_end == n1) ? n2 : n1;
2211 else
2212 toinc_n_ptr = (n_start == n1) ? n2 : n1;
2214 n->range = end - n_start->bytepos + 1;
2216 /* Check that the range of memory covered can be represented by
2217 a symbolic number. */
2218 if (n->range > 64 / BITS_PER_MARKER)
2219 return NULL;
2221 /* Reinterpret the byte marks in the symbolic number holding the value
2222 of bigger weight according to the target endianness. */
2223 inc = BYTES_BIG_ENDIAN ? end_sub : start_sub;
2224 size = TYPE_PRECISION (n1->type) / BITS_PER_UNIT;
2225 for (i = 0; i < size; i++, inc <<= BITS_PER_MARKER)
2227 unsigned marker
2228 = (toinc_n_ptr->n >> (i * BITS_PER_MARKER)) & MARKER_MASK;
2229 if (marker && marker != MARKER_BYTE_UNKNOWN)
2230 toinc_n_ptr->n += inc;
2233 else
2235 n->range = n1->range;
2236 n_start = n1;
2237 source_stmt = source_stmt1;
2240 if (!n1->alias_set
2241 || alias_ptr_types_compatible_p (n1->alias_set, n2->alias_set))
2242 n->alias_set = n1->alias_set;
2243 else
2244 n->alias_set = ptr_type_node;
2245 n->vuse = n_start->vuse;
2246 n->base_addr = n_start->base_addr;
2247 n->offset = n_start->offset;
2248 n->bytepos = n_start->bytepos;
2249 n->type = n_start->type;
2250 size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
2252 for (i = 0, mask = MARKER_MASK; i < size; i++, mask <<= BITS_PER_MARKER)
2254 uint64_t masked1, masked2;
2256 masked1 = n1->n & mask;
2257 masked2 = n2->n & mask;
2258 if (masked1 && masked2 && masked1 != masked2)
2259 return NULL;
2261 n->n = n1->n | n2->n;
2263 return source_stmt;
2266 /* find_bswap_or_nop_1 invokes itself recursively with N and tries to perform
2267 the operation given by the rhs of STMT on the result. If the operation
2268 could successfully be executed, the function returns a gimple stmt whose
2269 rhs's first tree is the expression of the source operand; otherwise it
2270 returns NULL. */
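/* As an illustrative sketch, for an unsigned 16-bit X the sequence

     _1 = x_2 >> 8;
     _3 = x_2 << 8;
     _4 = _1 | _3;

   is walked starting from the statement defining _4: both shift operands
   are explored recursively down to the common leaf x_2 and the resulting
   symbolic numbers are merged, yielding the byte-swap pattern 0x0102. */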
2272 static gimple *
2273 find_bswap_or_nop_1 (gimple *stmt, struct symbolic_number *n, int limit)
2275 enum tree_code code;
2276 tree rhs1, rhs2 = NULL;
2277 gimple *rhs1_stmt, *rhs2_stmt, *source_stmt1;
2278 enum gimple_rhs_class rhs_class;
2280 if (!limit || !is_gimple_assign (stmt))
2281 return NULL;
2283 rhs1 = gimple_assign_rhs1 (stmt);
2285 if (find_bswap_or_nop_load (stmt, rhs1, n))
2286 return stmt;
2288 if (TREE_CODE (rhs1) != SSA_NAME)
2289 return NULL;
2291 code = gimple_assign_rhs_code (stmt);
2292 rhs_class = gimple_assign_rhs_class (stmt);
2293 rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
2295 if (rhs_class == GIMPLE_BINARY_RHS)
2296 rhs2 = gimple_assign_rhs2 (stmt);
2298 /* Handle unary rhs and binary rhs with integer constants as second
2299 operand. */
2301 if (rhs_class == GIMPLE_UNARY_RHS
2302 || (rhs_class == GIMPLE_BINARY_RHS
2303 && TREE_CODE (rhs2) == INTEGER_CST))
2305 if (code != BIT_AND_EXPR
2306 && code != LSHIFT_EXPR
2307 && code != RSHIFT_EXPR
2308 && code != LROTATE_EXPR
2309 && code != RROTATE_EXPR
2310 && !CONVERT_EXPR_CODE_P (code))
2311 return NULL;
2313 source_stmt1 = find_bswap_or_nop_1 (rhs1_stmt, n, limit - 1);
2315 /* If find_bswap_or_nop_1 returned NULL, STMT is a leaf node and
2316 we have to initialize the symbolic number. */
2317 if (!source_stmt1)
2319 if (gimple_assign_load_p (stmt)
2320 || !init_symbolic_number (n, rhs1))
2321 return NULL;
2322 source_stmt1 = stmt;
2325 switch (code)
2327 case BIT_AND_EXPR:
2329 int i, size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
2330 uint64_t val = int_cst_value (rhs2), mask = 0;
2331 uint64_t tmp = (1 << BITS_PER_UNIT) - 1;
2333 /* Only constants masking full bytes are allowed. */
2334 for (i = 0; i < size; i++, tmp <<= BITS_PER_UNIT)
2335 if ((val & tmp) != 0 && (val & tmp) != tmp)
2336 return NULL;
2337 else if (val & tmp)
2338 mask |= (uint64_t) MARKER_MASK << (i * BITS_PER_MARKER);
2340 n->n &= mask;
2342 break;
2343 case LSHIFT_EXPR:
2344 case RSHIFT_EXPR:
2345 case LROTATE_EXPR:
2346 case RROTATE_EXPR:
2347 if (!do_shift_rotate (code, n, (int) TREE_INT_CST_LOW (rhs2)))
2348 return NULL;
2349 break;
2350 CASE_CONVERT:
2352 int i, type_size, old_type_size;
2353 tree type;
2355 type = gimple_expr_type (stmt);
2356 type_size = TYPE_PRECISION (type);
2357 if (type_size % BITS_PER_UNIT != 0)
2358 return NULL;
2359 type_size /= BITS_PER_UNIT;
2360 if (type_size > 64 / BITS_PER_MARKER)
2361 return NULL;
2363 /* Sign extension: result is dependent on the value. */
2364 old_type_size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
2365 if (!TYPE_UNSIGNED (n->type) && type_size > old_type_size
2366 && HEAD_MARKER (n->n, old_type_size))
2367 for (i = 0; i < type_size - old_type_size; i++)
2368 n->n |= (uint64_t) MARKER_BYTE_UNKNOWN
2369 << ((type_size - 1 - i) * BITS_PER_MARKER);
2371 if (type_size < 64 / BITS_PER_MARKER)
2373 /* If STMT casts to a smaller type, mask out the bits not
2374 belonging to the target type. */
2375 n->n &= ((uint64_t) 1 << (type_size * BITS_PER_MARKER)) - 1;
2377 n->type = type;
2378 if (!n->base_addr)
2379 n->range = type_size;
2381 break;
2382 default:
2383 return NULL;
2385 return verify_symbolic_number_p (n, stmt) ? source_stmt1 : NULL;
2388 /* Handle binary rhs. */
2390 if (rhs_class == GIMPLE_BINARY_RHS)
2392 struct symbolic_number n1, n2;
2393 gimple *source_stmt, *source_stmt2;
2395 if (code != BIT_IOR_EXPR)
2396 return NULL;
2398 if (TREE_CODE (rhs2) != SSA_NAME)
2399 return NULL;
2401 rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
2403 switch (code)
2405 case BIT_IOR_EXPR:
2406 source_stmt1 = find_bswap_or_nop_1 (rhs1_stmt, &n1, limit - 1);
2408 if (!source_stmt1)
2409 return NULL;
2411 source_stmt2 = find_bswap_or_nop_1 (rhs2_stmt, &n2, limit - 1);
2413 if (!source_stmt2)
2414 return NULL;
2416 if (TYPE_PRECISION (n1.type) != TYPE_PRECISION (n2.type))
2417 return NULL;
2419 if (!n1.vuse != !n2.vuse
2420 || (n1.vuse && !operand_equal_p (n1.vuse, n2.vuse, 0)))
2421 return NULL;
2423 source_stmt
2424 = perform_symbolic_merge (source_stmt1, &n1, source_stmt2, &n2, n);
2426 if (!source_stmt)
2427 return NULL;
2429 if (!verify_symbolic_number_p (n, stmt))
2430 return NULL;
2432 break;
2433 default:
2434 return NULL;
2436 return source_stmt;
2438 return NULL;
2441 /* Check whether STMT completes a bswap implementation or a read in a
2442 given endianness consisting of ORs, SHIFTs and ANDs, and set *BSWAP
2443 accordingly. Also set N to represent the kind of operations
2444 performed: the size of the resulting expression and whether it works
2445 on a memory source, and if so its alias set and vuse. Finally, the
2446 function returns a stmt whose rhs's first tree is the source
2447 expression. */
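/* As an illustrative sketch, the classic manual 32-bit swap

     res = ((in >> 24) & 0xff)
	   | ((in >> 8) & 0xff00)
	   | ((in << 8) & 0xff0000)
	   | ((in << 24) & 0xff000000);

   is detected here with *BSWAP set to true, while OR-ing four adjacent
   byte loads together in target byte order is detected with *BSWAP set
   to false (a plain read). */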
2449 static gimple *
2450 find_bswap_or_nop (gimple *stmt, struct symbolic_number *n, bool *bswap)
2452 unsigned rsize;
2453 uint64_t tmpn, mask;
2454 /* The number which the find_bswap_or_nop_1 result should match in order
2455 to have a full byte swap. The number is shifted to the right
2456 according to the size of the symbolic number before using it. */
2457 uint64_t cmpxchg = CMPXCHG;
2458 uint64_t cmpnop = CMPNOP;
2460 gimple *source_stmt;
2461 int limit;
2463 /* The last parameter determines the search depth limit. It usually
2464 correlates directly to the number n of bytes to be touched. We
2465 increase that number by log2(n) + 1 here in order to also
2466 cover signed -> unsigned conversions of the src operand, as can be seen
2467 in libgcc, and an initial shift/and operation of the src operand. */
2468 limit = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (gimple_expr_type (stmt)));
2469 limit += 1 + (int) ceil_log2 ((unsigned HOST_WIDE_INT) limit);
2470 source_stmt = find_bswap_or_nop_1 (stmt, n, limit);
2472 if (!source_stmt)
2473 return NULL;
2475 /* Find real size of result (highest non-zero byte). */
2476 if (n->base_addr)
2477 for (tmpn = n->n, rsize = 0; tmpn; tmpn >>= BITS_PER_MARKER, rsize++);
2478 else
2479 rsize = n->range;
2481 /* Zero out the bits corresponding to untouched bytes in original gimple
2482 expression. */
2483 if (n->range < (int) sizeof (int64_t))
2485 mask = ((uint64_t) 1 << (n->range * BITS_PER_MARKER)) - 1;
2486 cmpxchg >>= (64 / BITS_PER_MARKER - n->range) * BITS_PER_MARKER;
2487 cmpnop &= mask;
2490 /* Zero out the bits corresponding to unused bytes in the result of the
2491 gimple expression. */
2492 if (rsize < n->range)
2494 if (BYTES_BIG_ENDIAN)
2496 mask = ((uint64_t) 1 << (rsize * BITS_PER_MARKER)) - 1;
2497 cmpxchg &= mask;
2498 cmpnop >>= (n->range - rsize) * BITS_PER_MARKER;
2500 else
2502 mask = ((uint64_t) 1 << (rsize * BITS_PER_MARKER)) - 1;
2503 cmpxchg >>= (n->range - rsize) * BITS_PER_MARKER;
2504 cmpnop &= mask;
2506 n->range = rsize;
2509 /* A complete byte swap should make the symbolic number start with
2510 the largest digit in the highest order byte. An unchanged symbolic
2511 number indicates a read with the same endianness as the target architecture. */
2512 if (n->n == cmpnop)
2513 *bswap = false;
2514 else if (n->n == cmpxchg)
2515 *bswap = true;
2516 else
2517 return NULL;
2519 /* Useless bit manipulation performed by code. */
2520 if (!n->base_addr && n->n == cmpnop)
2521 return NULL;
2523 n->range *= BITS_PER_UNIT;
2524 return source_stmt;
2527 namespace {
2529 const pass_data pass_data_optimize_bswap =
2531 GIMPLE_PASS, /* type */
2532 "bswap", /* name */
2533 OPTGROUP_NONE, /* optinfo_flags */
2534 TV_NONE, /* tv_id */
2535 PROP_ssa, /* properties_required */
2536 0, /* properties_provided */
2537 0, /* properties_destroyed */
2538 0, /* todo_flags_start */
2539 0, /* todo_flags_finish */
2542 class pass_optimize_bswap : public gimple_opt_pass
2544 public:
2545 pass_optimize_bswap (gcc::context *ctxt)
2546 : gimple_opt_pass (pass_data_optimize_bswap, ctxt)
2549 /* opt_pass methods: */
2550 virtual bool gate (function *)
2552 return flag_expensive_optimizations && optimize;
2555 virtual unsigned int execute (function *);
2557 }; // class pass_optimize_bswap
2559 /* Perform the bswap optimization: replace the expression computed in the rhs
2560 of CUR_STMT by an equivalent bswap, load or load + bswap expression.
2561 Which of these alternatives replaces the rhs is given by N->base_addr (non
2562 null if a load is needed) and BSWAP. The type, VUSE and alias set of the
2563 load to perform are also given in N, while the builtin bswap to invoke is
2564 given in FNDECL. Finally, if a load is involved, SRC_STMT refers to one of
2565 the load statements involved to construct the rhs in CUR_STMT and N->range
2566 gives the size of the rhs expression for maintaining some statistics.
2568 Note that if the replacement involves a load, CUR_STMT is moved just after
2569 SRC_STMT to do the load with the same VUSE, which can lead to CUR_STMT
2570 changing basic block. */
2572 static bool
2573 bswap_replace (gimple *cur_stmt, gimple *src_stmt, tree fndecl,
2574 tree bswap_type, tree load_type, struct symbolic_number *n,
2575 bool bswap)
2577 gimple_stmt_iterator gsi;
2578 tree src, tmp, tgt;
2579 gimple *bswap_stmt;
2581 gsi = gsi_for_stmt (cur_stmt);
2582 src = gimple_assign_rhs1 (src_stmt);
2583 tgt = gimple_assign_lhs (cur_stmt);
2585 /* Need to load the value from memory first. */
2586 if (n->base_addr)
2588 gimple_stmt_iterator gsi_ins = gsi_for_stmt (src_stmt);
2589 tree addr_expr, addr_tmp, val_expr, val_tmp;
2590 tree load_offset_ptr, aligned_load_type;
2591 gimple *addr_stmt, *load_stmt;
2592 unsigned align;
2593 HOST_WIDE_INT load_offset = 0;
2595 align = get_object_alignment (src);
2596 /* If the new access is smaller than the original one, we need
2597 to perform big endian adjustment. */
2598 if (BYTES_BIG_ENDIAN)
2600 HOST_WIDE_INT bitsize, bitpos;
2601 machine_mode mode;
2602 int unsignedp, reversep, volatilep;
2603 tree offset;
2605 get_inner_reference (src, &bitsize, &bitpos, &offset, &mode,
2606 &unsignedp, &reversep, &volatilep, false);
2607 if (n->range < (unsigned HOST_WIDE_INT) bitsize)
2609 load_offset = (bitsize - n->range) / BITS_PER_UNIT;
2610 unsigned HOST_WIDE_INT l
2611 = (load_offset * BITS_PER_UNIT) & (align - 1);
2612 if (l)
2613 align = l & -l;
2617 if (bswap
2618 && align < GET_MODE_ALIGNMENT (TYPE_MODE (load_type))
2619 && SLOW_UNALIGNED_ACCESS (TYPE_MODE (load_type), align))
2620 return false;
2622 /* Move cur_stmt just before one of the loads of the original
2623 to ensure it has the same VUSE. See PR61517 for what could
2624 go wrong. */
2625 gsi_move_before (&gsi, &gsi_ins);
2626 gsi = gsi_for_stmt (cur_stmt);
2628 /* Compute address to load from and cast according to the size
2629 of the load. */
2630 addr_expr = build_fold_addr_expr (unshare_expr (src));
2631 if (is_gimple_mem_ref_addr (addr_expr))
2632 addr_tmp = addr_expr;
2633 else
2635 addr_tmp = make_temp_ssa_name (TREE_TYPE (addr_expr), NULL,
2636 "load_src");
2637 addr_stmt = gimple_build_assign (addr_tmp, addr_expr);
2638 gsi_insert_before (&gsi, addr_stmt, GSI_SAME_STMT);
2641 /* Perform the load. */
2642 aligned_load_type = load_type;
2643 if (align < TYPE_ALIGN (load_type))
2644 aligned_load_type = build_aligned_type (load_type, align);
2645 load_offset_ptr = build_int_cst (n->alias_set, load_offset);
2646 val_expr = fold_build2 (MEM_REF, aligned_load_type, addr_tmp,
2647 load_offset_ptr);
2649 if (!bswap)
2651 if (n->range == 16)
2652 nop_stats.found_16bit++;
2653 else if (n->range == 32)
2654 nop_stats.found_32bit++;
2655 else
2657 gcc_assert (n->range == 64);
2658 nop_stats.found_64bit++;
2661 /* Convert the result of the load if necessary. */
2662 if (!useless_type_conversion_p (TREE_TYPE (tgt), load_type))
2664 val_tmp = make_temp_ssa_name (aligned_load_type, NULL,
2665 "load_dst");
2666 load_stmt = gimple_build_assign (val_tmp, val_expr);
2667 gimple_set_vuse (load_stmt, n->vuse);
2668 gsi_insert_before (&gsi, load_stmt, GSI_SAME_STMT);
2669 gimple_assign_set_rhs_with_ops (&gsi, NOP_EXPR, val_tmp);
2671 else
2673 gimple_assign_set_rhs_with_ops (&gsi, MEM_REF, val_expr);
2674 gimple_set_vuse (cur_stmt, n->vuse);
2676 update_stmt (cur_stmt);
2678 if (dump_file)
2680 fprintf (dump_file,
2681 "%d bit load in target endianness found at: ",
2682 (int) n->range);
2683 print_gimple_stmt (dump_file, cur_stmt, 0, 0);
2685 return true;
2687 else
2689 val_tmp = make_temp_ssa_name (aligned_load_type, NULL, "load_dst");
2690 load_stmt = gimple_build_assign (val_tmp, val_expr);
2691 gimple_set_vuse (load_stmt, n->vuse);
2692 gsi_insert_before (&gsi, load_stmt, GSI_SAME_STMT);
2694 src = val_tmp;
2697 if (n->range == 16)
2698 bswap_stats.found_16bit++;
2699 else if (n->range == 32)
2700 bswap_stats.found_32bit++;
2701 else
2703 gcc_assert (n->range == 64);
2704 bswap_stats.found_64bit++;
2707 tmp = src;
2709 /* Convert the src expression if necessary. */
2710 if (!useless_type_conversion_p (TREE_TYPE (tmp), bswap_type))
2712 gimple *convert_stmt;
2714 tmp = make_temp_ssa_name (bswap_type, NULL, "bswapsrc");
2715 convert_stmt = gimple_build_assign (tmp, NOP_EXPR, src);
2716 gsi_insert_before (&gsi, convert_stmt, GSI_SAME_STMT);
2719 /* The canonical form for a 16-bit bswap is a rotate expression. Only
2720 16-bit values are handled this way, since rotating a 2N-bit value by
2721 N bits is generally not equivalent to a bswap. Consider for instance
2722 0x01020304 r>> 16, which gives 0x03040102, while a bswap of that value is 0x04030201. */
2723 if (bswap && n->range == 16)
2725 tree count = build_int_cst (NULL, BITS_PER_UNIT);
2726 src = fold_build2 (LROTATE_EXPR, bswap_type, tmp, count);
2727 bswap_stmt = gimple_build_assign (NULL, src);
2729 else
2730 bswap_stmt = gimple_build_call (fndecl, 1, tmp);
2732 tmp = tgt;
2734 /* Convert the result if necessary. */
2735 if (!useless_type_conversion_p (TREE_TYPE (tgt), bswap_type))
2737 gimple *convert_stmt;
2739 tmp = make_temp_ssa_name (bswap_type, NULL, "bswapdst");
2740 convert_stmt = gimple_build_assign (tgt, NOP_EXPR, tmp);
2741 gsi_insert_after (&gsi, convert_stmt, GSI_SAME_STMT);
2744 gimple_set_lhs (bswap_stmt, tmp);
2746 if (dump_file)
2748 fprintf (dump_file, "%d bit bswap implementation found at: ",
2749 (int) n->range);
2750 print_gimple_stmt (dump_file, cur_stmt, 0, 0);
2753 gsi_insert_after (&gsi, bswap_stmt, GSI_SAME_STMT);
2754 gsi_remove (&gsi, true);
2755 return true;
2758 /* Find manual byte swap implementations as well as loads in a given
2759 endianness. Byte swaps are turned into a bswap builtin invocation,
2760 while endian loads are converted to a bswap builtin invocation or a
2761 simple load according to the target endianness. */
2763 unsigned int
2764 pass_optimize_bswap::execute (function *fun)
2766 basic_block bb;
2767 bool bswap32_p, bswap64_p;
2768 bool changed = false;
2769 tree bswap32_type = NULL_TREE, bswap64_type = NULL_TREE;
2771 if (BITS_PER_UNIT != 8)
2772 return 0;
2774 bswap32_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP32)
2775 && optab_handler (bswap_optab, SImode) != CODE_FOR_nothing);
2776 bswap64_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP64)
2777 && (optab_handler (bswap_optab, DImode) != CODE_FOR_nothing
2778 || (bswap32_p && word_mode == SImode)));
2780 /* Determine the argument type of the builtins. The code later on
2781 assumes that the return and argument type are the same. */
2782 if (bswap32_p)
2784 tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
2785 bswap32_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
2788 if (bswap64_p)
2790 tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
2791 bswap64_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
2794 memset (&nop_stats, 0, sizeof (nop_stats));
2795 memset (&bswap_stats, 0, sizeof (bswap_stats));
2797 FOR_EACH_BB_FN (bb, fun)
2799 gimple_stmt_iterator gsi;
2801 /* We do a reverse scan for bswap patterns to make sure we get the
2802 widest match. As bswap pattern matching doesn't handle previously
2803 inserted smaller bswap replacements as sub-patterns, the wider
2804 variant wouldn't be detected. */
2805 for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi);)
2807 gimple *src_stmt, *cur_stmt = gsi_stmt (gsi);
2808 tree fndecl = NULL_TREE, bswap_type = NULL_TREE, load_type;
2809 enum tree_code code;
2810 struct symbolic_number n;
2811 bool bswap;
2813 /* This gsi_prev (&gsi) is not part of the for loop because cur_stmt
2814 might be moved to a different basic block by bswap_replace and gsi
2815 must not point to it in that case. Doing the gsi_prev
2816 here makes sure that gsi points to the statement previous to
2817 cur_stmt while still making sure that all statements in
2818 this basic block are considered. */
2819 gsi_prev (&gsi);
2821 if (!is_gimple_assign (cur_stmt))
2822 continue;
2824 code = gimple_assign_rhs_code (cur_stmt);
2825 switch (code)
2827 case LROTATE_EXPR:
2828 case RROTATE_EXPR:
2829 if (!tree_fits_uhwi_p (gimple_assign_rhs2 (cur_stmt))
2830 || tree_to_uhwi (gimple_assign_rhs2 (cur_stmt))
2831 % BITS_PER_UNIT)
2832 continue;
2833 /* Fall through. */
2834 case BIT_IOR_EXPR:
2835 break;
2836 default:
2837 continue;
2840 src_stmt = find_bswap_or_nop (cur_stmt, &n, &bswap);
2842 if (!src_stmt)
2843 continue;
2845 switch (n.range)
2847 case 16:
2848 /* Already in canonical form, nothing to do. */
2849 if (code == LROTATE_EXPR || code == RROTATE_EXPR)
2850 continue;
2851 load_type = bswap_type = uint16_type_node;
2852 break;
2853 case 32:
2854 load_type = uint32_type_node;
2855 if (bswap32_p)
2857 fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
2858 bswap_type = bswap32_type;
2860 break;
2861 case 64:
2862 load_type = uint64_type_node;
2863 if (bswap64_p)
2865 fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
2866 bswap_type = bswap64_type;
2868 break;
2869 default:
2870 continue;
2873 if (bswap && !fndecl && n.range != 16)
2874 continue;
2876 if (bswap_replace (cur_stmt, src_stmt, fndecl, bswap_type, load_type,
2877 &n, bswap))
2878 changed = true;
2882 statistics_counter_event (fun, "16-bit nop implementations found",
2883 nop_stats.found_16bit);
2884 statistics_counter_event (fun, "32-bit nop implementations found",
2885 nop_stats.found_32bit);
2886 statistics_counter_event (fun, "64-bit nop implementations found",
2887 nop_stats.found_64bit);
2888 statistics_counter_event (fun, "16-bit bswap implementations found",
2889 bswap_stats.found_16bit);
2890 statistics_counter_event (fun, "32-bit bswap implementations found",
2891 bswap_stats.found_32bit);
2892 statistics_counter_event (fun, "64-bit bswap implementations found",
2893 bswap_stats.found_64bit);
2895 return (changed ? TODO_update_ssa : 0);
2898 } // anon namespace
2900 gimple_opt_pass *
2901 make_pass_optimize_bswap (gcc::context *ctxt)
2903 return new pass_optimize_bswap (ctxt);
2906 /* Return true if STMT is a type conversion operation that can be stripped
2907 when used in a widening multiply operation. */
2908 static bool
2909 widening_mult_conversion_strippable_p (tree result_type, gimple *stmt)
2911 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
2913 if (TREE_CODE (result_type) == INTEGER_TYPE)
2915 tree op_type;
2916 tree inner_op_type;
2918 if (!CONVERT_EXPR_CODE_P (rhs_code))
2919 return false;
2921 op_type = TREE_TYPE (gimple_assign_lhs (stmt));
2923 /* If the type of OP has the same precision as the result, then
2924 we can strip this conversion. The multiply operation will be
2925 selected to create the correct extension as a by-product. */
2926 if (TYPE_PRECISION (result_type) == TYPE_PRECISION (op_type))
2927 return true;
2929 /* We can also strip a conversion if it preserves the signed-ness of
2930 the operation and doesn't narrow the range. */
2931 inner_op_type = TREE_TYPE (gimple_assign_rhs1 (stmt));
2933 /* If the inner-most type is unsigned, then we can strip any
2934 intermediate widening operation. If it's signed, then the
2935 intermediate widening operation must also be signed. */
2936 if ((TYPE_UNSIGNED (inner_op_type)
2937 || TYPE_UNSIGNED (op_type) == TYPE_UNSIGNED (inner_op_type))
2938 && TYPE_PRECISION (op_type) > TYPE_PRECISION (inner_op_type))
2939 return true;
2941 return false;
2944 return rhs_code == FIXED_CONVERT_EXPR;
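/* For instance (an illustrative sketch), in

     short a, b;
     long long c = (long long) a * (long long) b;

   each cast produces a value with the same precision as the
   multiplication result, so both conversions are strippable and the
   widening multiply selected later recreates the sign extension as a
   by-product. */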
2947 /* Return true if RHS is a suitable operand for a widening multiplication,
2948 assuming a target type of TYPE.
2949 There are two cases:
2951 - RHS makes some value at least twice as wide. Store that value
2952 in *NEW_RHS_OUT if so, and store its type in *TYPE_OUT.
2954 - RHS is an integer constant. Store that value in *NEW_RHS_OUT if so,
2955 but leave *TYPE_OUT untouched. */
2957 static bool
2958 is_widening_mult_rhs_p (tree type, tree rhs, tree *type_out,
2959 tree *new_rhs_out)
2961 gimple *stmt;
2962 tree type1, rhs1;
2964 if (TREE_CODE (rhs) == SSA_NAME)
2966 stmt = SSA_NAME_DEF_STMT (rhs);
2967 if (is_gimple_assign (stmt))
2969 if (! widening_mult_conversion_strippable_p (type, stmt))
2970 rhs1 = rhs;
2971 else
2973 rhs1 = gimple_assign_rhs1 (stmt);
2975 if (TREE_CODE (rhs1) == INTEGER_CST)
2977 *new_rhs_out = rhs1;
2978 *type_out = NULL;
2979 return true;
2983 else
2984 rhs1 = rhs;
2986 type1 = TREE_TYPE (rhs1);
2988 if (TREE_CODE (type1) != TREE_CODE (type)
2989 || TYPE_PRECISION (type1) * 2 > TYPE_PRECISION (type))
2990 return false;
2992 *new_rhs_out = rhs1;
2993 *type_out = type1;
2994 return true;
2997 if (TREE_CODE (rhs) == INTEGER_CST)
2999 *new_rhs_out = rhs;
3000 *type_out = NULL;
3001 return true;
3004 return false;
3007 /* Return true if STMT performs a widening multiplication, assuming the
3008 output type is TYPE. If so, store the unwidened types of the operands
3009 in *TYPE1_OUT and *TYPE2_OUT respectively. Also fill *RHS1_OUT and
3010 *RHS2_OUT such that converting those operands to types *TYPE1_OUT
3011 and *TYPE2_OUT would give the operands of the multiplication. */
3013 static bool
3014 is_widening_mult_p (gimple *stmt,
3015 tree *type1_out, tree *rhs1_out,
3016 tree *type2_out, tree *rhs2_out)
3018 tree type = TREE_TYPE (gimple_assign_lhs (stmt));
3020 if (TREE_CODE (type) != INTEGER_TYPE
3021 && TREE_CODE (type) != FIXED_POINT_TYPE)
3022 return false;
3024 if (!is_widening_mult_rhs_p (type, gimple_assign_rhs1 (stmt), type1_out,
3025 rhs1_out))
3026 return false;
3028 if (!is_widening_mult_rhs_p (type, gimple_assign_rhs2 (stmt), type2_out,
3029 rhs2_out))
3030 return false;
3032 if (*type1_out == NULL)
3034 if (*type2_out == NULL || !int_fits_type_p (*rhs1_out, *type2_out))
3035 return false;
3036 *type1_out = *type2_out;
3039 if (*type2_out == NULL)
3041 if (!int_fits_type_p (*rhs2_out, *type1_out))
3042 return false;
3043 *type2_out = *type1_out;
3046 /* Ensure that the larger of the two operands comes first. */
3047 if (TYPE_PRECISION (*type1_out) < TYPE_PRECISION (*type2_out))
3049 std::swap (*type1_out, *type2_out);
3050 std::swap (*rhs1_out, *rhs2_out);
3053 return true;
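/* For example (an illustrative sketch, assuming 32-bit int), for

     int x;
     long long y = (long long) x * 1000;

   this returns true with *TYPE1_OUT == *TYPE2_OUT == int, *RHS1_OUT == x
   and *RHS2_OUT == 1000, because the constant fits the unwidened type of
   the other operand. */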
3056 /* Process a single gimple statement STMT, which has a MULT_EXPR as
3057 its rhs, and try to convert it into a WIDEN_MULT_EXPR. The return
3058 value is true iff we converted the statement. */
3060 static bool
3061 convert_mult_to_widen (gimple *stmt, gimple_stmt_iterator *gsi)
3063 tree lhs, rhs1, rhs2, type, type1, type2;
3064 enum insn_code handler;
3065 machine_mode to_mode, from_mode, actual_mode;
3066 optab op;
3067 int actual_precision;
3068 location_t loc = gimple_location (stmt);
3069 bool from_unsigned1, from_unsigned2;
3071 lhs = gimple_assign_lhs (stmt);
3072 type = TREE_TYPE (lhs);
3073 if (TREE_CODE (type) != INTEGER_TYPE)
3074 return false;
3076 if (!is_widening_mult_p (stmt, &type1, &rhs1, &type2, &rhs2))
3077 return false;
3079 to_mode = TYPE_MODE (type);
3080 from_mode = TYPE_MODE (type1);
3081 from_unsigned1 = TYPE_UNSIGNED (type1);
3082 from_unsigned2 = TYPE_UNSIGNED (type2);
3084 if (from_unsigned1 && from_unsigned2)
3085 op = umul_widen_optab;
3086 else if (!from_unsigned1 && !from_unsigned2)
3087 op = smul_widen_optab;
3088 else
3089 op = usmul_widen_optab;
3091 handler = find_widening_optab_handler_and_mode (op, to_mode, from_mode,
3092 0, &actual_mode);
3094 if (handler == CODE_FOR_nothing)
3096 if (op != smul_widen_optab)
3098 /* We can use a signed multiply with unsigned types as long as
3099 there is a wider mode to use, or it is the smaller of the two
3100 types that is unsigned. Note that type1 >= type2, always. */
3101 if ((TYPE_UNSIGNED (type1)
3102 && TYPE_PRECISION (type1) == GET_MODE_PRECISION (from_mode))
3103 || (TYPE_UNSIGNED (type2)
3104 && TYPE_PRECISION (type2) == GET_MODE_PRECISION (from_mode)))
3106 from_mode = GET_MODE_WIDER_MODE (from_mode);
3107 if (GET_MODE_SIZE (to_mode) <= GET_MODE_SIZE (from_mode))
3108 return false;
3111 op = smul_widen_optab;
3112 handler = find_widening_optab_handler_and_mode (op, to_mode,
3113 from_mode, 0,
3114 &actual_mode);
3116 if (handler == CODE_FOR_nothing)
3117 return false;
3119 from_unsigned1 = from_unsigned2 = false;
3121 else
3122 return false;
3125 /* Ensure that the inputs to the handler are in the correct precision
3126 for the opcode. This will be the full mode size. */
3127 actual_precision = GET_MODE_PRECISION (actual_mode);
3128 if (2 * actual_precision > TYPE_PRECISION (type))
3129 return false;
3130 if (actual_precision != TYPE_PRECISION (type1)
3131 || from_unsigned1 != TYPE_UNSIGNED (type1))
3132 rhs1 = build_and_insert_cast (gsi, loc,
3133 build_nonstandard_integer_type
3134 (actual_precision, from_unsigned1), rhs1);
3135 if (actual_precision != TYPE_PRECISION (type2)
3136 || from_unsigned2 != TYPE_UNSIGNED (type2))
3137 rhs2 = build_and_insert_cast (gsi, loc,
3138 build_nonstandard_integer_type
3139 (actual_precision, from_unsigned2), rhs2);
3141 /* Handle constants. */
3142 if (TREE_CODE (rhs1) == INTEGER_CST)
3143 rhs1 = fold_convert (type1, rhs1);
3144 if (TREE_CODE (rhs2) == INTEGER_CST)
3145 rhs2 = fold_convert (type2, rhs2);
3147 gimple_assign_set_rhs1 (stmt, rhs1);
3148 gimple_assign_set_rhs2 (stmt, rhs2);
3149 gimple_assign_set_rhs_code (stmt, WIDEN_MULT_EXPR);
3150 update_stmt (stmt);
3151 widen_mul_stats.widen_mults_inserted++;
3152 return true;
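/* Schematically (an illustrative sketch of the IL, not a verbatim dump),
   the transformation turns

     _1 = (long long int) a_2;
     _3 = (long long int) b_4;
     c_5 = _1 * _3;

   into

     c_5 = a_2 w* b_4;   <-- WIDEN_MULT_EXPR

   provided the target advertises a usable [us]mul_widen optab. */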
3155 /* Process a single gimple statement STMT, which is found at the
3156 iterator GSI and has a either a PLUS_EXPR or a MINUS_EXPR as its
3157 rhs (given by CODE), and try to convert it into a
3158 WIDEN_MULT_PLUS_EXPR or a WIDEN_MULT_MINUS_EXPR. The return value
3159 is true iff we converted the statement. */
3161 static bool
3162 convert_plusminus_to_widen (gimple_stmt_iterator *gsi, gimple *stmt,
3163 enum tree_code code)
3165 gimple *rhs1_stmt = NULL, *rhs2_stmt = NULL;
3166 gimple *conv1_stmt = NULL, *conv2_stmt = NULL, *conv_stmt;
3167 tree type, type1, type2, optype;
3168 tree lhs, rhs1, rhs2, mult_rhs1, mult_rhs2, add_rhs;
3169 enum tree_code rhs1_code = ERROR_MARK, rhs2_code = ERROR_MARK;
3170 optab this_optab;
3171 enum tree_code wmult_code;
3172 enum insn_code handler;
3173 machine_mode to_mode, from_mode, actual_mode;
3174 location_t loc = gimple_location (stmt);
3175 int actual_precision;
3176 bool from_unsigned1, from_unsigned2;
3178 lhs = gimple_assign_lhs (stmt);
3179 type = TREE_TYPE (lhs);
3180 if (TREE_CODE (type) != INTEGER_TYPE
3181 && TREE_CODE (type) != FIXED_POINT_TYPE)
3182 return false;
3184 if (code == MINUS_EXPR)
3185 wmult_code = WIDEN_MULT_MINUS_EXPR;
3186 else
3187 wmult_code = WIDEN_MULT_PLUS_EXPR;
3189 rhs1 = gimple_assign_rhs1 (stmt);
3190 rhs2 = gimple_assign_rhs2 (stmt);
3192 if (TREE_CODE (rhs1) == SSA_NAME)
3194 rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
3195 if (is_gimple_assign (rhs1_stmt))
3196 rhs1_code = gimple_assign_rhs_code (rhs1_stmt);
3199 if (TREE_CODE (rhs2) == SSA_NAME)
3201 rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
3202 if (is_gimple_assign (rhs2_stmt))
3203 rhs2_code = gimple_assign_rhs_code (rhs2_stmt);
3206 /* Allow for one conversion statement between the multiply
3207 and addition/subtraction statement. If there is more than
3208 one conversion then we assume they would invalidate this
3209 transformation. If that's not the case then they should have
3210 been folded before now. */
3211 if (CONVERT_EXPR_CODE_P (rhs1_code))
3213 conv1_stmt = rhs1_stmt;
3214 rhs1 = gimple_assign_rhs1 (rhs1_stmt);
3215 if (TREE_CODE (rhs1) == SSA_NAME)
3217 rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
3218 if (is_gimple_assign (rhs1_stmt))
3219 rhs1_code = gimple_assign_rhs_code (rhs1_stmt);
3221 else
3222 return false;
3224 if (CONVERT_EXPR_CODE_P (rhs2_code))
3226 conv2_stmt = rhs2_stmt;
3227 rhs2 = gimple_assign_rhs1 (rhs2_stmt);
3228 if (TREE_CODE (rhs2) == SSA_NAME)
3230 rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
3231 if (is_gimple_assign (rhs2_stmt))
3232 rhs2_code = gimple_assign_rhs_code (rhs2_stmt);
3234 else
3235 return false;
3238 /* If code is WIDEN_MULT_EXPR then it would seem unnecessary to call
3239 is_widening_mult_p, but we still need the rhs values it returns.
3241 It might also appear that it would be sufficient to use the existing
3242 operands of the widening multiply, but that would limit the choice of
3243 multiply-and-accumulate instructions.
3245 If the widened-multiplication result has more than one use, it is
3246 probably wiser not to do the conversion. */
3247 if (code == PLUS_EXPR
3248 && (rhs1_code == MULT_EXPR || rhs1_code == WIDEN_MULT_EXPR))
3250 if (!has_single_use (rhs1)
3251 || !is_widening_mult_p (rhs1_stmt, &type1, &mult_rhs1,
3252 &type2, &mult_rhs2))
3253 return false;
3254 add_rhs = rhs2;
3255 conv_stmt = conv1_stmt;
3257 else if (rhs2_code == MULT_EXPR || rhs2_code == WIDEN_MULT_EXPR)
3259 if (!has_single_use (rhs2)
3260 || !is_widening_mult_p (rhs2_stmt, &type1, &mult_rhs1,
3261 &type2, &mult_rhs2))
3262 return false;
3263 add_rhs = rhs1;
3264 conv_stmt = conv2_stmt;
3266 else
3267 return false;
3269 to_mode = TYPE_MODE (type);
3270 from_mode = TYPE_MODE (type1);
3271 from_unsigned1 = TYPE_UNSIGNED (type1);
3272 from_unsigned2 = TYPE_UNSIGNED (type2);
3273 optype = type1;
3275 /* There's no such thing as a mixed sign madd yet, so use a wider mode. */
3276 if (from_unsigned1 != from_unsigned2)
3278 if (!INTEGRAL_TYPE_P (type))
3279 return false;
3280 /* We can use a signed multiply with unsigned types as long as
3281 there is a wider mode to use, or it is the smaller of the two
3282 types that is unsigned. Note that type1 >= type2, always. */
3283 if ((from_unsigned1
3284 && TYPE_PRECISION (type1) == GET_MODE_PRECISION (from_mode))
3285 || (from_unsigned2
3286 && TYPE_PRECISION (type2) == GET_MODE_PRECISION (from_mode)))
3288 from_mode = GET_MODE_WIDER_MODE (from_mode);
3289 if (GET_MODE_SIZE (from_mode) >= GET_MODE_SIZE (to_mode))
3290 return false;
3293 from_unsigned1 = from_unsigned2 = false;
3294 optype = build_nonstandard_integer_type (GET_MODE_PRECISION (from_mode),
3295 false);
3298 /* If there was a conversion between the multiply and addition
3299 then we need to make sure it fits a multiply-and-accumulate.
3300 There should be a single mode change which does not change the
3301 value. */
3302 if (conv_stmt)
3304 /* We use the original, unmodified data types for this. */
3305 tree from_type = TREE_TYPE (gimple_assign_rhs1 (conv_stmt));
3306 tree to_type = TREE_TYPE (gimple_assign_lhs (conv_stmt));
3307 int data_size = TYPE_PRECISION (type1) + TYPE_PRECISION (type2);
3308 bool is_unsigned = TYPE_UNSIGNED (type1) && TYPE_UNSIGNED (type2);
3310 if (TYPE_PRECISION (from_type) > TYPE_PRECISION (to_type))
3312 /* Conversion is a truncate. */
3313 if (TYPE_PRECISION (to_type) < data_size)
3314 return false;
3316 else if (TYPE_PRECISION (from_type) < TYPE_PRECISION (to_type))
3318 /* Conversion is an extend. Check it's the right sort. */
3319 if (TYPE_UNSIGNED (from_type) != is_unsigned
3320 && !(is_unsigned && TYPE_PRECISION (from_type) > data_size))
3321 return false;
3323 /* else convert is a no-op for our purposes. */
3326 /* Verify that the machine can perform a widening multiply
3327 accumulate in this mode/signedness combination, otherwise
3328 this transformation is likely to pessimize code. */
3329 this_optab = optab_for_tree_code (wmult_code, optype, optab_default);
3330 handler = find_widening_optab_handler_and_mode (this_optab, to_mode,
3331 from_mode, 0, &actual_mode);
3333 if (handler == CODE_FOR_nothing)
3334 return false;
3336 /* Ensure that the inputs to the handler are in the correct precision
3337 for the opcode. This will be the full mode size. */
3338 actual_precision = GET_MODE_PRECISION (actual_mode);
3339 if (actual_precision != TYPE_PRECISION (type1)
3340 || from_unsigned1 != TYPE_UNSIGNED (type1))
3341 mult_rhs1 = build_and_insert_cast (gsi, loc,
3342 build_nonstandard_integer_type
3343 (actual_precision, from_unsigned1),
3344 mult_rhs1);
3345 if (actual_precision != TYPE_PRECISION (type2)
3346 || from_unsigned2 != TYPE_UNSIGNED (type2))
3347 mult_rhs2 = build_and_insert_cast (gsi, loc,
3348 build_nonstandard_integer_type
3349 (actual_precision, from_unsigned2),
3350 mult_rhs2);
3352 if (!useless_type_conversion_p (type, TREE_TYPE (add_rhs)))
3353 add_rhs = build_and_insert_cast (gsi, loc, type, add_rhs);
3355 /* Handle constants. */
3356 if (TREE_CODE (mult_rhs1) == INTEGER_CST)
3357 mult_rhs1 = fold_convert (type1, mult_rhs1);
3358 if (TREE_CODE (mult_rhs2) == INTEGER_CST)
3359 mult_rhs2 = fold_convert (type2, mult_rhs2);
3361 gimple_assign_set_rhs_with_ops (gsi, wmult_code, mult_rhs1, mult_rhs2,
3362 add_rhs);
3363 update_stmt (gsi_stmt (*gsi));
3364 widen_mul_stats.maccs_inserted++;
3365 return true;
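/* Schematically (an illustrative sketch, not a verbatim dump), this turns

     _1 = a_2 w* b_3;
     c_4 = _1 + d_5;

   into

     c_4 = WIDEN_MULT_PLUS_EXPR <a_2, b_3, d_5>;

   when the target provides a multiply-and-accumulate pattern for the
   mode pair. */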
3368 /* Combine the multiplication at MUL_STMT with operands MULOP1 and MULOP2
3369 with uses in additions and subtractions to form fused multiply-add
3370 operations. Returns true if successful and MUL_STMT should be removed. */
3372 static bool
3373 convert_mult_to_fma (gimple *mul_stmt, tree op1, tree op2)
3375 tree mul_result = gimple_get_lhs (mul_stmt);
3376 tree type = TREE_TYPE (mul_result);
3377 gimple *use_stmt, *neguse_stmt;
3378 gassign *fma_stmt;
3379 use_operand_p use_p;
3380 imm_use_iterator imm_iter;
3382 if (FLOAT_TYPE_P (type)
3383 && flag_fp_contract_mode == FP_CONTRACT_OFF)
3384 return false;
3386 /* We don't want to do bitfield reduction ops. */
3387 if (INTEGRAL_TYPE_P (type)
3388 && (TYPE_PRECISION (type)
3389 != GET_MODE_PRECISION (TYPE_MODE (type))))
3390 return false;
3392 /* If the target doesn't support it, don't generate it. We assume that
3393 if fma isn't available then fms, fnma or fnms are not either. */
3394 if (optab_handler (fma_optab, TYPE_MODE (type)) == CODE_FOR_nothing)
3395 return false;
3397 /* If the multiplication has zero uses, it is kept around probably because
3398 of -fnon-call-exceptions. Don't optimize it away in that case;
3399 that is DCE's job. */
3400 if (has_zero_uses (mul_result))
3401 return false;
3403 /* Make sure that the multiplication statement becomes dead after
3404 the transformation, i.e. that all uses are transformed to FMAs.
3405 This means we assume that an FMA operation has the same cost
3406 as an addition. */
3407 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, mul_result)
3409 enum tree_code use_code;
3410 tree result = mul_result;
3411 bool negate_p = false;
3413 use_stmt = USE_STMT (use_p);
3415 if (is_gimple_debug (use_stmt))
3416 continue;
3418 /* For now restrict this operation to single basic blocks. In theory
3419 we would want to support sinking the multiplication in
3420 m = a*b;
3421 if ()
3422 ma = m + c;
3423 else
3424 d = m;
3425 to form a fma in the then block and sink the multiplication to the
3426 else block. */
3427 if (gimple_bb (use_stmt) != gimple_bb (mul_stmt))
3428 return false;
3430 if (!is_gimple_assign (use_stmt))
3431 return false;
3433 use_code = gimple_assign_rhs_code (use_stmt);
3435 /* A negate on the multiplication leads to FNMA. */
3436 if (use_code == NEGATE_EXPR)
3438 ssa_op_iter iter;
3439 use_operand_p usep;
3441 result = gimple_assign_lhs (use_stmt);
3443 /* Make sure the negate statement becomes dead with this
3444 single transformation. */
3445 if (!single_imm_use (gimple_assign_lhs (use_stmt),
3446 &use_p, &neguse_stmt))
3447 return false;
3449 /* Make sure the multiplication isn't also used on that stmt. */
3450 FOR_EACH_PHI_OR_STMT_USE (usep, neguse_stmt, iter, SSA_OP_USE)
3451 if (USE_FROM_PTR (usep) == mul_result)
3452 return false;
3454 /* Re-validate. */
3455 use_stmt = neguse_stmt;
3456 if (gimple_bb (use_stmt) != gimple_bb (mul_stmt))
3457 return false;
3458 if (!is_gimple_assign (use_stmt))
3459 return false;
3461 use_code = gimple_assign_rhs_code (use_stmt);
3462 negate_p = true;
3465 switch (use_code)
3467 case MINUS_EXPR:
3468 if (gimple_assign_rhs2 (use_stmt) == result)
3469 negate_p = !negate_p;
3470 break;
3471 case PLUS_EXPR:
3472 break;
3473 default:
3474 /* FMA can only be formed from PLUS and MINUS. */
3475 return false;
3478 /* If the subtrahend (gimple_assign_rhs2 (use_stmt)) is computed
3479 by a MULT_EXPR that we'll visit later, we might be able to
3480 get a more profitable match with fnma.
3481 OTOH, if we don't, a negate / fma pair likely has lower latency
3482 than a mult / subtract pair. */
3483 if (use_code == MINUS_EXPR && !negate_p
3484 && gimple_assign_rhs1 (use_stmt) == result
3485 && optab_handler (fms_optab, TYPE_MODE (type)) == CODE_FOR_nothing
3486 && optab_handler (fnma_optab, TYPE_MODE (type)) != CODE_FOR_nothing)
3488 tree rhs2 = gimple_assign_rhs2 (use_stmt);
3490 if (TREE_CODE (rhs2) == SSA_NAME)
3492 gimple *stmt2 = SSA_NAME_DEF_STMT (rhs2);
3493 if (has_single_use (rhs2)
3494 && is_gimple_assign (stmt2)
3495 && gimple_assign_rhs_code (stmt2) == MULT_EXPR)
3496 return false;
3500 /* We can't handle a * b + a * b. */
3501 if (gimple_assign_rhs1 (use_stmt) == gimple_assign_rhs2 (use_stmt))
3502 return false;
3504 /* While it is possible to validate whether or not the exact form
3505 that we've recognized is available in the backend, the assumption
3506 is that the transformation is never a loss. For instance, suppose
3507 the target only has the plain FMA pattern available. Consider
3508 a*b-c -> fma(a,b,-c): we've exchanged MUL+SUB for FMA+NEG, which
3509 is still two operations. Consider -(a*b)-c -> fma(-a,b,-c): we
3510 still have 3 operations, but in the FMA form the two NEGs are
3511 independent and could be run in parallel. */
3514 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, mul_result)
3516 gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
3517 enum tree_code use_code;
3518 tree addop, mulop1 = op1, result = mul_result;
3519 bool negate_p = false;
3521 if (is_gimple_debug (use_stmt))
3522 continue;
3524 use_code = gimple_assign_rhs_code (use_stmt);
3525 if (use_code == NEGATE_EXPR)
3527 result = gimple_assign_lhs (use_stmt);
3528 single_imm_use (gimple_assign_lhs (use_stmt), &use_p, &neguse_stmt);
3529 gsi_remove (&gsi, true);
3530 release_defs (use_stmt);
3532 use_stmt = neguse_stmt;
3533 gsi = gsi_for_stmt (use_stmt);
3534 use_code = gimple_assign_rhs_code (use_stmt);
3535 negate_p = true;
3538 if (gimple_assign_rhs1 (use_stmt) == result)
3540 addop = gimple_assign_rhs2 (use_stmt);
3541 /* a * b - c -> a * b + (-c) */
3542 if (gimple_assign_rhs_code (use_stmt) == MINUS_EXPR)
3543 addop = force_gimple_operand_gsi (&gsi,
3544 build1 (NEGATE_EXPR,
3545 type, addop),
3546 true, NULL_TREE, true,
3547 GSI_SAME_STMT);
3549 else
3551 addop = gimple_assign_rhs1 (use_stmt);
3552 /* a - b * c -> (-b) * c + a */
3553 if (gimple_assign_rhs_code (use_stmt) == MINUS_EXPR)
3554 negate_p = !negate_p;
3557 if (negate_p)
3558 mulop1 = force_gimple_operand_gsi (&gsi,
3559 build1 (NEGATE_EXPR,
3560 type, mulop1),
3561 true, NULL_TREE, true,
3562 GSI_SAME_STMT);
3564 fma_stmt = gimple_build_assign (gimple_assign_lhs (use_stmt),
3565 FMA_EXPR, mulop1, op2, addop);
3566 gsi_replace (&gsi, fma_stmt, true);
3567 widen_mul_stats.fmas_inserted++;
3570 return true;
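/* Schematically (an illustrative sketch, not a verbatim dump), with FP
   contraction enabled this rewrites

     _1 = a_2 * b_3;
     c_4 = _1 + d_5;
     e_6 = d_5 - _1;

   into

     c_4 = FMA_EXPR <a_2, b_3, d_5>;
     e_6 = FMA_EXPR <_7, b_3, d_5>;   with _7 = -a_2

   leaving the original multiplication dead. */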
3574 /* Helper function of match_uaddsub_overflow. Return 1
3575 if USE_STMT is an unsigned overflow check (ovf != 0) for
3576 STMT, -1 if USE_STMT is an unsigned overflow check (ovf == 0),
3577 and 0 otherwise. */
3579 static int
3580 uaddsub_overflow_check_p (gimple *stmt, gimple *use_stmt)
3582 enum tree_code ccode = ERROR_MARK;
3583 tree crhs1 = NULL_TREE, crhs2 = NULL_TREE;
3584 if (gimple_code (use_stmt) == GIMPLE_COND)
3586 ccode = gimple_cond_code (use_stmt);
3587 crhs1 = gimple_cond_lhs (use_stmt);
3588 crhs2 = gimple_cond_rhs (use_stmt);
3590 else if (is_gimple_assign (use_stmt))
3592 if (gimple_assign_rhs_class (use_stmt) == GIMPLE_BINARY_RHS)
3594 ccode = gimple_assign_rhs_code (use_stmt);
3595 crhs1 = gimple_assign_rhs1 (use_stmt);
3596 crhs2 = gimple_assign_rhs2 (use_stmt);
3598 else if (gimple_assign_rhs_code (use_stmt) == COND_EXPR)
3600 tree cond = gimple_assign_rhs1 (use_stmt);
3601 if (COMPARISON_CLASS_P (cond))
3603 ccode = TREE_CODE (cond);
3604 crhs1 = TREE_OPERAND (cond, 0);
3605 crhs2 = TREE_OPERAND (cond, 1);
3607 else
3608 return 0;
3610 else
3611 return 0;
3613 else
3614 return 0;
3616 if (TREE_CODE_CLASS (ccode) != tcc_comparison)
3617 return 0;
3619 enum tree_code code = gimple_assign_rhs_code (stmt);
3620 tree lhs = gimple_assign_lhs (stmt);
3621 tree rhs1 = gimple_assign_rhs1 (stmt);
3622 tree rhs2 = gimple_assign_rhs2 (stmt);
3624 switch (ccode)
3626 case GT_EXPR:
3627 case LE_EXPR:
3628 /* r = a - b; r > a or r <= a
3629 r = a + b; a > r or a <= r or b > r or b <= r. */
3630 if ((code == MINUS_EXPR && crhs1 == lhs && crhs2 == rhs1)
3631 || (code == PLUS_EXPR && (crhs1 == rhs1 || crhs1 == rhs2)
3632 && crhs2 == lhs))
3633 return ccode == GT_EXPR ? 1 : -1;
3634 break;
3635 case LT_EXPR:
3636 case GE_EXPR:
3637 /* r = a - b; a < r or a >= r
3638 r = a + b; r < a or r >= a or r < b or r >= b. */
3639 if ((code == MINUS_EXPR && crhs1 == rhs1 && crhs2 == lhs)
3640 || (code == PLUS_EXPR && crhs1 == lhs
3641 && (crhs2 == rhs1 || crhs2 == rhs2)))
3642 return ccode == LT_EXPR ? 1 : -1;
3643 break;
3644 default:
3645 break;
3647 return 0;
3650 /* Recognize for unsigned x
3651 x = y - z;
3652 if (x > y)
3653 where there are other uses of x and replace it with
3654 _7 = SUB_OVERFLOW (y, z);
3655 x = REALPART_EXPR <_7>;
3656 _8 = IMAGPART_EXPR <_7>;
3657 if (_8)
3658 and similarly for addition. */
3660 static bool
3661 match_uaddsub_overflow (gimple_stmt_iterator *gsi, gimple *stmt,
3662 enum tree_code code)
3664 tree lhs = gimple_assign_lhs (stmt);
3665 tree type = TREE_TYPE (lhs);
3666 use_operand_p use_p;
3667 imm_use_iterator iter;
3668 bool use_seen = false;
3669 bool ovf_use_seen = false;
3670 gimple *use_stmt;
3672 gcc_checking_assert (code == PLUS_EXPR || code == MINUS_EXPR);
3673 if (!INTEGRAL_TYPE_P (type)
3674 || !TYPE_UNSIGNED (type)
3675 || has_zero_uses (lhs)
3676 || has_single_use (lhs)
3677 || optab_handler (code == PLUS_EXPR ? uaddv4_optab : usubv4_optab,
3678 TYPE_MODE (type)) == CODE_FOR_nothing)
3679 return false;
3681 FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
3683 use_stmt = USE_STMT (use_p);
3684 if (is_gimple_debug (use_stmt))
3685 continue;
3687 if (uaddsub_overflow_check_p (stmt, use_stmt))
3688 ovf_use_seen = true;
3689 else
3690 use_seen = true;
3691 if (ovf_use_seen && use_seen)
3692 break;
3695 if (!ovf_use_seen || !use_seen)
3696 return false;
3698 tree ctype = build_complex_type (type);
3699 tree rhs1 = gimple_assign_rhs1 (stmt);
3700 tree rhs2 = gimple_assign_rhs2 (stmt);
3701 gcall *g = gimple_build_call_internal (code == PLUS_EXPR
3702 ? IFN_ADD_OVERFLOW : IFN_SUB_OVERFLOW,
3703 2, rhs1, rhs2);
3704 tree ctmp = make_ssa_name (ctype);
3705 gimple_call_set_lhs (g, ctmp);
3706 gsi_insert_before (gsi, g, GSI_SAME_STMT);
3707 gassign *g2 = gimple_build_assign (lhs, REALPART_EXPR,
3708 build1 (REALPART_EXPR, type, ctmp));
3709 gsi_replace (gsi, g2, true);
3710 tree ovf = make_ssa_name (type);
3711 g2 = gimple_build_assign (ovf, IMAGPART_EXPR,
3712 build1 (IMAGPART_EXPR, type, ctmp));
3713 gsi_insert_after (gsi, g2, GSI_NEW_STMT);
3715 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
3717 if (is_gimple_debug (use_stmt))
3718 continue;
3720 int ovf_use = uaddsub_overflow_check_p (stmt, use_stmt);
3721 if (ovf_use == 0)
3722 continue;
3723 if (gimple_code (use_stmt) == GIMPLE_COND)
3725 gcond *cond_stmt = as_a <gcond *> (use_stmt);
3726 gimple_cond_set_lhs (cond_stmt, ovf);
3727 gimple_cond_set_rhs (cond_stmt, build_int_cst (type, 0));
3728 gimple_cond_set_code (cond_stmt, ovf_use == 1 ? NE_EXPR : EQ_EXPR);
3730 else
3732 gcc_checking_assert (is_gimple_assign (use_stmt));
3733 if (gimple_assign_rhs_class (use_stmt) == GIMPLE_BINARY_RHS)
3735 gimple_assign_set_rhs1 (use_stmt, ovf);
3736 gimple_assign_set_rhs2 (use_stmt, build_int_cst (type, 0));
3737 gimple_assign_set_rhs_code (use_stmt,
3738 ovf_use == 1 ? NE_EXPR : EQ_EXPR);
3740 else
3742 gcc_checking_assert (gimple_assign_rhs_code (use_stmt)
3743 == COND_EXPR);
3744 tree cond = build2 (ovf_use == 1 ? NE_EXPR : EQ_EXPR,
3745 boolean_type_node, ovf,
3746 build_int_cst (type, 0));
3747 gimple_assign_set_rhs1 (use_stmt, cond);
3750 update_stmt (use_stmt);
3752 return true;
3756 /* Find integer multiplications where the operands are extended from
3757 smaller types, and replace the MULT_EXPR with a WIDEN_MULT_EXPR
3758 where appropriate. */
3760 namespace {
3762 const pass_data pass_data_optimize_widening_mul =
3764 GIMPLE_PASS, /* type */
3765 "widening_mul", /* name */
3766 OPTGROUP_NONE, /* optinfo_flags */
3767 TV_NONE, /* tv_id */
3768 PROP_ssa, /* properties_required */
3769 0, /* properties_provided */
3770 0, /* properties_destroyed */
3771 0, /* todo_flags_start */
3772 TODO_update_ssa, /* todo_flags_finish */
3775 class pass_optimize_widening_mul : public gimple_opt_pass
3777 public:
3778 pass_optimize_widening_mul (gcc::context *ctxt)
3779 : gimple_opt_pass (pass_data_optimize_widening_mul, ctxt)
3782 /* opt_pass methods: */
3783 virtual bool gate (function *)
3785 return flag_expensive_optimizations && optimize;
3788 virtual unsigned int execute (function *);
3790 }; // class pass_optimize_widening_mul
3792 unsigned int
3793 pass_optimize_widening_mul::execute (function *fun)
3795 basic_block bb;
3796 bool cfg_changed = false;
3798 memset (&widen_mul_stats, 0, sizeof (widen_mul_stats));
3800 FOR_EACH_BB_FN (bb, fun)
3802 gimple_stmt_iterator gsi;
3804 for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi);)
3806 gimple *stmt = gsi_stmt (gsi);
3807 enum tree_code code;
3809 if (is_gimple_assign (stmt))
3811 code = gimple_assign_rhs_code (stmt);
3812 switch (code)
3814 case MULT_EXPR:
3815 if (!convert_mult_to_widen (stmt, &gsi)
3816 && convert_mult_to_fma (stmt,
3817 gimple_assign_rhs1 (stmt),
3818 gimple_assign_rhs2 (stmt)))
3820 gsi_remove (&gsi, true);
3821 release_defs (stmt);
3822 continue;
3824 break;
3826 case PLUS_EXPR:
3827 case MINUS_EXPR:
3828 if (!convert_plusminus_to_widen (&gsi, stmt, code))
3829 match_uaddsub_overflow (&gsi, stmt, code);
3830 break;
3832 default:;
3835 else if (is_gimple_call (stmt)
3836 && gimple_call_lhs (stmt))
3838 tree fndecl = gimple_call_fndecl (stmt);
3839 if (fndecl
3840 && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
3842 switch (DECL_FUNCTION_CODE (fndecl))
3844 case BUILT_IN_POWF:
3845 case BUILT_IN_POW:
3846 case BUILT_IN_POWL:
3847 if (TREE_CODE (gimple_call_arg (stmt, 1)) == REAL_CST
3848 && real_equal
3849 (&TREE_REAL_CST (gimple_call_arg (stmt, 1)),
3850 &dconst2)
3851 && convert_mult_to_fma (stmt,
3852 gimple_call_arg (stmt, 0),
3853 gimple_call_arg (stmt, 0)))
3855 unlink_stmt_vdef (stmt);
3856 if (gsi_remove (&gsi, true)
3857 && gimple_purge_dead_eh_edges (bb))
3858 cfg_changed = true;
3859 release_defs (stmt);
3860 continue;
3862 break;
3864 default:;
3868 gsi_next (&gsi);
3872 statistics_counter_event (fun, "widening multiplications inserted",
3873 widen_mul_stats.widen_mults_inserted);
3874 statistics_counter_event (fun, "widening maccs inserted",
3875 widen_mul_stats.maccs_inserted);
3876 statistics_counter_event (fun, "fused multiply-adds inserted",
3877 widen_mul_stats.fmas_inserted);
3879 return cfg_changed ? TODO_cleanup_cfg : 0;
3882 } // anon namespace
3884 gimple_opt_pass *
3885 make_pass_optimize_widening_mul (gcc::context *ctxt)
3887 return new pass_optimize_widening_mul (ctxt);