gcc/tree-ssa-math-opts.c
1 /* Global, SSA-based optimizations using mathematical identities.
2 Copyright (C) 2005-2016 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 3, or (at your option) any
9 later version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
20 /* Currently, the only mini-pass in this file tries to CSE reciprocal
21 operations. These are common in sequences such as this one:
23 modulus = sqrt(x*x + y*y + z*z);
24 x = x / modulus;
25 y = y / modulus;
26 z = z / modulus;
28 that can be optimized to
30 modulus = sqrt(x*x + y*y + z*z);
31 rmodulus = 1.0 / modulus;
32 x = x * rmodulus;
33 y = y * rmodulus;
34 z = z * rmodulus;
36 We do this for loop invariant divisors, and with this pass whenever
37 we notice that a division has the same divisor multiple times.
39 Of course, like in PRE, we don't insert a division if a dominator
40 already has one. However, this cannot be done as an extension of
41 PRE for several reasons.
43 First of all, with some experiments it was found out that the
44 transformation is not always useful if there are only two divisions
45 by the same divisor. This is probably because modern processors
46 can pipeline the divisions; on older, in-order processors it should
47 still be effective to optimize two divisions by the same number.
48 We make this a param, and it shall be called N in the remainder of
49 this comment.
51 Second, if trapping math is active, we have less freedom on where
52 to insert divisions: we can only do so in basic blocks that already
53 contain one. (If divisions don't trap, instead, we can insert
54 divisions elsewhere, which will be in blocks that are common dominators
55 of those that have the division).
57 We really don't want to compute the reciprocal unless a division will
58 be found. To do this, we won't insert the division in a basic block
59 that has less than N divisions *post-dominating* it.
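   As an illustrative example, suppose N is 2 and math does not trap: a
   basic block that contains

       x = a / d;
       y = b / d;

   gets a single reciprocal t = 1.0 / d inserted right before the first of
   the two divisions, which are then rewritten as x = a * t; y = b * t.
   If that block held the only division by d in the function, nothing would
   be done, because the count would stay below N.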
61 The algorithm constructs a subset of the dominator tree, holding the
62 blocks containing the divisions and the common dominators to them,
 63    and walks it twice.  The first walk is in post-order, and it annotates
64 each block with the number of divisions that post-dominate it: this
65 gives information on where divisions can be inserted profitably.
66 The second walk is in pre-order, and it inserts divisions as explained
67 above, and replaces divisions by multiplications.
69 In the best case, the cost of the pass is O(n_statements). In the
70 worst-case, the cost is due to creating the dominator tree subset,
71 with a cost of O(n_basic_blocks ^ 2); however this can only happen
72 for n_statements / n_basic_blocks statements. So, the amortized cost
73 of creating the dominator tree subset is O(n_basic_blocks) and the
74 worst-case cost of the pass is O(n_statements * n_basic_blocks).
76 More practically, the cost will be small because there are few
77 divisions, and they tend to be in the same basic block, so insert_bb
78 is called very few times.
80 If we did this using domwalk.c, an efficient implementation would have
81 to work on all the variables in a single pass, because we could not
82 work on just a subset of the dominator tree, as we do now, and the
83 cost would also be something like O(n_statements * n_basic_blocks).
84 The data structures would be more complex in order to work on all the
85 variables in a single pass. */
87 #include "config.h"
88 #include "system.h"
89 #include "coretypes.h"
90 #include "backend.h"
91 #include "target.h"
92 #include "rtl.h"
93 #include "tree.h"
94 #include "gimple.h"
95 #include "predict.h"
96 #include "alloc-pool.h"
97 #include "tree-pass.h"
98 #include "ssa.h"
99 #include "optabs-tree.h"
100 #include "gimple-pretty-print.h"
101 #include "alias.h"
102 #include "fold-const.h"
103 #include "gimple-fold.h"
104 #include "gimple-iterator.h"
105 #include "gimplify.h"
106 #include "gimplify-me.h"
107 #include "stor-layout.h"
108 #include "tree-cfg.h"
109 #include "tree-dfa.h"
110 #include "tree-ssa.h"
111 #include "builtins.h"
112 #include "params.h"
113 #include "internal-fn.h"
114 #include "case-cfn-macros.h"
115 #include "optabs-libfuncs.h"
116 #include "tree-eh.h"
117 #include "targhooks.h"
119 /* This structure represents one basic block that either computes a
 120    division, or is a common dominator for basic blocks that compute a
121 division. */
122 struct occurrence {
123 /* The basic block represented by this structure. */
124 basic_block bb;
126 /* If non-NULL, the SSA_NAME holding the definition for a reciprocal
127 inserted in BB. */
128 tree recip_def;
130 /* If non-NULL, the GIMPLE_ASSIGN for a reciprocal computation that
131 was inserted in BB. */
132 gimple *recip_def_stmt;
134 /* Pointer to a list of "struct occurrence"s for blocks dominated
135 by BB. */
136 struct occurrence *children;
 138   /* Pointer to the next "struct occurrence" in the list of blocks
139 sharing a common dominator. */
140 struct occurrence *next;
142 /* The number of divisions that are in BB before compute_merit. The
143 number of divisions that are in BB or post-dominate it after
144 compute_merit. */
145 int num_divisions;
147 /* True if the basic block has a division, false if it is a common
148 dominator for basic blocks that do. If it is false and trapping
149 math is active, BB is not a candidate for inserting a reciprocal. */
150 bool bb_has_division;
153 static struct
155 /* Number of 1.0/X ops inserted. */
156 int rdivs_inserted;
158 /* Number of 1.0/FUNC ops inserted. */
159 int rfuncs_inserted;
160 } reciprocal_stats;
162 static struct
164 /* Number of cexpi calls inserted. */
165 int inserted;
166 } sincos_stats;
168 static struct
170 /* Number of hand-written 16-bit nop / bswaps found. */
171 int found_16bit;
173 /* Number of hand-written 32-bit nop / bswaps found. */
174 int found_32bit;
176 /* Number of hand-written 64-bit nop / bswaps found. */
177 int found_64bit;
178 } nop_stats, bswap_stats;
180 static struct
182 /* Number of widening multiplication ops inserted. */
183 int widen_mults_inserted;
185 /* Number of integer multiply-and-accumulate ops inserted. */
186 int maccs_inserted;
188 /* Number of fp fused multiply-add ops inserted. */
189 int fmas_inserted;
191 /* Number of divmod calls inserted. */
192 int divmod_calls_inserted;
193 } widen_mul_stats;
195 /* The instance of "struct occurrence" representing the highest
196 interesting block in the dominator tree. */
197 static struct occurrence *occ_head;
199 /* Allocation pool for getting instances of "struct occurrence". */
200 static object_allocator<occurrence> *occ_pool;
 204 /* Allocate and return a new struct occurrence for basic block BB,
 205    whose children list is headed by CHILDREN.  */
206 static struct occurrence *
207 occ_new (basic_block bb, struct occurrence *children)
209 struct occurrence *occ;
211 bb->aux = occ = occ_pool->allocate ();
212 memset (occ, 0, sizeof (struct occurrence));
214 occ->bb = bb;
215 occ->children = children;
216 return occ;
220 /* Insert NEW_OCC into our subset of the dominator tree. P_HEAD points to a
221 list of "struct occurrence"s, one per basic block, having IDOM as
222 their common dominator.
224 We try to insert NEW_OCC as deep as possible in the tree, and we also
225 insert any other block that is a common dominator for BB and one
226 block already in the tree. */
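/* For instance (an illustrative scenario with hypothetical block numbers):
   if the subset so far holds only a node for BB7 and a division is then
   registered in BB4, where BB2 is the nearest common dominator of BB4 and
   BB7 but is not IDOM itself, a new node for BB2 is created with the BB4
   and BB7 nodes as its children, and the BB2 node takes BB7's place in the
   sibling list.  */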
228 static void
229 insert_bb (struct occurrence *new_occ, basic_block idom,
230 struct occurrence **p_head)
232 struct occurrence *occ, **p_occ;
234 for (p_occ = p_head; (occ = *p_occ) != NULL; )
236 basic_block bb = new_occ->bb, occ_bb = occ->bb;
237 basic_block dom = nearest_common_dominator (CDI_DOMINATORS, occ_bb, bb);
238 if (dom == bb)
240 /* BB dominates OCC_BB. OCC becomes NEW_OCC's child: remove OCC
241 from its list. */
242 *p_occ = occ->next;
243 occ->next = new_occ->children;
244 new_occ->children = occ;
246 /* Try the next block (it may as well be dominated by BB). */
249 else if (dom == occ_bb)
251 /* OCC_BB dominates BB. Tail recurse to look deeper. */
252 insert_bb (new_occ, dom, &occ->children);
253 return;
256 else if (dom != idom)
258 gcc_assert (!dom->aux);
260 /* There is a dominator between IDOM and BB, add it and make
261 two children out of NEW_OCC and OCC. First, remove OCC from
262 its list. */
263 *p_occ = occ->next;
264 new_occ->next = occ;
265 occ->next = NULL;
267 /* None of the previous blocks has DOM as a dominator: if we tail
268 recursed, we would reexamine them uselessly. Just switch BB with
269 DOM, and go on looking for blocks dominated by DOM. */
270 new_occ = occ_new (dom, new_occ);
273 else
275 /* Nothing special, go on with the next element. */
276 p_occ = &occ->next;
280 /* No place was found as a child of IDOM. Make BB a sibling of IDOM. */
281 new_occ->next = *p_head;
282 *p_head = new_occ;
285 /* Register that we found a division in BB. */
287 static inline void
288 register_division_in (basic_block bb)
290 struct occurrence *occ;
292 occ = (struct occurrence *) bb->aux;
293 if (!occ)
295 occ = occ_new (bb, NULL);
296 insert_bb (occ, ENTRY_BLOCK_PTR_FOR_FN (cfun), &occ_head);
299 occ->bb_has_division = true;
300 occ->num_divisions++;
304 /* Compute the number of divisions that postdominate each block in OCC and
305 its children. */
307 static void
308 compute_merit (struct occurrence *occ)
310 struct occurrence *occ_child;
311 basic_block dom = occ->bb;
313 for (occ_child = occ->children; occ_child; occ_child = occ_child->next)
315 basic_block bb;
316 if (occ_child->children)
317 compute_merit (occ_child);
319 if (flag_exceptions)
320 bb = single_noncomplex_succ (dom);
321 else
322 bb = dom;
324 if (dominated_by_p (CDI_POST_DOMINATORS, bb, occ_child->bb))
325 occ->num_divisions += occ_child->num_divisions;
330 /* Return whether USE_STMT is a floating-point division by DEF. */
331 static inline bool
332 is_division_by (gimple *use_stmt, tree def)
334 return is_gimple_assign (use_stmt)
335 && gimple_assign_rhs_code (use_stmt) == RDIV_EXPR
336 && gimple_assign_rhs2 (use_stmt) == def
 337 	 /* Do not recognize x / x as a valid division, as we are getting
 338 	    confused later by replacing all immediate uses of x in such
 339 	    a stmt.  */
340 && gimple_assign_rhs1 (use_stmt) != def;
343 /* Walk the subset of the dominator tree rooted at OCC, setting the
344 RECIP_DEF field to a definition of 1.0 / DEF that can be used in
345 the given basic block. The field may be left NULL, of course,
346 if it is not possible or profitable to do the optimization.
348 DEF_BSI is an iterator pointing at the statement defining DEF.
349 If RECIP_DEF is set, a dominator already has a computation that can
350 be used. */
352 static void
353 insert_reciprocals (gimple_stmt_iterator *def_gsi, struct occurrence *occ,
354 tree def, tree recip_def, int threshold)
356 tree type;
357 gassign *new_stmt;
358 gimple_stmt_iterator gsi;
359 struct occurrence *occ_child;
361 if (!recip_def
362 && (occ->bb_has_division || !flag_trapping_math)
363 && occ->num_divisions >= threshold)
365 /* Make a variable with the replacement and substitute it. */
366 type = TREE_TYPE (def);
367 recip_def = create_tmp_reg (type, "reciptmp");
368 new_stmt = gimple_build_assign (recip_def, RDIV_EXPR,
369 build_one_cst (type), def);
371 if (occ->bb_has_division)
373 /* Case 1: insert before an existing division. */
374 gsi = gsi_after_labels (occ->bb);
375 while (!gsi_end_p (gsi) && !is_division_by (gsi_stmt (gsi), def))
376 gsi_next (&gsi);
378 gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
380 else if (def_gsi && occ->bb == def_gsi->bb)
382 /* Case 2: insert right after the definition. Note that this will
383 never happen if the definition statement can throw, because in
384 that case the sole successor of the statement's basic block will
385 dominate all the uses as well. */
386 gsi_insert_after (def_gsi, new_stmt, GSI_NEW_STMT);
388 else
390 /* Case 3: insert in a basic block not containing defs/uses. */
391 gsi = gsi_after_labels (occ->bb);
392 gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
395 reciprocal_stats.rdivs_inserted++;
397 occ->recip_def_stmt = new_stmt;
400 occ->recip_def = recip_def;
401 for (occ_child = occ->children; occ_child; occ_child = occ_child->next)
402 insert_reciprocals (def_gsi, occ_child, def, recip_def, threshold);
406 /* Replace the division at USE_P with a multiplication by the reciprocal, if
407 possible. */
409 static inline void
410 replace_reciprocal (use_operand_p use_p)
412 gimple *use_stmt = USE_STMT (use_p);
413 basic_block bb = gimple_bb (use_stmt);
414 struct occurrence *occ = (struct occurrence *) bb->aux;
416 if (optimize_bb_for_speed_p (bb)
417 && occ->recip_def && use_stmt != occ->recip_def_stmt)
419 gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
420 gimple_assign_set_rhs_code (use_stmt, MULT_EXPR);
421 SET_USE (use_p, occ->recip_def);
422 fold_stmt_inplace (&gsi);
423 update_stmt (use_stmt);
428 /* Free OCC and return one more "struct occurrence" to be freed. */
430 static struct occurrence *
431 free_bb (struct occurrence *occ)
433 struct occurrence *child, *next;
435 /* First get the two pointers hanging off OCC. */
436 next = occ->next;
437 child = occ->children;
438 occ->bb->aux = NULL;
439 occ_pool->remove (occ);
441 /* Now ensure that we don't recurse unless it is necessary. */
442 if (!child)
443 return next;
444 else
446 while (next)
447 next = free_bb (next);
449 return child;
454 /* Look for floating-point divisions among DEF's uses, and try to
455 replace them by multiplications with the reciprocal. Add
456 as many statements computing the reciprocal as needed.
458 DEF must be a GIMPLE register of a floating-point type. */
460 static void
461 execute_cse_reciprocals_1 (gimple_stmt_iterator *def_gsi, tree def)
463 use_operand_p use_p;
464 imm_use_iterator use_iter;
465 struct occurrence *occ;
466 int count = 0, threshold;
468 gcc_assert (FLOAT_TYPE_P (TREE_TYPE (def)) && is_gimple_reg (def));
470 FOR_EACH_IMM_USE_FAST (use_p, use_iter, def)
472 gimple *use_stmt = USE_STMT (use_p);
473 if (is_division_by (use_stmt, def))
475 register_division_in (gimple_bb (use_stmt));
476 count++;
480 /* Do the expensive part only if we can hope to optimize something. */
481 threshold = targetm.min_divisions_for_recip_mul (TYPE_MODE (TREE_TYPE (def)));
482 if (count >= threshold)
484 gimple *use_stmt;
485 for (occ = occ_head; occ; occ = occ->next)
487 compute_merit (occ);
488 insert_reciprocals (def_gsi, occ, def, NULL, threshold);
491 FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, def)
493 if (is_division_by (use_stmt, def))
495 FOR_EACH_IMM_USE_ON_STMT (use_p, use_iter)
496 replace_reciprocal (use_p);
501 for (occ = occ_head; occ; )
502 occ = free_bb (occ);
504 occ_head = NULL;
507 /* Return an internal function that implements the reciprocal of CALL,
508 or IFN_LAST if there is no such function that the target supports. */
510 internal_fn
511 internal_fn_reciprocal (gcall *call)
513 internal_fn ifn;
515 switch (gimple_call_combined_fn (call))
517 CASE_CFN_SQRT:
518 ifn = IFN_RSQRT;
519 break;
521 default:
522 return IFN_LAST;
525 tree_pair types = direct_internal_fn_types (ifn, call);
526 if (!direct_internal_fn_supported_p (ifn, types, OPTIMIZE_FOR_SPEED))
527 return IFN_LAST;
529 return ifn;
532 /* Go through all the floating-point SSA_NAMEs, and call
533 execute_cse_reciprocals_1 on each of them. */
534 namespace {
536 const pass_data pass_data_cse_reciprocals =
538 GIMPLE_PASS, /* type */
539 "recip", /* name */
540 OPTGROUP_NONE, /* optinfo_flags */
541 TV_NONE, /* tv_id */
542 PROP_ssa, /* properties_required */
543 0, /* properties_provided */
544 0, /* properties_destroyed */
545 0, /* todo_flags_start */
546 TODO_update_ssa, /* todo_flags_finish */
549 class pass_cse_reciprocals : public gimple_opt_pass
551 public:
552 pass_cse_reciprocals (gcc::context *ctxt)
553 : gimple_opt_pass (pass_data_cse_reciprocals, ctxt)
556 /* opt_pass methods: */
557 virtual bool gate (function *) { return optimize && flag_reciprocal_math; }
558 virtual unsigned int execute (function *);
560 }; // class pass_cse_reciprocals
562 unsigned int
563 pass_cse_reciprocals::execute (function *fun)
565 basic_block bb;
566 tree arg;
568 occ_pool = new object_allocator<occurrence> ("dominators for recip");
570 memset (&reciprocal_stats, 0, sizeof (reciprocal_stats));
571 calculate_dominance_info (CDI_DOMINATORS);
572 calculate_dominance_info (CDI_POST_DOMINATORS);
574 if (flag_checking)
575 FOR_EACH_BB_FN (bb, fun)
576 gcc_assert (!bb->aux);
578 for (arg = DECL_ARGUMENTS (fun->decl); arg; arg = DECL_CHAIN (arg))
579 if (FLOAT_TYPE_P (TREE_TYPE (arg))
580 && is_gimple_reg (arg))
582 tree name = ssa_default_def (fun, arg);
583 if (name)
584 execute_cse_reciprocals_1 (NULL, name);
587 FOR_EACH_BB_FN (bb, fun)
589 tree def;
591 for (gphi_iterator gsi = gsi_start_phis (bb); !gsi_end_p (gsi);
592 gsi_next (&gsi))
594 gphi *phi = gsi.phi ();
595 def = PHI_RESULT (phi);
596 if (! virtual_operand_p (def)
597 && FLOAT_TYPE_P (TREE_TYPE (def)))
598 execute_cse_reciprocals_1 (NULL, def);
601 for (gimple_stmt_iterator gsi = gsi_after_labels (bb); !gsi_end_p (gsi);
602 gsi_next (&gsi))
604 gimple *stmt = gsi_stmt (gsi);
606 if (gimple_has_lhs (stmt)
607 && (def = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_DEF)) != NULL
608 && FLOAT_TYPE_P (TREE_TYPE (def))
609 && TREE_CODE (def) == SSA_NAME)
610 execute_cse_reciprocals_1 (&gsi, def);
613 if (optimize_bb_for_size_p (bb))
614 continue;
616 /* Scan for a/func(b) and convert it to reciprocal a*rfunc(b). */
617 for (gimple_stmt_iterator gsi = gsi_after_labels (bb); !gsi_end_p (gsi);
618 gsi_next (&gsi))
620 gimple *stmt = gsi_stmt (gsi);
622 if (is_gimple_assign (stmt)
623 && gimple_assign_rhs_code (stmt) == RDIV_EXPR)
625 tree arg1 = gimple_assign_rhs2 (stmt);
626 gimple *stmt1;
628 if (TREE_CODE (arg1) != SSA_NAME)
629 continue;
631 stmt1 = SSA_NAME_DEF_STMT (arg1);
633 if (is_gimple_call (stmt1)
634 && gimple_call_lhs (stmt1))
636 bool fail;
637 imm_use_iterator ui;
638 use_operand_p use_p;
639 tree fndecl = NULL_TREE;
641 gcall *call = as_a <gcall *> (stmt1);
642 internal_fn ifn = internal_fn_reciprocal (call);
643 if (ifn == IFN_LAST)
645 fndecl = gimple_call_fndecl (call);
646 if (!fndecl
647 || DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_MD)
648 continue;
649 fndecl = targetm.builtin_reciprocal (fndecl);
650 if (!fndecl)
651 continue;
654 /* Check that all uses of the SSA name are divisions,
655 otherwise replacing the defining statement will do
656 the wrong thing. */
657 fail = false;
658 FOR_EACH_IMM_USE_FAST (use_p, ui, arg1)
660 gimple *stmt2 = USE_STMT (use_p);
661 if (is_gimple_debug (stmt2))
662 continue;
663 if (!is_gimple_assign (stmt2)
664 || gimple_assign_rhs_code (stmt2) != RDIV_EXPR
665 || gimple_assign_rhs1 (stmt2) == arg1
666 || gimple_assign_rhs2 (stmt2) != arg1)
668 fail = true;
669 break;
672 if (fail)
673 continue;
675 gimple_replace_ssa_lhs (call, arg1);
676 if (gimple_call_internal_p (call) != (ifn != IFN_LAST))
678 auto_vec<tree, 4> args;
679 for (unsigned int i = 0;
680 i < gimple_call_num_args (call); i++)
681 args.safe_push (gimple_call_arg (call, i));
682 gcall *stmt2;
683 if (ifn == IFN_LAST)
684 stmt2 = gimple_build_call_vec (fndecl, args);
685 else
686 stmt2 = gimple_build_call_internal_vec (ifn, args);
687 gimple_call_set_lhs (stmt2, arg1);
688 if (gimple_vdef (call))
690 gimple_set_vdef (stmt2, gimple_vdef (call));
691 SSA_NAME_DEF_STMT (gimple_vdef (stmt2)) = stmt2;
693 gimple_set_vuse (stmt2, gimple_vuse (call));
694 gimple_stmt_iterator gsi2 = gsi_for_stmt (call);
695 gsi_replace (&gsi2, stmt2, true);
697 else
699 if (ifn == IFN_LAST)
700 gimple_call_set_fndecl (call, fndecl);
701 else
702 gimple_call_set_internal_fn (call, ifn);
703 update_stmt (call);
705 reciprocal_stats.rfuncs_inserted++;
707 FOR_EACH_IMM_USE_STMT (stmt, ui, arg1)
709 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
710 gimple_assign_set_rhs_code (stmt, MULT_EXPR);
711 fold_stmt_inplace (&gsi);
712 update_stmt (stmt);
719 statistics_counter_event (fun, "reciprocal divs inserted",
720 reciprocal_stats.rdivs_inserted);
721 statistics_counter_event (fun, "reciprocal functions inserted",
722 reciprocal_stats.rfuncs_inserted);
724 free_dominance_info (CDI_DOMINATORS);
725 free_dominance_info (CDI_POST_DOMINATORS);
726 delete occ_pool;
727 return 0;
730 } // anon namespace
732 gimple_opt_pass *
733 make_pass_cse_reciprocals (gcc::context *ctxt)
735 return new pass_cse_reciprocals (ctxt);
 738 /* Records an occurrence at statement USE_STMT in the vector of statements
 739    STMTS if it is dominated by *TOP_BB, dominates it, or if *TOP_BB is
 740    not yet initialized.  Returns true if the occurrence was pushed on
741 the vector. Adjusts *TOP_BB to be the basic block dominating all
742 statements in the vector. */
744 static bool
745 maybe_record_sincos (vec<gimple *> *stmts,
746 basic_block *top_bb, gimple *use_stmt)
748 basic_block use_bb = gimple_bb (use_stmt);
749 if (*top_bb
750 && (*top_bb == use_bb
751 || dominated_by_p (CDI_DOMINATORS, use_bb, *top_bb)))
752 stmts->safe_push (use_stmt);
753 else if (!*top_bb
754 || dominated_by_p (CDI_DOMINATORS, *top_bb, use_bb))
756 stmts->safe_push (use_stmt);
757 *top_bb = use_bb;
759 else
760 return false;
762 return true;
765 /* Look for sin, cos and cexpi calls with the same argument NAME and
766 create a single call to cexpi CSEing the result in this case.
767 We first walk over all immediate uses of the argument collecting
768 statements that we can CSE in a vector and in a second pass replace
769 the statement rhs with a REALPART or IMAGPART expression on the
770 result of the cexpi call we insert before the use statement that
771 dominates all other candidates. */
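/* For example (illustrative), if the same block contains

       c = cosf (a);
       s = sinf (a);

   a single call res = __builtin_cexpif (a) is inserted ahead of them and the
   two calls are replaced by copies of REALPART_EXPR <res> and
   IMAGPART_EXPR <res> respectively.  */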
773 static bool
774 execute_cse_sincos_1 (tree name)
776 gimple_stmt_iterator gsi;
777 imm_use_iterator use_iter;
778 tree fndecl, res, type;
779 gimple *def_stmt, *use_stmt, *stmt;
780 int seen_cos = 0, seen_sin = 0, seen_cexpi = 0;
781 auto_vec<gimple *> stmts;
782 basic_block top_bb = NULL;
783 int i;
784 bool cfg_changed = false;
786 type = TREE_TYPE (name);
787 FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, name)
789 if (gimple_code (use_stmt) != GIMPLE_CALL
790 || !gimple_call_lhs (use_stmt))
791 continue;
793 switch (gimple_call_combined_fn (use_stmt))
795 CASE_CFN_COS:
796 seen_cos |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
797 break;
799 CASE_CFN_SIN:
800 seen_sin |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
801 break;
803 CASE_CFN_CEXPI:
804 seen_cexpi |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
805 break;
807 default:;
811 if (seen_cos + seen_sin + seen_cexpi <= 1)
812 return false;
814 /* Simply insert cexpi at the beginning of top_bb but not earlier than
815 the name def statement. */
816 fndecl = mathfn_built_in (type, BUILT_IN_CEXPI);
817 if (!fndecl)
818 return false;
819 stmt = gimple_build_call (fndecl, 1, name);
820 res = make_temp_ssa_name (TREE_TYPE (TREE_TYPE (fndecl)), stmt, "sincostmp");
821 gimple_call_set_lhs (stmt, res);
823 def_stmt = SSA_NAME_DEF_STMT (name);
824 if (!SSA_NAME_IS_DEFAULT_DEF (name)
825 && gimple_code (def_stmt) != GIMPLE_PHI
826 && gimple_bb (def_stmt) == top_bb)
828 gsi = gsi_for_stmt (def_stmt);
829 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
831 else
833 gsi = gsi_after_labels (top_bb);
834 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
836 sincos_stats.inserted++;
838 /* And adjust the recorded old call sites. */
839 for (i = 0; stmts.iterate (i, &use_stmt); ++i)
841 tree rhs = NULL;
843 switch (gimple_call_combined_fn (use_stmt))
845 CASE_CFN_COS:
846 rhs = fold_build1 (REALPART_EXPR, type, res);
847 break;
849 CASE_CFN_SIN:
850 rhs = fold_build1 (IMAGPART_EXPR, type, res);
851 break;
853 CASE_CFN_CEXPI:
854 rhs = res;
855 break;
857 default:;
858 gcc_unreachable ();
861 /* Replace call with a copy. */
862 stmt = gimple_build_assign (gimple_call_lhs (use_stmt), rhs);
864 gsi = gsi_for_stmt (use_stmt);
865 gsi_replace (&gsi, stmt, true);
866 if (gimple_purge_dead_eh_edges (gimple_bb (stmt)))
867 cfg_changed = true;
870 return cfg_changed;
873 /* To evaluate powi(x,n), the floating point value x raised to the
874 constant integer exponent n, we use a hybrid algorithm that
875 combines the "window method" with look-up tables. For an
876 introduction to exponentiation algorithms and "addition chains",
877 see section 4.6.3, "Evaluation of Powers" of Donald E. Knuth,
878 "Seminumerical Algorithms", Vol. 2, "The Art of Computer Programming",
879 3rd Edition, 1998, and Daniel M. Gordon, "A Survey of Fast Exponentiation
880 Methods", Journal of Algorithms, Vol. 27, pp. 129-146, 1998. */
882 /* Provide a default value for POWI_MAX_MULTS, the maximum number of
883 multiplications to inline before calling the system library's pow
884 function. powi(x,n) requires at worst 2*bits(n)-2 multiplications,
885 so this default never requires calling pow, powf or powl. */
887 #ifndef POWI_MAX_MULTS
888 #define POWI_MAX_MULTS (2*HOST_BITS_PER_WIDE_INT-2)
889 #endif
891 /* The size of the "optimal power tree" lookup table. All
892 exponents less than this value are simply looked up in the
893 powi_table below. This threshold is also used to size the
894 cache of pseudo registers that hold intermediate results. */
895 #define POWI_TABLE_SIZE 256
 897 /* The size, in bits, of the window used in the "window method"
898 exponentiation algorithm. This is equivalent to a radix of
899 (1<<POWI_WINDOW_SIZE) in the corresponding "m-ary method". */
900 #define POWI_WINDOW_SIZE 3
902 /* The following table is an efficient representation of an
903 "optimal power tree". For each value, i, the corresponding
 904    value, j, in the table states that an optimal evaluation
905 sequence for calculating pow(x,i) can be found by evaluating
906 pow(x,j)*pow(x,i-j). An optimal power tree for the first
907 100 integers is given in Knuth's "Seminumerical algorithms". */
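/* A worked example derived from the table below: powi_table[15] is 9, so
   x**15 is evaluated as x**9 * x**6.  Recursing, x**9 = x**6 * x**3,
   x**6 = x**3 * x**3, x**3 = x**2 * x and x**2 = x * x, for a total of five
   multiplications, one fewer than the six needed by plain square-and-multiply
   exponentiation of the same exponent.  */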
909 static const unsigned char powi_table[POWI_TABLE_SIZE] =
911 0, 1, 1, 2, 2, 3, 3, 4, /* 0 - 7 */
912 4, 6, 5, 6, 6, 10, 7, 9, /* 8 - 15 */
913 8, 16, 9, 16, 10, 12, 11, 13, /* 16 - 23 */
914 12, 17, 13, 18, 14, 24, 15, 26, /* 24 - 31 */
915 16, 17, 17, 19, 18, 33, 19, 26, /* 32 - 39 */
916 20, 25, 21, 40, 22, 27, 23, 44, /* 40 - 47 */
917 24, 32, 25, 34, 26, 29, 27, 44, /* 48 - 55 */
918 28, 31, 29, 34, 30, 60, 31, 36, /* 56 - 63 */
919 32, 64, 33, 34, 34, 46, 35, 37, /* 64 - 71 */
920 36, 65, 37, 50, 38, 48, 39, 69, /* 72 - 79 */
921 40, 49, 41, 43, 42, 51, 43, 58, /* 80 - 87 */
922 44, 64, 45, 47, 46, 59, 47, 76, /* 88 - 95 */
923 48, 65, 49, 66, 50, 67, 51, 66, /* 96 - 103 */
924 52, 70, 53, 74, 54, 104, 55, 74, /* 104 - 111 */
925 56, 64, 57, 69, 58, 78, 59, 68, /* 112 - 119 */
926 60, 61, 61, 80, 62, 75, 63, 68, /* 120 - 127 */
927 64, 65, 65, 128, 66, 129, 67, 90, /* 128 - 135 */
928 68, 73, 69, 131, 70, 94, 71, 88, /* 136 - 143 */
929 72, 128, 73, 98, 74, 132, 75, 121, /* 144 - 151 */
930 76, 102, 77, 124, 78, 132, 79, 106, /* 152 - 159 */
931 80, 97, 81, 160, 82, 99, 83, 134, /* 160 - 167 */
932 84, 86, 85, 95, 86, 160, 87, 100, /* 168 - 175 */
933 88, 113, 89, 98, 90, 107, 91, 122, /* 176 - 183 */
934 92, 111, 93, 102, 94, 126, 95, 150, /* 184 - 191 */
935 96, 128, 97, 130, 98, 133, 99, 195, /* 192 - 199 */
936 100, 128, 101, 123, 102, 164, 103, 138, /* 200 - 207 */
937 104, 145, 105, 146, 106, 109, 107, 149, /* 208 - 215 */
938 108, 200, 109, 146, 110, 170, 111, 157, /* 216 - 223 */
939 112, 128, 113, 130, 114, 182, 115, 132, /* 224 - 231 */
940 116, 200, 117, 132, 118, 158, 119, 206, /* 232 - 239 */
941 120, 240, 121, 162, 122, 147, 123, 152, /* 240 - 247 */
942 124, 166, 125, 214, 126, 138, 127, 153, /* 248 - 255 */
946 /* Return the number of multiplications required to calculate
947 powi(x,n) where n is less than POWI_TABLE_SIZE. This is a
948 subroutine of powi_cost. CACHE is an array indicating
949 which exponents have already been calculated. */
951 static int
952 powi_lookup_cost (unsigned HOST_WIDE_INT n, bool *cache)
954 /* If we've already calculated this exponent, then this evaluation
955 doesn't require any additional multiplications. */
956 if (cache[n])
957 return 0;
959 cache[n] = true;
960 return powi_lookup_cost (n - powi_table[n], cache)
961 + powi_lookup_cost (powi_table[n], cache) + 1;
964 /* Return the number of multiplications required to calculate
965 powi(x,n) for an arbitrary x, given the exponent N. This
966 function needs to be kept in sync with powi_as_mults below. */
968 static int
969 powi_cost (HOST_WIDE_INT n)
971 bool cache[POWI_TABLE_SIZE];
972 unsigned HOST_WIDE_INT digit;
973 unsigned HOST_WIDE_INT val;
974 int result;
976 if (n == 0)
977 return 0;
979 /* Ignore the reciprocal when calculating the cost. */
980 val = (n < 0) ? -n : n;
982 /* Initialize the exponent cache. */
983 memset (cache, 0, POWI_TABLE_SIZE * sizeof (bool));
984 cache[1] = true;
986 result = 0;
988 while (val >= POWI_TABLE_SIZE)
990 if (val & 1)
992 digit = val & ((1 << POWI_WINDOW_SIZE) - 1);
993 result += powi_lookup_cost (digit, cache)
994 + POWI_WINDOW_SIZE + 1;
995 val >>= POWI_WINDOW_SIZE;
997 else
999 val >>= 1;
1000 result++;
1004 return result + powi_lookup_cost (val, cache);
1007 /* Recursive subroutine of powi_as_mults. This function takes the
1008 array, CACHE, of already calculated exponents and an exponent N and
1009 returns a tree that corresponds to CACHE[1]**N, with type TYPE. */
1011 static tree
1012 powi_as_mults_1 (gimple_stmt_iterator *gsi, location_t loc, tree type,
1013 HOST_WIDE_INT n, tree *cache)
1015 tree op0, op1, ssa_target;
1016 unsigned HOST_WIDE_INT digit;
1017 gassign *mult_stmt;
1019 if (n < POWI_TABLE_SIZE && cache[n])
1020 return cache[n];
1022 ssa_target = make_temp_ssa_name (type, NULL, "powmult");
1024 if (n < POWI_TABLE_SIZE)
1026 cache[n] = ssa_target;
1027 op0 = powi_as_mults_1 (gsi, loc, type, n - powi_table[n], cache);
1028 op1 = powi_as_mults_1 (gsi, loc, type, powi_table[n], cache);
1030 else if (n & 1)
1032 digit = n & ((1 << POWI_WINDOW_SIZE) - 1);
1033 op0 = powi_as_mults_1 (gsi, loc, type, n - digit, cache);
1034 op1 = powi_as_mults_1 (gsi, loc, type, digit, cache);
1036 else
1038 op0 = powi_as_mults_1 (gsi, loc, type, n >> 1, cache);
1039 op1 = op0;
1042 mult_stmt = gimple_build_assign (ssa_target, MULT_EXPR, op0, op1);
1043 gimple_set_location (mult_stmt, loc);
1044 gsi_insert_before (gsi, mult_stmt, GSI_SAME_STMT);
1046 return ssa_target;
1049 /* Convert ARG0**N to a tree of multiplications of ARG0 with itself.
1050 This function needs to be kept in sync with powi_cost above. */
1052 static tree
1053 powi_as_mults (gimple_stmt_iterator *gsi, location_t loc,
1054 tree arg0, HOST_WIDE_INT n)
1056 tree cache[POWI_TABLE_SIZE], result, type = TREE_TYPE (arg0);
1057 gassign *div_stmt;
1058 tree target;
1060 if (n == 0)
1061 return build_real (type, dconst1);
1063 memset (cache, 0, sizeof (cache));
1064 cache[1] = arg0;
1066 result = powi_as_mults_1 (gsi, loc, type, (n < 0) ? -n : n, cache);
1067 if (n >= 0)
1068 return result;
1070 /* If the original exponent was negative, reciprocate the result. */
1071 target = make_temp_ssa_name (type, NULL, "powmult");
1072 div_stmt = gimple_build_assign (target, RDIV_EXPR,
1073 build_real (type, dconst1), result);
1074 gimple_set_location (div_stmt, loc);
1075 gsi_insert_before (gsi, div_stmt, GSI_SAME_STMT);
1077 return target;
1080 /* ARG0 and N are the two arguments to a powi builtin in GSI with
1081 location info LOC. If the arguments are appropriate, create an
1082 equivalent sequence of statements prior to GSI using an optimal
 1083    number of multiplications, and return an expression holding the
1084 result. */
1086 static tree
1087 gimple_expand_builtin_powi (gimple_stmt_iterator *gsi, location_t loc,
1088 tree arg0, HOST_WIDE_INT n)
1090 /* Avoid largest negative number. */
1091 if (n != -n
1092 && ((n >= -1 && n <= 2)
1093 || (optimize_function_for_speed_p (cfun)
1094 && powi_cost (n) <= POWI_MAX_MULTS)))
1095 return powi_as_mults (gsi, loc, arg0, n);
1097 return NULL_TREE;
1100 /* Build a gimple call statement that calls FN with argument ARG.
1101 Set the lhs of the call statement to a fresh SSA name. Insert the
1102 statement prior to GSI's current position, and return the fresh
1103 SSA name. */
1105 static tree
1106 build_and_insert_call (gimple_stmt_iterator *gsi, location_t loc,
1107 tree fn, tree arg)
1109 gcall *call_stmt;
1110 tree ssa_target;
1112 call_stmt = gimple_build_call (fn, 1, arg);
1113 ssa_target = make_temp_ssa_name (TREE_TYPE (arg), NULL, "powroot");
1114 gimple_set_lhs (call_stmt, ssa_target);
1115 gimple_set_location (call_stmt, loc);
1116 gsi_insert_before (gsi, call_stmt, GSI_SAME_STMT);
1118 return ssa_target;
1121 /* Build a gimple binary operation with the given CODE and arguments
1122 ARG0, ARG1, assigning the result to a new SSA name for variable
1123 TARGET. Insert the statement prior to GSI's current position, and
 1124    return the fresh SSA name.  */
1126 static tree
1127 build_and_insert_binop (gimple_stmt_iterator *gsi, location_t loc,
1128 const char *name, enum tree_code code,
1129 tree arg0, tree arg1)
1131 tree result = make_temp_ssa_name (TREE_TYPE (arg0), NULL, name);
1132 gassign *stmt = gimple_build_assign (result, code, arg0, arg1);
1133 gimple_set_location (stmt, loc);
1134 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
1135 return result;
1138 /* Build a gimple reference operation with the given CODE and argument
1139 ARG, assigning the result to a new SSA name of TYPE with NAME.
1140 Insert the statement prior to GSI's current position, and return
1141 the fresh SSA name. */
1143 static inline tree
1144 build_and_insert_ref (gimple_stmt_iterator *gsi, location_t loc, tree type,
1145 const char *name, enum tree_code code, tree arg0)
1147 tree result = make_temp_ssa_name (type, NULL, name);
1148 gimple *stmt = gimple_build_assign (result, build1 (code, type, arg0));
1149 gimple_set_location (stmt, loc);
1150 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
1151 return result;
1154 /* Build a gimple assignment to cast VAL to TYPE. Insert the statement
1155 prior to GSI's current position, and return the fresh SSA name. */
1157 static tree
1158 build_and_insert_cast (gimple_stmt_iterator *gsi, location_t loc,
1159 tree type, tree val)
1161 tree result = make_ssa_name (type);
1162 gassign *stmt = gimple_build_assign (result, NOP_EXPR, val);
1163 gimple_set_location (stmt, loc);
1164 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
1165 return result;
1168 struct pow_synth_sqrt_info
1170 bool *factors;
1171 unsigned int deepest;
1172 unsigned int num_mults;
1175 /* Return true iff the real value C can be represented as a
1176 sum of powers of 0.5 up to N. That is:
1177 C == SUM<i from 1..N> (a[i]*(0.5**i)) where a[i] is either 0 or 1.
1178 Record in INFO the various parameters of the synthesis algorithm such
1179 as the factors a[i], the maximum 0.5 power and the number of
1180 multiplications that will be required. */
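/* An illustrative case: for C = 0.625 = 0.5 + 0.125 and N >= 3 the loop
   below records factors[0] = 1, factors[1] = 0, factors[2] = 1, deepest = 3
   and num_mults = 1, corresponding to the product
   sqrt (x) * sqrt (sqrt (sqrt (x))).  */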
1182 bool
1183 representable_as_half_series_p (REAL_VALUE_TYPE c, unsigned n,
1184 struct pow_synth_sqrt_info *info)
1186 REAL_VALUE_TYPE factor = dconsthalf;
1187 REAL_VALUE_TYPE remainder = c;
1189 info->deepest = 0;
1190 info->num_mults = 0;
1191 memset (info->factors, 0, n * sizeof (bool));
1193 for (unsigned i = 0; i < n; i++)
1195 REAL_VALUE_TYPE res;
1197 /* If something inexact happened bail out now. */
1198 if (real_arithmetic (&res, MINUS_EXPR, &remainder, &factor))
1199 return false;
1201 /* We have hit zero. The number is representable as a sum
1202 of powers of 0.5. */
1203 if (real_equal (&res, &dconst0))
1205 info->factors[i] = true;
1206 info->deepest = i + 1;
1207 return true;
1209 else if (!REAL_VALUE_NEGATIVE (res))
1211 remainder = res;
1212 info->factors[i] = true;
1213 info->num_mults++;
1215 else
1216 info->factors[i] = false;
1218 real_arithmetic (&factor, MULT_EXPR, &factor, &dconsthalf);
1220 return false;
1223 /* Return the tree corresponding to FN being applied
1224 to ARG N times at GSI and LOC.
1225 Look up previous results from CACHE if need be.
1226 cache[0] should contain just plain ARG i.e. FN applied to ARG 0 times. */
1228 static tree
1229 get_fn_chain (tree arg, unsigned int n, gimple_stmt_iterator *gsi,
1230 tree fn, location_t loc, tree *cache)
1232 tree res = cache[n];
1233 if (!res)
1235 tree prev = get_fn_chain (arg, n - 1, gsi, fn, loc, cache);
1236 res = build_and_insert_call (gsi, loc, fn, prev);
1237 cache[n] = res;
1240 return res;
1243 /* Print to STREAM the repeated application of function FNAME to ARG
1244 N times. So, for FNAME = "foo", ARG = "x", N = 2 it would print:
1245 "foo (foo (x))". */
1247 static void
1248 print_nested_fn (FILE* stream, const char *fname, const char* arg,
1249 unsigned int n)
1251 if (n == 0)
1252 fprintf (stream, "%s", arg);
1253 else
1255 fprintf (stream, "%s (", fname);
1256 print_nested_fn (stream, fname, arg, n - 1);
1257 fprintf (stream, ")");
1261 /* Print to STREAM the fractional sequence of sqrt chains
1262 applied to ARG, described by INFO. Used for the dump file. */
1264 static void
1265 dump_fractional_sqrt_sequence (FILE *stream, const char *arg,
1266 struct pow_synth_sqrt_info *info)
1268 for (unsigned int i = 0; i < info->deepest; i++)
1270 bool is_set = info->factors[i];
1271 if (is_set)
1273 print_nested_fn (stream, "sqrt", arg, i + 1);
1274 if (i != info->deepest - 1)
1275 fprintf (stream, " * ");
1280 /* Print to STREAM a representation of raising ARG to an integer
1281 power N. Used for the dump file. */
1283 static void
1284 dump_integer_part (FILE *stream, const char* arg, HOST_WIDE_INT n)
1286 if (n > 1)
1287 fprintf (stream, "powi (%s, " HOST_WIDE_INT_PRINT_DEC ")", arg, n);
1288 else if (n == 1)
1289 fprintf (stream, "%s", arg);
1292 /* Attempt to synthesize a POW[F] (ARG0, ARG1) call using chains of
1293 square roots. Place at GSI and LOC. Limit the maximum depth
1294 of the sqrt chains to MAX_DEPTH. Return the tree holding the
1295 result of the expanded sequence or NULL_TREE if the expansion failed.
1297 This routine assumes that ARG1 is a real number with a fractional part
1298 (the integer exponent case will have been handled earlier in
1299 gimple_expand_builtin_pow).
1301 For ARG1 > 0.0:
1302 * For ARG1 composed of a whole part WHOLE_PART and a fractional part
1303 FRAC_PART i.e. WHOLE_PART == floor (ARG1) and
1304 FRAC_PART == ARG1 - WHOLE_PART:
1305 Produce POWI (ARG0, WHOLE_PART) * POW (ARG0, FRAC_PART) where
1306 POW (ARG0, FRAC_PART) is expanded as a product of square root chains
1307 if it can be expressed as such, that is if FRAC_PART satisfies:
1308 FRAC_PART == <SUM from i = 1 until MAX_DEPTH> (a[i] * (0.5**i))
1309 where integer a[i] is either 0 or 1.
1311 Example:
1312 POW (x, 3.625) == POWI (x, 3) * POW (x, 0.625)
1313 --> POWI (x, 3) * SQRT (x) * SQRT (SQRT (SQRT (x)))
1315 For ARG1 < 0.0 there are two approaches:
1316 * (A) Expand to 1.0 / POW (ARG0, -ARG1) where POW (ARG0, -ARG1)
1317 is calculated as above.
1319 Example:
1320 POW (x, -5.625) == 1.0 / POW (x, 5.625)
1321 --> 1.0 / (POWI (x, 5) * SQRT (x) * SQRT (SQRT (SQRT (x))))
1323 * (B) : WHOLE_PART := - ceil (abs (ARG1))
1324 FRAC_PART := ARG1 - WHOLE_PART
1325 and expand to POW (x, FRAC_PART) / POWI (x, WHOLE_PART).
1326 Example:
1327 POW (x, -5.875) == POW (x, 0.125) / POWI (X, 6)
1328 --> SQRT (SQRT (SQRT (x))) / (POWI (x, 6))
1330 For ARG1 < 0.0 we choose between (A) and (B) depending on
1331 how many multiplications we'd have to do.
1332 So, for the example in (B): POW (x, -5.875), if we were to
1333 follow algorithm (A) we would produce:
1334 1.0 / POWI (X, 5) * SQRT (X) * SQRT (SQRT (X)) * SQRT (SQRT (SQRT (X)))
1335 which contains more multiplications than approach (B).
1337 Hopefully, this approach will eliminate potentially expensive POW library
1338 calls when unsafe floating point math is enabled and allow the compiler to
1339 further optimise the multiplies, square roots and divides produced by this
1340 function. */
1342 static tree
1343 expand_pow_as_sqrts (gimple_stmt_iterator *gsi, location_t loc,
1344 tree arg0, tree arg1, HOST_WIDE_INT max_depth)
1346 tree type = TREE_TYPE (arg0);
1347 machine_mode mode = TYPE_MODE (type);
1348 tree sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT);
1349 bool one_over = true;
1351 if (!sqrtfn)
1352 return NULL_TREE;
1354 if (TREE_CODE (arg1) != REAL_CST)
1355 return NULL_TREE;
1357 REAL_VALUE_TYPE exp_init = TREE_REAL_CST (arg1);
1359 gcc_assert (max_depth > 0);
1360 tree *cache = XALLOCAVEC (tree, max_depth + 1);
1362 struct pow_synth_sqrt_info synth_info;
1363 synth_info.factors = XALLOCAVEC (bool, max_depth + 1);
1364 synth_info.deepest = 0;
1365 synth_info.num_mults = 0;
1367 bool neg_exp = REAL_VALUE_NEGATIVE (exp_init);
1368 REAL_VALUE_TYPE exp = real_value_abs (&exp_init);
1370 /* The whole and fractional parts of exp. */
1371 REAL_VALUE_TYPE whole_part;
1372 REAL_VALUE_TYPE frac_part;
1374 real_floor (&whole_part, mode, &exp);
1375 real_arithmetic (&frac_part, MINUS_EXPR, &exp, &whole_part);
1378 REAL_VALUE_TYPE ceil_whole = dconst0;
1379 REAL_VALUE_TYPE ceil_fract = dconst0;
1381 if (neg_exp)
1383 real_ceil (&ceil_whole, mode, &exp);
1384 real_arithmetic (&ceil_fract, MINUS_EXPR, &ceil_whole, &exp);
1387 if (!representable_as_half_series_p (frac_part, max_depth, &synth_info))
1388 return NULL_TREE;
1390 /* Check whether it's more profitable to not use 1.0 / ... */
1391 if (neg_exp)
1393 struct pow_synth_sqrt_info alt_synth_info;
1394 alt_synth_info.factors = XALLOCAVEC (bool, max_depth + 1);
1395 alt_synth_info.deepest = 0;
1396 alt_synth_info.num_mults = 0;
1398 if (representable_as_half_series_p (ceil_fract, max_depth,
1399 &alt_synth_info)
1400 && alt_synth_info.deepest <= synth_info.deepest
1401 && alt_synth_info.num_mults < synth_info.num_mults)
1403 whole_part = ceil_whole;
1404 frac_part = ceil_fract;
1405 synth_info.deepest = alt_synth_info.deepest;
1406 synth_info.num_mults = alt_synth_info.num_mults;
1407 memcpy (synth_info.factors, alt_synth_info.factors,
1408 (max_depth + 1) * sizeof (bool));
1409 one_over = false;
1413 HOST_WIDE_INT n = real_to_integer (&whole_part);
1414 REAL_VALUE_TYPE cint;
1415 real_from_integer (&cint, VOIDmode, n, SIGNED);
1417 if (!real_identical (&whole_part, &cint))
1418 return NULL_TREE;
1420 if (powi_cost (n) + synth_info.num_mults > POWI_MAX_MULTS)
1421 return NULL_TREE;
1423 memset (cache, 0, (max_depth + 1) * sizeof (tree));
1425 tree integer_res = n == 0 ? build_real (type, dconst1) : arg0;
1427 /* Calculate the integer part of the exponent. */
1428 if (n > 1)
1430 integer_res = gimple_expand_builtin_powi (gsi, loc, arg0, n);
1431 if (!integer_res)
1432 return NULL_TREE;
1435 if (dump_file)
1437 char string[64];
1439 real_to_decimal (string, &exp_init, sizeof (string), 0, 1);
1440 fprintf (dump_file, "synthesizing pow (x, %s) as:\n", string);
1442 if (neg_exp)
1444 if (one_over)
1446 fprintf (dump_file, "1.0 / (");
1447 dump_integer_part (dump_file, "x", n);
1448 if (n > 0)
1449 fprintf (dump_file, " * ");
1450 dump_fractional_sqrt_sequence (dump_file, "x", &synth_info);
1451 fprintf (dump_file, ")");
1453 else
1455 dump_fractional_sqrt_sequence (dump_file, "x", &synth_info);
1456 fprintf (dump_file, " / (");
1457 dump_integer_part (dump_file, "x", n);
1458 fprintf (dump_file, ")");
1461 else
1463 dump_fractional_sqrt_sequence (dump_file, "x", &synth_info);
1464 if (n > 0)
1465 fprintf (dump_file, " * ");
1466 dump_integer_part (dump_file, "x", n);
1469 fprintf (dump_file, "\ndeepest sqrt chain: %d\n", synth_info.deepest);
1473 tree fract_res = NULL_TREE;
1474 cache[0] = arg0;
1476 /* Calculate the fractional part of the exponent. */
1477 for (unsigned i = 0; i < synth_info.deepest; i++)
1479 if (synth_info.factors[i])
1481 tree sqrt_chain = get_fn_chain (arg0, i + 1, gsi, sqrtfn, loc, cache);
1483 if (!fract_res)
1484 fract_res = sqrt_chain;
1486 else
1487 fract_res = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
1488 fract_res, sqrt_chain);
1492 tree res = NULL_TREE;
1494 if (neg_exp)
1496 if (one_over)
1498 if (n > 0)
1499 res = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
1500 fract_res, integer_res);
1501 else
1502 res = fract_res;
1504 res = build_and_insert_binop (gsi, loc, "powrootrecip", RDIV_EXPR,
1505 build_real (type, dconst1), res);
1507 else
1509 res = build_and_insert_binop (gsi, loc, "powroot", RDIV_EXPR,
1510 fract_res, integer_res);
1513 else
1514 res = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
1515 fract_res, integer_res);
1516 return res;
1519 /* ARG0 and ARG1 are the two arguments to a pow builtin call in GSI
1520 with location info LOC. If possible, create an equivalent and
1521 less expensive sequence of statements prior to GSI, and return an
 1522    expression holding the result.  */
1524 static tree
1525 gimple_expand_builtin_pow (gimple_stmt_iterator *gsi, location_t loc,
1526 tree arg0, tree arg1)
1528 REAL_VALUE_TYPE c, cint, dconst1_3, dconst1_4, dconst1_6;
1529 REAL_VALUE_TYPE c2, dconst3;
1530 HOST_WIDE_INT n;
1531 tree type, sqrtfn, cbrtfn, sqrt_arg0, result, cbrt_x, powi_cbrt_x;
1532 machine_mode mode;
1533 bool speed_p = optimize_bb_for_speed_p (gsi_bb (*gsi));
1534 bool hw_sqrt_exists, c_is_int, c2_is_int;
1536 dconst1_4 = dconst1;
1537 SET_REAL_EXP (&dconst1_4, REAL_EXP (&dconst1_4) - 2);
1539 /* If the exponent isn't a constant, there's nothing of interest
1540 to be done. */
1541 if (TREE_CODE (arg1) != REAL_CST)
1542 return NULL_TREE;
1544 /* Don't perform the operation if flag_signaling_nans is on
1545 and the operand is a signaling NaN. */
1546 if (HONOR_SNANS (TYPE_MODE (TREE_TYPE (arg1)))
1547 && ((TREE_CODE (arg0) == REAL_CST
1548 && REAL_VALUE_ISSIGNALING_NAN (TREE_REAL_CST (arg0)))
1549 || REAL_VALUE_ISSIGNALING_NAN (TREE_REAL_CST (arg1))))
1550 return NULL_TREE;
1552 /* If the exponent is equivalent to an integer, expand to an optimal
1553 multiplication sequence when profitable. */
1554 c = TREE_REAL_CST (arg1);
1555 n = real_to_integer (&c);
1556 real_from_integer (&cint, VOIDmode, n, SIGNED);
1557 c_is_int = real_identical (&c, &cint);
1559 if (c_is_int
1560 && ((n >= -1 && n <= 2)
1561 || (flag_unsafe_math_optimizations
1562 && speed_p
1563 && powi_cost (n) <= POWI_MAX_MULTS)))
1564 return gimple_expand_builtin_powi (gsi, loc, arg0, n);
1566 /* Attempt various optimizations using sqrt and cbrt. */
1567 type = TREE_TYPE (arg0);
1568 mode = TYPE_MODE (type);
1569 sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT);
1571 /* Optimize pow(x,0.5) = sqrt(x). This replacement is always safe
1572 unless signed zeros must be maintained. pow(-0,0.5) = +0, while
1573 sqrt(-0) = -0. */
1574 if (sqrtfn
1575 && real_equal (&c, &dconsthalf)
1576 && !HONOR_SIGNED_ZEROS (mode))
1577 return build_and_insert_call (gsi, loc, sqrtfn, arg0);
1579 hw_sqrt_exists = optab_handler (sqrt_optab, mode) != CODE_FOR_nothing;
1581 /* Optimize pow(x,1./3.) = cbrt(x). This requires unsafe math
1582 optimizations since 1./3. is not exactly representable. If x
1583 is negative and finite, the correct value of pow(x,1./3.) is
1584 a NaN with the "invalid" exception raised, because the value
1585 of 1./3. actually has an even denominator. The correct value
1586 of cbrt(x) is a negative real value. */
1587 cbrtfn = mathfn_built_in (type, BUILT_IN_CBRT);
1588 dconst1_3 = real_value_truncate (mode, dconst_third ());
1590 if (flag_unsafe_math_optimizations
1591 && cbrtfn
1592 && (!HONOR_NANS (mode) || tree_expr_nonnegative_p (arg0))
1593 && real_equal (&c, &dconst1_3))
1594 return build_and_insert_call (gsi, loc, cbrtfn, arg0);
1596 /* Optimize pow(x,1./6.) = cbrt(sqrt(x)). Don't do this optimization
1597 if we don't have a hardware sqrt insn. */
1598 dconst1_6 = dconst1_3;
1599 SET_REAL_EXP (&dconst1_6, REAL_EXP (&dconst1_6) - 1);
1601 if (flag_unsafe_math_optimizations
1602 && sqrtfn
1603 && cbrtfn
1604 && (!HONOR_NANS (mode) || tree_expr_nonnegative_p (arg0))
1605 && speed_p
1606 && hw_sqrt_exists
1607 && real_equal (&c, &dconst1_6))
1609 /* sqrt(x) */
1610 sqrt_arg0 = build_and_insert_call (gsi, loc, sqrtfn, arg0);
1612 /* cbrt(sqrt(x)) */
1613 return build_and_insert_call (gsi, loc, cbrtfn, sqrt_arg0);
1617 /* Attempt to expand the POW as a product of square root chains.
 1618      Expand the 0.25 case even when optimizing for size.  */
1619 if (flag_unsafe_math_optimizations
1620 && sqrtfn
1621 && hw_sqrt_exists
1622 && (speed_p || real_equal (&c, &dconst1_4))
1623 && !HONOR_SIGNED_ZEROS (mode))
1625 unsigned int max_depth = speed_p
1626 ? PARAM_VALUE (PARAM_MAX_POW_SQRT_DEPTH)
1627 : 2;
1629 tree expand_with_sqrts
1630 = expand_pow_as_sqrts (gsi, loc, arg0, arg1, max_depth);
1632 if (expand_with_sqrts)
1633 return expand_with_sqrts;
1636 real_arithmetic (&c2, MULT_EXPR, &c, &dconst2);
1637 n = real_to_integer (&c2);
1638 real_from_integer (&cint, VOIDmode, n, SIGNED);
1639 c2_is_int = real_identical (&c2, &cint);
1641 /* Optimize pow(x,c), where 3c = n for some nonzero integer n, into
1643 powi(x, n/3) * powi(cbrt(x), n%3), n > 0;
1644 1.0 / (powi(x, abs(n)/3) * powi(cbrt(x), abs(n)%3)), n < 0.
1646 Do not calculate the first factor when n/3 = 0. As cbrt(x) is
1647 different from pow(x, 1./3.) due to rounding and behavior with
1648 negative x, we need to constrain this transformation to unsafe
1649 math and positive x or finite math. */
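  /* For example (illustrative), pow (x, 5./3.) gives n = 5 and is expanded
     to powi (x, 5/3) * powi (cbrt (x), 5%3), i.e. x * cbrt (x) * cbrt (x).  */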
1650 real_from_integer (&dconst3, VOIDmode, 3, SIGNED);
1651 real_arithmetic (&c2, MULT_EXPR, &c, &dconst3);
1652 real_round (&c2, mode, &c2);
1653 n = real_to_integer (&c2);
1654 real_from_integer (&cint, VOIDmode, n, SIGNED);
1655 real_arithmetic (&c2, RDIV_EXPR, &cint, &dconst3);
1656 real_convert (&c2, mode, &c2);
1658 if (flag_unsafe_math_optimizations
1659 && cbrtfn
1660 && (!HONOR_NANS (mode) || tree_expr_nonnegative_p (arg0))
1661 && real_identical (&c2, &c)
1662 && !c2_is_int
1663 && optimize_function_for_speed_p (cfun)
1664 && powi_cost (n / 3) <= POWI_MAX_MULTS)
1666 tree powi_x_ndiv3 = NULL_TREE;
1668 /* Attempt to fold powi(arg0, abs(n/3)) into multiplies. If not
1669 possible or profitable, give up. Skip the degenerate case when
1670 abs(n) < 3, where the result is always 1. */
1671 if (absu_hwi (n) >= 3)
1673 powi_x_ndiv3 = gimple_expand_builtin_powi (gsi, loc, arg0,
1674 abs_hwi (n / 3));
1675 if (!powi_x_ndiv3)
1676 return NULL_TREE;
1679 /* Calculate powi(cbrt(x), n%3). Don't use gimple_expand_builtin_powi
1680 as that creates an unnecessary variable. Instead, just produce
1681 either cbrt(x) or cbrt(x) * cbrt(x). */
1682 cbrt_x = build_and_insert_call (gsi, loc, cbrtfn, arg0);
1684 if (absu_hwi (n) % 3 == 1)
1685 powi_cbrt_x = cbrt_x;
1686 else
1687 powi_cbrt_x = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
1688 cbrt_x, cbrt_x);
1690 /* Multiply the two subexpressions, unless powi(x,abs(n)/3) = 1. */
1691 if (absu_hwi (n) < 3)
1692 result = powi_cbrt_x;
1693 else
1694 result = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
1695 powi_x_ndiv3, powi_cbrt_x);
1697 /* If n is negative, reciprocate the result. */
1698 if (n < 0)
1699 result = build_and_insert_binop (gsi, loc, "powroot", RDIV_EXPR,
1700 build_real (type, dconst1), result);
1702 return result;
1705 /* No optimizations succeeded. */
1706 return NULL_TREE;
1709 /* ARG is the argument to a cabs builtin call in GSI with location info
1710 LOC. Create a sequence of statements prior to GSI that calculates
1711 sqrt(R*R + I*I), where R and I are the real and imaginary components
1712 of ARG, respectively. Return an expression holding the result. */
1714 static tree
1715 gimple_expand_builtin_cabs (gimple_stmt_iterator *gsi, location_t loc, tree arg)
1717 tree real_part, imag_part, addend1, addend2, sum, result;
1718 tree type = TREE_TYPE (TREE_TYPE (arg));
1719 tree sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT);
1720 machine_mode mode = TYPE_MODE (type);
1722 if (!flag_unsafe_math_optimizations
1723 || !optimize_bb_for_speed_p (gimple_bb (gsi_stmt (*gsi)))
1724 || !sqrtfn
1725 || optab_handler (sqrt_optab, mode) == CODE_FOR_nothing)
1726 return NULL_TREE;
1728 real_part = build_and_insert_ref (gsi, loc, type, "cabs",
1729 REALPART_EXPR, arg);
1730 addend1 = build_and_insert_binop (gsi, loc, "cabs", MULT_EXPR,
1731 real_part, real_part);
1732 imag_part = build_and_insert_ref (gsi, loc, type, "cabs",
1733 IMAGPART_EXPR, arg);
1734 addend2 = build_and_insert_binop (gsi, loc, "cabs", MULT_EXPR,
1735 imag_part, imag_part);
1736 sum = build_and_insert_binop (gsi, loc, "cabs", PLUS_EXPR, addend1, addend2);
1737 result = build_and_insert_call (gsi, loc, sqrtfn, sum);
1739 return result;
1742 /* Go through all calls to sin, cos and cexpi and call execute_cse_sincos_1
1743 on the SSA_NAME argument of each of them. Also expand powi(x,n) into
1744 an optimal number of multiplies, when n is a constant. */
1746 namespace {
1748 const pass_data pass_data_cse_sincos =
1750 GIMPLE_PASS, /* type */
1751 "sincos", /* name */
1752 OPTGROUP_NONE, /* optinfo_flags */
1753 TV_NONE, /* tv_id */
1754 PROP_ssa, /* properties_required */
1755 PROP_gimple_opt_math, /* properties_provided */
1756 0, /* properties_destroyed */
1757 0, /* todo_flags_start */
1758 TODO_update_ssa, /* todo_flags_finish */
1761 class pass_cse_sincos : public gimple_opt_pass
1763 public:
1764 pass_cse_sincos (gcc::context *ctxt)
1765 : gimple_opt_pass (pass_data_cse_sincos, ctxt)
1768 /* opt_pass methods: */
1769 virtual bool gate (function *)
1771 /* We no longer require either sincos or cexp, since powi expansion
1772 piggybacks on this pass. */
1773 return optimize;
1776 virtual unsigned int execute (function *);
1778 }; // class pass_cse_sincos
1780 unsigned int
1781 pass_cse_sincos::execute (function *fun)
1783 basic_block bb;
1784 bool cfg_changed = false;
1786 calculate_dominance_info (CDI_DOMINATORS);
1787 memset (&sincos_stats, 0, sizeof (sincos_stats));
1789 FOR_EACH_BB_FN (bb, fun)
1791 gimple_stmt_iterator gsi;
1792 bool cleanup_eh = false;
1794 for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1796 gimple *stmt = gsi_stmt (gsi);
 1798 	  /* Only the last stmt in a bb could throw; there is no need to call
 1799 	     gimple_purge_dead_eh_edges if we change something in the middle
 1800 	     of a basic block.  */
1801 cleanup_eh = false;
1803 if (is_gimple_call (stmt)
1804 && gimple_call_lhs (stmt))
1806 tree arg, arg0, arg1, result;
1807 HOST_WIDE_INT n;
1808 location_t loc;
1810 switch (gimple_call_combined_fn (stmt))
1812 CASE_CFN_COS:
1813 CASE_CFN_SIN:
1814 CASE_CFN_CEXPI:
1815 /* Make sure we have either sincos or cexp. */
1816 if (!targetm.libc_has_function (function_c99_math_complex)
1817 && !targetm.libc_has_function (function_sincos))
1818 break;
1820 arg = gimple_call_arg (stmt, 0);
1821 if (TREE_CODE (arg) == SSA_NAME)
1822 cfg_changed |= execute_cse_sincos_1 (arg);
1823 break;
1825 CASE_CFN_POW:
1826 arg0 = gimple_call_arg (stmt, 0);
1827 arg1 = gimple_call_arg (stmt, 1);
1829 loc = gimple_location (stmt);
1830 result = gimple_expand_builtin_pow (&gsi, loc, arg0, arg1);
1832 if (result)
1834 tree lhs = gimple_get_lhs (stmt);
1835 gassign *new_stmt = gimple_build_assign (lhs, result);
1836 gimple_set_location (new_stmt, loc);
1837 unlink_stmt_vdef (stmt);
1838 gsi_replace (&gsi, new_stmt, true);
1839 cleanup_eh = true;
1840 if (gimple_vdef (stmt))
1841 release_ssa_name (gimple_vdef (stmt));
1843 break;
1845 CASE_CFN_POWI:
1846 arg0 = gimple_call_arg (stmt, 0);
1847 arg1 = gimple_call_arg (stmt, 1);
1848 loc = gimple_location (stmt);
1850 if (real_minus_onep (arg0))
1852 tree t0, t1, cond, one, minus_one;
1853 gassign *stmt;
1855 t0 = TREE_TYPE (arg0);
1856 t1 = TREE_TYPE (arg1);
1857 one = build_real (t0, dconst1);
1858 minus_one = build_real (t0, dconstm1);
1860 cond = make_temp_ssa_name (t1, NULL, "powi_cond");
1861 stmt = gimple_build_assign (cond, BIT_AND_EXPR,
1862 arg1, build_int_cst (t1, 1));
1863 gimple_set_location (stmt, loc);
1864 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
1866 result = make_temp_ssa_name (t0, NULL, "powi");
1867 stmt = gimple_build_assign (result, COND_EXPR, cond,
1868 minus_one, one);
1869 gimple_set_location (stmt, loc);
1870 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
1872 else
1874 if (!tree_fits_shwi_p (arg1))
1875 break;
1877 n = tree_to_shwi (arg1);
1878 result = gimple_expand_builtin_powi (&gsi, loc, arg0, n);
1881 if (result)
1883 tree lhs = gimple_get_lhs (stmt);
1884 gassign *new_stmt = gimple_build_assign (lhs, result);
1885 gimple_set_location (new_stmt, loc);
1886 unlink_stmt_vdef (stmt);
1887 gsi_replace (&gsi, new_stmt, true);
1888 cleanup_eh = true;
1889 if (gimple_vdef (stmt))
1890 release_ssa_name (gimple_vdef (stmt));
1892 break;
1894 CASE_CFN_CABS:
1895 arg0 = gimple_call_arg (stmt, 0);
1896 loc = gimple_location (stmt);
1897 result = gimple_expand_builtin_cabs (&gsi, loc, arg0);
1899 if (result)
1901 tree lhs = gimple_get_lhs (stmt);
1902 gassign *new_stmt = gimple_build_assign (lhs, result);
1903 gimple_set_location (new_stmt, loc);
1904 unlink_stmt_vdef (stmt);
1905 gsi_replace (&gsi, new_stmt, true);
1906 cleanup_eh = true;
1907 if (gimple_vdef (stmt))
1908 release_ssa_name (gimple_vdef (stmt));
1910 break;
1912 default:;
1916 if (cleanup_eh)
1917 cfg_changed |= gimple_purge_dead_eh_edges (bb);
1920 statistics_counter_event (fun, "sincos statements inserted",
1921 sincos_stats.inserted);
1923 return cfg_changed ? TODO_cleanup_cfg : 0;
1926 } // anon namespace
1928 gimple_opt_pass *
1929 make_pass_cse_sincos (gcc::context *ctxt)
1931 return new pass_cse_sincos (ctxt);
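/* Illustrative sketch of the kind of source this pass targets (the helper
   name polar below is hypothetical).  Assuming the C library provides
   either cexp or sincos, the two calls share the same argument, so the
   pass can combine them into a single __builtin_cexpi (or sincos) call
   whose real and imaginary parts yield cos and sin respectively:

     #include <math.h>

     void polar (double phi, double *s, double *c)
     {
       *s = sin (phi);
       *c = cos (phi);
     }
*/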
1934 /* A symbolic number is used to detect byte permutation and selection
1935 patterns. Therefore the field N contains an artificial number
1936 consisting of octet sized markers:
1938 0 - target byte has the value 0
1939 FF - target byte has an unknown value (eg. due to sign extension)
1940 1..size - marker value is the byte index in the source (counting from 1).
1942 To detect permutations on memory sources (arrays and structures), a symbolic
1943 number is also associated with a base address (the array or structure the load
1944 is made from), an offset from the base address and a range which gives the
1945 difference between the highest and lowest accessed memory location needed to
1946 make such a symbolic number. The range is thus different from the size, which
1947 reflects the size of the type of the current expression. Note that for a
1948 non-memory source, range holds the same value as size.
1950 For instance, for an array char a[], (short) a[0] | (short) a[3] would have
1951 a size of 2 but a range of 4 while (short) a[0] | ((short) a[0] << 1) would
1952 still have a size of 2 but this time a range of 1. */
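/* A worked illustration of the marker encoding (the function name load32
   below is hypothetical).  On a little-endian target, assembling a 32-bit
   value from four consecutive bytes in memory order

     #include <stdint.h>

     uint32_t load32 (const unsigned char *p)
     {
       return (uint32_t) p[0]
              | ((uint32_t) p[1] << 8)
              | ((uint32_t) p[2] << 16)
              | ((uint32_t) p[3] << 24);
     }

   should yield a symbolic number of 0x04030201 (result byte i takes its
   value from source byte i) with both size and range equal to 4, i.e. a
   plain load; indexing the bytes in the reverse order instead yields
   0x01020304, which is recognized as a byte swap.  */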
1954 struct symbolic_number {
1955 uint64_t n;
1956 tree type;
1957 tree base_addr;
1958 tree offset;
1959 HOST_WIDE_INT bytepos;
1960 tree alias_set;
1961 tree vuse;
1962 unsigned HOST_WIDE_INT range;
1965 #define BITS_PER_MARKER 8
1966 #define MARKER_MASK ((1 << BITS_PER_MARKER) - 1)
1967 #define MARKER_BYTE_UNKNOWN MARKER_MASK
1968 #define HEAD_MARKER(n, size) \
1969 ((n) & ((uint64_t) MARKER_MASK << (((size) - 1) * BITS_PER_MARKER)))
1971 /* The number which the find_bswap_or_nop_1 result should match in
1972 order to have a nop. The number is masked according to the size of
1973 the symbolic number before using it. */
1974 #define CMPNOP (sizeof (int64_t) < 8 ? 0 : \
1975 (uint64_t)0x08070605 << 32 | 0x04030201)
1977 /* The number which the find_bswap_or_nop_1 result should match in
1978 order to have a byte swap. The number is masked according to the
1979 size of the symbolic number before using it. */
1980 #define CMPXCHG (sizeof (int64_t) < 8 ? 0 : \
1981 (uint64_t)0x01020304 << 32 | 0x05060708)
1983 /* Perform a SHIFT or ROTATE operation by COUNT bits on symbolic
1984 number N. Return false if the requested operation is not permitted
1985 on a symbolic number. */
1987 static inline bool
1988 do_shift_rotate (enum tree_code code,
1989 struct symbolic_number *n,
1990 int count)
1992 int i, size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
1993 unsigned head_marker;
1995 if (count % BITS_PER_UNIT != 0)
1996 return false;
1997 count = (count / BITS_PER_UNIT) * BITS_PER_MARKER;
1999 /* Zero out the extra bits of N in order to avoid them being shifted
2000 into the significant bits. */
2001 if (size < 64 / BITS_PER_MARKER)
2002 n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
2004 switch (code)
2006 case LSHIFT_EXPR:
2007 n->n <<= count;
2008 break;
2009 case RSHIFT_EXPR:
2010 head_marker = HEAD_MARKER (n->n, size);
2011 n->n >>= count;
2012 /* Arithmetic shift of signed type: result is dependent on the value. */
2013 if (!TYPE_UNSIGNED (n->type) && head_marker)
2014 for (i = 0; i < count / BITS_PER_MARKER; i++)
2015 n->n |= (uint64_t) MARKER_BYTE_UNKNOWN
2016 << ((size - 1 - i) * BITS_PER_MARKER);
2017 break;
2018 case LROTATE_EXPR:
2019 n->n = (n->n << count) | (n->n >> ((size * BITS_PER_MARKER) - count));
2020 break;
2021 case RROTATE_EXPR:
2022 n->n = (n->n >> count) | (n->n << ((size * BITS_PER_MARKER) - count));
2023 break;
2024 default:
2025 return false;
2027 /* Zero unused bits for size. */
2028 if (size < 64 / BITS_PER_MARKER)
2029 n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
2030 return true;
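/* A small worked example of the marker arithmetic above: for a 4-byte
   symbolic number holding 0x04030201, a right shift by 8 bits gives
   0x00040302 for an unsigned type, while for a signed type with a nonzero
   head marker the vacated top byte is filled with MARKER_BYTE_UNKNOWN,
   giving 0xFF040302, because an arithmetic shift replicates the (unknown)
   sign bit.  */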
2033 /* Perform sanity checking for the symbolic number N and the gimple
2034 statement STMT. */
2036 static inline bool
2037 verify_symbolic_number_p (struct symbolic_number *n, gimple *stmt)
2039 tree lhs_type;
2041 lhs_type = gimple_expr_type (stmt);
2043 if (TREE_CODE (lhs_type) != INTEGER_TYPE)
2044 return false;
2046 if (TYPE_PRECISION (lhs_type) != TYPE_PRECISION (n->type))
2047 return false;
2049 return true;
2052 /* Initialize the symbolic number N for the bswap pass from the base element
2053 SRC manipulated by the bitwise OR expression. */
2055 static bool
2056 init_symbolic_number (struct symbolic_number *n, tree src)
2058 int size;
2060 if (! INTEGRAL_TYPE_P (TREE_TYPE (src)))
2061 return false;
2063 n->base_addr = n->offset = n->alias_set = n->vuse = NULL_TREE;
2065 /* Set up the symbolic number N by setting each byte to a value between 1 and
2066 the byte size of SRC. The highest order byte is set to that size and the
2067 lowest order byte to 1. */
2068 n->type = TREE_TYPE (src);
2069 size = TYPE_PRECISION (n->type);
2070 if (size % BITS_PER_UNIT != 0)
2071 return false;
2072 size /= BITS_PER_UNIT;
2073 if (size > 64 / BITS_PER_MARKER)
2074 return false;
2075 n->range = size;
2076 n->n = CMPNOP;
2078 if (size < 64 / BITS_PER_MARKER)
2079 n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
2081 return true;
2084 /* Check if STMT might be a byte swap or a nop from a memory source and return
2085 the answer. If so, REF is that memory source and the base of the memory area
2086 accessed and the offset of the access from that base are recorded in N. */
2088 bool
2089 find_bswap_or_nop_load (gimple *stmt, tree ref, struct symbolic_number *n)
2091 /* Leaf node is an array or component ref. Memorize its base and
2092 offset from base to compare to other such leaf node. */
2093 HOST_WIDE_INT bitsize, bitpos;
2094 machine_mode mode;
2095 int unsignedp, reversep, volatilep;
2096 tree offset, base_addr;
2098 /* Not prepared to handle PDP endian. */
2099 if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
2100 return false;
2102 if (!gimple_assign_load_p (stmt) || gimple_has_volatile_ops (stmt))
2103 return false;
2105 base_addr = get_inner_reference (ref, &bitsize, &bitpos, &offset, &mode,
2106 &unsignedp, &reversep, &volatilep);
2108 if (TREE_CODE (base_addr) == MEM_REF)
2110 offset_int bit_offset = 0;
2111 tree off = TREE_OPERAND (base_addr, 1);
2113 if (!integer_zerop (off))
2115 offset_int boff, coff = mem_ref_offset (base_addr);
2116 boff = coff << LOG2_BITS_PER_UNIT;
2117 bit_offset += boff;
2120 base_addr = TREE_OPERAND (base_addr, 0);
2122 /* Avoid returning a negative bitpos as this may wreak havoc later. */
2123 if (wi::neg_p (bit_offset))
2125 offset_int mask = wi::mask <offset_int> (LOG2_BITS_PER_UNIT, false);
2126 offset_int tem = bit_offset.and_not (mask);
2127 /* TEM is the bitpos rounded to BITS_PER_UNIT towards -Inf.
2128 Subtract it from BIT_OFFSET and add it (scaled) to OFFSET. */
2129 bit_offset -= tem;
2130 tem >>= LOG2_BITS_PER_UNIT;
2131 if (offset)
2132 offset = size_binop (PLUS_EXPR, offset,
2133 wide_int_to_tree (sizetype, tem));
2134 else
2135 offset = wide_int_to_tree (sizetype, tem);
2138 bitpos += bit_offset.to_shwi ();
2141 if (bitpos % BITS_PER_UNIT)
2142 return false;
2143 if (bitsize % BITS_PER_UNIT)
2144 return false;
2145 if (reversep)
2146 return false;
2148 if (!init_symbolic_number (n, ref))
2149 return false;
2150 n->base_addr = base_addr;
2151 n->offset = offset;
2152 n->bytepos = bitpos / BITS_PER_UNIT;
2153 n->alias_set = reference_alias_ptr_type (ref);
2154 n->vuse = gimple_vuse (stmt);
2155 return true;
2158 /* Compute the symbolic number N representing the result of a bitwise OR on the
2159 two symbolic numbers N1 and N2, whose source statements are respectively
2160 SOURCE_STMT1 and SOURCE_STMT2. */
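/* Illustrative sketch of a merge (the function name combine16 below is
   hypothetical).  Assuming a little-endian target, in

     #include <stdint.h>

     uint16_t combine16 (const unsigned char *p)
     {
       return (uint16_t) p[0] | ((uint16_t) p[1] << 8);
     }

   the two one-byte loads share the same base address and sit at byte
   positions 0 and 1, so the merge should produce a symbolic number with
   range 2 and markers 0x0201, i.e. the whole expression acts as a plain
   16-bit load.  */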
2162 static gimple *
2163 perform_symbolic_merge (gimple *source_stmt1, struct symbolic_number *n1,
2164 gimple *source_stmt2, struct symbolic_number *n2,
2165 struct symbolic_number *n)
2167 int i, size;
2168 uint64_t mask;
2169 gimple *source_stmt;
2170 struct symbolic_number *n_start;
2172 tree rhs1 = gimple_assign_rhs1 (source_stmt1);
2173 if (TREE_CODE (rhs1) == BIT_FIELD_REF
2174 && TREE_CODE (TREE_OPERAND (rhs1, 0)) == SSA_NAME)
2175 rhs1 = TREE_OPERAND (rhs1, 0);
2176 tree rhs2 = gimple_assign_rhs1 (source_stmt2);
2177 if (TREE_CODE (rhs2) == BIT_FIELD_REF
2178 && TREE_CODE (TREE_OPERAND (rhs2, 0)) == SSA_NAME)
2179 rhs2 = TREE_OPERAND (rhs2, 0);
2181 /* Sources are different; cancel bswap if they are not memory locations with
2182 the same base (array, structure, ...). */
2183 if (rhs1 != rhs2)
2185 uint64_t inc;
2186 HOST_WIDE_INT start_sub, end_sub, end1, end2, end;
2187 struct symbolic_number *toinc_n_ptr, *n_end;
2189 if (!n1->base_addr || !n2->base_addr
2190 || !operand_equal_p (n1->base_addr, n2->base_addr, 0))
2191 return NULL;
2193 if (!n1->offset != !n2->offset
2194 || (n1->offset && !operand_equal_p (n1->offset, n2->offset, 0)))
2195 return NULL;
2197 if (n1->bytepos < n2->bytepos)
2199 n_start = n1;
2200 start_sub = n2->bytepos - n1->bytepos;
2201 source_stmt = source_stmt1;
2203 else
2205 n_start = n2;
2206 start_sub = n1->bytepos - n2->bytepos;
2207 source_stmt = source_stmt2;
2210 /* Find the highest address at which a load is performed and
2211 compute related info. */
2212 end1 = n1->bytepos + (n1->range - 1);
2213 end2 = n2->bytepos + (n2->range - 1);
2214 if (end1 < end2)
2216 end = end2;
2217 end_sub = end2 - end1;
2219 else
2221 end = end1;
2222 end_sub = end1 - end2;
2224 n_end = (end2 > end1) ? n2 : n1;
2226 /* Find symbolic number whose lsb is the most significant. */
2227 if (BYTES_BIG_ENDIAN)
2228 toinc_n_ptr = (n_end == n1) ? n2 : n1;
2229 else
2230 toinc_n_ptr = (n_start == n1) ? n2 : n1;
2232 n->range = end - n_start->bytepos + 1;
2234 /* Check that the range of memory covered can be represented by
2235 a symbolic number. */
2236 if (n->range > 64 / BITS_PER_MARKER)
2237 return NULL;
2239 /* Reinterpret byte marks in symbolic number holding the value of
2240 bigger weight according to target endianness. */
2241 inc = BYTES_BIG_ENDIAN ? end_sub : start_sub;
2242 size = TYPE_PRECISION (n1->type) / BITS_PER_UNIT;
2243 for (i = 0; i < size; i++, inc <<= BITS_PER_MARKER)
2245 unsigned marker
2246 = (toinc_n_ptr->n >> (i * BITS_PER_MARKER)) & MARKER_MASK;
2247 if (marker && marker != MARKER_BYTE_UNKNOWN)
2248 toinc_n_ptr->n += inc;
2251 else
2253 n->range = n1->range;
2254 n_start = n1;
2255 source_stmt = source_stmt1;
2258 if (!n1->alias_set
2259 || alias_ptr_types_compatible_p (n1->alias_set, n2->alias_set))
2260 n->alias_set = n1->alias_set;
2261 else
2262 n->alias_set = ptr_type_node;
2263 n->vuse = n_start->vuse;
2264 n->base_addr = n_start->base_addr;
2265 n->offset = n_start->offset;
2266 n->bytepos = n_start->bytepos;
2267 n->type = n_start->type;
2268 size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
2270 for (i = 0, mask = MARKER_MASK; i < size; i++, mask <<= BITS_PER_MARKER)
2272 uint64_t masked1, masked2;
2274 masked1 = n1->n & mask;
2275 masked2 = n2->n & mask;
2276 if (masked1 && masked2 && masked1 != masked2)
2277 return NULL;
2279 n->n = n1->n | n2->n;
2281 return source_stmt;
2284 /* find_bswap_or_nop_1 invokes itself recursively with N and tries to perform
2285 the operation given by the rhs of STMT on the result. If the operation
2286 could be executed successfully, the function returns a gimple stmt whose
2287 rhs's first tree is the expression of the source operand, and NULL
2288 otherwise. */
2290 static gimple *
2291 find_bswap_or_nop_1 (gimple *stmt, struct symbolic_number *n, int limit)
2293 enum tree_code code;
2294 tree rhs1, rhs2 = NULL;
2295 gimple *rhs1_stmt, *rhs2_stmt, *source_stmt1;
2296 enum gimple_rhs_class rhs_class;
2298 if (!limit || !is_gimple_assign (stmt))
2299 return NULL;
2301 rhs1 = gimple_assign_rhs1 (stmt);
2303 if (find_bswap_or_nop_load (stmt, rhs1, n))
2304 return stmt;
2306 /* Handle BIT_FIELD_REF. */
2307 if (TREE_CODE (rhs1) == BIT_FIELD_REF
2308 && TREE_CODE (TREE_OPERAND (rhs1, 0)) == SSA_NAME)
2310 unsigned HOST_WIDE_INT bitsize = tree_to_uhwi (TREE_OPERAND (rhs1, 1));
2311 unsigned HOST_WIDE_INT bitpos = tree_to_uhwi (TREE_OPERAND (rhs1, 2));
2312 if (bitpos % BITS_PER_UNIT == 0
2313 && bitsize % BITS_PER_UNIT == 0
2314 && init_symbolic_number (n, TREE_OPERAND (rhs1, 0)))
2316 /* Handle big-endian bit numbering in BIT_FIELD_REF. */
2317 if (BYTES_BIG_ENDIAN)
2318 bitpos = TYPE_PRECISION (n->type) - bitpos - bitsize;
2320 /* Shift. */
2321 if (!do_shift_rotate (RSHIFT_EXPR, n, bitpos))
2322 return NULL;
2324 /* Mask. */
2325 uint64_t mask = 0;
2326 uint64_t tmp = (1 << BITS_PER_UNIT) - 1;
2327 for (unsigned i = 0; i < bitsize / BITS_PER_UNIT;
2328 i++, tmp <<= BITS_PER_UNIT)
2329 mask |= (uint64_t) MARKER_MASK << (i * BITS_PER_MARKER);
2330 n->n &= mask;
2332 /* Convert. */
2333 n->type = TREE_TYPE (rhs1);
2334 if (!n->base_addr)
2335 n->range = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
2337 return verify_symbolic_number_p (n, stmt) ? stmt : NULL;
2340 return NULL;
2343 if (TREE_CODE (rhs1) != SSA_NAME)
2344 return NULL;
2346 code = gimple_assign_rhs_code (stmt);
2347 rhs_class = gimple_assign_rhs_class (stmt);
2348 rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
2350 if (rhs_class == GIMPLE_BINARY_RHS)
2351 rhs2 = gimple_assign_rhs2 (stmt);
2353 /* Handle unary rhs and binary rhs with integer constants as second
2354 operand. */
2356 if (rhs_class == GIMPLE_UNARY_RHS
2357 || (rhs_class == GIMPLE_BINARY_RHS
2358 && TREE_CODE (rhs2) == INTEGER_CST))
2360 if (code != BIT_AND_EXPR
2361 && code != LSHIFT_EXPR
2362 && code != RSHIFT_EXPR
2363 && code != LROTATE_EXPR
2364 && code != RROTATE_EXPR
2365 && !CONVERT_EXPR_CODE_P (code))
2366 return NULL;
2368 source_stmt1 = find_bswap_or_nop_1 (rhs1_stmt, n, limit - 1);
2370 /* If find_bswap_or_nop_1 returned NULL, STMT is a leaf node and
2371 we have to initialize the symbolic number. */
2372 if (!source_stmt1)
2374 if (gimple_assign_load_p (stmt)
2375 || !init_symbolic_number (n, rhs1))
2376 return NULL;
2377 source_stmt1 = stmt;
2380 switch (code)
2382 case BIT_AND_EXPR:
2384 int i, size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
2385 uint64_t val = int_cst_value (rhs2), mask = 0;
2386 uint64_t tmp = (1 << BITS_PER_UNIT) - 1;
2388 /* Only constants masking full bytes are allowed. */
2389 for (i = 0; i < size; i++, tmp <<= BITS_PER_UNIT)
2390 if ((val & tmp) != 0 && (val & tmp) != tmp)
2391 return NULL;
2392 else if (val & tmp)
2393 mask |= (uint64_t) MARKER_MASK << (i * BITS_PER_MARKER);
2395 n->n &= mask;
2397 break;
2398 case LSHIFT_EXPR:
2399 case RSHIFT_EXPR:
2400 case LROTATE_EXPR:
2401 case RROTATE_EXPR:
2402 if (!do_shift_rotate (code, n, (int) TREE_INT_CST_LOW (rhs2)))
2403 return NULL;
2404 break;
2405 CASE_CONVERT:
2407 int i, type_size, old_type_size;
2408 tree type;
2410 type = gimple_expr_type (stmt);
2411 type_size = TYPE_PRECISION (type);
2412 if (type_size % BITS_PER_UNIT != 0)
2413 return NULL;
2414 type_size /= BITS_PER_UNIT;
2415 if (type_size > 64 / BITS_PER_MARKER)
2416 return NULL;
2418 /* Sign extension: result is dependent on the value. */
2419 old_type_size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
2420 if (!TYPE_UNSIGNED (n->type) && type_size > old_type_size
2421 && HEAD_MARKER (n->n, old_type_size))
2422 for (i = 0; i < type_size - old_type_size; i++)
2423 n->n |= (uint64_t) MARKER_BYTE_UNKNOWN
2424 << ((type_size - 1 - i) * BITS_PER_MARKER);
2426 if (type_size < 64 / BITS_PER_MARKER)
2428 /* If STMT casts to a smaller type mask out the bits not
2429 belonging to the target type. */
2430 n->n &= ((uint64_t) 1 << (type_size * BITS_PER_MARKER)) - 1;
2432 n->type = type;
2433 if (!n->base_addr)
2434 n->range = type_size;
2436 break;
2437 default:
2438 return NULL;
2440 return verify_symbolic_number_p (n, stmt) ? source_stmt1 : NULL;
2443 /* Handle binary rhs. */
2445 if (rhs_class == GIMPLE_BINARY_RHS)
2447 struct symbolic_number n1, n2;
2448 gimple *source_stmt, *source_stmt2;
2450 if (code != BIT_IOR_EXPR)
2451 return NULL;
2453 if (TREE_CODE (rhs2) != SSA_NAME)
2454 return NULL;
2456 rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
2458 switch (code)
2460 case BIT_IOR_EXPR:
2461 source_stmt1 = find_bswap_or_nop_1 (rhs1_stmt, &n1, limit - 1);
2463 if (!source_stmt1)
2464 return NULL;
2466 source_stmt2 = find_bswap_or_nop_1 (rhs2_stmt, &n2, limit - 1);
2468 if (!source_stmt2)
2469 return NULL;
2471 if (TYPE_PRECISION (n1.type) != TYPE_PRECISION (n2.type))
2472 return NULL;
2474 if (!n1.vuse != !n2.vuse
2475 || (n1.vuse && !operand_equal_p (n1.vuse, n2.vuse, 0)))
2476 return NULL;
2478 source_stmt
2479 = perform_symbolic_merge (source_stmt1, &n1, source_stmt2, &n2, n);
2481 if (!source_stmt)
2482 return NULL;
2484 if (!verify_symbolic_number_p (n, stmt))
2485 return NULL;
2487 break;
2488 default:
2489 return NULL;
2491 return source_stmt;
2493 return NULL;
2496 /* Check if STMT completes a bswap implementation or a read in a given
2497 endianness consisting of ORs, SHIFTs and ANDs, and set *BSWAP
2498 accordingly. It also sets N to represent the kind of operations
2499 performed: size of the resulting expression and whether it works on
2500 a memory source, and if so the alias set and vuse. Finally, the
2501 function returns a stmt whose rhs's first tree is the source
2502 expression. */
2504 static gimple *
2505 find_bswap_or_nop (gimple *stmt, struct symbolic_number *n, bool *bswap)
2507 /* The number which the find_bswap_or_nop_1 result should match in order
2508 to have a full byte swap. The number is shifted to the right
2509 according to the size of the symbolic number before using it. */
2510 uint64_t cmpxchg = CMPXCHG;
2511 uint64_t cmpnop = CMPNOP;
2513 gimple *source_stmt;
2514 int limit;
2516 /* The last parameter determines the search depth limit. It usually
2517 correlates directly to the number n of bytes to be touched. We
2518 increase that number by log2(n) + 1 here in order to also
2519 cover signed -> unsigned conversions of the src operand as can be seen
2520 in libgcc, and for the initial shift/and operation of the src operand. */
2521 limit = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (gimple_expr_type (stmt)));
2522 limit += 1 + (int) ceil_log2 ((unsigned HOST_WIDE_INT) limit);
2523 source_stmt = find_bswap_or_nop_1 (stmt, n, limit);
2525 if (!source_stmt)
2526 return NULL;
2528 /* Find real size of result (highest non-zero byte). */
2529 if (n->base_addr)
2531 unsigned HOST_WIDE_INT rsize;
2532 uint64_t tmpn;
2534 for (tmpn = n->n, rsize = 0; tmpn; tmpn >>= BITS_PER_MARKER, rsize++);
2535 if (BYTES_BIG_ENDIAN && n->range != rsize)
2536 /* This implies an offset, which is currently not handled by
2537 bswap_replace. */
2538 return NULL;
2539 n->range = rsize;
2542 /* Zero out the extra bits of N and CMP*. */
2543 if (n->range < (int) sizeof (int64_t))
2545 uint64_t mask;
2547 mask = ((uint64_t) 1 << (n->range * BITS_PER_MARKER)) - 1;
2548 cmpxchg >>= (64 / BITS_PER_MARKER - n->range) * BITS_PER_MARKER;
2549 cmpnop &= mask;
2552 /* A complete byte swap should make the symbolic number start with
2553 the largest digit in the highest order byte. An unchanged symbolic
2554 number indicates a read with the same endianness as the target architecture. */
2555 if (n->n == cmpnop)
2556 *bswap = false;
2557 else if (n->n == cmpxchg)
2558 *bswap = true;
2559 else
2560 return NULL;
2562 /* Useless bit manipulation performed by code. */
2563 if (!n->base_addr && n->n == cmpnop)
2564 return NULL;
2566 n->range *= BITS_PER_UNIT;
2567 return source_stmt;
2570 namespace {
2572 const pass_data pass_data_optimize_bswap =
2574 GIMPLE_PASS, /* type */
2575 "bswap", /* name */
2576 OPTGROUP_NONE, /* optinfo_flags */
2577 TV_NONE, /* tv_id */
2578 PROP_ssa, /* properties_required */
2579 0, /* properties_provided */
2580 0, /* properties_destroyed */
2581 0, /* todo_flags_start */
2582 0, /* todo_flags_finish */
2585 class pass_optimize_bswap : public gimple_opt_pass
2587 public:
2588 pass_optimize_bswap (gcc::context *ctxt)
2589 : gimple_opt_pass (pass_data_optimize_bswap, ctxt)
2592 /* opt_pass methods: */
2593 virtual bool gate (function *)
2595 return flag_expensive_optimizations && optimize;
2598 virtual unsigned int execute (function *);
2600 }; // class pass_optimize_bswap
2602 /* Perform the bswap optimization: replace the expression computed in the rhs
2603 of CUR_STMT by an equivalent bswap, load or load + bswap expression.
2604 Which of these alternatives replaces the rhs is given by N->base_addr (non
2605 null if a load is needed) and BSWAP. The type, VUSE and alias set of the
2606 load to perform are also given in N, while the builtin bswap to invoke is
2607 given in FNDECL. Finally, if a load is involved, SRC_STMT refers to one of the
2608 load statements used to construct the rhs in CUR_STMT, and N->range gives
2609 the size of the rhs expression for maintaining some statistics.
2611 Note that if the replacement involves a load, CUR_STMT is moved just after
2612 SRC_STMT to do the load with the same VUSE, which can lead to CUR_STMT
2613 changing basic block. */
2615 static bool
2616 bswap_replace (gimple *cur_stmt, gimple *src_stmt, tree fndecl,
2617 tree bswap_type, tree load_type, struct symbolic_number *n,
2618 bool bswap)
2620 gimple_stmt_iterator gsi;
2621 tree src, tmp, tgt;
2622 gimple *bswap_stmt;
2624 gsi = gsi_for_stmt (cur_stmt);
2625 src = gimple_assign_rhs1 (src_stmt);
2626 tgt = gimple_assign_lhs (cur_stmt);
2628 /* Need to load the value from memory first. */
2629 if (n->base_addr)
2631 gimple_stmt_iterator gsi_ins = gsi_for_stmt (src_stmt);
2632 tree addr_expr, addr_tmp, val_expr, val_tmp;
2633 tree load_offset_ptr, aligned_load_type;
2634 gimple *addr_stmt, *load_stmt;
2635 unsigned align;
2636 HOST_WIDE_INT load_offset = 0;
2638 align = get_object_alignment (src);
2639 /* If the new access is smaller than the original one, we need
2640 to perform big endian adjustment. */
2641 if (BYTES_BIG_ENDIAN)
2643 HOST_WIDE_INT bitsize, bitpos;
2644 machine_mode mode;
2645 int unsignedp, reversep, volatilep;
2646 tree offset;
2648 get_inner_reference (src, &bitsize, &bitpos, &offset, &mode,
2649 &unsignedp, &reversep, &volatilep);
2650 if (n->range < (unsigned HOST_WIDE_INT) bitsize)
2652 load_offset = (bitsize - n->range) / BITS_PER_UNIT;
2653 unsigned HOST_WIDE_INT l
2654 = (load_offset * BITS_PER_UNIT) & (align - 1);
2655 if (l)
2656 align = least_bit_hwi (l);
2660 if (bswap
2661 && align < GET_MODE_ALIGNMENT (TYPE_MODE (load_type))
2662 && SLOW_UNALIGNED_ACCESS (TYPE_MODE (load_type), align))
2663 return false;
2665 /* Move cur_stmt just before one of the loads of the original
2666 expression to ensure it has the same VUSE. See PR61517 for what could
2667 go wrong. */
2668 if (gimple_bb (cur_stmt) != gimple_bb (src_stmt))
2669 reset_flow_sensitive_info (gimple_assign_lhs (cur_stmt));
2670 gsi_move_before (&gsi, &gsi_ins);
2671 gsi = gsi_for_stmt (cur_stmt);
2673 /* Compute address to load from and cast according to the size
2674 of the load. */
2675 addr_expr = build_fold_addr_expr (unshare_expr (src));
2676 if (is_gimple_mem_ref_addr (addr_expr))
2677 addr_tmp = addr_expr;
2678 else
2680 addr_tmp = make_temp_ssa_name (TREE_TYPE (addr_expr), NULL,
2681 "load_src");
2682 addr_stmt = gimple_build_assign (addr_tmp, addr_expr);
2683 gsi_insert_before (&gsi, addr_stmt, GSI_SAME_STMT);
2686 /* Perform the load. */
2687 aligned_load_type = load_type;
2688 if (align < TYPE_ALIGN (load_type))
2689 aligned_load_type = build_aligned_type (load_type, align);
2690 load_offset_ptr = build_int_cst (n->alias_set, load_offset);
2691 val_expr = fold_build2 (MEM_REF, aligned_load_type, addr_tmp,
2692 load_offset_ptr);
2694 if (!bswap)
2696 if (n->range == 16)
2697 nop_stats.found_16bit++;
2698 else if (n->range == 32)
2699 nop_stats.found_32bit++;
2700 else
2702 gcc_assert (n->range == 64);
2703 nop_stats.found_64bit++;
2706 /* Convert the result of load if necessary. */
2707 if (!useless_type_conversion_p (TREE_TYPE (tgt), load_type))
2709 val_tmp = make_temp_ssa_name (aligned_load_type, NULL,
2710 "load_dst");
2711 load_stmt = gimple_build_assign (val_tmp, val_expr);
2712 gimple_set_vuse (load_stmt, n->vuse);
2713 gsi_insert_before (&gsi, load_stmt, GSI_SAME_STMT);
2714 gimple_assign_set_rhs_with_ops (&gsi, NOP_EXPR, val_tmp);
2716 else
2718 gimple_assign_set_rhs_with_ops (&gsi, MEM_REF, val_expr);
2719 gimple_set_vuse (cur_stmt, n->vuse);
2721 update_stmt (cur_stmt);
2723 if (dump_file)
2725 fprintf (dump_file,
2726 "%d bit load in target endianness found at: ",
2727 (int) n->range);
2728 print_gimple_stmt (dump_file, cur_stmt, 0, 0);
2730 return true;
2732 else
2734 val_tmp = make_temp_ssa_name (aligned_load_type, NULL, "load_dst");
2735 load_stmt = gimple_build_assign (val_tmp, val_expr);
2736 gimple_set_vuse (load_stmt, n->vuse);
2737 gsi_insert_before (&gsi, load_stmt, GSI_SAME_STMT);
2739 src = val_tmp;
2741 else if (TREE_CODE (src) == BIT_FIELD_REF)
2742 src = TREE_OPERAND (src, 0);
2744 if (n->range == 16)
2745 bswap_stats.found_16bit++;
2746 else if (n->range == 32)
2747 bswap_stats.found_32bit++;
2748 else
2750 gcc_assert (n->range == 64);
2751 bswap_stats.found_64bit++;
2754 tmp = src;
2756 /* Convert the src expression if necessary. */
2757 if (!useless_type_conversion_p (TREE_TYPE (tmp), bswap_type))
2759 gimple *convert_stmt;
2761 tmp = make_temp_ssa_name (bswap_type, NULL, "bswapsrc");
2762 convert_stmt = gimple_build_assign (tmp, NOP_EXPR, src);
2763 gsi_insert_before (&gsi, convert_stmt, GSI_SAME_STMT);
2766 /* The canonical form for a 16-bit bswap is a rotate expression. Only 16-bit
2767 values are handled this way, as a rotation of 2N-bit values by N bits is
2768 generally not equivalent to a bswap. Consider for instance 0x01020304 r>> 16,
2769 which gives 0x03040102 while a bswap of that value is 0x04030201. */
2770 if (bswap && n->range == 16)
2772 tree count = build_int_cst (NULL, BITS_PER_UNIT);
2773 src = fold_build2 (LROTATE_EXPR, bswap_type, tmp, count);
2774 bswap_stmt = gimple_build_assign (NULL, src);
2776 else
2777 bswap_stmt = gimple_build_call (fndecl, 1, tmp);
2779 tmp = tgt;
2781 /* Convert the result if necessary. */
2782 if (!useless_type_conversion_p (TREE_TYPE (tgt), bswap_type))
2784 gimple *convert_stmt;
2786 tmp = make_temp_ssa_name (bswap_type, NULL, "bswapdst");
2787 convert_stmt = gimple_build_assign (tgt, NOP_EXPR, tmp);
2788 gsi_insert_after (&gsi, convert_stmt, GSI_SAME_STMT);
2791 gimple_set_lhs (bswap_stmt, tmp);
2793 if (dump_file)
2795 fprintf (dump_file, "%d bit bswap implementation found at: ",
2796 (int) n->range);
2797 print_gimple_stmt (dump_file, cur_stmt, 0, 0);
2800 gsi_insert_after (&gsi, bswap_stmt, GSI_SAME_STMT);
2801 gsi_remove (&gsi, true);
2802 return true;
2805 /* Find manual byte swap implementations as well as loads in a given
2806 endianness. Byte swaps are turned into a bswap builtin invocation,
2807 while endian loads are converted to a bswap builtin invocation or a
2808 simple load according to the target endianness. */
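/* Illustrative sketch of a manual byte swap this pass is meant to
   recognize (the function name swap32 below is hypothetical).  Provided
   __builtin_bswap32 exists and the target has a bswap_optab handler for
   SImode, the shift-and-mask sequence is replaced by a single bswap call:

     #include <stdint.h>

     uint32_t swap32 (uint32_t x)
     {
       return ((x & 0x000000ffU) << 24)
              | ((x & 0x0000ff00U) << 8)
              | ((x & 0x00ff0000U) >> 8)
              | ((x & 0xff000000U) >> 24);
     }
*/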
2810 unsigned int
2811 pass_optimize_bswap::execute (function *fun)
2813 basic_block bb;
2814 bool bswap32_p, bswap64_p;
2815 bool changed = false;
2816 tree bswap32_type = NULL_TREE, bswap64_type = NULL_TREE;
2818 if (BITS_PER_UNIT != 8)
2819 return 0;
2821 bswap32_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP32)
2822 && optab_handler (bswap_optab, SImode) != CODE_FOR_nothing);
2823 bswap64_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP64)
2824 && (optab_handler (bswap_optab, DImode) != CODE_FOR_nothing
2825 || (bswap32_p && word_mode == SImode)));
2827 /* Determine the argument type of the builtins. The code later on
2828 assumes that the return and argument type are the same. */
2829 if (bswap32_p)
2831 tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
2832 bswap32_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
2835 if (bswap64_p)
2837 tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
2838 bswap64_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
2841 memset (&nop_stats, 0, sizeof (nop_stats));
2842 memset (&bswap_stats, 0, sizeof (bswap_stats));
2844 FOR_EACH_BB_FN (bb, fun)
2846 gimple_stmt_iterator gsi;
2848 /* We do a reverse scan for bswap patterns to make sure we get the
2849 widest match. As bswap pattern matching doesn't handle previously
2850 inserted smaller bswap replacements as sub-patterns, the wider
2851 variant wouldn't be detected. */
2852 for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi);)
2854 gimple *src_stmt, *cur_stmt = gsi_stmt (gsi);
2855 tree fndecl = NULL_TREE, bswap_type = NULL_TREE, load_type;
2856 enum tree_code code;
2857 struct symbolic_number n;
2858 bool bswap;
2860 /* This gsi_prev (&gsi) is not part of the for loop because cur_stmt
2861 might be moved to a different basic block by bswap_replace and gsi
2862 must not point to it if that's the case. Moving the gsi_prev
2863 here makes sure that gsi points to the statement previous to
2864 cur_stmt while still ensuring that all statements are
2865 considered in this basic block. */
2866 gsi_prev (&gsi);
2868 if (!is_gimple_assign (cur_stmt))
2869 continue;
2871 code = gimple_assign_rhs_code (cur_stmt);
2872 switch (code)
2874 case LROTATE_EXPR:
2875 case RROTATE_EXPR:
2876 if (!tree_fits_uhwi_p (gimple_assign_rhs2 (cur_stmt))
2877 || tree_to_uhwi (gimple_assign_rhs2 (cur_stmt))
2878 % BITS_PER_UNIT)
2879 continue;
2880 /* Fall through. */
2881 case BIT_IOR_EXPR:
2882 break;
2883 default:
2884 continue;
2887 src_stmt = find_bswap_or_nop (cur_stmt, &n, &bswap);
2889 if (!src_stmt)
2890 continue;
2892 switch (n.range)
2894 case 16:
2895 /* Already in canonical form, nothing to do. */
2896 if (code == LROTATE_EXPR || code == RROTATE_EXPR)
2897 continue;
2898 load_type = bswap_type = uint16_type_node;
2899 break;
2900 case 32:
2901 load_type = uint32_type_node;
2902 if (bswap32_p)
2904 fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
2905 bswap_type = bswap32_type;
2907 break;
2908 case 64:
2909 load_type = uint64_type_node;
2910 if (bswap64_p)
2912 fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
2913 bswap_type = bswap64_type;
2915 break;
2916 default:
2917 continue;
2920 if (bswap && !fndecl && n.range != 16)
2921 continue;
2923 if (bswap_replace (cur_stmt, src_stmt, fndecl, bswap_type, load_type,
2924 &n, bswap))
2925 changed = true;
2929 statistics_counter_event (fun, "16-bit nop implementations found",
2930 nop_stats.found_16bit);
2931 statistics_counter_event (fun, "32-bit nop implementations found",
2932 nop_stats.found_32bit);
2933 statistics_counter_event (fun, "64-bit nop implementations found",
2934 nop_stats.found_64bit);
2935 statistics_counter_event (fun, "16-bit bswap implementations found",
2936 bswap_stats.found_16bit);
2937 statistics_counter_event (fun, "32-bit bswap implementations found",
2938 bswap_stats.found_32bit);
2939 statistics_counter_event (fun, "64-bit bswap implementations found",
2940 bswap_stats.found_64bit);
2942 return (changed ? TODO_update_ssa : 0);
2945 } // anon namespace
2947 gimple_opt_pass *
2948 make_pass_optimize_bswap (gcc::context *ctxt)
2950 return new pass_optimize_bswap (ctxt);
2953 /* Return true if stmt is a type conversion operation that can be stripped
2954 when used in a widening multiply operation. */
2955 static bool
2956 widening_mult_conversion_strippable_p (tree result_type, gimple *stmt)
2958 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
2960 if (TREE_CODE (result_type) == INTEGER_TYPE)
2962 tree op_type;
2963 tree inner_op_type;
2965 if (!CONVERT_EXPR_CODE_P (rhs_code))
2966 return false;
2968 op_type = TREE_TYPE (gimple_assign_lhs (stmt));
2970 /* If the type of OP has the same precision as the result, then
2971 we can strip this conversion. The multiply operation will be
2972 selected to create the correct extension as a by-product. */
2973 if (TYPE_PRECISION (result_type) == TYPE_PRECISION (op_type))
2974 return true;
2976 /* We can also strip a conversion if it preserves the signed-ness of
2977 the operation and doesn't narrow the range. */
2978 inner_op_type = TREE_TYPE (gimple_assign_rhs1 (stmt));
2980 /* If the inner-most type is unsigned, then we can strip any
2981 intermediate widening operation. If it's signed, then the
2982 intermediate widening operation must also be signed. */
2983 if ((TYPE_UNSIGNED (inner_op_type)
2984 || TYPE_UNSIGNED (op_type) == TYPE_UNSIGNED (inner_op_type))
2985 && TYPE_PRECISION (op_type) > TYPE_PRECISION (inner_op_type))
2986 return true;
2988 return false;
2991 return rhs_code == FIXED_CONVERT_EXPR;
2994 /* Return true if RHS is a suitable operand for a widening multiplication,
2995 assuming a target type of TYPE.
2996 There are two cases:
2998 - RHS makes some value at least twice as wide. Store that value
2999 in *NEW_RHS_OUT if so, and store its type in *TYPE_OUT.
3001 - RHS is an integer constant. Store that value in *NEW_RHS_OUT if so,
3002 but leave *TYPE_OUT untouched. */
3004 static bool
3005 is_widening_mult_rhs_p (tree type, tree rhs, tree *type_out,
3006 tree *new_rhs_out)
3008 gimple *stmt;
3009 tree type1, rhs1;
3011 if (TREE_CODE (rhs) == SSA_NAME)
3013 stmt = SSA_NAME_DEF_STMT (rhs);
3014 if (is_gimple_assign (stmt))
3016 if (! widening_mult_conversion_strippable_p (type, stmt))
3017 rhs1 = rhs;
3018 else
3020 rhs1 = gimple_assign_rhs1 (stmt);
3022 if (TREE_CODE (rhs1) == INTEGER_CST)
3024 *new_rhs_out = rhs1;
3025 *type_out = NULL;
3026 return true;
3030 else
3031 rhs1 = rhs;
3033 type1 = TREE_TYPE (rhs1);
3035 if (TREE_CODE (type1) != TREE_CODE (type)
3036 || TYPE_PRECISION (type1) * 2 > TYPE_PRECISION (type))
3037 return false;
3039 *new_rhs_out = rhs1;
3040 *type_out = type1;
3041 return true;
3044 if (TREE_CODE (rhs) == INTEGER_CST)
3046 *new_rhs_out = rhs;
3047 *type_out = NULL;
3048 return true;
3051 return false;
3054 /* Return true if STMT performs a widening multiplication, assuming the
3055 output type is TYPE. If so, store the unwidened types of the operands
3056 in *TYPE1_OUT and *TYPE2_OUT respectively. Also fill *RHS1_OUT and
3057 *RHS2_OUT such that converting those operands to types *TYPE1_OUT
3058 and *TYPE2_OUT would give the operands of the multiplication. */
3060 static bool
3061 is_widening_mult_p (gimple *stmt,
3062 tree *type1_out, tree *rhs1_out,
3063 tree *type2_out, tree *rhs2_out)
3065 tree type = TREE_TYPE (gimple_assign_lhs (stmt));
3067 if (TREE_CODE (type) != INTEGER_TYPE
3068 && TREE_CODE (type) != FIXED_POINT_TYPE)
3069 return false;
3071 if (!is_widening_mult_rhs_p (type, gimple_assign_rhs1 (stmt), type1_out,
3072 rhs1_out))
3073 return false;
3075 if (!is_widening_mult_rhs_p (type, gimple_assign_rhs2 (stmt), type2_out,
3076 rhs2_out))
3077 return false;
3079 if (*type1_out == NULL)
3081 if (*type2_out == NULL || !int_fits_type_p (*rhs1_out, *type2_out))
3082 return false;
3083 *type1_out = *type2_out;
3086 if (*type2_out == NULL)
3088 if (!int_fits_type_p (*rhs2_out, *type1_out))
3089 return false;
3090 *type2_out = *type1_out;
3093 /* Ensure that the larger of the two operands comes first. */
3094 if (TYPE_PRECISION (*type1_out) < TYPE_PRECISION (*type2_out))
3096 std::swap (*type1_out, *type2_out);
3097 std::swap (*rhs1_out, *rhs2_out);
3100 return true;
3103 /* Process a single gimple statement STMT, which has a MULT_EXPR as
3104 its rhs, and try to convert it into a WIDEN_MULT_EXPR. The return
3105 value is true iff we converted the statement. */
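/* Illustrative sketch of a conversion candidate (the function name
   mul_wide below is hypothetical).  Assuming 32-bit int, 64-bit long long
   and a target providing a suitable widening-multiply optab, the 64-bit
   MULT_EXPR below becomes a WIDEN_MULT_EXPR of the two 32-bit operands:

     long long
     mul_wide (int a, int b)
     {
       return (long long) a * b;
     }
*/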
3107 static bool
3108 convert_mult_to_widen (gimple *stmt, gimple_stmt_iterator *gsi)
3110 tree lhs, rhs1, rhs2, type, type1, type2;
3111 enum insn_code handler;
3112 machine_mode to_mode, from_mode, actual_mode;
3113 optab op;
3114 int actual_precision;
3115 location_t loc = gimple_location (stmt);
3116 bool from_unsigned1, from_unsigned2;
3118 lhs = gimple_assign_lhs (stmt);
3119 type = TREE_TYPE (lhs);
3120 if (TREE_CODE (type) != INTEGER_TYPE)
3121 return false;
3123 if (!is_widening_mult_p (stmt, &type1, &rhs1, &type2, &rhs2))
3124 return false;
3126 to_mode = TYPE_MODE (type);
3127 from_mode = TYPE_MODE (type1);
3128 from_unsigned1 = TYPE_UNSIGNED (type1);
3129 from_unsigned2 = TYPE_UNSIGNED (type2);
3131 if (from_unsigned1 && from_unsigned2)
3132 op = umul_widen_optab;
3133 else if (!from_unsigned1 && !from_unsigned2)
3134 op = smul_widen_optab;
3135 else
3136 op = usmul_widen_optab;
3138 handler = find_widening_optab_handler_and_mode (op, to_mode, from_mode,
3139 0, &actual_mode);
3141 if (handler == CODE_FOR_nothing)
3143 if (op != smul_widen_optab)
3145 /* We can use a signed multiply with unsigned types as long as
3146 there is a wider mode to use, or it is the smaller of the two
3147 types that is unsigned. Note that type1 >= type2, always. */
3148 if ((TYPE_UNSIGNED (type1)
3149 && TYPE_PRECISION (type1) == GET_MODE_PRECISION (from_mode))
3150 || (TYPE_UNSIGNED (type2)
3151 && TYPE_PRECISION (type2) == GET_MODE_PRECISION (from_mode)))
3153 from_mode = GET_MODE_WIDER_MODE (from_mode);
3154 if (GET_MODE_SIZE (to_mode) <= GET_MODE_SIZE (from_mode))
3155 return false;
3158 op = smul_widen_optab;
3159 handler = find_widening_optab_handler_and_mode (op, to_mode,
3160 from_mode, 0,
3161 &actual_mode);
3163 if (handler == CODE_FOR_nothing)
3164 return false;
3166 from_unsigned1 = from_unsigned2 = false;
3168 else
3169 return false;
3172 /* Ensure that the inputs to the handler are in the correct precision
3173 for the opcode. This will be the full mode size. */
3174 actual_precision = GET_MODE_PRECISION (actual_mode);
3175 if (2 * actual_precision > TYPE_PRECISION (type))
3176 return false;
3177 if (actual_precision != TYPE_PRECISION (type1)
3178 || from_unsigned1 != TYPE_UNSIGNED (type1))
3179 rhs1 = build_and_insert_cast (gsi, loc,
3180 build_nonstandard_integer_type
3181 (actual_precision, from_unsigned1), rhs1);
3182 if (actual_precision != TYPE_PRECISION (type2)
3183 || from_unsigned2 != TYPE_UNSIGNED (type2))
3184 rhs2 = build_and_insert_cast (gsi, loc,
3185 build_nonstandard_integer_type
3186 (actual_precision, from_unsigned2), rhs2);
3188 /* Handle constants. */
3189 if (TREE_CODE (rhs1) == INTEGER_CST)
3190 rhs1 = fold_convert (type1, rhs1);
3191 if (TREE_CODE (rhs2) == INTEGER_CST)
3192 rhs2 = fold_convert (type2, rhs2);
3194 gimple_assign_set_rhs1 (stmt, rhs1);
3195 gimple_assign_set_rhs2 (stmt, rhs2);
3196 gimple_assign_set_rhs_code (stmt, WIDEN_MULT_EXPR);
3197 update_stmt (stmt);
3198 widen_mul_stats.widen_mults_inserted++;
3199 return true;
3202 /* Process a single gimple statement STMT, which is found at the
3203 iterator GSI and has a either a PLUS_EXPR or a MINUS_EXPR as its
3204 rhs (given by CODE), and try to convert it into a
3205 WIDEN_MULT_PLUS_EXPR or a WIDEN_MULT_MINUS_EXPR. The return value
3206 is true iff we converted the statement. */
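/* Illustrative sketch of a multiply-and-accumulate candidate (the function
   name macc below is hypothetical).  Under the same 32/64-bit assumptions
   and with a target handler for WIDEN_MULT_PLUS_EXPR, the addition below
   is rewritten to use the widening multiply-accumulate directly:

     long long
     macc (long long acc, int a, int b)
     {
       return acc + (long long) a * b;
     }
*/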
3208 static bool
3209 convert_plusminus_to_widen (gimple_stmt_iterator *gsi, gimple *stmt,
3210 enum tree_code code)
3212 gimple *rhs1_stmt = NULL, *rhs2_stmt = NULL;
3213 gimple *conv1_stmt = NULL, *conv2_stmt = NULL, *conv_stmt;
3214 tree type, type1, type2, optype;
3215 tree lhs, rhs1, rhs2, mult_rhs1, mult_rhs2, add_rhs;
3216 enum tree_code rhs1_code = ERROR_MARK, rhs2_code = ERROR_MARK;
3217 optab this_optab;
3218 enum tree_code wmult_code;
3219 enum insn_code handler;
3220 machine_mode to_mode, from_mode, actual_mode;
3221 location_t loc = gimple_location (stmt);
3222 int actual_precision;
3223 bool from_unsigned1, from_unsigned2;
3225 lhs = gimple_assign_lhs (stmt);
3226 type = TREE_TYPE (lhs);
3227 if (TREE_CODE (type) != INTEGER_TYPE
3228 && TREE_CODE (type) != FIXED_POINT_TYPE)
3229 return false;
3231 if (code == MINUS_EXPR)
3232 wmult_code = WIDEN_MULT_MINUS_EXPR;
3233 else
3234 wmult_code = WIDEN_MULT_PLUS_EXPR;
3236 rhs1 = gimple_assign_rhs1 (stmt);
3237 rhs2 = gimple_assign_rhs2 (stmt);
3239 if (TREE_CODE (rhs1) == SSA_NAME)
3241 rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
3242 if (is_gimple_assign (rhs1_stmt))
3243 rhs1_code = gimple_assign_rhs_code (rhs1_stmt);
3246 if (TREE_CODE (rhs2) == SSA_NAME)
3248 rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
3249 if (is_gimple_assign (rhs2_stmt))
3250 rhs2_code = gimple_assign_rhs_code (rhs2_stmt);
3253 /* Allow for one conversion statement between the multiply
3254 and addition/subtraction statement. If there is more than
3255 one conversion then we assume they would invalidate this
3256 transformation. If that's not the case then they should have
3257 been folded before now. */
3258 if (CONVERT_EXPR_CODE_P (rhs1_code))
3260 conv1_stmt = rhs1_stmt;
3261 rhs1 = gimple_assign_rhs1 (rhs1_stmt);
3262 if (TREE_CODE (rhs1) == SSA_NAME)
3264 rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
3265 if (is_gimple_assign (rhs1_stmt))
3266 rhs1_code = gimple_assign_rhs_code (rhs1_stmt);
3268 else
3269 return false;
3271 if (CONVERT_EXPR_CODE_P (rhs2_code))
3273 conv2_stmt = rhs2_stmt;
3274 rhs2 = gimple_assign_rhs1 (rhs2_stmt);
3275 if (TREE_CODE (rhs2) == SSA_NAME)
3277 rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
3278 if (is_gimple_assign (rhs2_stmt))
3279 rhs2_code = gimple_assign_rhs_code (rhs2_stmt);
3281 else
3282 return false;
3285 /* If code is WIDEN_MULT_EXPR then it would seem unnecessary to call
3286 is_widening_mult_p, but we still need the rhs values it returns.
3288 It might also appear that it would be sufficient to use the existing
3289 operands of the widening multiply, but that would limit the choice of
3290 multiply-and-accumulate instructions.
3292 If the widened-multiplication result has more than one use, it is
3293 probably wiser not to do the conversion. */
3294 if (code == PLUS_EXPR
3295 && (rhs1_code == MULT_EXPR || rhs1_code == WIDEN_MULT_EXPR))
3297 if (!has_single_use (rhs1)
3298 || !is_widening_mult_p (rhs1_stmt, &type1, &mult_rhs1,
3299 &type2, &mult_rhs2))
3300 return false;
3301 add_rhs = rhs2;
3302 conv_stmt = conv1_stmt;
3304 else if (rhs2_code == MULT_EXPR || rhs2_code == WIDEN_MULT_EXPR)
3306 if (!has_single_use (rhs2)
3307 || !is_widening_mult_p (rhs2_stmt, &type1, &mult_rhs1,
3308 &type2, &mult_rhs2))
3309 return false;
3310 add_rhs = rhs1;
3311 conv_stmt = conv2_stmt;
3313 else
3314 return false;
3316 to_mode = TYPE_MODE (type);
3317 from_mode = TYPE_MODE (type1);
3318 from_unsigned1 = TYPE_UNSIGNED (type1);
3319 from_unsigned2 = TYPE_UNSIGNED (type2);
3320 optype = type1;
3322 /* There's no such thing as a mixed sign madd yet, so use a wider mode. */
3323 if (from_unsigned1 != from_unsigned2)
3325 if (!INTEGRAL_TYPE_P (type))
3326 return false;
3327 /* We can use a signed multiply with unsigned types as long as
3328 there is a wider mode to use, or it is the smaller of the two
3329 types that is unsigned. Note that type1 >= type2, always. */
3330 if ((from_unsigned1
3331 && TYPE_PRECISION (type1) == GET_MODE_PRECISION (from_mode))
3332 || (from_unsigned2
3333 && TYPE_PRECISION (type2) == GET_MODE_PRECISION (from_mode)))
3335 from_mode = GET_MODE_WIDER_MODE (from_mode);
3336 if (GET_MODE_SIZE (from_mode) >= GET_MODE_SIZE (to_mode))
3337 return false;
3340 from_unsigned1 = from_unsigned2 = false;
3341 optype = build_nonstandard_integer_type (GET_MODE_PRECISION (from_mode),
3342 false);
3345 /* If there was a conversion between the multiply and addition
3346 then we need to make sure it fits a multiply-and-accumulate.
3347 There should be a single mode change which does not change the
3348 value. */
3349 if (conv_stmt)
3351 /* We use the original, unmodified data types for this. */
3352 tree from_type = TREE_TYPE (gimple_assign_rhs1 (conv_stmt));
3353 tree to_type = TREE_TYPE (gimple_assign_lhs (conv_stmt));
3354 int data_size = TYPE_PRECISION (type1) + TYPE_PRECISION (type2);
3355 bool is_unsigned = TYPE_UNSIGNED (type1) && TYPE_UNSIGNED (type2);
3357 if (TYPE_PRECISION (from_type) > TYPE_PRECISION (to_type))
3359 /* Conversion is a truncate. */
3360 if (TYPE_PRECISION (to_type) < data_size)
3361 return false;
3363 else if (TYPE_PRECISION (from_type) < TYPE_PRECISION (to_type))
3365 /* Conversion is an extend. Check it's the right sort. */
3366 if (TYPE_UNSIGNED (from_type) != is_unsigned
3367 && !(is_unsigned && TYPE_PRECISION (from_type) > data_size))
3368 return false;
3370 /* else convert is a no-op for our purposes. */
3373 /* Verify that the machine can perform a widening multiply
3374 accumulate in this mode/signedness combination, otherwise
3375 this transformation is likely to pessimize code. */
3376 this_optab = optab_for_tree_code (wmult_code, optype, optab_default);
3377 handler = find_widening_optab_handler_and_mode (this_optab, to_mode,
3378 from_mode, 0, &actual_mode);
3380 if (handler == CODE_FOR_nothing)
3381 return false;
3383 /* Ensure that the inputs to the handler are in the correct precision
3384 for the opcode. This will be the full mode size. */
3385 actual_precision = GET_MODE_PRECISION (actual_mode);
3386 if (actual_precision != TYPE_PRECISION (type1)
3387 || from_unsigned1 != TYPE_UNSIGNED (type1))
3388 mult_rhs1 = build_and_insert_cast (gsi, loc,
3389 build_nonstandard_integer_type
3390 (actual_precision, from_unsigned1),
3391 mult_rhs1);
3392 if (actual_precision != TYPE_PRECISION (type2)
3393 || from_unsigned2 != TYPE_UNSIGNED (type2))
3394 mult_rhs2 = build_and_insert_cast (gsi, loc,
3395 build_nonstandard_integer_type
3396 (actual_precision, from_unsigned2),
3397 mult_rhs2);
3399 if (!useless_type_conversion_p (type, TREE_TYPE (add_rhs)))
3400 add_rhs = build_and_insert_cast (gsi, loc, type, add_rhs);
3402 /* Handle constants. */
3403 if (TREE_CODE (mult_rhs1) == INTEGER_CST)
3404 mult_rhs1 = fold_convert (type1, mult_rhs1);
3405 if (TREE_CODE (mult_rhs2) == INTEGER_CST)
3406 mult_rhs2 = fold_convert (type2, mult_rhs2);
3408 gimple_assign_set_rhs_with_ops (gsi, wmult_code, mult_rhs1, mult_rhs2,
3409 add_rhs);
3410 update_stmt (gsi_stmt (*gsi));
3411 widen_mul_stats.maccs_inserted++;
3412 return true;
3415 /* Combine the multiplication at MUL_STMT with operands OP1 and OP2
3416 with uses in additions and subtractions to form fused multiply-add
3417 operations. Returns true if successful and MUL_STMT should be removed. */
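/* Illustrative sketch of an FMA candidate (the function name fma_like
   below is hypothetical).  Assuming the target provides fma_optab for
   DFmode and FP contraction is not disabled (-ffp-contract=off), the
   multiply feeding the addition is fused into a single FMA_EXPR and the
   original multiplication becomes dead:

     double
     fma_like (double a, double b, double c)
     {
       return a * b + c;
     }
*/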
3419 static bool
3420 convert_mult_to_fma (gimple *mul_stmt, tree op1, tree op2)
3422 tree mul_result = gimple_get_lhs (mul_stmt);
3423 tree type = TREE_TYPE (mul_result);
3424 gimple *use_stmt, *neguse_stmt;
3425 gassign *fma_stmt;
3426 use_operand_p use_p;
3427 imm_use_iterator imm_iter;
3429 if (FLOAT_TYPE_P (type)
3430 && flag_fp_contract_mode == FP_CONTRACT_OFF)
3431 return false;
3433 /* We don't want to do bitfield reduction ops. */
3434 if (INTEGRAL_TYPE_P (type)
3435 && (TYPE_PRECISION (type)
3436 != GET_MODE_PRECISION (TYPE_MODE (type))))
3437 return false;
3439 /* If the target doesn't support it, don't generate it. We assume that
3440 if fma isn't available then fms, fnma or fnms are not either. */
3441 if (optab_handler (fma_optab, TYPE_MODE (type)) == CODE_FOR_nothing)
3442 return false;
3444 /* If the multiplication has zero uses, it is kept around probably because
3445 of -fnon-call-exceptions. Don't optimize it away in that case;
3446 that is DCE's job. */
3447 if (has_zero_uses (mul_result))
3448 return false;
3450 /* Make sure that the multiplication statement becomes dead after
3451 the transformation, i.e. that all uses are transformed to FMAs.
3452 This means we assume that an FMA operation has the same cost
3453 as an addition. */
3454 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, mul_result)
3456 enum tree_code use_code;
3457 tree result = mul_result;
3458 bool negate_p = false;
3460 use_stmt = USE_STMT (use_p);
3462 if (is_gimple_debug (use_stmt))
3463 continue;
3465 /* For now restrict these operations to single basic blocks. In theory
3466 we would want to support sinking the multiplication in
3467 m = a*b;
3468 if ()
3469 ma = m + c;
3470 else
3471 d = m;
3472 to form a fma in the then block and sink the multiplication to the
3473 else block. */
3474 if (gimple_bb (use_stmt) != gimple_bb (mul_stmt))
3475 return false;
3477 if (!is_gimple_assign (use_stmt))
3478 return false;
3480 use_code = gimple_assign_rhs_code (use_stmt);
3482 /* A negate on the multiplication leads to FNMA. */
3483 if (use_code == NEGATE_EXPR)
3485 ssa_op_iter iter;
3486 use_operand_p usep;
3488 result = gimple_assign_lhs (use_stmt);
3490 /* Make sure the negate statement becomes dead with this
3491 single transformation. */
3492 if (!single_imm_use (gimple_assign_lhs (use_stmt),
3493 &use_p, &neguse_stmt))
3494 return false;
3496 /* Make sure the multiplication isn't also used on that stmt. */
3497 FOR_EACH_PHI_OR_STMT_USE (usep, neguse_stmt, iter, SSA_OP_USE)
3498 if (USE_FROM_PTR (usep) == mul_result)
3499 return false;
3501 /* Re-validate. */
3502 use_stmt = neguse_stmt;
3503 if (gimple_bb (use_stmt) != gimple_bb (mul_stmt))
3504 return false;
3505 if (!is_gimple_assign (use_stmt))
3506 return false;
3508 use_code = gimple_assign_rhs_code (use_stmt);
3509 negate_p = true;
3512 switch (use_code)
3514 case MINUS_EXPR:
3515 if (gimple_assign_rhs2 (use_stmt) == result)
3516 negate_p = !negate_p;
3517 break;
3518 case PLUS_EXPR:
3519 break;
3520 default:
3521 /* FMA can only be formed from PLUS and MINUS. */
3522 return false;
3525 /* If the subtrahend (gimple_assign_rhs2 (use_stmt)) is computed
3526 by a MULT_EXPR that we'll visit later, we might be able to
3527 get a more profitable match with fnma.
3528 OTOH, if we don't, a negate / fma pair likely has lower latency
3529 than a mult / subtract pair. */
3530 if (use_code == MINUS_EXPR && !negate_p
3531 && gimple_assign_rhs1 (use_stmt) == result
3532 && optab_handler (fms_optab, TYPE_MODE (type)) == CODE_FOR_nothing
3533 && optab_handler (fnma_optab, TYPE_MODE (type)) != CODE_FOR_nothing)
3535 tree rhs2 = gimple_assign_rhs2 (use_stmt);
3537 if (TREE_CODE (rhs2) == SSA_NAME)
3539 gimple *stmt2 = SSA_NAME_DEF_STMT (rhs2);
3540 if (has_single_use (rhs2)
3541 && is_gimple_assign (stmt2)
3542 && gimple_assign_rhs_code (stmt2) == MULT_EXPR)
3543 return false;
3547 /* We can't handle a * b + a * b. */
3548 if (gimple_assign_rhs1 (use_stmt) == gimple_assign_rhs2 (use_stmt))
3549 return false;
3551 /* While it is possible to validate whether or not the exact form
3552 that we've recognized is available in the backend, the assumption
3553 is that the transformation is never a loss. For instance, suppose
3554 the target only has the plain FMA pattern available. Consider
3555 a*b-c -> fma(a,b,-c): we've exchanged MUL+SUB for FMA+NEG, which
3556 is still two operations. Consider -(a*b)-c -> fma(-a,b,-c): we
3557 still have 3 operations, but in the FMA form the two NEGs are
3558 independent and could be run in parallel. */
3561 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, mul_result)
3563 gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
3564 enum tree_code use_code;
3565 tree addop, mulop1 = op1, result = mul_result;
3566 bool negate_p = false;
3568 if (is_gimple_debug (use_stmt))
3569 continue;
3571 use_code = gimple_assign_rhs_code (use_stmt);
3572 if (use_code == NEGATE_EXPR)
3574 result = gimple_assign_lhs (use_stmt);
3575 single_imm_use (gimple_assign_lhs (use_stmt), &use_p, &neguse_stmt);
3576 gsi_remove (&gsi, true);
3577 release_defs (use_stmt);
3579 use_stmt = neguse_stmt;
3580 gsi = gsi_for_stmt (use_stmt);
3581 use_code = gimple_assign_rhs_code (use_stmt);
3582 negate_p = true;
3585 if (gimple_assign_rhs1 (use_stmt) == result)
3587 addop = gimple_assign_rhs2 (use_stmt);
3588 /* a * b - c -> a * b + (-c) */
3589 if (gimple_assign_rhs_code (use_stmt) == MINUS_EXPR)
3590 addop = force_gimple_operand_gsi (&gsi,
3591 build1 (NEGATE_EXPR,
3592 type, addop),
3593 true, NULL_TREE, true,
3594 GSI_SAME_STMT);
3596 else
3598 addop = gimple_assign_rhs1 (use_stmt);
3599 /* a - b * c -> (-b) * c + a */
3600 if (gimple_assign_rhs_code (use_stmt) == MINUS_EXPR)
3601 negate_p = !negate_p;
3604 if (negate_p)
3605 mulop1 = force_gimple_operand_gsi (&gsi,
3606 build1 (NEGATE_EXPR,
3607 type, mulop1),
3608 true, NULL_TREE, true,
3609 GSI_SAME_STMT);
3611 fma_stmt = gimple_build_assign (gimple_assign_lhs (use_stmt),
3612 FMA_EXPR, mulop1, op2, addop);
3613 gsi_replace (&gsi, fma_stmt, true);
3614 widen_mul_stats.fmas_inserted++;
3617 return true;
3621 /* Helper function of match_uaddsub_overflow. Return 1
3622 if USE_STMT is an unsigned overflow check (ovf != 0) for
3623 STMT, -1 if USE_STMT is an unsigned overflow check (ovf == 0),
3624 and 0 otherwise. */
3626 static int
3627 uaddsub_overflow_check_p (gimple *stmt, gimple *use_stmt)
3629 enum tree_code ccode = ERROR_MARK;
3630 tree crhs1 = NULL_TREE, crhs2 = NULL_TREE;
3631 if (gimple_code (use_stmt) == GIMPLE_COND)
3633 ccode = gimple_cond_code (use_stmt);
3634 crhs1 = gimple_cond_lhs (use_stmt);
3635 crhs2 = gimple_cond_rhs (use_stmt);
3637 else if (is_gimple_assign (use_stmt))
3639 if (gimple_assign_rhs_class (use_stmt) == GIMPLE_BINARY_RHS)
3641 ccode = gimple_assign_rhs_code (use_stmt);
3642 crhs1 = gimple_assign_rhs1 (use_stmt);
3643 crhs2 = gimple_assign_rhs2 (use_stmt);
3645 else if (gimple_assign_rhs_code (use_stmt) == COND_EXPR)
3647 tree cond = gimple_assign_rhs1 (use_stmt);
3648 if (COMPARISON_CLASS_P (cond))
3650 ccode = TREE_CODE (cond);
3651 crhs1 = TREE_OPERAND (cond, 0);
3652 crhs2 = TREE_OPERAND (cond, 1);
3654 else
3655 return 0;
3657 else
3658 return 0;
3660 else
3661 return 0;
3663 if (TREE_CODE_CLASS (ccode) != tcc_comparison)
3664 return 0;
3666 enum tree_code code = gimple_assign_rhs_code (stmt);
3667 tree lhs = gimple_assign_lhs (stmt);
3668 tree rhs1 = gimple_assign_rhs1 (stmt);
3669 tree rhs2 = gimple_assign_rhs2 (stmt);
3671 switch (ccode)
3673 case GT_EXPR:
3674 case LE_EXPR:
3675 /* r = a - b; r > a or r <= a
3676 r = a + b; a > r or a <= r or b > r or b <= r. */
3677 if ((code == MINUS_EXPR && crhs1 == lhs && crhs2 == rhs1)
3678 || (code == PLUS_EXPR && (crhs1 == rhs1 || crhs1 == rhs2)
3679 && crhs2 == lhs))
3680 return ccode == GT_EXPR ? 1 : -1;
3681 break;
3682 case LT_EXPR:
3683 case GE_EXPR:
3684 /* r = a - b; a < r or a >= r
3685 r = a + b; r < a or r >= a or r < b or r >= b. */
3686 if ((code == MINUS_EXPR && crhs1 == rhs1 && crhs2 == lhs)
3687 || (code == PLUS_EXPR && crhs1 == lhs
3688 && (crhs2 == rhs1 || crhs2 == rhs2)))
3689 return ccode == LT_EXPR ? 1 : -1;
3690 break;
3691 default:
3692 break;
3694 return 0;
3697 /* Recognize for unsigned x
3698 x = y - z;
3699 if (x > y)
3700 where there are other uses of x and replace it with
3701 _7 = SUB_OVERFLOW (y, z);
3702 x = REALPART_EXPR <_7>;
3703 _8 = IMAGPART_EXPR <_7>;
3704 if (_8)
3705 and similarly for addition. */
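/* Illustrative sketch of a matching overflow check (the function name
   sub_check below is hypothetical).  Assuming the target defines
   usubv4_optab for the mode of unsigned int, the subtraction and its
   wrap-around test are rewritten to one IFN_SUB_OVERFLOW call whose
   IMAGPART_EXPR drives the comparison; note that x must have uses other
   than the overflow check itself:

     unsigned int
     sub_check (unsigned int y, unsigned int z, int *overflowed)
     {
       unsigned int x = y - z;
       *overflowed = x > y;
       return x;
     }
*/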
3707 static bool
3708 match_uaddsub_overflow (gimple_stmt_iterator *gsi, gimple *stmt,
3709 enum tree_code code)
3711 tree lhs = gimple_assign_lhs (stmt);
3712 tree type = TREE_TYPE (lhs);
3713 use_operand_p use_p;
3714 imm_use_iterator iter;
3715 bool use_seen = false;
3716 bool ovf_use_seen = false;
3717 gimple *use_stmt;
3719 gcc_checking_assert (code == PLUS_EXPR || code == MINUS_EXPR);
3720 if (!INTEGRAL_TYPE_P (type)
3721 || !TYPE_UNSIGNED (type)
3722 || has_zero_uses (lhs)
3723 || has_single_use (lhs)
3724 || optab_handler (code == PLUS_EXPR ? uaddv4_optab : usubv4_optab,
3725 TYPE_MODE (type)) == CODE_FOR_nothing)
3726 return false;
3728 FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
3730 use_stmt = USE_STMT (use_p);
3731 if (is_gimple_debug (use_stmt))
3732 continue;
3734 if (uaddsub_overflow_check_p (stmt, use_stmt))
3735 ovf_use_seen = true;
3736 else
3737 use_seen = true;
3738 if (ovf_use_seen && use_seen)
3739 break;
3742 if (!ovf_use_seen || !use_seen)
3743 return false;
3745 tree ctype = build_complex_type (type);
3746 tree rhs1 = gimple_assign_rhs1 (stmt);
3747 tree rhs2 = gimple_assign_rhs2 (stmt);
3748 gcall *g = gimple_build_call_internal (code == PLUS_EXPR
3749 ? IFN_ADD_OVERFLOW : IFN_SUB_OVERFLOW,
3750 2, rhs1, rhs2);
3751 tree ctmp = make_ssa_name (ctype);
3752 gimple_call_set_lhs (g, ctmp);
3753 gsi_insert_before (gsi, g, GSI_SAME_STMT);
3754 gassign *g2 = gimple_build_assign (lhs, REALPART_EXPR,
3755 build1 (REALPART_EXPR, type, ctmp));
3756 gsi_replace (gsi, g2, true);
3757 tree ovf = make_ssa_name (type);
3758 g2 = gimple_build_assign (ovf, IMAGPART_EXPR,
3759 build1 (IMAGPART_EXPR, type, ctmp));
3760 gsi_insert_after (gsi, g2, GSI_NEW_STMT);
  FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
    {
      if (is_gimple_debug (use_stmt))
	continue;

      int ovf_use = uaddsub_overflow_check_p (stmt, use_stmt);
      if (ovf_use == 0)
	continue;
      if (gimple_code (use_stmt) == GIMPLE_COND)
	{
	  gcond *cond_stmt = as_a <gcond *> (use_stmt);
	  gimple_cond_set_lhs (cond_stmt, ovf);
	  gimple_cond_set_rhs (cond_stmt, build_int_cst (type, 0));
	  gimple_cond_set_code (cond_stmt, ovf_use == 1 ? NE_EXPR : EQ_EXPR);
	}
      else
	{
	  gcc_checking_assert (is_gimple_assign (use_stmt));
	  if (gimple_assign_rhs_class (use_stmt) == GIMPLE_BINARY_RHS)
	    {
	      gimple_assign_set_rhs1 (use_stmt, ovf);
	      gimple_assign_set_rhs2 (use_stmt, build_int_cst (type, 0));
	      gimple_assign_set_rhs_code (use_stmt,
					  ovf_use == 1 ? NE_EXPR : EQ_EXPR);
	    }
	  else
	    {
	      gcc_checking_assert (gimple_assign_rhs_code (use_stmt)
				   == COND_EXPR);
	      tree cond = build2 (ovf_use == 1 ? NE_EXPR : EQ_EXPR,
				  boolean_type_node, ovf,
				  build_int_cst (type, 0));
	      gimple_assign_set_rhs1 (use_stmt, cond);
	    }
	}
      update_stmt (use_stmt);
    }

  return true;
}
/* Return true if target has support for divmod.  */

static bool
target_supports_divmod_p (optab divmod_optab, optab div_optab, machine_mode mode)
{
  /* If target supports hardware divmod insn, use it for divmod.  */
  if (optab_handler (divmod_optab, mode) != CODE_FOR_nothing)
    return true;

  /* Check if libfunc for divmod is available.  */
  rtx libfunc = optab_libfunc (divmod_optab, mode);
  if (libfunc != NULL_RTX)
    {
      /* If optab_handler exists for div_optab, perhaps in a wider mode,
	 we don't want to use the libfunc even if it exists for given mode.  */
      for (machine_mode div_mode = mode;
	   div_mode != VOIDmode;
	   div_mode = GET_MODE_WIDER_MODE (div_mode))
	if (optab_handler (div_optab, div_mode) != CODE_FOR_nothing)
	  return false;

      return targetm.expand_divmod_libfunc != NULL;
    }

  return false;
}
/* Check if stmt is a candidate for the divmod transform.  */

static bool
divmod_candidate_p (gassign *stmt)
{
  tree type = TREE_TYPE (gimple_assign_lhs (stmt));
  enum machine_mode mode = TYPE_MODE (type);
  optab divmod_optab, div_optab;

  if (TYPE_UNSIGNED (type))
    {
      divmod_optab = udivmod_optab;
      div_optab = udiv_optab;
    }
  else
    {
      divmod_optab = sdivmod_optab;
      div_optab = sdiv_optab;
    }

  tree op1 = gimple_assign_rhs1 (stmt);
  tree op2 = gimple_assign_rhs2 (stmt);

  /* Disable the transform if either is a constant, since division-by-constant
     may have specialized expansion.  */
  if (CONSTANT_CLASS_P (op1) || CONSTANT_CLASS_P (op2))
    return false;

  /* Exclude the case where TYPE_OVERFLOW_TRAPS (type), as that should
     be expanded using the [su]divv optabs.  */
  if (TYPE_OVERFLOW_TRAPS (type))
    return false;

  if (!target_supports_divmod_p (divmod_optab, div_optab, mode))
    return false;

  return true;
}
/* This function looks for:
   t1 = a TRUNC_DIV_EXPR b;
   t2 = a TRUNC_MOD_EXPR b;
   and transforms it to the following sequence:
   complex_tmp = DIVMOD (a, b);
   t1 = REALPART_EXPR (complex_tmp);
   t2 = IMAGPART_EXPR (complex_tmp);
   For conditions enabling the transform see divmod_candidate_p().

   The pass has three parts:
   1) Find top_stmt which is trunc_div or trunc_mod stmt and dominates all
      other trunc_div_expr and trunc_mod_expr stmts.
   2) Add top_stmt and all trunc_div and trunc_mod stmts dominated by top_stmt
      to stmts vector.
   3) Insert DIVMOD call just before top_stmt and update entries in
      stmts vector to use return value of DIVMOD (REALPART_EXPR for div,
      IMAGPART_EXPR for mod).  */
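
/* For illustration (added commentary, not from the original sources):
   given source such as

     void
     f (int a, int b, int *q, int *r)
     {
       *q = a / b;
       *r = a % b;
     }

   both results end up coming from a single DIVMOD internal call:

     divmod_tmp = DIVMOD (a, b);
     *q = REALPART_EXPR <divmod_tmp>;
     *r = IMAGPART_EXPR <divmod_tmp>;

   provided divmod_candidate_p accepts the division (non-constant operands,
   no trapping overflow, and target support per target_supports_divmod_p).  */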
static bool
convert_to_divmod (gassign *stmt)
{
  if (stmt_can_throw_internal (stmt)
      || !divmod_candidate_p (stmt))
    return false;

  tree op1 = gimple_assign_rhs1 (stmt);
  tree op2 = gimple_assign_rhs2 (stmt);

  imm_use_iterator use_iter;
  gimple *use_stmt;
  auto_vec<gimple *> stmts;

  gimple *top_stmt = stmt;
  basic_block top_bb = gimple_bb (stmt);

  /* Part 1: Try to set top_stmt to "topmost" stmt that dominates
     at-least stmt and possibly other trunc_div/trunc_mod stmts
     having same operands as stmt.  */

  FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, op1)
    {
      if (is_gimple_assign (use_stmt)
	  && (gimple_assign_rhs_code (use_stmt) == TRUNC_DIV_EXPR
	      || gimple_assign_rhs_code (use_stmt) == TRUNC_MOD_EXPR)
	  && operand_equal_p (op1, gimple_assign_rhs1 (use_stmt), 0)
	  && operand_equal_p (op2, gimple_assign_rhs2 (use_stmt), 0))
	{
	  if (stmt_can_throw_internal (use_stmt))
	    continue;

	  basic_block bb = gimple_bb (use_stmt);

	  if (bb == top_bb)
	    {
	      if (gimple_uid (use_stmt) < gimple_uid (top_stmt))
		top_stmt = use_stmt;
	    }
	  else if (dominated_by_p (CDI_DOMINATORS, top_bb, bb))
	    {
	      top_bb = bb;
	      top_stmt = use_stmt;
	    }
	}
    }

  tree top_op1 = gimple_assign_rhs1 (top_stmt);
  tree top_op2 = gimple_assign_rhs2 (top_stmt);

  stmts.safe_push (top_stmt);
  bool div_seen = (gimple_assign_rhs_code (top_stmt) == TRUNC_DIV_EXPR);
  /* Part 2: Add all trunc_div/trunc_mod statements dominated by top_bb
     to stmts vector.  The 2nd loop will always add stmt to stmts vector, since
     gimple_bb (top_stmt) dominates gimple_bb (stmt), so the
     2nd loop ends up adding at least a single trunc_mod_expr stmt.  */
  FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, top_op1)
    {
      if (is_gimple_assign (use_stmt)
	  && (gimple_assign_rhs_code (use_stmt) == TRUNC_DIV_EXPR
	      || gimple_assign_rhs_code (use_stmt) == TRUNC_MOD_EXPR)
	  && operand_equal_p (top_op1, gimple_assign_rhs1 (use_stmt), 0)
	  && operand_equal_p (top_op2, gimple_assign_rhs2 (use_stmt), 0))
	{
	  if (use_stmt == top_stmt
	      || stmt_can_throw_internal (use_stmt)
	      || !dominated_by_p (CDI_DOMINATORS, gimple_bb (use_stmt), top_bb))
	    continue;

	  stmts.safe_push (use_stmt);
	  if (gimple_assign_rhs_code (use_stmt) == TRUNC_DIV_EXPR)
	    div_seen = true;
	}
    }

  if (!div_seen)
    return false;

  /* Part 3: Create libcall to internal fn DIVMOD:
     divmod_tmp = DIVMOD (op1, op2).  */

  gcall *call_stmt = gimple_build_call_internal (IFN_DIVMOD, 2, op1, op2);
  tree res = make_temp_ssa_name (build_complex_type (TREE_TYPE (op1)),
				 call_stmt, "divmod_tmp");
  gimple_call_set_lhs (call_stmt, res);

  /* Insert the call before top_stmt.  */
  gimple_stmt_iterator top_stmt_gsi = gsi_for_stmt (top_stmt);
  gsi_insert_before (&top_stmt_gsi, call_stmt, GSI_SAME_STMT);

  widen_mul_stats.divmod_calls_inserted++;

  /* Update all statements in stmts vector:
     lhs = op1 TRUNC_DIV_EXPR op2 -> lhs = REALPART_EXPR<divmod_tmp>
     lhs = op1 TRUNC_MOD_EXPR op2 -> lhs = IMAGPART_EXPR<divmod_tmp>.  */

  for (unsigned i = 0; stmts.iterate (i, &use_stmt); ++i)
    {
      tree new_rhs;

      switch (gimple_assign_rhs_code (use_stmt))
	{
	case TRUNC_DIV_EXPR:
	  new_rhs = fold_build1 (REALPART_EXPR, TREE_TYPE (op1), res);
	  break;

	case TRUNC_MOD_EXPR:
	  new_rhs = fold_build1 (IMAGPART_EXPR, TREE_TYPE (op1), res);
	  break;

	default:
	  gcc_unreachable ();
	}

      gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
      gimple_assign_set_rhs_from_tree (&gsi, new_rhs);
      update_stmt (use_stmt);
    }

  return true;
}
/* Find integer multiplications where the operands are extended from
   smaller types, and replace the MULT_EXPR with a WIDEN_MULT_EXPR
   where appropriate.  */
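
/* For example (added commentary, not from the original sources), with
   16-bit short and 32-bit int,

     short a, b;
     int c = (int) a * (int) b;    <-- MULT_EXPR whose operands are
                                       sign-extended from short

   can be rewritten as a WIDEN_MULT_EXPR, letting the target use a
   16x16->32 multiply instruction where one is available.  */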
namespace {

const pass_data pass_data_optimize_widening_mul =
{
  GIMPLE_PASS, /* type */
  "widening_mul", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};

class pass_optimize_widening_mul : public gimple_opt_pass
{
public:
  pass_optimize_widening_mul (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_optimize_widening_mul, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *)
    {
      return flag_expensive_optimizations && optimize;
    }

  virtual unsigned int execute (function *);

}; // class pass_optimize_widening_mul
unsigned int
pass_optimize_widening_mul::execute (function *fun)
{
  basic_block bb;
  bool cfg_changed = false;

  memset (&widen_mul_stats, 0, sizeof (widen_mul_stats));
  calculate_dominance_info (CDI_DOMINATORS);
  renumber_gimple_stmt_uids ();
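  /* Statement UIDs are compared in convert_to_divmod to pick the earliest
     div/mod statement within a basic block.  */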
  FOR_EACH_BB_FN (bb, fun)
    {
      gimple_stmt_iterator gsi;

      for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi);)
	{
	  gimple *stmt = gsi_stmt (gsi);
	  enum tree_code code;

	  if (is_gimple_assign (stmt))
	    {
	      code = gimple_assign_rhs_code (stmt);
	      switch (code)
		{
		case MULT_EXPR:
		  if (!convert_mult_to_widen (stmt, &gsi)
		      && convert_mult_to_fma (stmt,
					      gimple_assign_rhs1 (stmt),
					      gimple_assign_rhs2 (stmt)))
		    {
		      gsi_remove (&gsi, true);
		      release_defs (stmt);
		      continue;
		    }
		  break;

		case PLUS_EXPR:
		case MINUS_EXPR:
		  if (!convert_plusminus_to_widen (&gsi, stmt, code))
		    match_uaddsub_overflow (&gsi, stmt, code);
		  break;

		case TRUNC_MOD_EXPR:
		  convert_to_divmod (as_a<gassign *> (stmt));
		  break;

		default:;
		}
	    }
	  else if (is_gimple_call (stmt)
		   && gimple_call_lhs (stmt))
	    {
	      tree fndecl = gimple_call_fndecl (stmt);
	      if (fndecl
		  && gimple_call_builtin_p (stmt, BUILT_IN_NORMAL))
		{
		  switch (DECL_FUNCTION_CODE (fndecl))
		    {
		    case BUILT_IN_POWF:
		    case BUILT_IN_POW:
		    case BUILT_IN_POWL:
		      if (TREE_CODE (gimple_call_arg (stmt, 1)) == REAL_CST
			  && real_equal
			       (&TREE_REAL_CST (gimple_call_arg (stmt, 1)),
				&dconst2)
			  && convert_mult_to_fma (stmt,
						  gimple_call_arg (stmt, 0),
						  gimple_call_arg (stmt, 0)))
			{
			  unlink_stmt_vdef (stmt);
			  if (gsi_remove (&gsi, true)
			      && gimple_purge_dead_eh_edges (bb))
			    cfg_changed = true;
			  release_defs (stmt);
			  continue;
			}
		      break;

		    default:;
		    }
		}
	    }
	  gsi_next (&gsi);
	}
    }

  statistics_counter_event (fun, "widening multiplications inserted",
			    widen_mul_stats.widen_mults_inserted);
  statistics_counter_event (fun, "widening maccs inserted",
			    widen_mul_stats.maccs_inserted);
  statistics_counter_event (fun, "fused multiply-adds inserted",
			    widen_mul_stats.fmas_inserted);
  statistics_counter_event (fun, "divmod calls inserted",
			    widen_mul_stats.divmod_calls_inserted);

  return cfg_changed ? TODO_cleanup_cfg : 0;
}
} // anon namespace

gimple_opt_pass *
make_pass_optimize_widening_mul (gcc::context *ctxt)
{
  return new pass_optimize_widening_mul (ctxt);
}