/* Global, SSA-based optimizations using mathematical identities.
   Copyright (C) 2005-2017 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* Currently, the only mini-pass in this file tries to CSE reciprocal
   operations.  These are common in sequences such as this one:

	modulus = sqrt(x*x + y*y + z*z);
	x = x / modulus;
	y = y / modulus;
	z = z / modulus;

   that can be optimized to

	modulus = sqrt(x*x + y*y + z*z);
	rmodulus = 1.0 / modulus;
	x = x * rmodulus;
	y = y * rmodulus;
	z = z * rmodulus;

   We do this for loop invariant divisors, and with this pass whenever
   we notice that a division has the same divisor multiple times.

   Of course, like in PRE, we don't insert a division if a dominator
   already has one.  However, this cannot be done as an extension of
   PRE for several reasons.

   First of all, with some experiments it was found out that the
   transformation is not always useful if there are only two divisions
   by the same divisor.  This is probably because modern processors
   can pipeline the divisions; on older, in-order processors it should
   still be effective to optimize two divisions by the same number.
   We make this a param, and it shall be called N in the remainder of
   this comment.

   Second, if trapping math is active, we have less freedom on where
   to insert divisions: we can only do so in basic blocks that already
   contain one.  (If divisions don't trap, instead, we can insert
   divisions elsewhere, which will be in blocks that are common dominators
   of those that have the division).

   We really don't want to compute the reciprocal unless a division will
   be found.  To do this, we won't insert the division in a basic block
   that has less than N divisions *post-dominating* it.

   The algorithm constructs a subset of the dominator tree, holding the
   blocks containing the divisions and the common dominators to them,
   and walks it twice.  The first walk is in post-order, and it annotates
   each block with the number of divisions that post-dominate it: this
   gives information on where divisions can be inserted profitably.
   The second walk is in pre-order, and it inserts divisions as explained
   above, and replaces divisions by multiplications.

   In the best case, the cost of the pass is O(n_statements).  In the
   worst-case, the cost is due to creating the dominator tree subset,
   with a cost of O(n_basic_blocks ^ 2); however this can only happen
   for n_statements / n_basic_blocks statements.  So, the amortized cost
   of creating the dominator tree subset is O(n_basic_blocks) and the
   worst-case cost of the pass is O(n_statements * n_basic_blocks).

   More practically, the cost will be small because there are few
   divisions, and they tend to be in the same basic block, so insert_bb
   is called very few times.

   If we did this using domwalk.c, an efficient implementation would have
   to work on all the variables in a single pass, because we could not
   work on just a subset of the dominator tree, as we do now, and the
   cost would also be something like O(n_statements * n_basic_blocks).
   The data structures would be more complex in order to work on all the
   variables in a single pass.  */
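/* As an illustrative sketch (not part of the original sources): with a
   threshold N of 2, in

	t1 = a / d;
	if (c)
	  foo ();
	t2 = b / d;

   the block of the second division post-dominates the block of the first,
   so the first block is annotated with a division count of 2; a single
   reciptmp = 1.0 / d is inserted there before the existing division, and
   both divisions are rewritten into multiplications by reciptmp.  */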
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "predict.h"
#include "alloc-pool.h"
#include "tree-pass.h"
#include "ssa.h"
#include "optabs-tree.h"
#include "gimple-pretty-print.h"
#include "alias.h"
#include "fold-const.h"
#include "gimple-fold.h"
#include "gimple-iterator.h"
#include "gimplify.h"
#include "gimplify-me.h"
#include "stor-layout.h"
#include "tree-cfg.h"
#include "tree-dfa.h"
#include "tree-ssa.h"
#include "builtins.h"
#include "params.h"
#include "internal-fn.h"
#include "case-cfn-macros.h"
#include "optabs-libfuncs.h"
#include "tree-eh.h"
#include "targhooks.h"
/* This structure represents one basic block that either computes a
   division, or is a common dominator for basic blocks that compute a
   division.  */
struct occurrence {
  /* The basic block represented by this structure.  */
  basic_block bb;

  /* If non-NULL, the SSA_NAME holding the definition for a reciprocal
     inserted in BB.  */
  tree recip_def;

  /* If non-NULL, the GIMPLE_ASSIGN for a reciprocal computation that
     was inserted in BB.  */
  gimple *recip_def_stmt;

  /* Pointer to a list of "struct occurrence"s for blocks dominated
     by BB.  */
  struct occurrence *children;

  /* Pointer to the next "struct occurrence"s in the list of blocks
     sharing a common dominator.  */
  struct occurrence *next;

  /* The number of divisions that are in BB before compute_merit.  The
     number of divisions that are in BB or post-dominate it after
     compute_merit.  */
  int num_divisions;

  /* True if the basic block has a division, false if it is a common
     dominator for basic blocks that do.  If it is false and trapping
     math is active, BB is not a candidate for inserting a reciprocal.  */
  bool bb_has_division;
};
static struct
{
  /* Number of 1.0/X ops inserted.  */
  int rdivs_inserted;

  /* Number of 1.0/FUNC ops inserted.  */
  int rfuncs_inserted;
} reciprocal_stats;

static struct
{
  /* Number of cexpi calls inserted.  */
  int inserted;
} sincos_stats;

static struct
{
  /* Number of hand-written 16-bit nop / bswaps found.  */
  int found_16bit;

  /* Number of hand-written 32-bit nop / bswaps found.  */
  int found_32bit;

  /* Number of hand-written 64-bit nop / bswaps found.  */
  int found_64bit;
} nop_stats, bswap_stats;

static struct
{
  /* Number of widening multiplication ops inserted.  */
  int widen_mults_inserted;

  /* Number of integer multiply-and-accumulate ops inserted.  */
  int maccs_inserted;

  /* Number of fp fused multiply-add ops inserted.  */
  int fmas_inserted;

  /* Number of divmod calls inserted.  */
  int divmod_calls_inserted;
} widen_mul_stats;

/* The instance of "struct occurrence" representing the highest
   interesting block in the dominator tree.  */
static struct occurrence *occ_head;

/* Allocation pool for getting instances of "struct occurrence".  */
static object_allocator<occurrence> *occ_pool;
/* Allocate and return a new struct occurrence for basic block BB,
   whose children list is headed by CHILDREN.  */
static struct occurrence *
occ_new (basic_block bb, struct occurrence *children)
{
  struct occurrence *occ;

  bb->aux = occ = occ_pool->allocate ();
  memset (occ, 0, sizeof (struct occurrence));

  occ->bb = bb;
  occ->children = children;
  return occ;
}
/* Insert NEW_OCC into our subset of the dominator tree.  P_HEAD points to a
   list of "struct occurrence"s, one per basic block, having IDOM as
   their common dominator.

   We try to insert NEW_OCC as deep as possible in the tree, and we also
   insert any other block that is a common dominator for BB and one
   block already in the tree.  */

static void
insert_bb (struct occurrence *new_occ, basic_block idom,
	   struct occurrence **p_head)
{
  struct occurrence *occ, **p_occ;

  for (p_occ = p_head; (occ = *p_occ) != NULL; )
    {
      basic_block bb = new_occ->bb, occ_bb = occ->bb;
      basic_block dom = nearest_common_dominator (CDI_DOMINATORS, occ_bb, bb);
      if (dom == bb)
	{
	  /* BB dominates OCC_BB.  OCC becomes NEW_OCC's child: remove OCC
	     from its list.  */
	  *p_occ = occ->next;
	  occ->next = new_occ->children;
	  new_occ->children = occ;

	  /* Try the next block (it may as well be dominated by BB).  */
	}

      else if (dom == occ_bb)
	{
	  /* OCC_BB dominates BB.  Tail recurse to look deeper.  */
	  insert_bb (new_occ, dom, &occ->children);
	  return;
	}

      else if (dom != idom)
	{
	  gcc_assert (!dom->aux);

	  /* There is a dominator between IDOM and BB, add it and make
	     two children out of NEW_OCC and OCC.  First, remove OCC from
	     its list.  */
	  *p_occ = occ->next;
	  new_occ->next = occ;
	  occ->next = NULL;

	  /* None of the previous blocks has DOM as a dominator: if we tail
	     recursed, we would reexamine them uselessly. Just switch BB with
	     DOM, and go on looking for blocks dominated by DOM.  */
	  new_occ = occ_new (dom, new_occ);
	}

      else
	{
	  /* Nothing special, go on with the next element.  */
	  p_occ = &occ->next;
	}
    }

  /* No place was found as a child of IDOM.  Make BB a sibling of IDOM.  */
  new_occ->next = *p_head;
  *p_head = new_occ;
}
/* Register that we found a division in BB.  */

static inline void
register_division_in (basic_block bb)
{
  struct occurrence *occ;

  occ = (struct occurrence *) bb->aux;
  if (!occ)
    {
      occ = occ_new (bb, NULL);
      insert_bb (occ, ENTRY_BLOCK_PTR_FOR_FN (cfun), &occ_head);
    }

  occ->bb_has_division = true;
  occ->num_divisions++;
}
/* Compute the number of divisions that postdominate each block in OCC and
   its children.  */

static void
compute_merit (struct occurrence *occ)
{
  struct occurrence *occ_child;
  basic_block dom = occ->bb;

  for (occ_child = occ->children; occ_child; occ_child = occ_child->next)
    {
      basic_block bb;
      if (occ_child->children)
	compute_merit (occ_child);

      if (flag_exceptions)
	bb = single_noncomplex_succ (dom);
      else
	bb = dom;

      if (dominated_by_p (CDI_POST_DOMINATORS, bb, occ_child->bb))
	occ->num_divisions += occ_child->num_divisions;
    }
}
/* Return whether USE_STMT is a floating-point division by DEF.  */
static inline bool
is_division_by (gimple *use_stmt, tree def)
{
  return is_gimple_assign (use_stmt)
	 && gimple_assign_rhs_code (use_stmt) == RDIV_EXPR
	 && gimple_assign_rhs2 (use_stmt) == def
	 /* Do not recognize x / x as valid division, as we are getting
	    confused later by replacing all immediate uses x in such
	    a stmt.  */
	 && gimple_assign_rhs1 (use_stmt) != def;
}
/* Walk the subset of the dominator tree rooted at OCC, setting the
   RECIP_DEF field to a definition of 1.0 / DEF that can be used in
   the given basic block.  The field may be left NULL, of course,
   if it is not possible or profitable to do the optimization.

   DEF_GSI is an iterator pointing at the statement defining DEF.
   If RECIP_DEF is set, a dominator already has a computation that can
   be used.  */

static void
insert_reciprocals (gimple_stmt_iterator *def_gsi, struct occurrence *occ,
		    tree def, tree recip_def, int threshold)
{
  tree type;
  gassign *new_stmt;
  gimple_stmt_iterator gsi;
  struct occurrence *occ_child;

  if (!recip_def
      && (occ->bb_has_division || !flag_trapping_math)
      && occ->num_divisions >= threshold)
    {
      /* Make a variable with the replacement and substitute it.  */
      type = TREE_TYPE (def);
      recip_def = create_tmp_reg (type, "reciptmp");
      new_stmt = gimple_build_assign (recip_def, RDIV_EXPR,
				      build_one_cst (type), def);

      if (occ->bb_has_division)
	{
	  /* Case 1: insert before an existing division.  */
	  gsi = gsi_after_labels (occ->bb);
	  while (!gsi_end_p (gsi) && !is_division_by (gsi_stmt (gsi), def))
	    gsi_next (&gsi);

	  gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
	}
      else if (def_gsi && occ->bb == def_gsi->bb)
	{
	  /* Case 2: insert right after the definition.  Note that this will
	     never happen if the definition statement can throw, because in
	     that case the sole successor of the statement's basic block will
	     dominate all the uses as well.  */
	  gsi_insert_after (def_gsi, new_stmt, GSI_NEW_STMT);
	}
      else
	{
	  /* Case 3: insert in a basic block not containing defs/uses.  */
	  gsi = gsi_after_labels (occ->bb);
	  gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
	}

      reciprocal_stats.rdivs_inserted++;

      occ->recip_def_stmt = new_stmt;
    }

  occ->recip_def = recip_def;
  for (occ_child = occ->children; occ_child; occ_child = occ_child->next)
    insert_reciprocals (def_gsi, occ_child, def, recip_def, threshold);
}
/* Replace the division at USE_P with a multiplication by the reciprocal, if
   possible.  */

static inline void
replace_reciprocal (use_operand_p use_p)
{
  gimple *use_stmt = USE_STMT (use_p);
  basic_block bb = gimple_bb (use_stmt);
  struct occurrence *occ = (struct occurrence *) bb->aux;

  if (optimize_bb_for_speed_p (bb)
      && occ->recip_def && use_stmt != occ->recip_def_stmt)
    {
      gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
      gimple_assign_set_rhs_code (use_stmt, MULT_EXPR);
      SET_USE (use_p, occ->recip_def);
      fold_stmt_inplace (&gsi);
      update_stmt (use_stmt);
    }
}
/* Free OCC and return one more "struct occurrence" to be freed.  */

static struct occurrence *
free_bb (struct occurrence *occ)
{
  struct occurrence *child, *next;

  /* First get the two pointers hanging off OCC.  */
  next = occ->next;
  child = occ->children;
  occ->bb->aux = NULL;
  occ_pool->remove (occ);

  /* Now ensure that we don't recurse unless it is necessary.  */
  if (!child)
    return next;
  else
    {
      while (next)
	next = free_bb (next);

      return child;
    }
}
/* Look for floating-point divisions among DEF's uses, and try to
   replace them by multiplications with the reciprocal.  Add
   as many statements computing the reciprocal as needed.

   DEF must be a GIMPLE register of a floating-point type.  */

static void
execute_cse_reciprocals_1 (gimple_stmt_iterator *def_gsi, tree def)
{
  use_operand_p use_p;
  imm_use_iterator use_iter;
  struct occurrence *occ;
  int count = 0, threshold;

  gcc_assert (FLOAT_TYPE_P (TREE_TYPE (def)) && is_gimple_reg (def));

  FOR_EACH_IMM_USE_FAST (use_p, use_iter, def)
    {
      gimple *use_stmt = USE_STMT (use_p);
      if (is_division_by (use_stmt, def))
	{
	  register_division_in (gimple_bb (use_stmt));
	  count++;
	}
    }

  /* Do the expensive part only if we can hope to optimize something.  */
  threshold = targetm.min_divisions_for_recip_mul (TYPE_MODE (TREE_TYPE (def)));
  if (count >= threshold)
    {
      gimple *use_stmt;
      for (occ = occ_head; occ; occ = occ->next)
	{
	  compute_merit (occ);
	  insert_reciprocals (def_gsi, occ, def, NULL, threshold);
	}

      FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, def)
	{
	  if (is_division_by (use_stmt, def))
	    {
	      FOR_EACH_IMM_USE_ON_STMT (use_p, use_iter)
		replace_reciprocal (use_p);
	    }
	}
    }

  for (occ = occ_head; occ; )
    occ = free_bb (occ);

  occ_head = NULL;
}
/* Return an internal function that implements the reciprocal of CALL,
   or IFN_LAST if there is no such function that the target supports.  */

internal_fn
internal_fn_reciprocal (gcall *call)
{
  internal_fn ifn;

  switch (gimple_call_combined_fn (call))
    {
    CASE_CFN_SQRT:
      ifn = IFN_RSQRT;
      break;

    default:
      return IFN_LAST;
    }

  tree_pair types = direct_internal_fn_types (ifn, call);
  if (!direct_internal_fn_supported_p (ifn, types, OPTIMIZE_FOR_SPEED))
    return IFN_LAST;

  return ifn;
}
/* Go through all the floating-point SSA_NAMEs, and call
   execute_cse_reciprocals_1 on each of them.  */
namespace {

const pass_data pass_data_cse_reciprocals =
{
  GIMPLE_PASS, /* type */
  "recip", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};

class pass_cse_reciprocals : public gimple_opt_pass
{
public:
  pass_cse_reciprocals (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_cse_reciprocals, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return optimize && flag_reciprocal_math; }
  virtual unsigned int execute (function *);

}; // class pass_cse_reciprocals
unsigned int
pass_cse_reciprocals::execute (function *fun)
{
  basic_block bb;
  tree arg;

  occ_pool = new object_allocator<occurrence> ("dominators for recip");

  memset (&reciprocal_stats, 0, sizeof (reciprocal_stats));
  calculate_dominance_info (CDI_DOMINATORS);
  calculate_dominance_info (CDI_POST_DOMINATORS);

  if (flag_checking)
    FOR_EACH_BB_FN (bb, fun)
      gcc_assert (!bb->aux);

  for (arg = DECL_ARGUMENTS (fun->decl); arg; arg = DECL_CHAIN (arg))
    if (FLOAT_TYPE_P (TREE_TYPE (arg))
	&& is_gimple_reg (arg))
      {
	tree name = ssa_default_def (fun, arg);
	if (name)
	  execute_cse_reciprocals_1 (NULL, name);
      }

  FOR_EACH_BB_FN (bb, fun)
    {
      tree def;

      for (gphi_iterator gsi = gsi_start_phis (bb); !gsi_end_p (gsi);
	   gsi_next (&gsi))
	{
	  gphi *phi = gsi.phi ();
	  def = PHI_RESULT (phi);
	  if (! virtual_operand_p (def)
	      && FLOAT_TYPE_P (TREE_TYPE (def)))
	    execute_cse_reciprocals_1 (NULL, def);
	}

      for (gimple_stmt_iterator gsi = gsi_after_labels (bb); !gsi_end_p (gsi);
	   gsi_next (&gsi))
	{
	  gimple *stmt = gsi_stmt (gsi);

	  if (gimple_has_lhs (stmt)
	      && (def = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_DEF)) != NULL
	      && FLOAT_TYPE_P (TREE_TYPE (def))
	      && TREE_CODE (def) == SSA_NAME)
	    execute_cse_reciprocals_1 (&gsi, def);
	}

      if (optimize_bb_for_size_p (bb))
	continue;

      /* Scan for a/func(b) and convert it to reciprocal a*rfunc(b).  */
      for (gimple_stmt_iterator gsi = gsi_after_labels (bb); !gsi_end_p (gsi);
	   gsi_next (&gsi))
	{
	  gimple *stmt = gsi_stmt (gsi);

	  if (is_gimple_assign (stmt)
	      && gimple_assign_rhs_code (stmt) == RDIV_EXPR)
	    {
	      tree arg1 = gimple_assign_rhs2 (stmt);
	      gimple *stmt1;

	      if (TREE_CODE (arg1) != SSA_NAME)
		continue;

	      stmt1 = SSA_NAME_DEF_STMT (arg1);

	      if (is_gimple_call (stmt1)
		  && gimple_call_lhs (stmt1))
		{
		  bool fail;
		  imm_use_iterator ui;
		  use_operand_p use_p;
		  tree fndecl = NULL_TREE;

		  gcall *call = as_a <gcall *> (stmt1);
		  internal_fn ifn = internal_fn_reciprocal (call);
		  if (ifn == IFN_LAST)
		    {
		      fndecl = gimple_call_fndecl (call);
		      if (!fndecl
			  || DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_MD)
			continue;
		      fndecl = targetm.builtin_reciprocal (fndecl);
		      if (!fndecl)
			continue;
		    }

		  /* Check that all uses of the SSA name are divisions,
		     otherwise replacing the defining statement will do
		     the wrong thing.  */
		  fail = false;
		  FOR_EACH_IMM_USE_FAST (use_p, ui, arg1)
		    {
		      gimple *stmt2 = USE_STMT (use_p);
		      if (is_gimple_debug (stmt2))
			continue;
		      if (!is_gimple_assign (stmt2)
			  || gimple_assign_rhs_code (stmt2) != RDIV_EXPR
			  || gimple_assign_rhs1 (stmt2) == arg1
			  || gimple_assign_rhs2 (stmt2) != arg1)
			{
			  fail = true;
			  break;
			}
		    }
		  if (fail)
		    continue;

		  gimple_replace_ssa_lhs (call, arg1);
		  if (gimple_call_internal_p (call) != (ifn != IFN_LAST))
		    {
		      auto_vec<tree, 4> args;
		      for (unsigned int i = 0;
			   i < gimple_call_num_args (call); i++)
			args.safe_push (gimple_call_arg (call, i));
		      gcall *stmt2;
		      if (ifn == IFN_LAST)
			stmt2 = gimple_build_call_vec (fndecl, args);
		      else
			stmt2 = gimple_build_call_internal_vec (ifn, args);
		      gimple_call_set_lhs (stmt2, arg1);
		      if (gimple_vdef (call))
			{
			  gimple_set_vdef (stmt2, gimple_vdef (call));
			  SSA_NAME_DEF_STMT (gimple_vdef (stmt2)) = stmt2;
			}
		      gimple_set_vuse (stmt2, gimple_vuse (call));
		      gimple_stmt_iterator gsi2 = gsi_for_stmt (call);
		      gsi_replace (&gsi2, stmt2, true);
		    }
		  else
		    {
		      if (ifn == IFN_LAST)
			gimple_call_set_fndecl (call, fndecl);
		      else
			gimple_call_set_internal_fn (call, ifn);
		      update_stmt (call);
		    }
		  reciprocal_stats.rfuncs_inserted++;

		  FOR_EACH_IMM_USE_STMT (stmt, ui, arg1)
		    {
		      gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
		      gimple_assign_set_rhs_code (stmt, MULT_EXPR);
		      fold_stmt_inplace (&gsi);
		      update_stmt (stmt);
		    }
		}
	    }
	}
    }

  statistics_counter_event (fun, "reciprocal divs inserted",
			    reciprocal_stats.rdivs_inserted);
  statistics_counter_event (fun, "reciprocal functions inserted",
			    reciprocal_stats.rfuncs_inserted);

  free_dominance_info (CDI_DOMINATORS);
  free_dominance_info (CDI_POST_DOMINATORS);
  delete occ_pool;
  return 0;
}
} // anon namespace

gimple_opt_pass *
make_pass_cse_reciprocals (gcc::context *ctxt)
{
  return new pass_cse_reciprocals (ctxt);
}
/* Records an occurrence at statement USE_STMT in the vector STMTS if it
   is dominated by *TOP_BB or dominates it or this basic block is not
   yet initialized.  Returns true if the occurrence was pushed on
   the vector.  Adjusts *TOP_BB to be the basic block dominating all
   statements in the vector.  */

static bool
maybe_record_sincos (vec<gimple *> *stmts,
		     basic_block *top_bb, gimple *use_stmt)
{
  basic_block use_bb = gimple_bb (use_stmt);
  if (*top_bb
      && (*top_bb == use_bb
	  || dominated_by_p (CDI_DOMINATORS, use_bb, *top_bb)))
    stmts->safe_push (use_stmt);
  else if (!*top_bb
	   || dominated_by_p (CDI_DOMINATORS, *top_bb, use_bb))
    {
      stmts->safe_push (use_stmt);
      *top_bb = use_bb;
    }
  else
    return false;

  return true;
}
/* Look for sin, cos and cexpi calls with the same argument NAME and
   create a single call to cexpi CSEing the result in this case.
   We first walk over all immediate uses of the argument collecting
   statements that we can CSE in a vector and in a second pass replace
   the statement rhs with a REALPART or IMAGPART expression on the
   result of the cexpi call we insert before the use statement that
   dominates all other candidates.  */
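/* As an illustrative sketch (not from the original sources): given

     s = sin (x);
     c = cos (x);

   where the sin call dominates the cos call, the pass emits a single

     sincostmp = cexpi (x);

   next to the dominating statement and rewrites the two calls into
   s = IMAGPART_EXPR <sincostmp> and c = REALPART_EXPR <sincostmp>.  */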
static bool
execute_cse_sincos_1 (tree name)
{
  gimple_stmt_iterator gsi;
  imm_use_iterator use_iter;
  tree fndecl, res, type;
  gimple *def_stmt, *use_stmt, *stmt;
  int seen_cos = 0, seen_sin = 0, seen_cexpi = 0;
  auto_vec<gimple *> stmts;
  basic_block top_bb = NULL;
  int i;
  bool cfg_changed = false;

  type = TREE_TYPE (name);
  FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, name)
    {
      if (gimple_code (use_stmt) != GIMPLE_CALL
	  || !gimple_call_lhs (use_stmt))
	continue;

      switch (gimple_call_combined_fn (use_stmt))
	{
	CASE_CFN_COS:
	  seen_cos |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
	  break;

	CASE_CFN_SIN:
	  seen_sin |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
	  break;

	CASE_CFN_CEXPI:
	  seen_cexpi |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
	  break;

	default:;
	}
    }

  if (seen_cos + seen_sin + seen_cexpi <= 1)
    return false;

  /* Simply insert cexpi at the beginning of top_bb but not earlier than
     the name def statement.  */
  fndecl = mathfn_built_in (type, BUILT_IN_CEXPI);
  if (!fndecl)
    return false;
  stmt = gimple_build_call (fndecl, 1, name);
  res = make_temp_ssa_name (TREE_TYPE (TREE_TYPE (fndecl)), stmt, "sincostmp");
  gimple_call_set_lhs (stmt, res);

  def_stmt = SSA_NAME_DEF_STMT (name);
  if (!SSA_NAME_IS_DEFAULT_DEF (name)
      && gimple_code (def_stmt) != GIMPLE_PHI
      && gimple_bb (def_stmt) == top_bb)
    {
      gsi = gsi_for_stmt (def_stmt);
      gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
    }
  else
    {
      gsi = gsi_after_labels (top_bb);
      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
    }
  sincos_stats.inserted++;

  /* And adjust the recorded old call sites.  */
  for (i = 0; stmts.iterate (i, &use_stmt); ++i)
    {
      tree rhs = NULL;

      switch (gimple_call_combined_fn (use_stmt))
	{
	CASE_CFN_COS:
	  rhs = fold_build1 (REALPART_EXPR, type, res);
	  break;

	CASE_CFN_SIN:
	  rhs = fold_build1 (IMAGPART_EXPR, type, res);
	  break;

	CASE_CFN_CEXPI:
	  rhs = res;
	  break;

	default:;
	  gcc_unreachable ();
	}

      /* Replace call with a copy.  */
      stmt = gimple_build_assign (gimple_call_lhs (use_stmt), rhs);

      gsi = gsi_for_stmt (use_stmt);
      gsi_replace (&gsi, stmt, true);
      if (gimple_purge_dead_eh_edges (gimple_bb (stmt)))
	cfg_changed = true;
    }

  return cfg_changed;
}
/* To evaluate powi(x,n), the floating point value x raised to the
   constant integer exponent n, we use a hybrid algorithm that
   combines the "window method" with look-up tables.  For an
   introduction to exponentiation algorithms and "addition chains",
   see section 4.6.3, "Evaluation of Powers" of Donald E. Knuth,
   "Seminumerical Algorithms", Vol. 2, "The Art of Computer Programming",
   3rd Edition, 1998, and Daniel M. Gordon, "A Survey of Fast Exponentiation
   Methods", Journal of Algorithms, Vol. 27, pp. 129-146, 1998.  */
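/* As a worked example of the hybrid scheme (an illustration, not part of
   the original sources): for n = 261 (binary 100000101), which is at
   least POWI_TABLE_SIZE, the window method strips the low
   POWI_WINDOW_SIZE bits whenever the exponent is odd, giving
   x**261 = (x**32)**8 * x**5; the remaining exponents 32 and 5 are both
   below POWI_TABLE_SIZE and are handled by table lookup.  */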
/* Provide a default value for POWI_MAX_MULTS, the maximum number of
   multiplications to inline before calling the system library's pow
   function.  powi(x,n) requires at worst 2*bits(n)-2 multiplications,
   so this default never requires calling pow, powf or powl.  */

#ifndef POWI_MAX_MULTS
#define POWI_MAX_MULTS  (2*HOST_BITS_PER_WIDE_INT-2)
#endif
/* The size of the "optimal power tree" lookup table.  All
   exponents less than this value are simply looked up in the
   powi_table below.  This threshold is also used to size the
   cache of pseudo registers that hold intermediate results.  */
#define POWI_TABLE_SIZE 256

/* The size, in bits of the window, used in the "window method"
   exponentiation algorithm.  This is equivalent to a radix of
   (1<<POWI_WINDOW_SIZE) in the corresponding "m-ary method".  */
#define POWI_WINDOW_SIZE 3

/* The following table is an efficient representation of an
   "optimal power tree".  For each value, i, the corresponding
   value, j, in the table states that an optimal evaluation
   sequence for calculating pow(x,i) can be found by evaluating
   pow(x,j)*pow(x,i-j).  An optimal power tree for the first
   100 integers is given in Knuth's "Seminumerical algorithms".  */
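/* For instance (an illustration, not part of the original sources):
   powi_table[5] is 3, so x**5 is evaluated as x**3 * x**2; recursively,
   powi_table[3] is 2, so x**3 = x**2 * x and x**2 = x * x, for a total
   of three multiplications.  */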
static const unsigned char powi_table[POWI_TABLE_SIZE] =
{
      0,   1,   1,   2,   2,   3,   3,   4,  /*   0 -   7 */
      4,   6,   5,   6,   6,  10,   7,   9,  /*   8 -  15 */
      8,  16,   9,  16,  10,  12,  11,  13,  /*  16 -  23 */
     12,  17,  13,  18,  14,  24,  15,  26,  /*  24 -  31 */
     16,  17,  17,  19,  18,  33,  19,  26,  /*  32 -  39 */
     20,  25,  21,  40,  22,  27,  23,  44,  /*  40 -  47 */
     24,  32,  25,  34,  26,  29,  27,  44,  /*  48 -  55 */
     28,  31,  29,  34,  30,  60,  31,  36,  /*  56 -  63 */
     32,  64,  33,  34,  34,  46,  35,  37,  /*  64 -  71 */
     36,  65,  37,  50,  38,  48,  39,  69,  /*  72 -  79 */
     40,  49,  41,  43,  42,  51,  43,  58,  /*  80 -  87 */
     44,  64,  45,  47,  46,  59,  47,  76,  /*  88 -  95 */
     48,  65,  49,  66,  50,  67,  51,  66,  /*  96 - 103 */
     52,  70,  53,  74,  54, 104,  55,  74,  /* 104 - 111 */
     56,  64,  57,  69,  58,  78,  59,  68,  /* 112 - 119 */
     60,  61,  61,  80,  62,  75,  63,  68,  /* 120 - 127 */
     64,  65,  65, 128,  66, 129,  67,  90,  /* 128 - 135 */
     68,  73,  69, 131,  70,  94,  71,  88,  /* 136 - 143 */
     72, 128,  73,  98,  74, 132,  75, 121,  /* 144 - 151 */
     76, 102,  77, 124,  78, 132,  79, 106,  /* 152 - 159 */
     80,  97,  81, 160,  82,  99,  83, 134,  /* 160 - 167 */
     84,  86,  85,  95,  86, 160,  87, 100,  /* 168 - 175 */
     88, 113,  89,  98,  90, 107,  91, 122,  /* 176 - 183 */
     92, 111,  93, 102,  94, 126,  95, 150,  /* 184 - 191 */
     96, 128,  97, 130,  98, 133,  99, 195,  /* 192 - 199 */
    100, 128, 101, 123, 102, 164, 103, 138,  /* 200 - 207 */
    104, 145, 105, 146, 106, 109, 107, 149,  /* 208 - 215 */
    108, 200, 109, 146, 110, 170, 111, 157,  /* 216 - 223 */
    112, 128, 113, 130, 114, 182, 115, 132,  /* 224 - 231 */
    116, 200, 117, 132, 118, 158, 119, 206,  /* 232 - 239 */
    120, 240, 121, 162, 122, 147, 123, 152,  /* 240 - 247 */
    124, 166, 125, 214, 126, 138, 127, 153,  /* 248 - 255 */
};
/* Return the number of multiplications required to calculate
   powi(x,n) where n is less than POWI_TABLE_SIZE.  This is a
   subroutine of powi_cost.  CACHE is an array indicating
   which exponents have already been calculated.  */

static int
powi_lookup_cost (unsigned HOST_WIDE_INT n, bool *cache)
{
  /* If we've already calculated this exponent, then this evaluation
     doesn't require any additional multiplications.  */
  if (cache[n])
    return 0;

  cache[n] = true;
  return powi_lookup_cost (n - powi_table[n], cache)
	 + powi_lookup_cost (powi_table[n], cache) + 1;
}
/* Return the number of multiplications required to calculate
   powi(x,n) for an arbitrary x, given the exponent N.  This
   function needs to be kept in sync with powi_as_mults below.  */

static int
powi_cost (HOST_WIDE_INT n)
{
  bool cache[POWI_TABLE_SIZE];
  unsigned HOST_WIDE_INT digit;
  unsigned HOST_WIDE_INT val;
  int result;

  if (n == 0)
    return 0;

  /* Ignore the reciprocal when calculating the cost.  */
  val = (n < 0) ? -n : n;

  /* Initialize the exponent cache.  */
  memset (cache, 0, POWI_TABLE_SIZE * sizeof (bool));
  cache[1] = true;

  result = 0;

  while (val >= POWI_TABLE_SIZE)
    {
      if (val & 1)
	{
	  digit = val & ((1 << POWI_WINDOW_SIZE) - 1);
	  result += powi_lookup_cost (digit, cache)
		    + POWI_WINDOW_SIZE + 1;
	  val >>= POWI_WINDOW_SIZE;
	}
      else
	{
	  val >>= 1;
	  result++;
	}
    }

  return result + powi_lookup_cost (val, cache);
}
/* Recursive subroutine of powi_as_mults.  This function takes the
   array, CACHE, of already calculated exponents and an exponent N and
   returns a tree that corresponds to CACHE[1]**N, with type TYPE.  */

static tree
powi_as_mults_1 (gimple_stmt_iterator *gsi, location_t loc, tree type,
		 HOST_WIDE_INT n, tree *cache)
{
  tree op0, op1, ssa_target;
  unsigned HOST_WIDE_INT digit;
  gassign *mult_stmt;

  if (n < POWI_TABLE_SIZE && cache[n])
    return cache[n];

  ssa_target = make_temp_ssa_name (type, NULL, "powmult");

  if (n < POWI_TABLE_SIZE)
    {
      cache[n] = ssa_target;
      op0 = powi_as_mults_1 (gsi, loc, type, n - powi_table[n], cache);
      op1 = powi_as_mults_1 (gsi, loc, type, powi_table[n], cache);
    }
  else if (n & 1)
    {
      digit = n & ((1 << POWI_WINDOW_SIZE) - 1);
      op0 = powi_as_mults_1 (gsi, loc, type, n - digit, cache);
      op1 = powi_as_mults_1 (gsi, loc, type, digit, cache);
    }
  else
    {
      op0 = powi_as_mults_1 (gsi, loc, type, n >> 1, cache);
      op1 = op0;
    }

  mult_stmt = gimple_build_assign (ssa_target, MULT_EXPR, op0, op1);
  gimple_set_location (mult_stmt, loc);
  gsi_insert_before (gsi, mult_stmt, GSI_SAME_STMT);

  return ssa_target;
}
/* Convert ARG0**N to a tree of multiplications of ARG0 with itself.
   This function needs to be kept in sync with powi_cost above.  */

static tree
powi_as_mults (gimple_stmt_iterator *gsi, location_t loc,
	       tree arg0, HOST_WIDE_INT n)
{
  tree cache[POWI_TABLE_SIZE], result, type = TREE_TYPE (arg0);
  gassign *div_stmt;
  tree target;

  if (n == 0)
    return build_real (type, dconst1);

  memset (cache, 0, sizeof (cache));
  cache[1] = arg0;

  result = powi_as_mults_1 (gsi, loc, type, (n < 0) ? -n : n, cache);
  if (n >= 0)
    return result;

  /* If the original exponent was negative, reciprocate the result.  */
  target = make_temp_ssa_name (type, NULL, "powmult");
  div_stmt = gimple_build_assign (target, RDIV_EXPR,
				  build_real (type, dconst1), result);
  gimple_set_location (div_stmt, loc);
  gsi_insert_before (gsi, div_stmt, GSI_SAME_STMT);

  return target;
}
/* ARG0 and N are the two arguments to a powi builtin in GSI with
   location info LOC.  If the arguments are appropriate, create an
   equivalent sequence of statements prior to GSI using an optimal
   number of multiplications, and return an expression holding the
   result.  */

static tree
gimple_expand_builtin_powi (gimple_stmt_iterator *gsi, location_t loc,
			    tree arg0, HOST_WIDE_INT n)
{
  /* Avoid largest negative number.  */
  if (n != -n
      && ((n >= -1 && n <= 2)
	  || (optimize_function_for_speed_p (cfun)
	      && powi_cost (n) <= POWI_MAX_MULTS)))
    return powi_as_mults (gsi, loc, arg0, n);

  return NULL_TREE;
}
/* Build a gimple call statement that calls FN with argument ARG.
   Set the lhs of the call statement to a fresh SSA name.  Insert the
   statement prior to GSI's current position, and return the fresh
   SSA name.  */

static tree
build_and_insert_call (gimple_stmt_iterator *gsi, location_t loc,
		       tree fn, tree arg)
{
  gcall *call_stmt;
  tree ssa_target;

  call_stmt = gimple_build_call (fn, 1, arg);
  ssa_target = make_temp_ssa_name (TREE_TYPE (arg), NULL, "powroot");
  gimple_set_lhs (call_stmt, ssa_target);
  gimple_set_location (call_stmt, loc);
  gsi_insert_before (gsi, call_stmt, GSI_SAME_STMT);

  return ssa_target;
}
/* Build a gimple binary operation with the given CODE and arguments
   ARG0, ARG1, assigning the result to a new SSA name created from
   NAME.  Insert the statement prior to GSI's current position, and
   return the fresh SSA name.  */

static tree
build_and_insert_binop (gimple_stmt_iterator *gsi, location_t loc,
			const char *name, enum tree_code code,
			tree arg0, tree arg1)
{
  tree result = make_temp_ssa_name (TREE_TYPE (arg0), NULL, name);
  gassign *stmt = gimple_build_assign (result, code, arg0, arg1);
  gimple_set_location (stmt, loc);
  gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
  return result;
}
/* Build a gimple reference operation with the given CODE and argument
   ARG, assigning the result to a new SSA name of TYPE with NAME.
   Insert the statement prior to GSI's current position, and return
   the fresh SSA name.  */

static inline tree
build_and_insert_ref (gimple_stmt_iterator *gsi, location_t loc, tree type,
		      const char *name, enum tree_code code, tree arg0)
{
  tree result = make_temp_ssa_name (type, NULL, name);
  gimple *stmt = gimple_build_assign (result, build1 (code, type, arg0));
  gimple_set_location (stmt, loc);
  gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
  return result;
}
/* Build a gimple assignment to cast VAL to TYPE.  Insert the statement
   prior to GSI's current position, and return the fresh SSA name.  */

static tree
build_and_insert_cast (gimple_stmt_iterator *gsi, location_t loc,
		       tree type, tree val)
{
  tree result = make_ssa_name (type);
  gassign *stmt = gimple_build_assign (result, NOP_EXPR, val);
  gimple_set_location (stmt, loc);
  gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
  return result;
}
struct pow_synth_sqrt_info
{
  bool *factors;
  unsigned int deepest;
  unsigned int num_mults;
};
/* Return true iff the real value C can be represented as a
   sum of powers of 0.5 up to N.  That is:
   C == SUM<i from 1..N> (a[i]*(0.5**i)) where a[i] is either 0 or 1.
   Record in INFO the various parameters of the synthesis algorithm such
   as the factors a[i], the maximum 0.5 power and the number of
   multiplications that will be required.  */
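/* For example (an illustration, not part of the original sources):
   c = 0.625 = 0.5 + 0.125 is representable with N >= 3, giving factors
   a[1] = 1, a[2] = 0, a[3] = 1 and a deepest power of 3, so
   pow (x, 0.625) can be synthesized as sqrt (x) * sqrt (sqrt (sqrt (x)))
   with a single multiplication beyond the square-root chain.  */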
bool
representable_as_half_series_p (REAL_VALUE_TYPE c, unsigned n,
				struct pow_synth_sqrt_info *info)
{
  REAL_VALUE_TYPE factor = dconsthalf;
  REAL_VALUE_TYPE remainder = c;

  info->deepest = 0;
  info->num_mults = 0;
  memset (info->factors, 0, n * sizeof (bool));

  for (unsigned i = 0; i < n; i++)
    {
      REAL_VALUE_TYPE res;

      /* If something inexact happened bail out now.  */
      if (real_arithmetic (&res, MINUS_EXPR, &remainder, &factor))
	return false;

      /* We have hit zero.  The number is representable as a sum
	 of powers of 0.5.  */
      if (real_equal (&res, &dconst0))
	{
	  info->factors[i] = true;
	  info->deepest = i + 1;
	  return true;
	}
      else if (!REAL_VALUE_NEGATIVE (res))
	{
	  remainder = res;
	  info->factors[i] = true;
	  info->num_mults++;
	}
      else
	info->factors[i] = false;

      real_arithmetic (&factor, MULT_EXPR, &factor, &dconsthalf);
    }
  return false;
}
/* Return the tree corresponding to FN being applied
   to ARG N times at GSI and LOC.
   Look up previous results from CACHE if need be.
   cache[0] should contain just plain ARG i.e. FN applied to ARG 0 times.  */

static tree
get_fn_chain (tree arg, unsigned int n, gimple_stmt_iterator *gsi,
	      tree fn, location_t loc, tree *cache)
{
  tree res = cache[n];
  if (!res)
    {
      tree prev = get_fn_chain (arg, n - 1, gsi, fn, loc, cache);
      res = build_and_insert_call (gsi, loc, fn, prev);
      cache[n] = res;
    }

  return res;
}
/* Print to STREAM the repeated application of function FNAME to ARG
   N times.  So, for FNAME = "foo", ARG = "x", N = 2 it would print:
   "foo (foo (x))".  */

static void
print_nested_fn (FILE* stream, const char *fname, const char* arg,
		 unsigned int n)
{
  if (n == 0)
    fprintf (stream, "%s", arg);
  else
    {
      fprintf (stream, "%s (", fname);
      print_nested_fn (stream, fname, arg, n - 1);
      fprintf (stream, ")");
    }
}
/* Print to STREAM the fractional sequence of sqrt chains
   applied to ARG, described by INFO.  Used for the dump file.  */

static void
dump_fractional_sqrt_sequence (FILE *stream, const char *arg,
			       struct pow_synth_sqrt_info *info)
{
  for (unsigned int i = 0; i < info->deepest; i++)
    {
      bool is_set = info->factors[i];
      if (is_set)
	{
	  print_nested_fn (stream, "sqrt", arg, i + 1);
	  if (i != info->deepest - 1)
	    fprintf (stream, " * ");
	}
    }
}
/* Print to STREAM a representation of raising ARG to an integer
   power N.  Used for the dump file.  */

static void
dump_integer_part (FILE *stream, const char* arg, HOST_WIDE_INT n)
{
  if (n > 1)
    fprintf (stream, "powi (%s, " HOST_WIDE_INT_PRINT_DEC ")", arg, n);
  else if (n == 1)
    fprintf (stream, "%s", arg);
}
/* Attempt to synthesize a POW[F] (ARG0, ARG1) call using chains of
   square roots.  Place at GSI and LOC.  Limit the maximum depth
   of the sqrt chains to MAX_DEPTH.  Return the tree holding the
   result of the expanded sequence or NULL_TREE if the expansion failed.

   This routine assumes that ARG1 is a real number with a fractional part
   (the integer exponent case will have been handled earlier in
   gimple_expand_builtin_pow).

   For ARG1 > 0.0:
   * For ARG1 composed of a whole part WHOLE_PART and a fractional part
     FRAC_PART i.e. WHOLE_PART == floor (ARG1) and
     FRAC_PART == ARG1 - WHOLE_PART:
     Produce POWI (ARG0, WHOLE_PART) * POW (ARG0, FRAC_PART) where
     POW (ARG0, FRAC_PART) is expanded as a product of square root chains
     if it can be expressed as such, that is if FRAC_PART satisfies:
     FRAC_PART == <SUM from i = 1 until MAX_DEPTH> (a[i] * (0.5**i))
     where integer a[i] is either 0 or 1.

     Example:
     POW (x, 3.625) == POWI (x, 3) * POW (x, 0.625)
       --> POWI (x, 3) * SQRT (x) * SQRT (SQRT (SQRT (x)))

   For ARG1 < 0.0 there are two approaches:
   * (A) Expand to 1.0 / POW (ARG0, -ARG1) where POW (ARG0, -ARG1)
	 is calculated as above.

     Example:
     POW (x, -5.625) == 1.0 / POW (x, 5.625)
       --> 1.0 / (POWI (x, 5) * SQRT (x) * SQRT (SQRT (SQRT (x))))

   * (B) : WHOLE_PART := - ceil (abs (ARG1))
	   FRAC_PART  := ARG1 - WHOLE_PART
     and expand to POW (x, FRAC_PART) / POWI (x, WHOLE_PART).
     Example:
     POW (x, -5.875) == POW (x, 0.125) / POWI (X, 6)
		     --> SQRT (SQRT (SQRT (x))) / (POWI (x, 6))

   For ARG1 < 0.0 we choose between (A) and (B) depending on
   how many multiplications we'd have to do.
   So, for the example in (B): POW (x, -5.875), if we were to
   follow algorithm (A) we would produce:
   1.0 / POWI (X, 5) * SQRT (X) * SQRT (SQRT (X)) * SQRT (SQRT (SQRT (X)))
   which contains more multiplications than approach (B).

   Hopefully, this approach will eliminate potentially expensive POW library
   calls when unsafe floating point math is enabled and allow the compiler to
   further optimise the multiplies, square roots and divides produced by this
   function.  */
static tree
expand_pow_as_sqrts (gimple_stmt_iterator *gsi, location_t loc,
		     tree arg0, tree arg1, HOST_WIDE_INT max_depth)
{
  tree type = TREE_TYPE (arg0);
  machine_mode mode = TYPE_MODE (type);
  tree sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT);
  bool one_over = true;

  if (!sqrtfn)
    return NULL_TREE;

  if (TREE_CODE (arg1) != REAL_CST)
    return NULL_TREE;

  REAL_VALUE_TYPE exp_init = TREE_REAL_CST (arg1);

  gcc_assert (max_depth > 0);
  tree *cache = XALLOCAVEC (tree, max_depth + 1);

  struct pow_synth_sqrt_info synth_info;
  synth_info.factors = XALLOCAVEC (bool, max_depth + 1);
  synth_info.deepest = 0;
  synth_info.num_mults = 0;

  bool neg_exp = REAL_VALUE_NEGATIVE (exp_init);
  REAL_VALUE_TYPE exp = real_value_abs (&exp_init);

  /* The whole and fractional parts of exp.  */
  REAL_VALUE_TYPE whole_part;
  REAL_VALUE_TYPE frac_part;

  real_floor (&whole_part, mode, &exp);
  real_arithmetic (&frac_part, MINUS_EXPR, &exp, &whole_part);


  REAL_VALUE_TYPE ceil_whole = dconst0;
  REAL_VALUE_TYPE ceil_fract = dconst0;

  if (neg_exp)
    {
      real_ceil (&ceil_whole, mode, &exp);
      real_arithmetic (&ceil_fract, MINUS_EXPR, &ceil_whole, &exp);
    }

  if (!representable_as_half_series_p (frac_part, max_depth, &synth_info))
    return NULL_TREE;

  /* Check whether it's more profitable to not use 1.0 / ...  */
  if (neg_exp)
    {
      struct pow_synth_sqrt_info alt_synth_info;
      alt_synth_info.factors = XALLOCAVEC (bool, max_depth + 1);
      alt_synth_info.deepest = 0;
      alt_synth_info.num_mults = 0;

      if (representable_as_half_series_p (ceil_fract, max_depth,
					  &alt_synth_info)
	  && alt_synth_info.deepest <= synth_info.deepest
	  && alt_synth_info.num_mults < synth_info.num_mults)
	{
	  whole_part = ceil_whole;
	  frac_part = ceil_fract;
	  synth_info.deepest = alt_synth_info.deepest;
	  synth_info.num_mults = alt_synth_info.num_mults;
	  memcpy (synth_info.factors, alt_synth_info.factors,
		  (max_depth + 1) * sizeof (bool));
	  one_over = false;
	}
    }

  HOST_WIDE_INT n = real_to_integer (&whole_part);
  REAL_VALUE_TYPE cint;
  real_from_integer (&cint, VOIDmode, n, SIGNED);

  if (!real_identical (&whole_part, &cint))
    return NULL_TREE;

  if (powi_cost (n) + synth_info.num_mults > POWI_MAX_MULTS)
    return NULL_TREE;

  memset (cache, 0, (max_depth + 1) * sizeof (tree));

  tree integer_res = n == 0 ? build_real (type, dconst1) : arg0;

  /* Calculate the integer part of the exponent.  */
  if (n > 1)
    {
      integer_res = gimple_expand_builtin_powi (gsi, loc, arg0, n);
      if (!integer_res)
	return NULL_TREE;
    }

  if (dump_file)
    {
      char string[64];

      real_to_decimal (string, &exp_init, sizeof (string), 0, 1);
      fprintf (dump_file, "synthesizing pow (x, %s) as:\n", string);

      if (neg_exp)
	{
	  if (one_over)
	    {
	      fprintf (dump_file, "1.0 / (");
	      dump_integer_part (dump_file, "x", n);
	      if (n > 0)
		fprintf (dump_file, " * ");
	      dump_fractional_sqrt_sequence (dump_file, "x", &synth_info);
	      fprintf (dump_file, ")");
	    }
	  else
	    {
	      dump_fractional_sqrt_sequence (dump_file, "x", &synth_info);
	      fprintf (dump_file, " / (");
	      dump_integer_part (dump_file, "x", n);
	      fprintf (dump_file, ")");
	    }
	}
      else
	{
	  dump_fractional_sqrt_sequence (dump_file, "x", &synth_info);
	  if (n > 0)
	    fprintf (dump_file, " * ");
	  dump_integer_part (dump_file, "x", n);
	}

      fprintf (dump_file, "\ndeepest sqrt chain: %d\n", synth_info.deepest);
    }


  tree fract_res = NULL_TREE;
  cache[0] = arg0;

  /* Calculate the fractional part of the exponent.  */
  for (unsigned i = 0; i < synth_info.deepest; i++)
    {
      if (synth_info.factors[i])
	{
	  tree sqrt_chain = get_fn_chain (arg0, i + 1, gsi, sqrtfn, loc, cache);

	  if (!fract_res)
	    fract_res = sqrt_chain;

	  else
	    fract_res = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
						fract_res, sqrt_chain);
	}
    }

  tree res = NULL_TREE;

  if (neg_exp)
    {
      if (one_over)
	{
	  if (n > 0)
	    res = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
					  fract_res, integer_res);
	  else
	    res = fract_res;

	  res = build_and_insert_binop (gsi, loc, "powrootrecip", RDIV_EXPR,
					build_real (type, dconst1), res);
	}
      else
	{
	  res = build_and_insert_binop (gsi, loc, "powroot", RDIV_EXPR,
					fract_res, integer_res);
	}
    }
  else
    res = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
				  fract_res, integer_res);
  return res;
}
/* ARG0 and ARG1 are the two arguments to a pow builtin call in GSI
   with location info LOC.  If possible, create an equivalent and
   less expensive sequence of statements prior to GSI, and return an
   expression holding the result.  */
static tree
gimple_expand_builtin_pow (gimple_stmt_iterator *gsi, location_t loc,
			   tree arg0, tree arg1)
{
  REAL_VALUE_TYPE c, cint, dconst1_3, dconst1_4, dconst1_6;
  REAL_VALUE_TYPE c2, dconst3;
  HOST_WIDE_INT n;
  tree type, sqrtfn, cbrtfn, sqrt_arg0, result, cbrt_x, powi_cbrt_x;
  machine_mode mode;
  bool speed_p = optimize_bb_for_speed_p (gsi_bb (*gsi));
  bool hw_sqrt_exists, c_is_int, c2_is_int;

  dconst1_4 = dconst1;
  SET_REAL_EXP (&dconst1_4, REAL_EXP (&dconst1_4) - 2);

  /* If the exponent isn't a constant, there's nothing of interest
     to be done.  */
  if (TREE_CODE (arg1) != REAL_CST)
    return NULL_TREE;

  /* Don't perform the operation if flag_signaling_nans is on
     and the operand is a signaling NaN.  */
  if (HONOR_SNANS (TYPE_MODE (TREE_TYPE (arg1)))
      && ((TREE_CODE (arg0) == REAL_CST
	   && REAL_VALUE_ISSIGNALING_NAN (TREE_REAL_CST (arg0)))
	  || REAL_VALUE_ISSIGNALING_NAN (TREE_REAL_CST (arg1))))
    return NULL_TREE;

  /* If the exponent is equivalent to an integer, expand to an optimal
     multiplication sequence when profitable.  */
  c = TREE_REAL_CST (arg1);
  n = real_to_integer (&c);
  real_from_integer (&cint, VOIDmode, n, SIGNED);
  c_is_int = real_identical (&c, &cint);

  if (c_is_int
      && ((n >= -1 && n <= 2)
	  || (flag_unsafe_math_optimizations
	      && speed_p
	      && powi_cost (n) <= POWI_MAX_MULTS)))
    return gimple_expand_builtin_powi (gsi, loc, arg0, n);

  /* Attempt various optimizations using sqrt and cbrt.  */
  type = TREE_TYPE (arg0);
  mode = TYPE_MODE (type);
  sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT);

  /* Optimize pow(x,0.5) = sqrt(x).  This replacement is always safe
     unless signed zeros must be maintained.  pow(-0,0.5) = +0, while
     sqrt(-0) = -0.  */
  if (sqrtfn
      && real_equal (&c, &dconsthalf)
      && !HONOR_SIGNED_ZEROS (mode))
    return build_and_insert_call (gsi, loc, sqrtfn, arg0);

  hw_sqrt_exists = optab_handler (sqrt_optab, mode) != CODE_FOR_nothing;

  /* Optimize pow(x,1./3.) = cbrt(x).  This requires unsafe math
     optimizations since 1./3. is not exactly representable.  If x
     is negative and finite, the correct value of pow(x,1./3.) is
     a NaN with the "invalid" exception raised, because the value
     of 1./3. actually has an even denominator.  The correct value
     of cbrt(x) is a negative real value.  */
  cbrtfn = mathfn_built_in (type, BUILT_IN_CBRT);
  dconst1_3 = real_value_truncate (mode, dconst_third ());

  if (flag_unsafe_math_optimizations
      && cbrtfn
      && (!HONOR_NANS (mode) || tree_expr_nonnegative_p (arg0))
      && real_equal (&c, &dconst1_3))
    return build_and_insert_call (gsi, loc, cbrtfn, arg0);

  /* Optimize pow(x,1./6.) = cbrt(sqrt(x)).  Don't do this optimization
     if we don't have a hardware sqrt insn.  */
  dconst1_6 = dconst1_3;
  SET_REAL_EXP (&dconst1_6, REAL_EXP (&dconst1_6) - 1);

  if (flag_unsafe_math_optimizations
      && sqrtfn
      && cbrtfn
      && (!HONOR_NANS (mode) || tree_expr_nonnegative_p (arg0))
      && speed_p
      && hw_sqrt_exists
      && real_equal (&c, &dconst1_6))
    {
      /* sqrt(x)  */
      sqrt_arg0 = build_and_insert_call (gsi, loc, sqrtfn, arg0);

      /* cbrt(sqrt(x))  */
      return build_and_insert_call (gsi, loc, cbrtfn, sqrt_arg0);
    }

  /* Attempt to expand the POW as a product of square root chains.
     Expand the 0.25 case even when optimising for size.  */
  if (flag_unsafe_math_optimizations
      && sqrtfn
      && hw_sqrt_exists
      && (speed_p || real_equal (&c, &dconst1_4))
      && !HONOR_SIGNED_ZEROS (mode))
    {
      unsigned int max_depth = speed_p
				? PARAM_VALUE (PARAM_MAX_POW_SQRT_DEPTH)
				: 2;

      tree expand_with_sqrts
	= expand_pow_as_sqrts (gsi, loc, arg0, arg1, max_depth);

      if (expand_with_sqrts)
	return expand_with_sqrts;
    }
  real_arithmetic (&c2, MULT_EXPR, &c, &dconst2);
  n = real_to_integer (&c2);
  real_from_integer (&cint, VOIDmode, n, SIGNED);
  c2_is_int = real_identical (&c2, &cint);

  /* Optimize pow(x,c), where 3c = n for some nonzero integer n, into

     powi(x, n/3) * powi(cbrt(x), n%3),                    n > 0;
     1.0 / (powi(x, abs(n)/3) * powi(cbrt(x), abs(n)%3)),  n < 0.

     Do not calculate the first factor when n/3 = 0.  As cbrt(x) is
     different from pow(x, 1./3.) due to rounding and behavior with
     negative x, we need to constrain this transformation to unsafe
     math and positive x or finite math.  */
  real_from_integer (&dconst3, VOIDmode, 3, SIGNED);
  real_arithmetic (&c2, MULT_EXPR, &c, &dconst3);
  real_round (&c2, mode, &c2);
  n = real_to_integer (&c2);
  real_from_integer (&cint, VOIDmode, n, SIGNED);
  real_arithmetic (&c2, RDIV_EXPR, &cint, &dconst3);
  real_convert (&c2, mode, &c2);

  if (flag_unsafe_math_optimizations
      && cbrtfn
      && (!HONOR_NANS (mode) || tree_expr_nonnegative_p (arg0))
      && real_identical (&c2, &c)
      && !c2_is_int
      && optimize_function_for_speed_p (cfun)
      && powi_cost (n / 3) <= POWI_MAX_MULTS)
    {
      tree powi_x_ndiv3 = NULL_TREE;

      /* Attempt to fold powi(arg0, abs(n/3)) into multiplies.  If not
	 possible or profitable, give up.  Skip the degenerate case when
	 abs(n) < 3, where the result is always 1.  */
      if (absu_hwi (n) >= 3)
	{
	  powi_x_ndiv3 = gimple_expand_builtin_powi (gsi, loc, arg0,
						     abs_hwi (n / 3));
	  if (!powi_x_ndiv3)
	    return NULL_TREE;
	}

      /* Calculate powi(cbrt(x), n%3).  Don't use gimple_expand_builtin_powi
	 as that creates an unnecessary variable.  Instead, just produce
	 either cbrt(x) or cbrt(x) * cbrt(x).  */
      cbrt_x = build_and_insert_call (gsi, loc, cbrtfn, arg0);

      if (absu_hwi (n) % 3 == 1)
	powi_cbrt_x = cbrt_x;
      else
	powi_cbrt_x = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
					      cbrt_x, cbrt_x);

      /* Multiply the two subexpressions, unless powi(x,abs(n)/3) = 1.  */
      if (absu_hwi (n) < 3)
	result = powi_cbrt_x;
      else
	result = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
					 powi_x_ndiv3, powi_cbrt_x);

      /* If n is negative, reciprocate the result.  */
      if (n < 0)
	result = build_and_insert_binop (gsi, loc, "powroot", RDIV_EXPR,
					 build_real (type, dconst1), result);

      return result;
    }

  /* No optimizations succeeded.  */
  return NULL_TREE;
}
/* ARG is the argument to a cabs builtin call in GSI with location info
   LOC.  Create a sequence of statements prior to GSI that calculates
   sqrt(R*R + I*I), where R and I are the real and imaginary components
   of ARG, respectively.  Return an expression holding the result.  */

static tree
gimple_expand_builtin_cabs (gimple_stmt_iterator *gsi, location_t loc, tree arg)
{
  tree real_part, imag_part, addend1, addend2, sum, result;
  tree type = TREE_TYPE (TREE_TYPE (arg));
  tree sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT);
  machine_mode mode = TYPE_MODE (type);

  if (!flag_unsafe_math_optimizations
      || !optimize_bb_for_speed_p (gimple_bb (gsi_stmt (*gsi)))
      || !sqrtfn
      || optab_handler (sqrt_optab, mode) == CODE_FOR_nothing)
    return NULL_TREE;

  real_part = build_and_insert_ref (gsi, loc, type, "cabs",
				    REALPART_EXPR, arg);
  addend1 = build_and_insert_binop (gsi, loc, "cabs", MULT_EXPR,
				    real_part, real_part);
  imag_part = build_and_insert_ref (gsi, loc, type, "cabs",
				    IMAGPART_EXPR, arg);
  addend2 = build_and_insert_binop (gsi, loc, "cabs", MULT_EXPR,
				    imag_part, imag_part);
  sum = build_and_insert_binop (gsi, loc, "cabs", PLUS_EXPR, addend1, addend2);
  result = build_and_insert_call (gsi, loc, sqrtfn, sum);

  return result;
}
/* Go through all calls to sin, cos and cexpi and call execute_cse_sincos_1
   on the SSA_NAME argument of each of them.  Also expand powi(x,n) into
   an optimal number of multiplies, when n is a constant.  */

namespace {

const pass_data pass_data_cse_sincos =
{
  GIMPLE_PASS, /* type */
  "sincos", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  PROP_ssa, /* properties_required */
  PROP_gimple_opt_math, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};

class pass_cse_sincos : public gimple_opt_pass
{
public:
  pass_cse_sincos (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_cse_sincos, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *)
    {
      /* We no longer require either sincos or cexp, since powi expansion
	 piggybacks on this pass.  */
      return optimize;
    }

  virtual unsigned int execute (function *);

}; // class pass_cse_sincos
unsigned int
pass_cse_sincos::execute (function *fun)
{
  basic_block bb;
  bool cfg_changed = false;

  calculate_dominance_info (CDI_DOMINATORS);
  memset (&sincos_stats, 0, sizeof (sincos_stats));

  FOR_EACH_BB_FN (bb, fun)
    {
      gimple_stmt_iterator gsi;
      bool cleanup_eh = false;

      for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple *stmt = gsi_stmt (gsi);

	  /* Only the last stmt in a bb could throw, no need to call
	     gimple_purge_dead_eh_edges if we change something in the middle
	     of a basic block.  */
	  cleanup_eh = false;

	  if (is_gimple_call (stmt)
	      && gimple_call_lhs (stmt))
	    {
	      tree arg, arg0, arg1, result;
	      HOST_WIDE_INT n;
	      location_t loc;

	      switch (gimple_call_combined_fn (stmt))
		{
		CASE_CFN_COS:
		CASE_CFN_SIN:
		CASE_CFN_CEXPI:
		  /* Make sure we have either sincos or cexp.  */
		  if (!targetm.libc_has_function (function_c99_math_complex)
		      && !targetm.libc_has_function (function_sincos))
		    break;

		  arg = gimple_call_arg (stmt, 0);
		  if (TREE_CODE (arg) == SSA_NAME)
		    cfg_changed |= execute_cse_sincos_1 (arg);
		  break;

		CASE_CFN_POW:
		  arg0 = gimple_call_arg (stmt, 0);
		  arg1 = gimple_call_arg (stmt, 1);

		  loc = gimple_location (stmt);
		  result = gimple_expand_builtin_pow (&gsi, loc, arg0, arg1);

		  if (result)
		    {
		      tree lhs = gimple_get_lhs (stmt);
		      gassign *new_stmt = gimple_build_assign (lhs, result);
		      gimple_set_location (new_stmt, loc);
		      unlink_stmt_vdef (stmt);
		      gsi_replace (&gsi, new_stmt, true);
		      cleanup_eh = true;
		      if (gimple_vdef (stmt))
			release_ssa_name (gimple_vdef (stmt));
		    }
		  break;

		CASE_CFN_POWI:
		  arg0 = gimple_call_arg (stmt, 0);
		  arg1 = gimple_call_arg (stmt, 1);
		  loc = gimple_location (stmt);

		  if (real_minus_onep (arg0))
		    {
		      tree t0, t1, cond, one, minus_one;
		      gassign *stmt;

		      t0 = TREE_TYPE (arg0);
		      t1 = TREE_TYPE (arg1);
		      one = build_real (t0, dconst1);
		      minus_one = build_real (t0, dconstm1);

		      cond = make_temp_ssa_name (t1, NULL, "powi_cond");
		      stmt = gimple_build_assign (cond, BIT_AND_EXPR,
						  arg1, build_int_cst (t1, 1));
		      gimple_set_location (stmt, loc);
		      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);

		      result = make_temp_ssa_name (t0, NULL, "powi");
		      stmt = gimple_build_assign (result, COND_EXPR, cond,
						  minus_one, one);
		      gimple_set_location (stmt, loc);
		      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
		    }
		  else
		    {
		      if (!tree_fits_shwi_p (arg1))
			break;

		      n = tree_to_shwi (arg1);
		      result = gimple_expand_builtin_powi (&gsi, loc, arg0, n);
		    }

		  if (result)
		    {
		      tree lhs = gimple_get_lhs (stmt);
		      gassign *new_stmt = gimple_build_assign (lhs, result);
		      gimple_set_location (new_stmt, loc);
		      unlink_stmt_vdef (stmt);
		      gsi_replace (&gsi, new_stmt, true);
		      cleanup_eh = true;
		      if (gimple_vdef (stmt))
			release_ssa_name (gimple_vdef (stmt));
		    }
		  break;

		CASE_CFN_CABS:
		  arg0 = gimple_call_arg (stmt, 0);
		  loc = gimple_location (stmt);
		  result = gimple_expand_builtin_cabs (&gsi, loc, arg0);

		  if (result)
		    {
		      tree lhs = gimple_get_lhs (stmt);
		      gassign *new_stmt = gimple_build_assign (lhs, result);
		      gimple_set_location (new_stmt, loc);
		      unlink_stmt_vdef (stmt);
		      gsi_replace (&gsi, new_stmt, true);
		      cleanup_eh = true;
		      if (gimple_vdef (stmt))
			release_ssa_name (gimple_vdef (stmt));
		    }
		  break;

		default:;
		}
	    }
	}
      if (cleanup_eh)
	cfg_changed |= gimple_purge_dead_eh_edges (bb);
    }

  statistics_counter_event (fun, "sincos statements inserted",
			    sincos_stats.inserted);

  return cfg_changed ? TODO_cleanup_cfg : 0;
}
1926 } // anon namespace
1928 gimple_opt_pass *
1929 make_pass_cse_sincos (gcc::context *ctxt)
1931 return new pass_cse_sincos (ctxt);
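/* For illustration only (hypothetical helper, not part of the pass): the
   scalar equivalent of the powi (-1, n) expansion built in the
   real_minus_onep case of CASE_CFN_POWI above -- a BIT_AND_EXPR on the
   parity of the exponent feeding a COND_EXPR selecting -1.0 or 1.0.  */

static inline double
example_powi_minus_one (long n)
{
  long powi_cond = n & 1;	 /* powi_cond = n & 1 (BIT_AND_EXPR).  */
  return powi_cond ? -1.0 : 1.0; /* powi = powi_cond ? -1.0 : 1.0 (COND_EXPR).  */
}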
1934 /* A symbolic number structure is used to detect byte permutation and selection
1935 patterns of a source. To achieve that, its field N contains an artificial
1936 number consisting of BITS_PER_MARKER sized markers tracking where each
1937 byte comes from in the source:
1939 0 - target byte has the value 0
1940 FF - target byte has an unknown value (eg. due to sign extension)
1941 1..size - marker value is the byte index in the source (0 for lsb).
1943 To detect permutations on memory sources (arrays and structures), a symbolic
1944 number is also associated with:
1945 - a base address BASE_ADDR and an OFFSET giving the address of the source;
1946 - a range which gives the difference between the highest and lowest accessed
1947 memory location to make such a symbolic number;
1948 - the address SRC of the source element of lowest address as a convenience
1949 to easily get BASE_ADDR + offset + lowest bytepos;
1950 - the number of expressions N_OPS bitwise ORed together, to represent the
1951 approximate cost of the computation.
1953 Note 1: the range is different from size as size reflects the size of the
1954 type of the current expression. For instance, for an array char a[],
1955 (short) a[0] | (short) a[3] would have a size of 2 but a range of 4 while
1956 (short) a[0] | ((short) a[0] << 1) would still have a size of 2 but this
1957 time a range of 1.
1959 Note 2: for non-memory sources, range holds the same value as size.
1961 Note 3: SRC points to the SSA_NAME in case of non-memory source. */
1963 struct symbolic_number {
1964 uint64_t n;
1965 tree type;
1966 tree base_addr;
1967 tree offset;
1968 HOST_WIDE_INT bytepos;
1969 tree src;
1970 tree alias_set;
1971 tree vuse;
1972 unsigned HOST_WIDE_INT range;
1973 int n_ops;
1976 #define BITS_PER_MARKER 8
1977 #define MARKER_MASK ((1 << BITS_PER_MARKER) - 1)
1978 #define MARKER_BYTE_UNKNOWN MARKER_MASK
1979 #define HEAD_MARKER(n, size) \
1980 ((n) & ((uint64_t) MARKER_MASK << (((size) - 1) * BITS_PER_MARKER)))
1982 /* The number which the find_bswap_or_nop_1 result should match in
1983 order to have a nop. The number is masked according to the size of
1984 the symbolic number before using it. */
1985 #define CMPNOP (sizeof (int64_t) < 8 ? 0 : \
1986 (uint64_t)0x08070605 << 32 | 0x04030201)
1988 /* The number which the find_bswap_or_nop_1 result should match in
1989 order to have a byte swap. The number is masked according to the
1990 size of the symbolic number before using it. */
1991 #define CMPXCHG (sizeof (int64_t) < 8 ? 0 : \
1992 (uint64_t)0x01020304 << 32 | 0x05060708)
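/* For illustration only (hypothetical helper, not part of the pass): the
   masked values that find_bswap_or_nop below compares a SIZE-byte symbolic
   number against.  For SIZE == 4 this yields 0x04030201 for a nop and
   0x01020304 for a byte swap, matching the markers that
   init_symbolic_number assigns.  */

static inline void
example_cmp_values (int size, uint64_t *nop, uint64_t *xchg)
{
  *nop = CMPNOP;
  *xchg = CMPXCHG;
  if (size < 64 / BITS_PER_MARKER)
    {
      /* Keep only the markers of the SIZE accessed bytes.  */
      *nop &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
      *xchg >>= (64 / BITS_PER_MARKER - size) * BITS_PER_MARKER;
    }
}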
1994 /* Perform a SHIFT or ROTATE operation by COUNT bits on symbolic
1995 number N. Return false if the requested operation is not permitted
1996 on a symbolic number. */
1998 static inline bool
1999 do_shift_rotate (enum tree_code code,
2000 struct symbolic_number *n,
2001 int count)
2003 int i, size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
2004 unsigned head_marker;
2006 if (count % BITS_PER_UNIT != 0)
2007 return false;
2008 count = (count / BITS_PER_UNIT) * BITS_PER_MARKER;
2010 /* Zero out the extra bits of N in order to avoid them being shifted
2011 into the significant bits. */
2012 if (size < 64 / BITS_PER_MARKER)
2013 n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
2015 switch (code)
2017 case LSHIFT_EXPR:
2018 n->n <<= count;
2019 break;
2020 case RSHIFT_EXPR:
2021 head_marker = HEAD_MARKER (n->n, size);
2022 n->n >>= count;
2023 /* Arithmetic shift of signed type: result is dependent on the value. */
2024 if (!TYPE_UNSIGNED (n->type) && head_marker)
2025 for (i = 0; i < count / BITS_PER_MARKER; i++)
2026 n->n |= (uint64_t) MARKER_BYTE_UNKNOWN
2027 << ((size - 1 - i) * BITS_PER_MARKER);
2028 break;
2029 case LROTATE_EXPR:
2030 n->n = (n->n << count) | (n->n >> ((size * BITS_PER_MARKER) - count));
2031 break;
2032 case RROTATE_EXPR:
2033 n->n = (n->n >> count) | (n->n << ((size * BITS_PER_MARKER) - count));
2034 break;
2035 default:
2036 return false;
2038 /* Zero unused bits for size. */
2039 if (size < 64 / BITS_PER_MARKER)
2040 n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
2041 return true;
2044 /* Perform sanity checking for the symbolic number N and the gimple
2045 statement STMT. */
2047 static inline bool
2048 verify_symbolic_number_p (struct symbolic_number *n, gimple *stmt)
2050 tree lhs_type;
2052 lhs_type = gimple_expr_type (stmt);
2054 if (TREE_CODE (lhs_type) != INTEGER_TYPE)
2055 return false;
2057 if (TYPE_PRECISION (lhs_type) != TYPE_PRECISION (n->type))
2058 return false;
2060 return true;
2063 /* Initialize the symbolic number N for the bswap pass from the base element
2064 SRC manipulated by the bitwise OR expression. */
2066 static bool
2067 init_symbolic_number (struct symbolic_number *n, tree src)
2069 int size;
2071 if (! INTEGRAL_TYPE_P (TREE_TYPE (src)))
2072 return false;
2074 n->base_addr = n->offset = n->alias_set = n->vuse = NULL_TREE;
2075 n->src = src;
2077 /* Set up the symbolic number N by setting each byte to a value between 1 and
2078 the byte size of rhs1. The highest order byte is set to n->size and the
2079 lowest order byte to 1. */
2080 n->type = TREE_TYPE (src);
2081 size = TYPE_PRECISION (n->type);
2082 if (size % BITS_PER_UNIT != 0)
2083 return false;
2084 size /= BITS_PER_UNIT;
2085 if (size > 64 / BITS_PER_MARKER)
2086 return false;
2087 n->range = size;
2088 n->n = CMPNOP;
2089 n->n_ops = 1;
2091 if (size < 64 / BITS_PER_MARKER)
2092 n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
2094 return true;
2097 /* Check if STMT might be a byte swap or a nop from a memory source and return
2098 the answer. If so, REF is that memory source, and the base of the memory
2099 area accessed and the offset of the access from that base are recorded in N. */
2101 bool
2102 find_bswap_or_nop_load (gimple *stmt, tree ref, struct symbolic_number *n)
2104 /* Leaf node is an array or component ref. Memorize its base and
2105 offset from base to compare to other such leaf nodes. */
2106 HOST_WIDE_INT bitsize, bitpos;
2107 machine_mode mode;
2108 int unsignedp, reversep, volatilep;
2109 tree offset, base_addr;
2111 /* Not prepared to handle PDP endian. */
2112 if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
2113 return false;
2115 if (!gimple_assign_load_p (stmt) || gimple_has_volatile_ops (stmt))
2116 return false;
2118 base_addr = get_inner_reference (ref, &bitsize, &bitpos, &offset, &mode,
2119 &unsignedp, &reversep, &volatilep);
2121 if (TREE_CODE (base_addr) == MEM_REF)
2123 offset_int bit_offset = 0;
2124 tree off = TREE_OPERAND (base_addr, 1);
2126 if (!integer_zerop (off))
2128 offset_int boff, coff = mem_ref_offset (base_addr);
2129 boff = coff << LOG2_BITS_PER_UNIT;
2130 bit_offset += boff;
2133 base_addr = TREE_OPERAND (base_addr, 0);
2135 /* Avoid returning a negative bitpos as this may wreak havoc later. */
2136 if (wi::neg_p (bit_offset))
2138 offset_int mask = wi::mask <offset_int> (LOG2_BITS_PER_UNIT, false);
2139 offset_int tem = bit_offset.and_not (mask);
2140 /* TEM is the bitpos rounded to BITS_PER_UNIT towards -Inf.
2141 Subtract it from BIT_OFFSET and add it (scaled) to OFFSET. */
2142 bit_offset -= tem;
2143 tem >>= LOG2_BITS_PER_UNIT;
2144 if (offset)
2145 offset = size_binop (PLUS_EXPR, offset,
2146 wide_int_to_tree (sizetype, tem));
2147 else
2148 offset = wide_int_to_tree (sizetype, tem);
2151 bitpos += bit_offset.to_shwi ();
2154 if (bitpos % BITS_PER_UNIT)
2155 return false;
2156 if (bitsize % BITS_PER_UNIT)
2157 return false;
2158 if (reversep)
2159 return false;
2161 if (!init_symbolic_number (n, ref))
2162 return false;
2163 n->base_addr = base_addr;
2164 n->offset = offset;
2165 n->bytepos = bitpos / BITS_PER_UNIT;
2166 n->alias_set = reference_alias_ptr_type (ref);
2167 n->vuse = gimple_vuse (stmt);
2168 return true;
2171 /* Compute the symbolic number N representing the result of a bitwise OR on
2172 the two symbolic numbers N1 and N2, whose source statements are
2173 SOURCE_STMT1 and SOURCE_STMT2 respectively. */
2175 static gimple *
2176 perform_symbolic_merge (gimple *source_stmt1, struct symbolic_number *n1,
2177 gimple *source_stmt2, struct symbolic_number *n2,
2178 struct symbolic_number *n)
2180 int i, size;
2181 uint64_t mask;
2182 gimple *source_stmt;
2183 struct symbolic_number *n_start;
2185 tree rhs1 = gimple_assign_rhs1 (source_stmt1);
2186 if (TREE_CODE (rhs1) == BIT_FIELD_REF
2187 && TREE_CODE (TREE_OPERAND (rhs1, 0)) == SSA_NAME)
2188 rhs1 = TREE_OPERAND (rhs1, 0);
2189 tree rhs2 = gimple_assign_rhs1 (source_stmt2);
2190 if (TREE_CODE (rhs2) == BIT_FIELD_REF
2191 && TREE_CODE (TREE_OPERAND (rhs2, 0)) == SSA_NAME)
2192 rhs2 = TREE_OPERAND (rhs2, 0);
2194 /* The sources differ; cancel the bswap if they are not memory locations
2195 with the same base (array, structure, ...). */
2196 if (rhs1 != rhs2)
2198 uint64_t inc;
2199 HOST_WIDE_INT start_sub, end_sub, end1, end2, end;
2200 struct symbolic_number *toinc_n_ptr, *n_end;
2201 basic_block bb1, bb2;
2203 if (!n1->base_addr || !n2->base_addr
2204 || !operand_equal_p (n1->base_addr, n2->base_addr, 0))
2205 return NULL;
2207 if (!n1->offset != !n2->offset
2208 || (n1->offset && !operand_equal_p (n1->offset, n2->offset, 0)))
2209 return NULL;
2211 if (n1->bytepos < n2->bytepos)
2213 n_start = n1;
2214 start_sub = n2->bytepos - n1->bytepos;
2216 else
2218 n_start = n2;
2219 start_sub = n1->bytepos - n2->bytepos;
2222 bb1 = gimple_bb (source_stmt1);
2223 bb2 = gimple_bb (source_stmt2);
2224 if (dominated_by_p (CDI_DOMINATORS, bb1, bb2))
2225 source_stmt = source_stmt1;
2226 else
2227 source_stmt = source_stmt2;
2229 /* Find the highest address at which a load is performed and
2230 compute related info. */
2231 end1 = n1->bytepos + (n1->range - 1);
2232 end2 = n2->bytepos + (n2->range - 1);
2233 if (end1 < end2)
2235 end = end2;
2236 end_sub = end2 - end1;
2238 else
2240 end = end1;
2241 end_sub = end1 - end2;
2243 n_end = (end2 > end1) ? n2 : n1;
2245 /* Find symbolic number whose lsb is the most significant. */
2246 if (BYTES_BIG_ENDIAN)
2247 toinc_n_ptr = (n_end == n1) ? n2 : n1;
2248 else
2249 toinc_n_ptr = (n_start == n1) ? n2 : n1;
2251 n->range = end - n_start->bytepos + 1;
2253 /* Check that the range of memory covered can be represented by
2254 a symbolic number. */
2255 if (n->range > 64 / BITS_PER_MARKER)
2256 return NULL;
2258 /* Reinterpret the byte marks in the symbolic number holding the value
2259 of bigger weight, according to target endianness. */
2260 inc = BYTES_BIG_ENDIAN ? end_sub : start_sub;
2261 size = TYPE_PRECISION (n1->type) / BITS_PER_UNIT;
2262 for (i = 0; i < size; i++, inc <<= BITS_PER_MARKER)
2264 unsigned marker
2265 = (toinc_n_ptr->n >> (i * BITS_PER_MARKER)) & MARKER_MASK;
2266 if (marker && marker != MARKER_BYTE_UNKNOWN)
2267 toinc_n_ptr->n += inc;
2270 else
2272 n->range = n1->range;
2273 n_start = n1;
2274 source_stmt = source_stmt1;
2277 if (!n1->alias_set
2278 || alias_ptr_types_compatible_p (n1->alias_set, n2->alias_set))
2279 n->alias_set = n1->alias_set;
2280 else
2281 n->alias_set = ptr_type_node;
2282 n->vuse = n_start->vuse;
2283 n->base_addr = n_start->base_addr;
2284 n->offset = n_start->offset;
2285 n->src = n_start->src;
2286 n->bytepos = n_start->bytepos;
2287 n->type = n_start->type;
2288 size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
2290 for (i = 0, mask = MARKER_MASK; i < size; i++, mask <<= BITS_PER_MARKER)
2292 uint64_t masked1, masked2;
2294 masked1 = n1->n & mask;
2295 masked2 = n2->n & mask;
2296 if (masked1 && masked2 && masked1 != masked2)
2297 return NULL;
2299 n->n = n1->n | n2->n;
2300 n->n_ops = n1->n_ops + n2->n_ops;
2302 return source_stmt;
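/* For illustration only (not part of the pass): an OR of two adjacent byte
   loads of the kind perform_symbolic_merge combines.  On a little-endian
   target a[0] contributes marker 0x01, the shifted a[1] contributes 0x0200
   after the re-marking above, and the merged number 0x0201 matches CMPNOP
   masked to two bytes, i.e. a plain 16-bit load.  */

static inline uint16_t
example_merged_load (const unsigned char *a)
{
  return (uint16_t) a[0] | ((uint16_t) a[1] << 8);
}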
2305 /* find_bswap_or_nop_1 invokes itself recursively with N and tries to perform
2306 the operation given by the rhs of STMT on the result. If the operation
2307 can be executed successfully, the function returns a gimple stmt whose
2308 rhs's first tree is the expression of the source operand; otherwise it
2309 returns NULL. */
2311 static gimple *
2312 find_bswap_or_nop_1 (gimple *stmt, struct symbolic_number *n, int limit)
2314 enum tree_code code;
2315 tree rhs1, rhs2 = NULL;
2316 gimple *rhs1_stmt, *rhs2_stmt, *source_stmt1;
2317 enum gimple_rhs_class rhs_class;
2319 if (!limit || !is_gimple_assign (stmt))
2320 return NULL;
2322 rhs1 = gimple_assign_rhs1 (stmt);
2324 if (find_bswap_or_nop_load (stmt, rhs1, n))
2325 return stmt;
2327 /* Handle BIT_FIELD_REF. */
2328 if (TREE_CODE (rhs1) == BIT_FIELD_REF
2329 && TREE_CODE (TREE_OPERAND (rhs1, 0)) == SSA_NAME)
2331 unsigned HOST_WIDE_INT bitsize = tree_to_uhwi (TREE_OPERAND (rhs1, 1));
2332 unsigned HOST_WIDE_INT bitpos = tree_to_uhwi (TREE_OPERAND (rhs1, 2));
2333 if (bitpos % BITS_PER_UNIT == 0
2334 && bitsize % BITS_PER_UNIT == 0
2335 && init_symbolic_number (n, TREE_OPERAND (rhs1, 0)))
2337 /* Handle big-endian bit numbering in BIT_FIELD_REF. */
2338 if (BYTES_BIG_ENDIAN)
2339 bitpos = TYPE_PRECISION (n->type) - bitpos - bitsize;
2341 /* Shift. */
2342 if (!do_shift_rotate (RSHIFT_EXPR, n, bitpos))
2343 return NULL;
2345 /* Mask. */
2346 uint64_t mask = 0;
2347 uint64_t tmp = (1 << BITS_PER_UNIT) - 1;
2348 for (unsigned i = 0; i < bitsize / BITS_PER_UNIT;
2349 i++, tmp <<= BITS_PER_UNIT)
2350 mask |= (uint64_t) MARKER_MASK << (i * BITS_PER_MARKER);
2351 n->n &= mask;
2353 /* Convert. */
2354 n->type = TREE_TYPE (rhs1);
2355 if (!n->base_addr)
2356 n->range = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
2358 return verify_symbolic_number_p (n, stmt) ? stmt : NULL;
2361 return NULL;
2364 if (TREE_CODE (rhs1) != SSA_NAME)
2365 return NULL;
2367 code = gimple_assign_rhs_code (stmt);
2368 rhs_class = gimple_assign_rhs_class (stmt);
2369 rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
2371 if (rhs_class == GIMPLE_BINARY_RHS)
2372 rhs2 = gimple_assign_rhs2 (stmt);
2374 /* Handle unary rhs and binary rhs with integer constants as second
2375 operand. */
2377 if (rhs_class == GIMPLE_UNARY_RHS
2378 || (rhs_class == GIMPLE_BINARY_RHS
2379 && TREE_CODE (rhs2) == INTEGER_CST))
2381 if (code != BIT_AND_EXPR
2382 && code != LSHIFT_EXPR
2383 && code != RSHIFT_EXPR
2384 && code != LROTATE_EXPR
2385 && code != RROTATE_EXPR
2386 && !CONVERT_EXPR_CODE_P (code))
2387 return NULL;
2389 source_stmt1 = find_bswap_or_nop_1 (rhs1_stmt, n, limit - 1);
2391 /* If find_bswap_or_nop_1 returned NULL, STMT is a leaf node and
2392 we have to initialize the symbolic number. */
2393 if (!source_stmt1)
2395 if (gimple_assign_load_p (stmt)
2396 || !init_symbolic_number (n, rhs1))
2397 return NULL;
2398 source_stmt1 = stmt;
2401 switch (code)
2403 case BIT_AND_EXPR:
2405 int i, size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
2406 uint64_t val = int_cst_value (rhs2), mask = 0;
2407 uint64_t tmp = (1 << BITS_PER_UNIT) - 1;
2409 /* Only constants masking full bytes are allowed. */
2410 for (i = 0; i < size; i++, tmp <<= BITS_PER_UNIT)
2411 if ((val & tmp) != 0 && (val & tmp) != tmp)
2412 return NULL;
2413 else if (val & tmp)
2414 mask |= (uint64_t) MARKER_MASK << (i * BITS_PER_MARKER);
2416 n->n &= mask;
2418 break;
2419 case LSHIFT_EXPR:
2420 case RSHIFT_EXPR:
2421 case LROTATE_EXPR:
2422 case RROTATE_EXPR:
2423 if (!do_shift_rotate (code, n, (int) TREE_INT_CST_LOW (rhs2)))
2424 return NULL;
2425 break;
2426 CASE_CONVERT:
2428 int i, type_size, old_type_size;
2429 tree type;
2431 type = gimple_expr_type (stmt);
2432 type_size = TYPE_PRECISION (type);
2433 if (type_size % BITS_PER_UNIT != 0)
2434 return NULL;
2435 type_size /= BITS_PER_UNIT;
2436 if (type_size > 64 / BITS_PER_MARKER)
2437 return NULL;
2439 /* Sign extension: result is dependent on the value. */
2440 old_type_size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
2441 if (!TYPE_UNSIGNED (n->type) && type_size > old_type_size
2442 && HEAD_MARKER (n->n, old_type_size))
2443 for (i = 0; i < type_size - old_type_size; i++)
2444 n->n |= (uint64_t) MARKER_BYTE_UNKNOWN
2445 << ((type_size - 1 - i) * BITS_PER_MARKER);
2447 if (type_size < 64 / BITS_PER_MARKER)
2449 /* If STMT casts to a smaller type mask out the bits not
2450 belonging to the target type. */
2451 n->n &= ((uint64_t) 1 << (type_size * BITS_PER_MARKER)) - 1;
2453 n->type = type;
2454 if (!n->base_addr)
2455 n->range = type_size;
2457 break;
2458 default:
2459 return NULL;
2461 return verify_symbolic_number_p (n, stmt) ? source_stmt1 : NULL;
2464 /* Handle binary rhs. */
2466 if (rhs_class == GIMPLE_BINARY_RHS)
2468 struct symbolic_number n1, n2;
2469 gimple *source_stmt, *source_stmt2;
2471 if (code != BIT_IOR_EXPR)
2472 return NULL;
2474 if (TREE_CODE (rhs2) != SSA_NAME)
2475 return NULL;
2477 rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
2479 switch (code)
2481 case BIT_IOR_EXPR:
2482 source_stmt1 = find_bswap_or_nop_1 (rhs1_stmt, &n1, limit - 1);
2484 if (!source_stmt1)
2485 return NULL;
2487 source_stmt2 = find_bswap_or_nop_1 (rhs2_stmt, &n2, limit - 1);
2489 if (!source_stmt2)
2490 return NULL;
2492 if (TYPE_PRECISION (n1.type) != TYPE_PRECISION (n2.type))
2493 return NULL;
2495 if (!n1.vuse != !n2.vuse
2496 || (n1.vuse && !operand_equal_p (n1.vuse, n2.vuse, 0)))
2497 return NULL;
2499 source_stmt
2500 = perform_symbolic_merge (source_stmt1, &n1, source_stmt2, &n2, n);
2502 if (!source_stmt)
2503 return NULL;
2505 if (!verify_symbolic_number_p (n, stmt))
2506 return NULL;
2508 break;
2509 default:
2510 return NULL;
2512 return source_stmt;
2514 return NULL;
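/* For illustration only (not part of the pass): a manual byte swap built
   purely from the SHIFT/AND/OR codes that find_bswap_or_nop_1 handles.
   Walking it recursively yields the symbolic number 0x01020304, i.e.
   CMPXCHG masked to four bytes.  */

static inline uint32_t
example_manual_bswap32 (uint32_t x)
{
  return (x << 24)		   /* byte 0 -> byte 3 */
	 | ((x & 0x0000ff00) << 8) /* byte 1 -> byte 2 */
	 | ((x >> 8) & 0x0000ff00) /* byte 2 -> byte 1 */
	 | (x >> 24);		   /* byte 3 -> byte 0 */
}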
2517 /* Check if STMT completes a bswap implementation or a read in a given
2518 endianness consisting of ORs, SHIFTs and ANDs and set *BSWAP
2519 accordingly. It also sets N to represent the kind of operations
2520 performed: the size of the resulting expression and whether it works
2521 on a memory source, and if so its alias set and vuse. Finally, the
2522 function returns a stmt whose rhs's first tree is the source
2523 expression. */
2525 static gimple *
2526 find_bswap_or_nop (gimple *stmt, struct symbolic_number *n, bool *bswap)
2528 unsigned rsize;
2529 uint64_t tmpn, mask;
2530 /* The number which the find_bswap_or_nop_1 result should match in order
2531 to have a full byte swap. The number is shifted to the right
2532 according to the size of the symbolic number before using it. */
2533 uint64_t cmpxchg = CMPXCHG;
2534 uint64_t cmpnop = CMPNOP;
2536 gimple *ins_stmt;
2537 int limit;
2539 /* The last parameter determines the search depth limit. It usually
2540 correlates directly to the number n of bytes to be touched. We
2541 increase that number by log2(n) + 1 here in order to also
2542 cover signed -> unsigned conversions of the src operand, as can be seen
2543 in libgcc, and for the initial shift/and operation of the src operand. */
2544 limit = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (gimple_expr_type (stmt)));
2545 limit += 1 + (int) ceil_log2 ((unsigned HOST_WIDE_INT) limit);
2546 ins_stmt = find_bswap_or_nop_1 (stmt, n, limit);
2548 if (!ins_stmt)
2549 return NULL;
2551 /* Find real size of result (highest non-zero byte). */
2552 if (n->base_addr)
2553 for (tmpn = n->n, rsize = 0; tmpn; tmpn >>= BITS_PER_MARKER, rsize++);
2554 else
2555 rsize = n->range;
2557 /* Zero out the bits corresponding to untouched bytes in original gimple
2558 expression. */
2559 if (n->range < (int) sizeof (int64_t))
2561 mask = ((uint64_t) 1 << (n->range * BITS_PER_MARKER)) - 1;
2562 cmpxchg >>= (64 / BITS_PER_MARKER - n->range) * BITS_PER_MARKER;
2563 cmpnop &= mask;
2566 /* Zero out the bits corresponding to unused bytes in the result of the
2567 gimple expression. */
2568 if (rsize < n->range)
2570 if (BYTES_BIG_ENDIAN)
2572 mask = ((uint64_t) 1 << (rsize * BITS_PER_MARKER)) - 1;
2573 cmpxchg &= mask;
2574 cmpnop >>= (n->range - rsize) * BITS_PER_MARKER;
2576 else
2578 mask = ((uint64_t) 1 << (rsize * BITS_PER_MARKER)) - 1;
2579 cmpxchg >>= (n->range - rsize) * BITS_PER_MARKER;
2580 cmpnop &= mask;
2582 n->range = rsize;
2585 /* A complete byte swap should make the symbolic number start with
2586 the largest digit in the highest order byte. An unchanged symbolic
2587 number indicates a read with the same endianness as the target. */
2588 if (n->n == cmpnop)
2589 *bswap = false;
2590 else if (n->n == cmpxchg)
2591 *bswap = true;
2592 else
2593 return NULL;
2595 /* Useless bit manipulation performed by code. */
2596 if (!n->base_addr && n->n == cmpnop && n->n_ops == 1)
2597 return NULL;
2599 n->range *= BITS_PER_UNIT;
2600 return ins_stmt;
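/* For illustration only (not part of the pass): an endianness-neutral
   32-bit read.  find_bswap_or_nop computes n == cmpnop on a little-endian
   target (plain load) and n == cmpxchg on a big-endian one (load + bswap),
   so bswap_replace below handles both cases.  */

static inline uint32_t
example_read_le32 (const unsigned char *p)
{
  return (uint32_t) p[0]
	 | ((uint32_t) p[1] << 8)
	 | ((uint32_t) p[2] << 16)
	 | ((uint32_t) p[3] << 24);
}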
2603 namespace {
2605 const pass_data pass_data_optimize_bswap =
2607 GIMPLE_PASS, /* type */
2608 "bswap", /* name */
2609 OPTGROUP_NONE, /* optinfo_flags */
2610 TV_NONE, /* tv_id */
2611 PROP_ssa, /* properties_required */
2612 0, /* properties_provided */
2613 0, /* properties_destroyed */
2614 0, /* todo_flags_start */
2615 0, /* todo_flags_finish */
2618 class pass_optimize_bswap : public gimple_opt_pass
2620 public:
2621 pass_optimize_bswap (gcc::context *ctxt)
2622 : gimple_opt_pass (pass_data_optimize_bswap, ctxt)
2625 /* opt_pass methods: */
2626 virtual bool gate (function *)
2628 return flag_expensive_optimizations && optimize;
2631 virtual unsigned int execute (function *);
2633 }; // class pass_optimize_bswap
2635 /* Perform the bswap optimization: replace the expression computed in the rhs
2636 of CUR_STMT by an equivalent bswap, load or load + bswap expression.
2637 Which of these alternatives replaces the rhs is given by N->base_addr (non
2638 null if a load is needed) and BSWAP. The type, VUSE and alias set of the
2639 load to perform are also given in N while the builtin bswap to invoke is
2640 given in FNDECL. Finally, if a load is involved, INS_STMT refers to one of
2641 the load statements involved to construct the rhs in CUR_STMT and N->range
2642 gives the size of the rhs expression for maintaining some statistics.
2644 Note that if the replacement involves a load, CUR_STMT is moved just after
2645 INS_STMT to do the load with the same VUSE, which can lead to CUR_STMT
2646 changing basic block. */
2648 static bool
2649 bswap_replace (gimple *cur_stmt, gimple *ins_stmt, tree fndecl,
2650 tree bswap_type, tree load_type, struct symbolic_number *n,
2651 bool bswap)
2653 gimple_stmt_iterator gsi;
2654 tree src, tmp, tgt;
2655 gimple *bswap_stmt;
2657 gsi = gsi_for_stmt (cur_stmt);
2658 src = n->src;
2659 tgt = gimple_assign_lhs (cur_stmt);
2661 /* Need to load the value from memory first. */
2662 if (n->base_addr)
2664 gimple_stmt_iterator gsi_ins = gsi_for_stmt (ins_stmt);
2665 tree addr_expr, addr_tmp, val_expr, val_tmp;
2666 tree load_offset_ptr, aligned_load_type;
2667 gimple *addr_stmt, *load_stmt;
2668 unsigned align;
2669 HOST_WIDE_INT load_offset = 0;
2670 basic_block ins_bb, cur_bb;
2672 ins_bb = gimple_bb (ins_stmt);
2673 cur_bb = gimple_bb (cur_stmt);
2674 if (!dominated_by_p (CDI_DOMINATORS, cur_bb, ins_bb))
2675 return false;
2677 align = get_object_alignment (src);
2679 /* Move cur_stmt just before one of the loads of the original
2680 to ensure it has the same VUSE. See PR61517 for what could
2681 go wrong. */
2682 if (gimple_bb (cur_stmt) != gimple_bb (ins_stmt))
2683 reset_flow_sensitive_info (gimple_assign_lhs (cur_stmt));
2684 gsi_move_before (&gsi, &gsi_ins);
2685 gsi = gsi_for_stmt (cur_stmt);
2687 /* Compute address to load from and cast according to the size
2688 of the load. */
2689 addr_expr = build_fold_addr_expr (unshare_expr (src));
2690 if (is_gimple_mem_ref_addr (addr_expr))
2691 addr_tmp = addr_expr;
2692 else
2694 addr_tmp = make_temp_ssa_name (TREE_TYPE (addr_expr), NULL,
2695 "load_src");
2696 addr_stmt = gimple_build_assign (addr_tmp, addr_expr);
2697 gsi_insert_before (&gsi, addr_stmt, GSI_SAME_STMT);
2700 /* Perform the load. */
2701 aligned_load_type = load_type;
2702 if (align < TYPE_ALIGN (load_type))
2703 aligned_load_type = build_aligned_type (load_type, align);
2704 load_offset_ptr = build_int_cst (n->alias_set, load_offset);
2705 val_expr = fold_build2 (MEM_REF, aligned_load_type, addr_tmp,
2706 load_offset_ptr);
2708 if (!bswap)
2710 if (n->range == 16)
2711 nop_stats.found_16bit++;
2712 else if (n->range == 32)
2713 nop_stats.found_32bit++;
2714 else
2716 gcc_assert (n->range == 64);
2717 nop_stats.found_64bit++;
2720 /* Convert the result of the load if necessary. */
2721 if (!useless_type_conversion_p (TREE_TYPE (tgt), load_type))
2723 val_tmp = make_temp_ssa_name (aligned_load_type, NULL,
2724 "load_dst");
2725 load_stmt = gimple_build_assign (val_tmp, val_expr);
2726 gimple_set_vuse (load_stmt, n->vuse);
2727 gsi_insert_before (&gsi, load_stmt, GSI_SAME_STMT);
2728 gimple_assign_set_rhs_with_ops (&gsi, NOP_EXPR, val_tmp);
2730 else
2732 gimple_assign_set_rhs_with_ops (&gsi, MEM_REF, val_expr);
2733 gimple_set_vuse (cur_stmt, n->vuse);
2735 update_stmt (cur_stmt);
2737 if (dump_file)
2739 fprintf (dump_file,
2740 "%d bit load in target endianness found at: ",
2741 (int) n->range);
2742 print_gimple_stmt (dump_file, cur_stmt, 0);
2744 return true;
2746 else
2748 val_tmp = make_temp_ssa_name (aligned_load_type, NULL, "load_dst");
2749 load_stmt = gimple_build_assign (val_tmp, val_expr);
2750 gimple_set_vuse (load_stmt, n->vuse);
2751 gsi_insert_before (&gsi, load_stmt, GSI_SAME_STMT);
2753 src = val_tmp;
2755 else if (!bswap)
2757 gimple *g;
2758 if (!useless_type_conversion_p (TREE_TYPE (tgt), TREE_TYPE (src)))
2760 if (!is_gimple_val (src))
2761 return false;
2762 g = gimple_build_assign (tgt, NOP_EXPR, src);
2764 else
2765 g = gimple_build_assign (tgt, src);
2766 if (n->range == 16)
2767 nop_stats.found_16bit++;
2768 else if (n->range == 32)
2769 nop_stats.found_32bit++;
2770 else
2772 gcc_assert (n->range == 64);
2773 nop_stats.found_64bit++;
2775 if (dump_file)
2777 fprintf (dump_file,
2778 "%d bit reshuffle in target endianness found at: ",
2779 (int) n->range);
2780 print_gimple_stmt (dump_file, cur_stmt, 0);
2782 gsi_replace (&gsi, g, true);
2783 return true;
2785 else if (TREE_CODE (src) == BIT_FIELD_REF)
2786 src = TREE_OPERAND (src, 0);
2788 if (n->range == 16)
2789 bswap_stats.found_16bit++;
2790 else if (n->range == 32)
2791 bswap_stats.found_32bit++;
2792 else
2794 gcc_assert (n->range == 64);
2795 bswap_stats.found_64bit++;
2798 tmp = src;
2800 /* Convert the src expression if necessary. */
2801 if (!useless_type_conversion_p (TREE_TYPE (tmp), bswap_type))
2803 gimple *convert_stmt;
2805 tmp = make_temp_ssa_name (bswap_type, NULL, "bswapsrc");
2806 convert_stmt = gimple_build_assign (tmp, NOP_EXPR, src);
2807 gsi_insert_before (&gsi, convert_stmt, GSI_SAME_STMT);
2810 /* The canonical form for a 16-bit bswap is a rotate expression. Only 16-bit
2811 values are considered, as rotating a 2N-bit value by N bits is generally
2812 not equivalent to a bswap. Consider for instance 0x01020304 r>> 16, which
2813 gives 0x03040102 while a bswap for that value is 0x04030201. */
2814 if (bswap && n->range == 16)
2816 tree count = build_int_cst (NULL, BITS_PER_UNIT);
2817 src = fold_build2 (LROTATE_EXPR, bswap_type, tmp, count);
2818 bswap_stmt = gimple_build_assign (NULL, src);
2820 else
2821 bswap_stmt = gimple_build_call (fndecl, 1, tmp);
2823 tmp = tgt;
2825 /* Convert the result if necessary. */
2826 if (!useless_type_conversion_p (TREE_TYPE (tgt), bswap_type))
2828 gimple *convert_stmt;
2830 tmp = make_temp_ssa_name (bswap_type, NULL, "bswapdst");
2831 convert_stmt = gimple_build_assign (tgt, NOP_EXPR, tmp);
2832 gsi_insert_after (&gsi, convert_stmt, GSI_SAME_STMT);
2835 gimple_set_lhs (bswap_stmt, tmp);
2837 if (dump_file)
2839 fprintf (dump_file, "%d bit bswap implementation found at: ",
2840 (int) n->range);
2841 print_gimple_stmt (dump_file, cur_stmt, 0);
2844 gsi_insert_after (&gsi, bswap_stmt, GSI_SAME_STMT);
2845 gsi_remove (&gsi, true);
2846 return true;
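/* For illustration only (hypothetical helper, not part of the pass): the
   8-bit rotate that the 16-bit case of bswap_replace emits instead of a
   builtin call; for 16-bit values the rotate and the byte swap coincide.  */

static inline uint16_t
example_bswap16_as_rotate (uint16_t x)
{
  return (uint16_t) ((x << 8) | (x >> 8)); /* x lrotate 8 == bswap16 (x).  */
}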
2849 /* Find manual byte swap implementations as well as loads in a given
2850 endianness. Byte swaps are turned into a bswap builtin invocation
2851 while endian loads are converted to a bswap builtin invocation or a
2852 simple load according to the target endianness. */
2854 unsigned int
2855 pass_optimize_bswap::execute (function *fun)
2857 basic_block bb;
2858 bool bswap32_p, bswap64_p;
2859 bool changed = false;
2860 tree bswap32_type = NULL_TREE, bswap64_type = NULL_TREE;
2862 if (BITS_PER_UNIT != 8)
2863 return 0;
2865 bswap32_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP32)
2866 && optab_handler (bswap_optab, SImode) != CODE_FOR_nothing);
2867 bswap64_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP64)
2868 && (optab_handler (bswap_optab, DImode) != CODE_FOR_nothing
2869 || (bswap32_p && word_mode == SImode)));
2871 /* Determine the argument type of the builtins. The code later on
2872 assumes that the return and argument type are the same. */
2873 if (bswap32_p)
2875 tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
2876 bswap32_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
2879 if (bswap64_p)
2881 tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
2882 bswap64_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
2885 memset (&nop_stats, 0, sizeof (nop_stats));
2886 memset (&bswap_stats, 0, sizeof (bswap_stats));
2887 calculate_dominance_info (CDI_DOMINATORS);
2889 FOR_EACH_BB_FN (bb, fun)
2891 gimple_stmt_iterator gsi;
2893 /* We do a reverse scan for bswap patterns to make sure we get the
2894 widest match. As bswap pattern matching doesn't handle previously
2895 inserted smaller bswap replacements as sub-patterns, the wider
2896 variant wouldn't be detected. */
2897 for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi);)
2899 gimple *ins_stmt, *cur_stmt = gsi_stmt (gsi);
2900 tree fndecl = NULL_TREE, bswap_type = NULL_TREE, load_type;
2901 enum tree_code code;
2902 struct symbolic_number n;
2903 bool bswap;
2905 /* This gsi_prev (&gsi) is not part of the for loop because cur_stmt
2906 might be moved to a different basic block by bswap_replace and gsi
2907 must not point to it if that's the case. Doing the gsi_prev
2908 here makes sure that gsi points to the statement previous to
2909 cur_stmt while still making sure that all statements are
2910 considered in this basic block. */
2911 gsi_prev (&gsi);
2913 if (!is_gimple_assign (cur_stmt))
2914 continue;
2916 code = gimple_assign_rhs_code (cur_stmt);
2917 switch (code)
2919 case LROTATE_EXPR:
2920 case RROTATE_EXPR:
2921 if (!tree_fits_uhwi_p (gimple_assign_rhs2 (cur_stmt))
2922 || tree_to_uhwi (gimple_assign_rhs2 (cur_stmt))
2923 % BITS_PER_UNIT)
2924 continue;
2925 /* Fall through. */
2926 case BIT_IOR_EXPR:
2927 break;
2928 default:
2929 continue;
2932 ins_stmt = find_bswap_or_nop (cur_stmt, &n, &bswap);
2934 if (!ins_stmt)
2935 continue;
2937 switch (n.range)
2939 case 16:
2940 /* Already in canonical form, nothing to do. */
2941 if (code == LROTATE_EXPR || code == RROTATE_EXPR)
2942 continue;
2943 load_type = bswap_type = uint16_type_node;
2944 break;
2945 case 32:
2946 load_type = uint32_type_node;
2947 if (bswap32_p)
2949 fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
2950 bswap_type = bswap32_type;
2952 break;
2953 case 64:
2954 load_type = uint64_type_node;
2955 if (bswap64_p)
2957 fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
2958 bswap_type = bswap64_type;
2960 break;
2961 default:
2962 continue;
2965 if (bswap && !fndecl && n.range != 16)
2966 continue;
2968 if (bswap_replace (cur_stmt, ins_stmt, fndecl, bswap_type, load_type,
2969 &n, bswap))
2970 changed = true;
2974 statistics_counter_event (fun, "16-bit nop implementations found",
2975 nop_stats.found_16bit);
2976 statistics_counter_event (fun, "32-bit nop implementations found",
2977 nop_stats.found_32bit);
2978 statistics_counter_event (fun, "64-bit nop implementations found",
2979 nop_stats.found_64bit);
2980 statistics_counter_event (fun, "16-bit bswap implementations found",
2981 bswap_stats.found_16bit);
2982 statistics_counter_event (fun, "32-bit bswap implementations found",
2983 bswap_stats.found_32bit);
2984 statistics_counter_event (fun, "64-bit bswap implementations found",
2985 bswap_stats.found_64bit);
2987 return (changed ? TODO_update_ssa : 0);
2990 } // anon namespace
2992 gimple_opt_pass *
2993 make_pass_optimize_bswap (gcc::context *ctxt)
2995 return new pass_optimize_bswap (ctxt);
2998 /* Return true if stmt is a type conversion operation that can be stripped
2999 when used in a widening multiply operation. */
3000 static bool
3001 widening_mult_conversion_strippable_p (tree result_type, gimple *stmt)
3003 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
3005 if (TREE_CODE (result_type) == INTEGER_TYPE)
3007 tree op_type;
3008 tree inner_op_type;
3010 if (!CONVERT_EXPR_CODE_P (rhs_code))
3011 return false;
3013 op_type = TREE_TYPE (gimple_assign_lhs (stmt));
3015 /* If the type of OP has the same precision as the result, then
3016 we can strip this conversion. The multiply operation will be
3017 selected to create the correct extension as a by-product. */
3018 if (TYPE_PRECISION (result_type) == TYPE_PRECISION (op_type))
3019 return true;
3021 /* We can also strip a conversion if it preserves the signedness of
3022 the operation and doesn't narrow the range. */
3023 inner_op_type = TREE_TYPE (gimple_assign_rhs1 (stmt));
3025 /* If the inner-most type is unsigned, then we can strip any
3026 intermediate widening operation. If it's signed, then the
3027 intermediate widening operation must also be signed. */
3028 if ((TYPE_UNSIGNED (inner_op_type)
3029 || TYPE_UNSIGNED (op_type) == TYPE_UNSIGNED (inner_op_type))
3030 && TYPE_PRECISION (op_type) > TYPE_PRECISION (inner_op_type))
3031 return true;
3033 return false;
3036 return rhs_code == FIXED_CONVERT_EXPR;
3039 /* Return true if RHS is a suitable operand for a widening multiplication,
3040 assuming a target type of TYPE.
3041 There are two cases:
3043 - RHS makes some value at least twice as wide. Store that value
3044 in *NEW_RHS_OUT if so, and store its type in *TYPE_OUT.
3046 - RHS is an integer constant. Store that value in *NEW_RHS_OUT if so,
3047 but leave *TYPE_OUT untouched. */
3049 static bool
3050 is_widening_mult_rhs_p (tree type, tree rhs, tree *type_out,
3051 tree *new_rhs_out)
3053 gimple *stmt;
3054 tree type1, rhs1;
3056 if (TREE_CODE (rhs) == SSA_NAME)
3058 stmt = SSA_NAME_DEF_STMT (rhs);
3059 if (is_gimple_assign (stmt))
3061 if (! widening_mult_conversion_strippable_p (type, stmt))
3062 rhs1 = rhs;
3063 else
3065 rhs1 = gimple_assign_rhs1 (stmt);
3067 if (TREE_CODE (rhs1) == INTEGER_CST)
3069 *new_rhs_out = rhs1;
3070 *type_out = NULL;
3071 return true;
3075 else
3076 rhs1 = rhs;
3078 type1 = TREE_TYPE (rhs1);
3080 if (TREE_CODE (type1) != TREE_CODE (type)
3081 || TYPE_PRECISION (type1) * 2 > TYPE_PRECISION (type))
3082 return false;
3084 *new_rhs_out = rhs1;
3085 *type_out = type1;
3086 return true;
3089 if (TREE_CODE (rhs) == INTEGER_CST)
3091 *new_rhs_out = rhs;
3092 *type_out = NULL;
3093 return true;
3096 return false;
3099 /* Return true if STMT performs a widening multiplication, assuming the
3100 output type is TYPE. If so, store the unwidened types of the operands
3101 in *TYPE1_OUT and *TYPE2_OUT respectively. Also fill *RHS1_OUT and
3102 *RHS2_OUT such that converting those operands to types *TYPE1_OUT
3103 and *TYPE2_OUT would give the operands of the multiplication. */
3105 static bool
3106 is_widening_mult_p (gimple *stmt,
3107 tree *type1_out, tree *rhs1_out,
3108 tree *type2_out, tree *rhs2_out)
3110 tree type = TREE_TYPE (gimple_assign_lhs (stmt));
3112 if (TREE_CODE (type) != INTEGER_TYPE
3113 && TREE_CODE (type) != FIXED_POINT_TYPE)
3114 return false;
3116 if (!is_widening_mult_rhs_p (type, gimple_assign_rhs1 (stmt), type1_out,
3117 rhs1_out))
3118 return false;
3120 if (!is_widening_mult_rhs_p (type, gimple_assign_rhs2 (stmt), type2_out,
3121 rhs2_out))
3122 return false;
3124 if (*type1_out == NULL)
3126 if (*type2_out == NULL || !int_fits_type_p (*rhs1_out, *type2_out))
3127 return false;
3128 *type1_out = *type2_out;
3131 if (*type2_out == NULL)
3133 if (!int_fits_type_p (*rhs2_out, *type1_out))
3134 return false;
3135 *type2_out = *type1_out;
3138 /* Ensure that the larger of the two operands comes first. */
3139 if (TYPE_PRECISION (*type1_out) < TYPE_PRECISION (*type2_out))
3141 std::swap (*type1_out, *type2_out);
3142 std::swap (*rhs1_out, *rhs2_out);
3145 return true;
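/* For illustration only (not part of the pass): a multiply whose operands
   are at most half as wide as its result, the shape is_widening_mult_p
   accepts (here both unwidened operand types are int, assuming a 64-bit
   long long and a 32-bit int).  */

static inline long long
example_widening_mult (int a, int b)
{
  return (long long) a * b;	/* Candidate for WIDEN_MULT_EXPR.  */
}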
3148 /* Process a single gimple statement STMT, which has a MULT_EXPR as
3149 its rhs, and try to convert it into a WIDEN_MULT_EXPR. The return
3150 value is true iff we converted the statement. */
3152 static bool
3153 convert_mult_to_widen (gimple *stmt, gimple_stmt_iterator *gsi)
3155 tree lhs, rhs1, rhs2, type, type1, type2;
3156 enum insn_code handler;
3157 machine_mode to_mode, from_mode, actual_mode;
3158 optab op;
3159 int actual_precision;
3160 location_t loc = gimple_location (stmt);
3161 bool from_unsigned1, from_unsigned2;
3163 lhs = gimple_assign_lhs (stmt);
3164 type = TREE_TYPE (lhs);
3165 if (TREE_CODE (type) != INTEGER_TYPE)
3166 return false;
3168 if (!is_widening_mult_p (stmt, &type1, &rhs1, &type2, &rhs2))
3169 return false;
3171 to_mode = TYPE_MODE (type);
3172 from_mode = TYPE_MODE (type1);
3173 from_unsigned1 = TYPE_UNSIGNED (type1);
3174 from_unsigned2 = TYPE_UNSIGNED (type2);
3176 if (from_unsigned1 && from_unsigned2)
3177 op = umul_widen_optab;
3178 else if (!from_unsigned1 && !from_unsigned2)
3179 op = smul_widen_optab;
3180 else
3181 op = usmul_widen_optab;
3183 handler = find_widening_optab_handler_and_mode (op, to_mode, from_mode,
3184 0, &actual_mode);
3186 if (handler == CODE_FOR_nothing)
3188 if (op != smul_widen_optab)
3190 /* We can use a signed multiply with unsigned types as long as
3191 there is a wider mode to use, or it is the smaller of the two
3192 types that is unsigned. Note that type1 >= type2, always. */
3193 if ((TYPE_UNSIGNED (type1)
3194 && TYPE_PRECISION (type1) == GET_MODE_PRECISION (from_mode))
3195 || (TYPE_UNSIGNED (type2)
3196 && TYPE_PRECISION (type2) == GET_MODE_PRECISION (from_mode)))
3198 from_mode = GET_MODE_WIDER_MODE (from_mode);
3199 if (GET_MODE_SIZE (to_mode) <= GET_MODE_SIZE (from_mode))
3200 return false;
3203 op = smul_widen_optab;
3204 handler = find_widening_optab_handler_and_mode (op, to_mode,
3205 from_mode, 0,
3206 &actual_mode);
3208 if (handler == CODE_FOR_nothing)
3209 return false;
3211 from_unsigned1 = from_unsigned2 = false;
3213 else
3214 return false;
3217 /* Ensure that the inputs to the handler are in the correct precision
3218 for the opcode. This will be the full mode size. */
3219 actual_precision = GET_MODE_PRECISION (actual_mode);
3220 if (2 * actual_precision > TYPE_PRECISION (type))
3221 return false;
3222 if (actual_precision != TYPE_PRECISION (type1)
3223 || from_unsigned1 != TYPE_UNSIGNED (type1))
3224 rhs1 = build_and_insert_cast (gsi, loc,
3225 build_nonstandard_integer_type
3226 (actual_precision, from_unsigned1), rhs1);
3227 if (actual_precision != TYPE_PRECISION (type2)
3228 || from_unsigned2 != TYPE_UNSIGNED (type2))
3229 rhs2 = build_and_insert_cast (gsi, loc,
3230 build_nonstandard_integer_type
3231 (actual_precision, from_unsigned2), rhs2);
3233 /* Handle constants. */
3234 if (TREE_CODE (rhs1) == INTEGER_CST)
3235 rhs1 = fold_convert (type1, rhs1);
3236 if (TREE_CODE (rhs2) == INTEGER_CST)
3237 rhs2 = fold_convert (type2, rhs2);
3239 gimple_assign_set_rhs1 (stmt, rhs1);
3240 gimple_assign_set_rhs2 (stmt, rhs2);
3241 gimple_assign_set_rhs_code (stmt, WIDEN_MULT_EXPR);
3242 update_stmt (stmt);
3243 widen_mul_stats.widen_mults_inserted++;
3244 return true;
3247 /* Process a single gimple statement STMT, which is found at the
3248 iterator GSI and has either a PLUS_EXPR or a MINUS_EXPR as its
3249 rhs (given by CODE), and try to convert it into a
3250 WIDEN_MULT_PLUS_EXPR or a WIDEN_MULT_MINUS_EXPR. The return value
3251 is true iff we converted the statement. */
3253 static bool
3254 convert_plusminus_to_widen (gimple_stmt_iterator *gsi, gimple *stmt,
3255 enum tree_code code)
3257 gimple *rhs1_stmt = NULL, *rhs2_stmt = NULL;
3258 gimple *conv1_stmt = NULL, *conv2_stmt = NULL, *conv_stmt;
3259 tree type, type1, type2, optype;
3260 tree lhs, rhs1, rhs2, mult_rhs1, mult_rhs2, add_rhs;
3261 enum tree_code rhs1_code = ERROR_MARK, rhs2_code = ERROR_MARK;
3262 optab this_optab;
3263 enum tree_code wmult_code;
3264 enum insn_code handler;
3265 machine_mode to_mode, from_mode, actual_mode;
3266 location_t loc = gimple_location (stmt);
3267 int actual_precision;
3268 bool from_unsigned1, from_unsigned2;
3270 lhs = gimple_assign_lhs (stmt);
3271 type = TREE_TYPE (lhs);
3272 if (TREE_CODE (type) != INTEGER_TYPE
3273 && TREE_CODE (type) != FIXED_POINT_TYPE)
3274 return false;
3276 if (code == MINUS_EXPR)
3277 wmult_code = WIDEN_MULT_MINUS_EXPR;
3278 else
3279 wmult_code = WIDEN_MULT_PLUS_EXPR;
3281 rhs1 = gimple_assign_rhs1 (stmt);
3282 rhs2 = gimple_assign_rhs2 (stmt);
3284 if (TREE_CODE (rhs1) == SSA_NAME)
3286 rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
3287 if (is_gimple_assign (rhs1_stmt))
3288 rhs1_code = gimple_assign_rhs_code (rhs1_stmt);
3291 if (TREE_CODE (rhs2) == SSA_NAME)
3293 rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
3294 if (is_gimple_assign (rhs2_stmt))
3295 rhs2_code = gimple_assign_rhs_code (rhs2_stmt);
3298 /* Allow for one conversion statement between the multiply
3299 and the addition/subtraction statement. If there is more than
3300 one conversion then we assume they would invalidate this
3301 transformation. If that's not the case then they should have
3302 been folded before now. */
3303 if (CONVERT_EXPR_CODE_P (rhs1_code))
3305 conv1_stmt = rhs1_stmt;
3306 rhs1 = gimple_assign_rhs1 (rhs1_stmt);
3307 if (TREE_CODE (rhs1) == SSA_NAME)
3309 rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
3310 if (is_gimple_assign (rhs1_stmt))
3311 rhs1_code = gimple_assign_rhs_code (rhs1_stmt);
3313 else
3314 return false;
3316 if (CONVERT_EXPR_CODE_P (rhs2_code))
3318 conv2_stmt = rhs2_stmt;
3319 rhs2 = gimple_assign_rhs1 (rhs2_stmt);
3320 if (TREE_CODE (rhs2) == SSA_NAME)
3322 rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
3323 if (is_gimple_assign (rhs2_stmt))
3324 rhs2_code = gimple_assign_rhs_code (rhs2_stmt);
3326 else
3327 return false;
3330 /* If code is WIDEN_MULT_EXPR then it would seem unnecessary to call
3331 is_widening_mult_p, but we still need the rhs values it returns.
3333 It might also appear that it would be sufficient to use the existing
3334 operands of the widening multiply, but that would limit the choice of
3335 multiply-and-accumulate instructions.
3337 If the widened-multiplication result has more than one use, it is
3338 probably wiser not to do the conversion. */
3339 if (code == PLUS_EXPR
3340 && (rhs1_code == MULT_EXPR || rhs1_code == WIDEN_MULT_EXPR))
3342 if (!has_single_use (rhs1)
3343 || !is_widening_mult_p (rhs1_stmt, &type1, &mult_rhs1,
3344 &type2, &mult_rhs2))
3345 return false;
3346 add_rhs = rhs2;
3347 conv_stmt = conv1_stmt;
3349 else if (rhs2_code == MULT_EXPR || rhs2_code == WIDEN_MULT_EXPR)
3351 if (!has_single_use (rhs2)
3352 || !is_widening_mult_p (rhs2_stmt, &type1, &mult_rhs1,
3353 &type2, &mult_rhs2))
3354 return false;
3355 add_rhs = rhs1;
3356 conv_stmt = conv2_stmt;
3358 else
3359 return false;
3361 to_mode = TYPE_MODE (type);
3362 from_mode = TYPE_MODE (type1);
3363 from_unsigned1 = TYPE_UNSIGNED (type1);
3364 from_unsigned2 = TYPE_UNSIGNED (type2);
3365 optype = type1;
3367 /* There's no such thing as a mixed sign madd yet, so use a wider mode. */
3368 if (from_unsigned1 != from_unsigned2)
3370 if (!INTEGRAL_TYPE_P (type))
3371 return false;
3372 /* We can use a signed multiply with unsigned types as long as
3373 there is a wider mode to use, or it is the smaller of the two
3374 types that is unsigned. Note that type1 >= type2, always. */
3375 if ((from_unsigned1
3376 && TYPE_PRECISION (type1) == GET_MODE_PRECISION (from_mode))
3377 || (from_unsigned2
3378 && TYPE_PRECISION (type2) == GET_MODE_PRECISION (from_mode)))
3380 from_mode = GET_MODE_WIDER_MODE (from_mode);
3381 if (GET_MODE_SIZE (from_mode) >= GET_MODE_SIZE (to_mode))
3382 return false;
3385 from_unsigned1 = from_unsigned2 = false;
3386 optype = build_nonstandard_integer_type (GET_MODE_PRECISION (from_mode),
3387 false);
3390 /* If there was a conversion between the multiply and addition
3391 then we need to make sure it fits a multiply-and-accumulate.
3392 There should be a single mode change which does not change the
3393 value. */
3394 if (conv_stmt)
3396 /* We use the original, unmodified data types for this. */
3397 tree from_type = TREE_TYPE (gimple_assign_rhs1 (conv_stmt));
3398 tree to_type = TREE_TYPE (gimple_assign_lhs (conv_stmt));
3399 int data_size = TYPE_PRECISION (type1) + TYPE_PRECISION (type2);
3400 bool is_unsigned = TYPE_UNSIGNED (type1) && TYPE_UNSIGNED (type2);
3402 if (TYPE_PRECISION (from_type) > TYPE_PRECISION (to_type))
3404 /* Conversion is a truncate. */
3405 if (TYPE_PRECISION (to_type) < data_size)
3406 return false;
3408 else if (TYPE_PRECISION (from_type) < TYPE_PRECISION (to_type))
3410 /* Conversion is an extend. Check it's the right sort. */
3411 if (TYPE_UNSIGNED (from_type) != is_unsigned
3412 && !(is_unsigned && TYPE_PRECISION (from_type) > data_size))
3413 return false;
3415 /* else convert is a no-op for our purposes. */
3418 /* Verify that the machine can perform a widening multiply
3419 accumulate in this mode/signedness combination, otherwise
3420 this transformation is likely to pessimize code. */
3421 this_optab = optab_for_tree_code (wmult_code, optype, optab_default);
3422 handler = find_widening_optab_handler_and_mode (this_optab, to_mode,
3423 from_mode, 0, &actual_mode);
3425 if (handler == CODE_FOR_nothing)
3426 return false;
3428 /* Ensure that the inputs to the handler are in the correct precision
3429 for the opcode. This will be the full mode size. */
3430 actual_precision = GET_MODE_PRECISION (actual_mode);
3431 if (actual_precision != TYPE_PRECISION (type1)
3432 || from_unsigned1 != TYPE_UNSIGNED (type1))
3433 mult_rhs1 = build_and_insert_cast (gsi, loc,
3434 build_nonstandard_integer_type
3435 (actual_precision, from_unsigned1),
3436 mult_rhs1);
3437 if (actual_precision != TYPE_PRECISION (type2)
3438 || from_unsigned2 != TYPE_UNSIGNED (type2))
3439 mult_rhs2 = build_and_insert_cast (gsi, loc,
3440 build_nonstandard_integer_type
3441 (actual_precision, from_unsigned2),
3442 mult_rhs2);
3444 if (!useless_type_conversion_p (type, TREE_TYPE (add_rhs)))
3445 add_rhs = build_and_insert_cast (gsi, loc, type, add_rhs);
3447 /* Handle constants. */
3448 if (TREE_CODE (mult_rhs1) == INTEGER_CST)
3449 mult_rhs1 = fold_convert (type1, mult_rhs1);
3450 if (TREE_CODE (mult_rhs2) == INTEGER_CST)
3451 mult_rhs2 = fold_convert (type2, mult_rhs2);
3453 gimple_assign_set_rhs_with_ops (gsi, wmult_code, mult_rhs1, mult_rhs2,
3454 add_rhs);
3455 update_stmt (gsi_stmt (*gsi));
3456 widen_mul_stats.maccs_inserted++;
3457 return true;
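/* For illustration only (not part of the pass): a widening multiply whose
   single use is an addition, the shape convert_plusminus_to_widen rewrites
   to WIDEN_MULT_PLUS_EXPR when the target provides a widening
   multiply-and-accumulate pattern.  */

static inline long long
example_widening_madd (long long acc, int a, int b)
{
  return acc + (long long) a * b;
}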
3460 /* Combine the multiplication at MUL_STMT with operands OP1 and OP2
3461 with uses in additions and subtractions to form fused multiply-add
3462 operations. Returns true if successful and MUL_STMT should be removed. */
3464 static bool
3465 convert_mult_to_fma (gimple *mul_stmt, tree op1, tree op2)
3467 tree mul_result = gimple_get_lhs (mul_stmt);
3468 tree type = TREE_TYPE (mul_result);
3469 gimple *use_stmt, *neguse_stmt;
3470 gassign *fma_stmt;
3471 use_operand_p use_p;
3472 imm_use_iterator imm_iter;
3474 if (FLOAT_TYPE_P (type)
3475 && flag_fp_contract_mode == FP_CONTRACT_OFF)
3476 return false;
3478 /* We don't want to do bitfield reduction ops. */
3479 if (INTEGRAL_TYPE_P (type)
3480 && (TYPE_PRECISION (type)
3481 != GET_MODE_PRECISION (TYPE_MODE (type))))
3482 return false;
3484 /* If the target doesn't support it, don't generate it. We assume that
3485 if fma isn't available then fms, fnma or fnms are not either. */
3486 if (optab_handler (fma_optab, TYPE_MODE (type)) == CODE_FOR_nothing)
3487 return false;
3489 /* If the multiplication has zero uses, it is kept around probably because
3490 of -fnon-call-exceptions. Don't optimize it away in that case;
3491 that is DCE's job. */
3492 if (has_zero_uses (mul_result))
3493 return false;
3495 /* Make sure that the multiplication statement becomes dead after
3496 the transformation, i.e. that all uses are transformed to FMAs.
3497 This means we assume that an FMA operation has the same cost
3498 as an addition. */
3499 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, mul_result)
3501 enum tree_code use_code;
3502 tree result = mul_result;
3503 bool negate_p = false;
3505 use_stmt = USE_STMT (use_p);
3507 if (is_gimple_debug (use_stmt))
3508 continue;
3510 /* For now restrict this operation to single basic blocks. In theory
3511 we would want to support sinking the multiplication in
3512 m = a*b;
3513 if ()
3514 ma = m + c;
3515 else
3516 d = m;
3517 to form a fma in the then block and sink the multiplication to the
3518 else block. */
3519 if (gimple_bb (use_stmt) != gimple_bb (mul_stmt))
3520 return false;
3522 if (!is_gimple_assign (use_stmt))
3523 return false;
3525 use_code = gimple_assign_rhs_code (use_stmt);
3527 /* A negate on the multiplication leads to FNMA. */
3528 if (use_code == NEGATE_EXPR)
3530 ssa_op_iter iter;
3531 use_operand_p usep;
3533 result = gimple_assign_lhs (use_stmt);
3535 /* Make sure the negate statement becomes dead with this
3536 single transformation. */
3537 if (!single_imm_use (gimple_assign_lhs (use_stmt),
3538 &use_p, &neguse_stmt))
3539 return false;
3541 /* Make sure the multiplication isn't also used on that stmt. */
3542 FOR_EACH_PHI_OR_STMT_USE (usep, neguse_stmt, iter, SSA_OP_USE)
3543 if (USE_FROM_PTR (usep) == mul_result)
3544 return false;
3546 /* Re-validate. */
3547 use_stmt = neguse_stmt;
3548 if (gimple_bb (use_stmt) != gimple_bb (mul_stmt))
3549 return false;
3550 if (!is_gimple_assign (use_stmt))
3551 return false;
3553 use_code = gimple_assign_rhs_code (use_stmt);
3554 negate_p = true;
3557 switch (use_code)
3559 case MINUS_EXPR:
3560 if (gimple_assign_rhs2 (use_stmt) == result)
3561 negate_p = !negate_p;
3562 break;
3563 case PLUS_EXPR:
3564 break;
3565 default:
3566 /* FMA can only be formed from PLUS and MINUS. */
3567 return false;
3570 /* If the subtrahend (gimple_assign_rhs2 (use_stmt)) is computed
3571 by a MULT_EXPR that we'll visit later, we might be able to
3572 get a more profitable match with fnma.
3573 OTOH, if we don't, a negate / fma pair likely has lower latency
3574 than a mult / subtract pair. */
3575 if (use_code == MINUS_EXPR && !negate_p
3576 && gimple_assign_rhs1 (use_stmt) == result
3577 && optab_handler (fms_optab, TYPE_MODE (type)) == CODE_FOR_nothing
3578 && optab_handler (fnma_optab, TYPE_MODE (type)) != CODE_FOR_nothing)
3580 tree rhs2 = gimple_assign_rhs2 (use_stmt);
3582 if (TREE_CODE (rhs2) == SSA_NAME)
3584 gimple *stmt2 = SSA_NAME_DEF_STMT (rhs2);
3585 if (has_single_use (rhs2)
3586 && is_gimple_assign (stmt2)
3587 && gimple_assign_rhs_code (stmt2) == MULT_EXPR)
3588 return false;
3592 /* We can't handle a * b + a * b. */
3593 if (gimple_assign_rhs1 (use_stmt) == gimple_assign_rhs2 (use_stmt))
3594 return false;
3596 /* While it is possible to validate whether or not the exact form
3597 that we've recognized is available in the backend, the assumption
3598 is that the transformation is never a loss. For instance, suppose
3599 the target only has the plain FMA pattern available. Consider
3600 a*b-c -> fma(a,b,-c): we've exchanged MUL+SUB for FMA+NEG, which
3601 is still two operations. Consider -(a*b)-c -> fma(-a,b,-c): we
3602 still have 3 operations, but in the FMA form the two NEGs are
3603 independent and could be run in parallel. */
3606 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, mul_result)
3608 gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
3609 enum tree_code use_code;
3610 tree addop, mulop1 = op1, result = mul_result;
3611 bool negate_p = false;
3613 if (is_gimple_debug (use_stmt))
3614 continue;
3616 use_code = gimple_assign_rhs_code (use_stmt);
3617 if (use_code == NEGATE_EXPR)
3619 result = gimple_assign_lhs (use_stmt);
3620 single_imm_use (gimple_assign_lhs (use_stmt), &use_p, &neguse_stmt);
3621 gsi_remove (&gsi, true);
3622 release_defs (use_stmt);
3624 use_stmt = neguse_stmt;
3625 gsi = gsi_for_stmt (use_stmt);
3626 use_code = gimple_assign_rhs_code (use_stmt);
3627 negate_p = true;
3630 if (gimple_assign_rhs1 (use_stmt) == result)
3632 addop = gimple_assign_rhs2 (use_stmt);
3633 /* a * b - c -> a * b + (-c) */
3634 if (gimple_assign_rhs_code (use_stmt) == MINUS_EXPR)
3635 addop = force_gimple_operand_gsi (&gsi,
3636 build1 (NEGATE_EXPR,
3637 type, addop),
3638 true, NULL_TREE, true,
3639 GSI_SAME_STMT);
3641 else
3643 addop = gimple_assign_rhs1 (use_stmt);
3644 /* a - b * c -> (-b) * c + a */
3645 if (gimple_assign_rhs_code (use_stmt) == MINUS_EXPR)
3646 negate_p = !negate_p;
3649 if (negate_p)
3650 mulop1 = force_gimple_operand_gsi (&gsi,
3651 build1 (NEGATE_EXPR,
3652 type, mulop1),
3653 true, NULL_TREE, true,
3654 GSI_SAME_STMT);
3656 fma_stmt = gimple_build_assign (gimple_assign_lhs (use_stmt),
3657 FMA_EXPR, mulop1, op2, addop);
3658 gsi_replace (&gsi, fma_stmt, true);
3659 widen_mul_stats.fmas_inserted++;
3662 return true;
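/* For illustration only (not part of the pass): with FP contraction
   enabled, the multiplication below becomes dead once its single use, the
   addition, is turned into an FMA_EXPR by convert_mult_to_fma.  */

static inline double
example_fma_shape (double a, double b, double c)
{
  double m = a * b;	/* mul_stmt */
  return m + c;		/* use_stmt: rewritten to fma (a, b, c).  */
}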
3666 /* Helper function of match_uaddsub_overflow. Return 1
3667 if USE_STMT is an unsigned overflow check (ovf != 0) for
3668 STMT, -1 if USE_STMT is an unsigned overflow check (ovf == 0),
3669 and 0 otherwise. */
3671 static int
3672 uaddsub_overflow_check_p (gimple *stmt, gimple *use_stmt)
3674 enum tree_code ccode = ERROR_MARK;
3675 tree crhs1 = NULL_TREE, crhs2 = NULL_TREE;
3676 if (gimple_code (use_stmt) == GIMPLE_COND)
3678 ccode = gimple_cond_code (use_stmt);
3679 crhs1 = gimple_cond_lhs (use_stmt);
3680 crhs2 = gimple_cond_rhs (use_stmt);
3682 else if (is_gimple_assign (use_stmt))
3684 if (gimple_assign_rhs_class (use_stmt) == GIMPLE_BINARY_RHS)
3686 ccode = gimple_assign_rhs_code (use_stmt);
3687 crhs1 = gimple_assign_rhs1 (use_stmt);
3688 crhs2 = gimple_assign_rhs2 (use_stmt);
3690 else if (gimple_assign_rhs_code (use_stmt) == COND_EXPR)
3692 tree cond = gimple_assign_rhs1 (use_stmt);
3693 if (COMPARISON_CLASS_P (cond))
3695 ccode = TREE_CODE (cond);
3696 crhs1 = TREE_OPERAND (cond, 0);
3697 crhs2 = TREE_OPERAND (cond, 1);
3699 else
3700 return 0;
3702 else
3703 return 0;
3705 else
3706 return 0;
3708 if (TREE_CODE_CLASS (ccode) != tcc_comparison)
3709 return 0;
3711 enum tree_code code = gimple_assign_rhs_code (stmt);
3712 tree lhs = gimple_assign_lhs (stmt);
3713 tree rhs1 = gimple_assign_rhs1 (stmt);
3714 tree rhs2 = gimple_assign_rhs2 (stmt);
3716 switch (ccode)
3718 case GT_EXPR:
3719 case LE_EXPR:
3720 /* r = a - b; r > a or r <= a
3721 r = a + b; a > r or a <= r or b > r or b <= r. */
3722 if ((code == MINUS_EXPR && crhs1 == lhs && crhs2 == rhs1)
3723 || (code == PLUS_EXPR && (crhs1 == rhs1 || crhs1 == rhs2)
3724 && crhs2 == lhs))
3725 return ccode == GT_EXPR ? 1 : -1;
3726 break;
3727 case LT_EXPR:
3728 case GE_EXPR:
3729 /* r = a - b; a < r or a >= r
3730 r = a + b; r < a or r >= a or r < b or r >= b. */
3731 if ((code == MINUS_EXPR && crhs1 == rhs1 && crhs2 == lhs)
3732 || (code == PLUS_EXPR && crhs1 == lhs
3733 && (crhs2 == rhs1 || crhs2 == rhs2)))
3734 return ccode == LT_EXPR ? 1 : -1;
3735 break;
3736 default:
3737 break;
3739 return 0;
3742 /* Recognize for unsigned x
3743 x = y - z;
3744 if (x > y)
3745 where there are other uses of x and replace it with
3746 _7 = SUB_OVERFLOW (y, z);
3747 x = REALPART_EXPR <_7>;
3748 _8 = IMAGPART_EXPR <_7>;
3749 if (_8)
3750 and similarly for addition. */
3752 static bool
3753 match_uaddsub_overflow (gimple_stmt_iterator *gsi, gimple *stmt,
3754 enum tree_code code)
3756 tree lhs = gimple_assign_lhs (stmt);
3757 tree type = TREE_TYPE (lhs);
3758 use_operand_p use_p;
3759 imm_use_iterator iter;
3760 bool use_seen = false;
3761 bool ovf_use_seen = false;
3762 gimple *use_stmt;
3764 gcc_checking_assert (code == PLUS_EXPR || code == MINUS_EXPR);
3765 if (!INTEGRAL_TYPE_P (type)
3766 || !TYPE_UNSIGNED (type)
3767 || has_zero_uses (lhs)
3768 || has_single_use (lhs)
3769 || optab_handler (code == PLUS_EXPR ? uaddv4_optab : usubv4_optab,
3770 TYPE_MODE (type)) == CODE_FOR_nothing)
3771 return false;
3773 FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
3775 use_stmt = USE_STMT (use_p);
3776 if (is_gimple_debug (use_stmt))
3777 continue;
3779 if (uaddsub_overflow_check_p (stmt, use_stmt))
3780 ovf_use_seen = true;
3781 else
3782 use_seen = true;
3783 if (ovf_use_seen && use_seen)
3784 break;
3787 if (!ovf_use_seen || !use_seen)
3788 return false;
3790 tree ctype = build_complex_type (type);
3791 tree rhs1 = gimple_assign_rhs1 (stmt);
3792 tree rhs2 = gimple_assign_rhs2 (stmt);
3793 gcall *g = gimple_build_call_internal (code == PLUS_EXPR
3794 ? IFN_ADD_OVERFLOW : IFN_SUB_OVERFLOW,
3795 2, rhs1, rhs2);
3796 tree ctmp = make_ssa_name (ctype);
3797 gimple_call_set_lhs (g, ctmp);
3798 gsi_insert_before (gsi, g, GSI_SAME_STMT);
3799 gassign *g2 = gimple_build_assign (lhs, REALPART_EXPR,
3800 build1 (REALPART_EXPR, type, ctmp));
3801 gsi_replace (gsi, g2, true);
3802 tree ovf = make_ssa_name (type);
3803 g2 = gimple_build_assign (ovf, IMAGPART_EXPR,
3804 build1 (IMAGPART_EXPR, type, ctmp));
3805 gsi_insert_after (gsi, g2, GSI_NEW_STMT);
3807 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
3809 if (is_gimple_debug (use_stmt))
3810 continue;
3812 int ovf_use = uaddsub_overflow_check_p (stmt, use_stmt);
3813 if (ovf_use == 0)
3814 continue;
3815 if (gimple_code (use_stmt) == GIMPLE_COND)
3817 gcond *cond_stmt = as_a <gcond *> (use_stmt);
3818 gimple_cond_set_lhs (cond_stmt, ovf);
3819 gimple_cond_set_rhs (cond_stmt, build_int_cst (type, 0));
3820 gimple_cond_set_code (cond_stmt, ovf_use == 1 ? NE_EXPR : EQ_EXPR);
3822 else
3824 gcc_checking_assert (is_gimple_assign (use_stmt));
3825 if (gimple_assign_rhs_class (use_stmt) == GIMPLE_BINARY_RHS)
3827 gimple_assign_set_rhs1 (use_stmt, ovf);
3828 gimple_assign_set_rhs2 (use_stmt, build_int_cst (type, 0));
3829 gimple_assign_set_rhs_code (use_stmt,
3830 ovf_use == 1 ? NE_EXPR : EQ_EXPR);
3832 else
3834 gcc_checking_assert (gimple_assign_rhs_code (use_stmt)
3835 == COND_EXPR);
3836 tree cond = build2 (ovf_use == 1 ? NE_EXPR : EQ_EXPR,
3837 boolean_type_node, ovf,
3838 build_int_cst (type, 0));
3839 gimple_assign_set_rhs1 (use_stmt, cond);
3842 update_stmt (use_stmt);
3844 return true;
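/* A minimal end-to-end sketch (assumed user code, illustrative only):

     unsigned sub (unsigned y, unsigned z, int *ovf)
     {
       unsigned x = y - z;   // x has uses besides the check
       *ovf = x > y;         // rewritten to test IMAGPART_EXPR <_7>
       return x;
     }

   After the transform the function behaves as if it had been written with
   __builtin_sub_overflow (y, z, &x), computing the difference and the
   overflow flag with a single IFN_SUB_OVERFLOW.  */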
3847 /* Return true if the target has support for divmod. */
3849 static bool
3850 target_supports_divmod_p (optab divmod_optab, optab div_optab, machine_mode mode)
3852 /* If the target supports a hardware divmod insn, use it for divmod. */
3853 if (optab_handler (divmod_optab, mode) != CODE_FOR_nothing)
3854 return true;
3856 /* Check if libfunc for divmod is available. */
3857 rtx libfunc = optab_libfunc (divmod_optab, mode);
3858 if (libfunc != NULL_RTX)
3860 /* If an optab_handler exists for div_optab, perhaps in a wider mode,
3861 we don't want to use the libfunc even if it exists for the given mode. */
3862 for (machine_mode div_mode = mode;
3863 div_mode != VOIDmode;
3864 div_mode = GET_MODE_WIDER_MODE (div_mode))
3865 if (optab_handler (div_optab, div_mode) != CODE_FOR_nothing)
3866 return false;
3868 return targetm.expand_divmod_libfunc != NULL;
3871 return false;
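/* Example (assumption: an ARM-EABI-style runtime, shown only for
   illustration): a target lacking a hardware division insn may expose a
   combined libfunc, e.g.

     extern unsigned long long __aeabi_uidivmod (unsigned, unsigned);

   The wider-mode loop above then finds no div optab handler, so the
   libfunc path is taken, provided the target also defines
   targetm.expand_divmod_libfunc.  */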
3874 /* Check whether STMT is a candidate for the divmod transform. */
3876 static bool
3877 divmod_candidate_p (gassign *stmt)
3879 tree type = TREE_TYPE (gimple_assign_lhs (stmt));
3880 machine_mode mode = TYPE_MODE (type);
3881 optab divmod_optab, div_optab;
3883 if (TYPE_UNSIGNED (type))
3885 divmod_optab = udivmod_optab;
3886 div_optab = udiv_optab;
3888 else
3890 divmod_optab = sdivmod_optab;
3891 div_optab = sdiv_optab;
3894 tree op1 = gimple_assign_rhs1 (stmt);
3895 tree op2 = gimple_assign_rhs2 (stmt);
3897 /* Disable the transform if either is a constant, since division-by-constant
3898 may have specialized expansion. */
3899 if (CONSTANT_CLASS_P (op1) || CONSTANT_CLASS_P (op2))
3900 return false;
3902 /* Exclude the case where TYPE_OVERFLOW_TRAPS (type) as that should
3903 expand using the [su]divv optabs. */
3904 if (TYPE_OVERFLOW_TRAPS (type))
3905 return false;
3907 if (!target_supports_divmod_p (divmod_optab, div_optab, mode))
3908 return false;
3910 return true;
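/* For example (illustrative user code): with variable operands

     q = x / y;
     r = x % y;

   both statements are candidates, whereas

     q = x / 16;
     r = x % 16;

   is rejected, because division by a constant is expanded to cheaper
   shift/multiply sequences rather than a DIVMOD call.  */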
3913 /* This function looks for:
3914 t1 = a TRUNC_DIV_EXPR b;
3915 t2 = a TRUNC_MOD_EXPR b;
3916 and transforms it to the following sequence:
3917 complex_tmp = DIVMOD (a, b);
3918 t1 = REALPART_EXPR(complex_tmp);
3919 t2 = IMAGPART_EXPR(complex_tmp);
3920 For conditions enabling the transform see divmod_candidate_p().
3922 The pass has three parts:
3923 1) Find top_stmt, a trunc_div or trunc_mod stmt that dominates all
3924 other trunc_div_expr and trunc_mod_expr stmts with the same operands.
3925 2) Add top_stmt and all trunc_div and trunc_mod stmts dominated by top_stmt
3926 to the stmts vector.
3927 3) Insert the DIVMOD call just before top_stmt and update entries in the
3928 stmts vector to use the return value of DIVMOD (REALPART_EXPR for div,
3929 IMAGPART_EXPR for mod). */
3931 static bool
3932 convert_to_divmod (gassign *stmt)
3934 if (stmt_can_throw_internal (stmt)
3935 || !divmod_candidate_p (stmt))
3936 return false;
3938 tree op1 = gimple_assign_rhs1 (stmt);
3939 tree op2 = gimple_assign_rhs2 (stmt);
3941 imm_use_iterator use_iter;
3942 gimple *use_stmt;
3943 auto_vec<gimple *> stmts;
3945 gimple *top_stmt = stmt;
3946 basic_block top_bb = gimple_bb (stmt);
3948 /* Part 1: Try to set top_stmt to the "topmost" stmt that dominates
3949 at least stmt and possibly other trunc_div/trunc_mod stmts
3950 having the same operands as stmt. */
3952 FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, op1)
3954 if (is_gimple_assign (use_stmt)
3955 && (gimple_assign_rhs_code (use_stmt) == TRUNC_DIV_EXPR
3956 || gimple_assign_rhs_code (use_stmt) == TRUNC_MOD_EXPR)
3957 && operand_equal_p (op1, gimple_assign_rhs1 (use_stmt), 0)
3958 && operand_equal_p (op2, gimple_assign_rhs2 (use_stmt), 0))
3960 if (stmt_can_throw_internal (use_stmt))
3961 continue;
3963 basic_block bb = gimple_bb (use_stmt);
3965 if (bb == top_bb)
3967 if (gimple_uid (use_stmt) < gimple_uid (top_stmt))
3968 top_stmt = use_stmt;
3970 else if (dominated_by_p (CDI_DOMINATORS, top_bb, bb))
3972 top_bb = bb;
3973 top_stmt = use_stmt;
3978 tree top_op1 = gimple_assign_rhs1 (top_stmt);
3979 tree top_op2 = gimple_assign_rhs2 (top_stmt);
3981 stmts.safe_push (top_stmt);
3982 bool div_seen = (gimple_assign_rhs_code (top_stmt) == TRUNC_DIV_EXPR);
3984 /* Part 2: Add all trunc_div/trunc_mod statements dominated by top_bb
3985 to the stmts vector. Since gimple_bb (top_stmt) dominates gimple_bb (stmt),
3986 the 2nd loop adds stmt itself unless stmt is already top_stmt, so the
3987 stmts vector always ends up containing at least one trunc_mod_expr stmt. */
3989 FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, top_op1)
3991 if (is_gimple_assign (use_stmt)
3992 && (gimple_assign_rhs_code (use_stmt) == TRUNC_DIV_EXPR
3993 || gimple_assign_rhs_code (use_stmt) == TRUNC_MOD_EXPR)
3994 && operand_equal_p (top_op1, gimple_assign_rhs1 (use_stmt), 0)
3995 && operand_equal_p (top_op2, gimple_assign_rhs2 (use_stmt), 0))
3997 if (use_stmt == top_stmt
3998 || stmt_can_throw_internal (use_stmt)
3999 || !dominated_by_p (CDI_DOMINATORS, gimple_bb (use_stmt), top_bb))
4000 continue;
4002 stmts.safe_push (use_stmt);
4003 if (gimple_assign_rhs_code (use_stmt) == TRUNC_DIV_EXPR)
4004 div_seen = true;
4008 if (!div_seen)
4009 return false;
4011 /* Part 3: Create a call to the internal function DIVMOD:
4012 divmod_tmp = DIVMOD (op1, op2). */
4014 gcall *call_stmt = gimple_build_call_internal (IFN_DIVMOD, 2, op1, op2);
4015 tree res = make_temp_ssa_name (build_complex_type (TREE_TYPE (op1)),
4016 call_stmt, "divmod_tmp");
4017 gimple_call_set_lhs (call_stmt, res);
4019 /* Insert the call before top_stmt. */
4020 gimple_stmt_iterator top_stmt_gsi = gsi_for_stmt (top_stmt);
4021 gsi_insert_before (&top_stmt_gsi, call_stmt, GSI_SAME_STMT);
4023 widen_mul_stats.divmod_calls_inserted++;
4025 /* Update all statements in stmts vector:
4026 lhs = op1 TRUNC_DIV_EXPR op2 -> lhs = REALPART_EXPR<divmod_tmp>
4027 lhs = op1 TRUNC_MOD_EXPR op2 -> lhs = IMAGPART_EXPR<divmod_tmp>. */
4029 for (unsigned i = 0; stmts.iterate (i, &use_stmt); ++i)
4031 tree new_rhs;
4033 switch (gimple_assign_rhs_code (use_stmt))
4035 case TRUNC_DIV_EXPR:
4036 new_rhs = fold_build1 (REALPART_EXPR, TREE_TYPE (op1), res);
4037 break;
4039 case TRUNC_MOD_EXPR:
4040 new_rhs = fold_build1 (IMAGPART_EXPR, TREE_TYPE (op1), res);
4041 break;
4043 default:
4044 gcc_unreachable ();
4047 gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
4048 gimple_assign_set_rhs_from_tree (&gsi, new_rhs);
4049 update_stmt (use_stmt);
4052 return true;
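/* End-to-end sketch (assumed user code, illustrative only):

     void f (unsigned a, unsigned b, unsigned *q, unsigned *r)
     {
       *q = a / b;
       *r = a % b;
     }

   After the transform, a single

     divmod_tmp = DIVMOD (a, b);

   feeds *q from REALPART_EXPR <divmod_tmp> and *r from
   IMAGPART_EXPR <divmod_tmp>, so only one division (or one divmod
   libcall) is issued.  */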
4055 /* Find integer multiplications where the operands are extended from
4056 smaller types, and replace the MULT_EXPR with a WIDEN_MULT_EXPR
4057 where appropriate. */
4059 namespace {
4061 const pass_data pass_data_optimize_widening_mul =
4063 GIMPLE_PASS, /* type */
4064 "widening_mul", /* name */
4065 OPTGROUP_NONE, /* optinfo_flags */
4066 TV_NONE, /* tv_id */
4067 PROP_ssa, /* properties_required */
4068 0, /* properties_provided */
4069 0, /* properties_destroyed */
4070 0, /* todo_flags_start */
4071 TODO_update_ssa, /* todo_flags_finish */
4074 class pass_optimize_widening_mul : public gimple_opt_pass
4076 public:
4077 pass_optimize_widening_mul (gcc::context *ctxt)
4078 : gimple_opt_pass (pass_data_optimize_widening_mul, ctxt)
4081 /* opt_pass methods: */
4082 virtual bool gate (function *)
4084 return flag_expensive_optimizations && optimize;
4087 virtual unsigned int execute (function *);
4089 }; // class pass_optimize_widening_mul
4091 unsigned int
4092 pass_optimize_widening_mul::execute (function *fun)
4094 basic_block bb;
4095 bool cfg_changed = false;
4097 memset (&widen_mul_stats, 0, sizeof (widen_mul_stats));
4098 calculate_dominance_info (CDI_DOMINATORS);
4099 renumber_gimple_stmt_uids ();
4101 FOR_EACH_BB_FN (bb, fun)
4103 gimple_stmt_iterator gsi;
4105 for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi);)
4107 gimple *stmt = gsi_stmt (gsi);
4108 enum tree_code code;
4110 if (is_gimple_assign (stmt))
4112 code = gimple_assign_rhs_code (stmt);
4113 switch (code)
4115 case MULT_EXPR:
4116 if (!convert_mult_to_widen (stmt, &gsi)
4117 && convert_mult_to_fma (stmt,
4118 gimple_assign_rhs1 (stmt),
4119 gimple_assign_rhs2 (stmt)))
4121 gsi_remove (&gsi, true);
4122 release_defs (stmt);
4123 continue;
4125 break;
4127 case PLUS_EXPR:
4128 case MINUS_EXPR:
4129 if (!convert_plusminus_to_widen (&gsi, stmt, code))
4130 match_uaddsub_overflow (&gsi, stmt, code);
4131 break;
4133 case TRUNC_MOD_EXPR:
4134 convert_to_divmod (as_a<gassign *> (stmt));
4135 break;
4137 default:;
4140 else if (is_gimple_call (stmt)
4141 && gimple_call_lhs (stmt))
4143 tree fndecl = gimple_call_fndecl (stmt);
4144 if (fndecl
4145 && gimple_call_builtin_p (stmt, BUILT_IN_NORMAL))
4147 switch (DECL_FUNCTION_CODE (fndecl))
4149 case BUILT_IN_POWF:
4150 case BUILT_IN_POW:
4151 case BUILT_IN_POWL:
4152 if (TREE_CODE (gimple_call_arg (stmt, 1)) == REAL_CST
4153 && real_equal
4154 (&TREE_REAL_CST (gimple_call_arg (stmt, 1)),
4155 &dconst2)
4156 && convert_mult_to_fma (stmt,
4157 gimple_call_arg (stmt, 0),
4158 gimple_call_arg (stmt, 0)))
4160 unlink_stmt_vdef (stmt);
4161 if (gsi_remove (&gsi, true)
4162 && gimple_purge_dead_eh_edges (bb))
4163 cfg_changed = true;
4164 release_defs (stmt);
4165 continue;
4167 break;
4169 default:;
4173 gsi_next (&gsi);
4177 statistics_counter_event (fun, "widening multiplications inserted",
4178 widen_mul_stats.widen_mults_inserted);
4179 statistics_counter_event (fun, "widening maccs inserted",
4180 widen_mul_stats.maccs_inserted);
4181 statistics_counter_event (fun, "fused multiply-adds inserted",
4182 widen_mul_stats.fmas_inserted);
4183 statistics_counter_event (fun, "divmod calls inserted",
4184 widen_mul_stats.divmod_calls_inserted);
4186 return cfg_changed ? TODO_cleanup_cfg : 0;
4189 } // anon namespace
4191 gimple_opt_pass *
4192 make_pass_optimize_widening_mul (gcc::context *ctxt)
4194 return new pass_optimize_widening_mul (ctxt);