/* gcc/tree-ssa-math-opts.c  */
/* Global, SSA-based optimizations using mathematical identities.
   Copyright (C) 2005-2016 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* Currently, the only mini-pass in this file tries to CSE reciprocal
   operations.  These are common in sequences such as this one:

	modulus = sqrt(x*x + y*y + z*z);
	x = x / modulus;
	y = y / modulus;
	z = z / modulus;

   that can be optimized to

	modulus = sqrt(x*x + y*y + z*z);
	rmodulus = 1.0 / modulus;
	x = x * rmodulus;
	y = y * rmodulus;
	z = z * rmodulus;

   We do this for loop invariant divisors, and with this pass whenever
   we notice that a division has the same divisor multiple times.

   Of course, like in PRE, we don't insert a division if a dominator
   already has one.  However, this cannot be done as an extension of
   PRE for several reasons.

   First of all, with some experiments it was found out that the
   transformation is not always useful if there are only two divisions
   by the same divisor.  This is probably because modern processors
   can pipeline the divisions; on older, in-order processors it should
   still be effective to optimize two divisions by the same number.
   We make this a param, and it shall be called N in the remainder of
   this comment.

   Second, if trapping math is active, we have less freedom on where
   to insert divisions: we can only do so in basic blocks that already
   contain one.  (If divisions don't trap, instead, we can insert
   divisions elsewhere, which will be in blocks that are common dominators
   of those that have the division).

   We really don't want to compute the reciprocal unless a division will
   be found.  To do this, we won't insert the division in a basic block
   that has less than N divisions *post-dominating* it.

   The algorithm constructs a subset of the dominator tree, holding the
   blocks containing the divisions and the common dominators to them,
   and walks it twice.  The first walk is in post-order, and it annotates
   each block with the number of divisions that post-dominate it: this
   gives information on where divisions can be inserted profitably.
   The second walk is in pre-order, and it inserts divisions as explained
   above, and replaces divisions by multiplications.

   In the best case, the cost of the pass is O(n_statements).  In the
   worst-case, the cost is due to creating the dominator tree subset,
   with a cost of O(n_basic_blocks ^ 2); however this can only happen
   for n_statements / n_basic_blocks statements.  So, the amortized cost
   of creating the dominator tree subset is O(n_basic_blocks) and the
   worst-case cost of the pass is O(n_statements * n_basic_blocks).

   More practically, the cost will be small because there are few
   divisions, and they tend to be in the same basic block, so insert_bb
   is called very few times.

   If we did this using domwalk.c, an efficient implementation would have
   to work on all the variables in a single pass, because we could not
   work on just a subset of the dominator tree, as we do now, and the
   cost would also be something like O(n_statements * n_basic_blocks).
   The data structures would be more complex in order to work on all the
   variables in a single pass.  */
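
/* Worked example (an illustration added here for exposition, not part of
   the original description): assume the threshold N is 2 and trapping
   math is off.  In

	t = a / d;
	if (cond)
	  ...
	u = b / d;	<- join block, reached on every path

   the join block post-dominates the block holding the first division,
   so that block gets annotated with two post-dominating divisions.
   Since it also contains a division, 1.0/d is inserted right before the
   first division and both divisions become multiplications by it.  */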
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "predict.h"
#include "alloc-pool.h"
#include "tree-pass.h"
#include "ssa.h"
#include "optabs-tree.h"
#include "gimple-pretty-print.h"
#include "alias.h"
#include "fold-const.h"
#include "gimple-fold.h"
#include "gimple-iterator.h"
#include "gimplify.h"
#include "gimplify-me.h"
#include "stor-layout.h"
#include "tree-cfg.h"
#include "tree-dfa.h"
#include "tree-ssa.h"
#include "builtins.h"
#include "params.h"
#include "internal-fn.h"
#include "case-cfn-macros.h"
#include "optabs-libfuncs.h"
#include "tree-eh.h"
#include "targhooks.h"
/* This structure represents one basic block that either computes a
   division, or is a common dominator for basic blocks that compute a
   division.  */
struct occurrence {
  /* The basic block represented by this structure.  */
  basic_block bb;

  /* If non-NULL, the SSA_NAME holding the definition for a reciprocal
     inserted in BB.  */
  tree recip_def;

  /* If non-NULL, the GIMPLE_ASSIGN for a reciprocal computation that
     was inserted in BB.  */
  gimple *recip_def_stmt;

  /* Pointer to a list of "struct occurrence"s for blocks dominated
     by BB.  */
  struct occurrence *children;

  /* Pointer to the next "struct occurrence"s in the list of blocks
     sharing a common dominator.  */
  struct occurrence *next;

  /* The number of divisions that are in BB before compute_merit.  The
     number of divisions that are in BB or post-dominate it after
     compute_merit.  */
  int num_divisions;

  /* True if the basic block has a division, false if it is a common
     dominator for basic blocks that do.  If it is false and trapping
     math is active, BB is not a candidate for inserting a reciprocal.  */
  bool bb_has_division;
};
static struct
{
  /* Number of 1.0/X ops inserted.  */
  int rdivs_inserted;

  /* Number of 1.0/FUNC ops inserted.  */
  int rfuncs_inserted;
} reciprocal_stats;

static struct
{
  /* Number of cexpi calls inserted.  */
  int inserted;
} sincos_stats;

static struct
{
  /* Number of hand-written 16-bit nop / bswaps found.  */
  int found_16bit;

  /* Number of hand-written 32-bit nop / bswaps found.  */
  int found_32bit;

  /* Number of hand-written 64-bit nop / bswaps found.  */
  int found_64bit;
} nop_stats, bswap_stats;

static struct
{
  /* Number of widening multiplication ops inserted.  */
  int widen_mults_inserted;

  /* Number of integer multiply-and-accumulate ops inserted.  */
  int maccs_inserted;

  /* Number of fp fused multiply-add ops inserted.  */
  int fmas_inserted;

  /* Number of divmod calls inserted.  */
  int divmod_calls_inserted;
} widen_mul_stats;
/* The instance of "struct occurrence" representing the highest
   interesting block in the dominator tree.  */
static struct occurrence *occ_head;

/* Allocation pool for getting instances of "struct occurrence".  */
static object_allocator<occurrence> *occ_pool;
/* Allocate and return a new struct occurrence for basic block BB, and
   whose children list is headed by CHILDREN.  */
static struct occurrence *
occ_new (basic_block bb, struct occurrence *children)
{
  struct occurrence *occ;

  bb->aux = occ = occ_pool->allocate ();
  memset (occ, 0, sizeof (struct occurrence));

  occ->bb = bb;
  occ->children = children;
  return occ;
}
/* Insert NEW_OCC into our subset of the dominator tree.  P_HEAD points to a
   list of "struct occurrence"s, one per basic block, having IDOM as
   their common dominator.

   We try to insert NEW_OCC as deep as possible in the tree, and we also
   insert any other block that is a common dominator for BB and one
   block already in the tree.  */

static void
insert_bb (struct occurrence *new_occ, basic_block idom,
	   struct occurrence **p_head)
{
  struct occurrence *occ, **p_occ;

  for (p_occ = p_head; (occ = *p_occ) != NULL; )
    {
      basic_block bb = new_occ->bb, occ_bb = occ->bb;
      basic_block dom = nearest_common_dominator (CDI_DOMINATORS, occ_bb, bb);
      if (dom == bb)
	{
	  /* BB dominates OCC_BB.  OCC becomes NEW_OCC's child: remove OCC
	     from its list.  */
	  *p_occ = occ->next;
	  occ->next = new_occ->children;
	  new_occ->children = occ;

	  /* Try the next block (it may as well be dominated by BB).  */
	}

      else if (dom == occ_bb)
	{
	  /* OCC_BB dominates BB.  Tail recurse to look deeper.  */
	  insert_bb (new_occ, dom, &occ->children);
	  return;
	}

      else if (dom != idom)
	{
	  gcc_assert (!dom->aux);

	  /* There is a dominator between IDOM and BB, add it and make
	     two children out of NEW_OCC and OCC.  First, remove OCC from
	     its list.  */
	  *p_occ = occ->next;
	  new_occ->next = occ;
	  occ->next = NULL;

	  /* None of the previous blocks has DOM as a dominator: if we tail
	     recursed, we would reexamine them uselessly.  Just switch BB with
	     DOM, and go on looking for blocks dominated by DOM.  */
	  new_occ = occ_new (dom, new_occ);
	}

      else
	{
	  /* Nothing special, go on with the next element.  */
	  p_occ = &occ->next;
	}
    }

  /* No place was found as a child of IDOM.  Make BB a sibling of IDOM.  */
  new_occ->next = *p_head;
  *p_head = new_occ;
}
/* Register that we found a division in BB.  */

static inline void
register_division_in (basic_block bb)
{
  struct occurrence *occ;

  occ = (struct occurrence *) bb->aux;
  if (!occ)
    {
      occ = occ_new (bb, NULL);
      insert_bb (occ, ENTRY_BLOCK_PTR_FOR_FN (cfun), &occ_head);
    }

  occ->bb_has_division = true;
  occ->num_divisions++;
}
/* Compute the number of divisions that postdominate each block in OCC and
   its children.  */

static void
compute_merit (struct occurrence *occ)
{
  struct occurrence *occ_child;
  basic_block dom = occ->bb;

  for (occ_child = occ->children; occ_child; occ_child = occ_child->next)
    {
      basic_block bb;
      if (occ_child->children)
	compute_merit (occ_child);

      if (flag_exceptions)
	bb = single_noncomplex_succ (dom);
      else
	bb = dom;

      if (dominated_by_p (CDI_POST_DOMINATORS, bb, occ_child->bb))
	occ->num_divisions += occ_child->num_divisions;
    }
}
/* Return whether USE_STMT is a floating-point division by DEF.  */
static inline bool
is_division_by (gimple *use_stmt, tree def)
{
  return is_gimple_assign (use_stmt)
	 && gimple_assign_rhs_code (use_stmt) == RDIV_EXPR
	 && gimple_assign_rhs2 (use_stmt) == def
	 /* Do not recognize x / x as valid division, as we are getting
	    confused later by replacing all immediate uses x in such
	    a stmt.  */
	 && gimple_assign_rhs1 (use_stmt) != def;
}
/* Walk the subset of the dominator tree rooted at OCC, setting the
   RECIP_DEF field to a definition of 1.0 / DEF that can be used in
   the given basic block.  The field may be left NULL, of course,
   if it is not possible or profitable to do the optimization.

   DEF_GSI is an iterator pointing at the statement defining DEF.
   If RECIP_DEF is set, a dominator already has a computation that can
   be used.  */

static void
insert_reciprocals (gimple_stmt_iterator *def_gsi, struct occurrence *occ,
		    tree def, tree recip_def, int threshold)
{
  tree type;
  gassign *new_stmt;
  gimple_stmt_iterator gsi;
  struct occurrence *occ_child;

  if (!recip_def
      && (occ->bb_has_division || !flag_trapping_math)
      && occ->num_divisions >= threshold)
    {
      /* Make a variable with the replacement and substitute it.  */
      type = TREE_TYPE (def);
      recip_def = create_tmp_reg (type, "reciptmp");
      new_stmt = gimple_build_assign (recip_def, RDIV_EXPR,
				      build_one_cst (type), def);

      if (occ->bb_has_division)
	{
	  /* Case 1: insert before an existing division.  */
	  gsi = gsi_after_labels (occ->bb);
	  while (!gsi_end_p (gsi) && !is_division_by (gsi_stmt (gsi), def))
	    gsi_next (&gsi);

	  gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
	}
      else if (def_gsi && occ->bb == def_gsi->bb)
	{
	  /* Case 2: insert right after the definition.  Note that this will
	     never happen if the definition statement can throw, because in
	     that case the sole successor of the statement's basic block will
	     dominate all the uses as well.  */
	  gsi_insert_after (def_gsi, new_stmt, GSI_NEW_STMT);
	}
      else
	{
	  /* Case 3: insert in a basic block not containing defs/uses.  */
	  gsi = gsi_after_labels (occ->bb);
	  gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
	}

      reciprocal_stats.rdivs_inserted++;

      occ->recip_def_stmt = new_stmt;
    }

  occ->recip_def = recip_def;
  for (occ_child = occ->children; occ_child; occ_child = occ_child->next)
    insert_reciprocals (def_gsi, occ_child, def, recip_def, threshold);
}
/* Replace the division at USE_P with a multiplication by the reciprocal, if
   possible.  */

static inline void
replace_reciprocal (use_operand_p use_p)
{
  gimple *use_stmt = USE_STMT (use_p);
  basic_block bb = gimple_bb (use_stmt);
  struct occurrence *occ = (struct occurrence *) bb->aux;

  if (optimize_bb_for_speed_p (bb)
      && occ->recip_def && use_stmt != occ->recip_def_stmt)
    {
      gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
      gimple_assign_set_rhs_code (use_stmt, MULT_EXPR);
      SET_USE (use_p, occ->recip_def);
      fold_stmt_inplace (&gsi);
      update_stmt (use_stmt);
    }
}
/* Free OCC and return one more "struct occurrence" to be freed.  */

static struct occurrence *
free_bb (struct occurrence *occ)
{
  struct occurrence *child, *next;

  /* First get the two pointers hanging off OCC.  */
  next = occ->next;
  child = occ->children;
  occ->bb->aux = NULL;
  occ_pool->remove (occ);

  /* Now ensure that we don't recurse unless it is necessary.  */
  if (!child)
    return next;
  else
    {
      while (next)
	next = free_bb (next);

      return child;
    }
}
/* Look for floating-point divisions among DEF's uses, and try to
   replace them by multiplications with the reciprocal.  Add
   as many statements computing the reciprocal as needed.

   DEF must be a GIMPLE register of a floating-point type.  */

static void
execute_cse_reciprocals_1 (gimple_stmt_iterator *def_gsi, tree def)
{
  use_operand_p use_p;
  imm_use_iterator use_iter;
  struct occurrence *occ;
  int count = 0, threshold;

  gcc_assert (FLOAT_TYPE_P (TREE_TYPE (def)) && is_gimple_reg (def));

  FOR_EACH_IMM_USE_FAST (use_p, use_iter, def)
    {
      gimple *use_stmt = USE_STMT (use_p);
      if (is_division_by (use_stmt, def))
	{
	  register_division_in (gimple_bb (use_stmt));
	  count++;
	}
    }

  /* Do the expensive part only if we can hope to optimize something.  */
  threshold = targetm.min_divisions_for_recip_mul (TYPE_MODE (TREE_TYPE (def)));
  if (count >= threshold)
    {
      gimple *use_stmt;
      for (occ = occ_head; occ; occ = occ->next)
	{
	  compute_merit (occ);
	  insert_reciprocals (def_gsi, occ, def, NULL, threshold);
	}

      FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, def)
	{
	  if (is_division_by (use_stmt, def))
	    {
	      FOR_EACH_IMM_USE_ON_STMT (use_p, use_iter)
		replace_reciprocal (use_p);
	    }
	}
    }

  for (occ = occ_head; occ; )
    occ = free_bb (occ);

  occ_head = NULL;
}
/* Return an internal function that implements the reciprocal of CALL,
   or IFN_LAST if there is no such function that the target supports.  */

internal_fn
internal_fn_reciprocal (gcall *call)
{
  internal_fn ifn;

  switch (gimple_call_combined_fn (call))
    {
    CASE_CFN_SQRT:
      ifn = IFN_RSQRT;
      break;

    default:
      return IFN_LAST;
    }

  tree_pair types = direct_internal_fn_types (ifn, call);
  if (!direct_internal_fn_supported_p (ifn, types, OPTIMIZE_FOR_SPEED))
    return IFN_LAST;

  return ifn;
}
/* Go through all the floating-point SSA_NAMEs, and call
   execute_cse_reciprocals_1 on each of them.  */
namespace {

const pass_data pass_data_cse_reciprocals =
{
  GIMPLE_PASS, /* type */
  "recip", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};

class pass_cse_reciprocals : public gimple_opt_pass
{
public:
  pass_cse_reciprocals (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_cse_reciprocals, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return optimize && flag_reciprocal_math; }
  virtual unsigned int execute (function *);

}; // class pass_cse_reciprocals
unsigned int
pass_cse_reciprocals::execute (function *fun)
{
  basic_block bb;
  tree arg;

  occ_pool = new object_allocator<occurrence> ("dominators for recip");

  memset (&reciprocal_stats, 0, sizeof (reciprocal_stats));
  calculate_dominance_info (CDI_DOMINATORS);
  calculate_dominance_info (CDI_POST_DOMINATORS);

  if (flag_checking)
    FOR_EACH_BB_FN (bb, fun)
      gcc_assert (!bb->aux);

  for (arg = DECL_ARGUMENTS (fun->decl); arg; arg = DECL_CHAIN (arg))
    if (FLOAT_TYPE_P (TREE_TYPE (arg))
	&& is_gimple_reg (arg))
      {
	tree name = ssa_default_def (fun, arg);
	if (name)
	  execute_cse_reciprocals_1 (NULL, name);
      }

  FOR_EACH_BB_FN (bb, fun)
    {
      tree def;

      for (gphi_iterator gsi = gsi_start_phis (bb); !gsi_end_p (gsi);
	   gsi_next (&gsi))
	{
	  gphi *phi = gsi.phi ();
	  def = PHI_RESULT (phi);
	  if (! virtual_operand_p (def)
	      && FLOAT_TYPE_P (TREE_TYPE (def)))
	    execute_cse_reciprocals_1 (NULL, def);
	}

      for (gimple_stmt_iterator gsi = gsi_after_labels (bb); !gsi_end_p (gsi);
	   gsi_next (&gsi))
	{
	  gimple *stmt = gsi_stmt (gsi);

	  if (gimple_has_lhs (stmt)
	      && (def = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_DEF)) != NULL
	      && FLOAT_TYPE_P (TREE_TYPE (def))
	      && TREE_CODE (def) == SSA_NAME)
	    execute_cse_reciprocals_1 (&gsi, def);
	}

      if (optimize_bb_for_size_p (bb))
	continue;

      /* Scan for a/func(b) and convert it to reciprocal a*rfunc(b).  */
      for (gimple_stmt_iterator gsi = gsi_after_labels (bb); !gsi_end_p (gsi);
	   gsi_next (&gsi))
	{
	  gimple *stmt = gsi_stmt (gsi);

	  if (is_gimple_assign (stmt)
	      && gimple_assign_rhs_code (stmt) == RDIV_EXPR)
	    {
	      tree arg1 = gimple_assign_rhs2 (stmt);
	      gimple *stmt1;

	      if (TREE_CODE (arg1) != SSA_NAME)
		continue;

	      stmt1 = SSA_NAME_DEF_STMT (arg1);

	      if (is_gimple_call (stmt1)
		  && gimple_call_lhs (stmt1))
		{
		  bool fail;
		  imm_use_iterator ui;
		  use_operand_p use_p;
		  tree fndecl = NULL_TREE;

		  gcall *call = as_a <gcall *> (stmt1);
		  internal_fn ifn = internal_fn_reciprocal (call);
		  if (ifn == IFN_LAST)
		    {
		      fndecl = gimple_call_fndecl (call);
		      if (!fndecl
			  || DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_MD)
			continue;
		      fndecl = targetm.builtin_reciprocal (fndecl);
		      if (!fndecl)
			continue;
		    }

		  /* Check that all uses of the SSA name are divisions,
		     otherwise replacing the defining statement will do
		     the wrong thing.  */
		  fail = false;
		  FOR_EACH_IMM_USE_FAST (use_p, ui, arg1)
		    {
		      gimple *stmt2 = USE_STMT (use_p);
		      if (is_gimple_debug (stmt2))
			continue;
		      if (!is_gimple_assign (stmt2)
			  || gimple_assign_rhs_code (stmt2) != RDIV_EXPR
			  || gimple_assign_rhs1 (stmt2) == arg1
			  || gimple_assign_rhs2 (stmt2) != arg1)
			{
			  fail = true;
			  break;
			}
		    }
		  if (fail)
		    continue;

		  gimple_replace_ssa_lhs (call, arg1);
		  if (gimple_call_internal_p (call) != (ifn != IFN_LAST))
		    {
		      auto_vec<tree, 4> args;
		      for (unsigned int i = 0;
			   i < gimple_call_num_args (call); i++)
			args.safe_push (gimple_call_arg (call, i));
		      gcall *stmt2;
		      if (ifn == IFN_LAST)
			stmt2 = gimple_build_call_vec (fndecl, args);
		      else
			stmt2 = gimple_build_call_internal_vec (ifn, args);
		      gimple_call_set_lhs (stmt2, arg1);
		      if (gimple_vdef (call))
			{
			  gimple_set_vdef (stmt2, gimple_vdef (call));
			  SSA_NAME_DEF_STMT (gimple_vdef (stmt2)) = stmt2;
			}
		      gimple_set_vuse (stmt2, gimple_vuse (call));
		      gimple_stmt_iterator gsi2 = gsi_for_stmt (call);
		      gsi_replace (&gsi2, stmt2, true);
		    }
		  else
		    {
		      if (ifn == IFN_LAST)
			gimple_call_set_fndecl (call, fndecl);
		      else
			gimple_call_set_internal_fn (call, ifn);
		      update_stmt (call);
		    }
		  reciprocal_stats.rfuncs_inserted++;

		  FOR_EACH_IMM_USE_STMT (stmt, ui, arg1)
		    {
		      gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
		      gimple_assign_set_rhs_code (stmt, MULT_EXPR);
		      fold_stmt_inplace (&gsi);
		      update_stmt (stmt);
		    }
		}
	    }
	}
    }

  statistics_counter_event (fun, "reciprocal divs inserted",
			    reciprocal_stats.rdivs_inserted);
  statistics_counter_event (fun, "reciprocal functions inserted",
			    reciprocal_stats.rfuncs_inserted);

  free_dominance_info (CDI_DOMINATORS);
  free_dominance_info (CDI_POST_DOMINATORS);
  delete occ_pool;
  return 0;
}
} // anon namespace

gimple_opt_pass *
make_pass_cse_reciprocals (gcc::context *ctxt)
{
  return new pass_cse_reciprocals (ctxt);
}
/* Records an occurrence at statement USE_STMT in the vector of trees
   STMTS if it is dominated by *TOP_BB or dominates it or this basic block
   is not yet initialized.  Returns true if the occurrence was pushed on
   the vector.  Adjusts *TOP_BB to be the basic block dominating all
   statements in the vector.  */

static bool
maybe_record_sincos (vec<gimple *> *stmts,
		     basic_block *top_bb, gimple *use_stmt)
{
  basic_block use_bb = gimple_bb (use_stmt);
  if (*top_bb
      && (*top_bb == use_bb
	  || dominated_by_p (CDI_DOMINATORS, use_bb, *top_bb)))
    stmts->safe_push (use_stmt);
  else if (!*top_bb
	   || dominated_by_p (CDI_DOMINATORS, *top_bb, use_bb))
    {
      stmts->safe_push (use_stmt);
      *top_bb = use_bb;
    }
  else
    return false;

  return true;
}
/* Look for sin, cos and cexpi calls with the same argument NAME and
   create a single call to cexpi CSEing the result in this case.
   We first walk over all immediate uses of the argument collecting
   statements that we can CSE in a vector and in a second pass replace
   the statement rhs with a REALPART or IMAGPART expression on the
   result of the cexpi call we insert before the use statement that
   dominates all other candidates.  */

static bool
execute_cse_sincos_1 (tree name)
{
  gimple_stmt_iterator gsi;
  imm_use_iterator use_iter;
  tree fndecl, res, type;
  gimple *def_stmt, *use_stmt, *stmt;
  int seen_cos = 0, seen_sin = 0, seen_cexpi = 0;
  auto_vec<gimple *> stmts;
  basic_block top_bb = NULL;
  int i;
  bool cfg_changed = false;

  type = TREE_TYPE (name);
  FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, name)
    {
      if (gimple_code (use_stmt) != GIMPLE_CALL
	  || !gimple_call_lhs (use_stmt))
	continue;

      switch (gimple_call_combined_fn (use_stmt))
	{
	CASE_CFN_COS:
	  seen_cos |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
	  break;

	CASE_CFN_SIN:
	  seen_sin |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
	  break;

	CASE_CFN_CEXPI:
	  seen_cexpi |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
	  break;

	default:;
	}
    }

  if (seen_cos + seen_sin + seen_cexpi <= 1)
    return false;

  /* Simply insert cexpi at the beginning of top_bb but not earlier than
     the name def statement.  */
  fndecl = mathfn_built_in (type, BUILT_IN_CEXPI);
  if (!fndecl)
    return false;
  stmt = gimple_build_call (fndecl, 1, name);
  res = make_temp_ssa_name (TREE_TYPE (TREE_TYPE (fndecl)), stmt, "sincostmp");
  gimple_call_set_lhs (stmt, res);

  def_stmt = SSA_NAME_DEF_STMT (name);
  if (!SSA_NAME_IS_DEFAULT_DEF (name)
      && gimple_code (def_stmt) != GIMPLE_PHI
      && gimple_bb (def_stmt) == top_bb)
    {
      gsi = gsi_for_stmt (def_stmt);
      gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
    }
  else
    {
      gsi = gsi_after_labels (top_bb);
      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
    }
  sincos_stats.inserted++;

  /* And adjust the recorded old call sites.  */
  for (i = 0; stmts.iterate (i, &use_stmt); ++i)
    {
      tree rhs = NULL;

      switch (gimple_call_combined_fn (use_stmt))
	{
	CASE_CFN_COS:
	  rhs = fold_build1 (REALPART_EXPR, type, res);
	  break;

	CASE_CFN_SIN:
	  rhs = fold_build1 (IMAGPART_EXPR, type, res);
	  break;

	CASE_CFN_CEXPI:
	  rhs = res;
	  break;

	default:;
	  gcc_unreachable ();
	}

      /* Replace call with a copy.  */
      stmt = gimple_build_assign (gimple_call_lhs (use_stmt), rhs);

      gsi = gsi_for_stmt (use_stmt);
      gsi_replace (&gsi, stmt, true);
      if (gimple_purge_dead_eh_edges (gimple_bb (stmt)))
	cfg_changed = true;
    }

  return cfg_changed;
}
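
/* For illustration, the net effect of the above on a typical input is
   (a sketch, assuming cexpi is available for the type):

     s = sin (x);		t = cexpi (x);
     c = cos (x);	  =>	s = IMAGPART_EXPR <t>;
				c = REALPART_EXPR <t>;

   The cexpi call is inserted in the basic block that dominates all the
   recorded sin/cos/cexpi uses of x.  */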
/* To evaluate powi(x,n), the floating point value x raised to the
   constant integer exponent n, we use a hybrid algorithm that
   combines the "window method" with look-up tables.  For an
   introduction to exponentiation algorithms and "addition chains",
   see section 4.6.3, "Evaluation of Powers" of Donald E. Knuth,
   "Seminumerical Algorithms", Vol. 2, "The Art of Computer Programming",
   3rd Edition, 1998, and Daniel M. Gordon, "A Survey of Fast Exponentiation
   Methods", Journal of Algorithms, Vol. 27, pp. 129-146, 1998.  */

/* Provide a default value for POWI_MAX_MULTS, the maximum number of
   multiplications to inline before calling the system library's pow
   function.  powi(x,n) requires at worst 2*bits(n)-2 multiplications,
   so this default never requires calling pow, powf or powl.  */

#ifndef POWI_MAX_MULTS
#define POWI_MAX_MULTS  (2*HOST_BITS_PER_WIDE_INT-2)
#endif
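
/* Illustration of the bound above (added for exposition): n = 7 has
   bits(n) = 3 and binary square-and-multiply reaches x**7 in
   2*3-2 = 4 multiplies: x**2 = x*x, x**3 = x**2*x, x**6 = x**3*x**3,
   x**7 = x**6*x.  With a 64-bit HOST_WIDE_INT the default is therefore
   2*64-2 = 126.  */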
/* The size of the "optimal power tree" lookup table.  All
   exponents less than this value are simply looked up in the
   powi_table below.  This threshold is also used to size the
   cache of pseudo registers that hold intermediate results.  */
#define POWI_TABLE_SIZE 256

/* The size, in bits, of the window used in the "window method"
   exponentiation algorithm.  This is equivalent to a radix of
   (1<<POWI_WINDOW_SIZE) in the corresponding "m-ary method".  */
#define POWI_WINDOW_SIZE 3
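
/* Worked example of the window method (illustrative): to expand
   powi(x,1025), val = 1025 >= POWI_TABLE_SIZE and is odd, so the low
   POWI_WINDOW_SIZE bits give the digit 1025 & 7 == 1; shifting right
   by 3 leaves val == 128, which is then handled via powi_table.  The
   emitted sequence computes x**128 with seven squarings, squares three
   more times to reach x**1024, and multiplies by x once: 11 multiplies
   in total.  */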
/* The following table is an efficient representation of an
   "optimal power tree".  For each value, i, the corresponding
   value, j, in the table states that an optimal evaluation
   sequence for calculating pow(x,i) can be found by evaluating
   pow(x,j)*pow(x,i-j).  An optimal power tree for the first
   100 integers is given in Knuth's "Seminumerical algorithms".  */

static const unsigned char powi_table[POWI_TABLE_SIZE] =
  {
      0,   1,   1,   2,   2,   3,   3,   4,  /*   0 -   7 */
      4,   6,   5,   6,   6,  10,   7,   9,  /*   8 -  15 */
      8,  16,   9,  16,  10,  12,  11,  13,  /*  16 -  23 */
     12,  17,  13,  18,  14,  24,  15,  26,  /*  24 -  31 */
     16,  17,  17,  19,  18,  33,  19,  26,  /*  32 -  39 */
     20,  25,  21,  40,  22,  27,  23,  44,  /*  40 -  47 */
     24,  32,  25,  34,  26,  29,  27,  44,  /*  48 -  55 */
     28,  31,  29,  34,  30,  60,  31,  36,  /*  56 -  63 */
     32,  64,  33,  34,  34,  46,  35,  37,  /*  64 -  71 */
     36,  65,  37,  50,  38,  48,  39,  69,  /*  72 -  79 */
     40,  49,  41,  43,  42,  51,  43,  58,  /*  80 -  87 */
     44,  64,  45,  47,  46,  59,  47,  76,  /*  88 -  95 */
     48,  65,  49,  66,  50,  67,  51,  66,  /*  96 - 103 */
     52,  70,  53,  74,  54, 104,  55,  74,  /* 104 - 111 */
     56,  64,  57,  69,  58,  78,  59,  68,  /* 112 - 119 */
     60,  61,  61,  80,  62,  75,  63,  68,  /* 120 - 127 */
     64,  65,  65, 128,  66, 129,  67,  90,  /* 128 - 135 */
     68,  73,  69, 131,  70,  94,  71,  88,  /* 136 - 143 */
     72, 128,  73,  98,  74, 132,  75, 121,  /* 144 - 151 */
     76, 102,  77, 124,  78, 132,  79, 106,  /* 152 - 159 */
     80,  97,  81, 160,  82,  99,  83, 134,  /* 160 - 167 */
     84,  86,  85,  95,  86, 160,  87, 100,  /* 168 - 175 */
     88, 113,  89,  98,  90, 107,  91, 122,  /* 176 - 183 */
     92, 111,  93, 102,  94, 126,  95, 150,  /* 184 - 191 */
     96, 128,  97, 130,  98, 133,  99, 195,  /* 192 - 199 */
    100, 128, 101, 123, 102, 164, 103, 138,  /* 200 - 207 */
    104, 145, 105, 146, 106, 109, 107, 149,  /* 208 - 215 */
    108, 200, 109, 146, 110, 170, 111, 157,  /* 216 - 223 */
    112, 128, 113, 130, 114, 182, 115, 132,  /* 224 - 231 */
    116, 200, 117, 132, 118, 158, 119, 206,  /* 232 - 239 */
    120, 240, 121, 162, 122, 147, 123, 152,  /* 240 - 247 */
    124, 166, 125, 214, 126, 138, 127, 153,  /* 248 - 255 */
  };
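
/* For instance, powi_table[5] == 3, so x**5 is evaluated as
   pow(x,3) * pow(x,2); recursively, powi_table[3] == 2 and
   powi_table[2] == 1, yielding the chain x**2 = x*x, x**3 = x**2*x,
   x**5 = x**3*x**2, i.e. three multiplications.  */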
/* Return the number of multiplications required to calculate
   powi(x,n) where n is less than POWI_TABLE_SIZE.  This is a
   subroutine of powi_cost.  CACHE is an array indicating
   which exponents have already been calculated.  */

static int
powi_lookup_cost (unsigned HOST_WIDE_INT n, bool *cache)
{
  /* If we've already calculated this exponent, then this evaluation
     doesn't require any additional multiplications.  */
  if (cache[n])
    return 0;

  cache[n] = true;
  return powi_lookup_cost (n - powi_table[n], cache)
	 + powi_lookup_cost (powi_table[n], cache) + 1;
}
/* Return the number of multiplications required to calculate
   powi(x,n) for an arbitrary x, given the exponent N.  This
   function needs to be kept in sync with powi_as_mults below.  */

static int
powi_cost (HOST_WIDE_INT n)
{
  bool cache[POWI_TABLE_SIZE];
  unsigned HOST_WIDE_INT digit;
  unsigned HOST_WIDE_INT val;
  int result;

  if (n == 0)
    return 0;

  /* Ignore the reciprocal when calculating the cost.  */
  val = (n < 0) ? -n : n;

  /* Initialize the exponent cache.  */
  memset (cache, 0, POWI_TABLE_SIZE * sizeof (bool));
  cache[1] = true;

  result = 0;

  while (val >= POWI_TABLE_SIZE)
    {
      if (val & 1)
	{
	  digit = val & ((1 << POWI_WINDOW_SIZE) - 1);
	  result += powi_lookup_cost (digit, cache)
		    + POWI_WINDOW_SIZE + 1;
	  val >>= POWI_WINDOW_SIZE;
	}
      else
	{
	  val >>= 1;
	  result++;
	}
    }

  return result + powi_lookup_cost (val, cache);
}
/* Recursive subroutine of powi_as_mults.  This function takes the
   array, CACHE, of already calculated exponents and an exponent N and
   returns a tree that corresponds to CACHE[1]**N, with type TYPE.  */

static tree
powi_as_mults_1 (gimple_stmt_iterator *gsi, location_t loc, tree type,
		 HOST_WIDE_INT n, tree *cache)
{
  tree op0, op1, ssa_target;
  unsigned HOST_WIDE_INT digit;
  gassign *mult_stmt;

  if (n < POWI_TABLE_SIZE && cache[n])
    return cache[n];

  ssa_target = make_temp_ssa_name (type, NULL, "powmult");

  if (n < POWI_TABLE_SIZE)
    {
      cache[n] = ssa_target;
      op0 = powi_as_mults_1 (gsi, loc, type, n - powi_table[n], cache);
      op1 = powi_as_mults_1 (gsi, loc, type, powi_table[n], cache);
    }
  else if (n & 1)
    {
      digit = n & ((1 << POWI_WINDOW_SIZE) - 1);
      op0 = powi_as_mults_1 (gsi, loc, type, n - digit, cache);
      op1 = powi_as_mults_1 (gsi, loc, type, digit, cache);
    }
  else
    {
      op0 = powi_as_mults_1 (gsi, loc, type, n >> 1, cache);
      op1 = op0;
    }

  mult_stmt = gimple_build_assign (ssa_target, MULT_EXPR, op0, op1);
  gimple_set_location (mult_stmt, loc);
  gsi_insert_before (gsi, mult_stmt, GSI_SAME_STMT);

  return ssa_target;
}
/* Convert ARG0**N to a tree of multiplications of ARG0 with itself.
   This function needs to be kept in sync with powi_cost above.  */

static tree
powi_as_mults (gimple_stmt_iterator *gsi, location_t loc,
	       tree arg0, HOST_WIDE_INT n)
{
  tree cache[POWI_TABLE_SIZE], result, type = TREE_TYPE (arg0);
  gassign *div_stmt;
  tree target;

  if (n == 0)
    return build_real (type, dconst1);

  memset (cache, 0, sizeof (cache));
  cache[1] = arg0;

  result = powi_as_mults_1 (gsi, loc, type, (n < 0) ? -n : n, cache);
  if (n >= 0)
    return result;

  /* If the original exponent was negative, reciprocate the result.  */
  target = make_temp_ssa_name (type, NULL, "powmult");
  div_stmt = gimple_build_assign (target, RDIV_EXPR,
				  build_real (type, dconst1), result);
  gimple_set_location (div_stmt, loc);
  gsi_insert_before (gsi, div_stmt, GSI_SAME_STMT);

  return target;
}
/* ARG0 and N are the two arguments to a powi builtin in GSI with
   location info LOC.  If the arguments are appropriate, create an
   equivalent sequence of statements prior to GSI using an optimal
   number of multiplications, and return an expression holding the
   result.  */

static tree
gimple_expand_builtin_powi (gimple_stmt_iterator *gsi, location_t loc,
			    tree arg0, HOST_WIDE_INT n)
{
  /* Avoid largest negative number.  */
  if (n != -n
      && ((n >= -1 && n <= 2)
	  || (optimize_function_for_speed_p (cfun)
	      && powi_cost (n) <= POWI_MAX_MULTS)))
    return powi_as_mults (gsi, loc, arg0, n);

  return NULL_TREE;
}
/* Build a gimple call statement that calls FN with argument ARG.
   Set the lhs of the call statement to a fresh SSA name.  Insert the
   statement prior to GSI's current position, and return the fresh
   SSA name.  */

static tree
build_and_insert_call (gimple_stmt_iterator *gsi, location_t loc,
		       tree fn, tree arg)
{
  gcall *call_stmt;
  tree ssa_target;

  call_stmt = gimple_build_call (fn, 1, arg);
  ssa_target = make_temp_ssa_name (TREE_TYPE (arg), NULL, "powroot");
  gimple_set_lhs (call_stmt, ssa_target);
  gimple_set_location (call_stmt, loc);
  gsi_insert_before (gsi, call_stmt, GSI_SAME_STMT);

  return ssa_target;
}
/* Build a gimple binary operation with the given CODE and arguments
   ARG0, ARG1, assigning the result to a new SSA name built from NAME.
   Insert the statement prior to GSI's current position, and
   return the fresh SSA name.  */

static tree
build_and_insert_binop (gimple_stmt_iterator *gsi, location_t loc,
			const char *name, enum tree_code code,
			tree arg0, tree arg1)
{
  tree result = make_temp_ssa_name (TREE_TYPE (arg0), NULL, name);
  gassign *stmt = gimple_build_assign (result, code, arg0, arg1);
  gimple_set_location (stmt, loc);
  gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
  return result;
}
/* Build a gimple reference operation with the given CODE and argument
   ARG, assigning the result to a new SSA name of TYPE with NAME.
   Insert the statement prior to GSI's current position, and return
   the fresh SSA name.  */

static inline tree
build_and_insert_ref (gimple_stmt_iterator *gsi, location_t loc, tree type,
		      const char *name, enum tree_code code, tree arg0)
{
  tree result = make_temp_ssa_name (type, NULL, name);
  gimple *stmt = gimple_build_assign (result, build1 (code, type, arg0));
  gimple_set_location (stmt, loc);
  gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
  return result;
}
/* Build a gimple assignment to cast VAL to TYPE.  Insert the statement
   prior to GSI's current position, and return the fresh SSA name.  */

static tree
build_and_insert_cast (gimple_stmt_iterator *gsi, location_t loc,
		       tree type, tree val)
{
  tree result = make_ssa_name (type);
  gassign *stmt = gimple_build_assign (result, NOP_EXPR, val);
  gimple_set_location (stmt, loc);
  gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
  return result;
}
struct pow_synth_sqrt_info
{
  bool *factors;
  unsigned int deepest;
  unsigned int num_mults;
};
/* Return true iff the real value C can be represented as a
   sum of powers of 0.5 up to N.  That is:
   C == SUM<i from 1..N> (a[i]*(0.5**i)) where a[i] is either 0 or 1.
   Record in INFO the various parameters of the synthesis algorithm such
   as the factors a[i], the maximum 0.5 power and the number of
   multiplications that will be required.  */

bool
representable_as_half_series_p (REAL_VALUE_TYPE c, unsigned n,
				 struct pow_synth_sqrt_info *info)
{
  REAL_VALUE_TYPE factor = dconsthalf;
  REAL_VALUE_TYPE remainder = c;

  info->deepest = 0;
  info->num_mults = 0;
  memset (info->factors, 0, n * sizeof (bool));

  for (unsigned i = 0; i < n; i++)
    {
      REAL_VALUE_TYPE res;

      /* If something inexact happened bail out now.  */
      if (real_arithmetic (&res, MINUS_EXPR, &remainder, &factor))
	return false;

      /* We have hit zero.  The number is representable as a sum
	 of powers of 0.5.  */
      if (real_equal (&res, &dconst0))
	{
	  info->factors[i] = true;
	  info->deepest = i + 1;
	  return true;
	}
      else if (!REAL_VALUE_NEGATIVE (res))
	{
	  remainder = res;
	  info->factors[i] = true;
	  info->num_mults++;
	}
      else
	info->factors[i] = false;

      real_arithmetic (&factor, MULT_EXPR, &factor, &dconsthalf);
    }
  return false;
}
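
/* Worked example (illustrative): for C == 0.625 and N >= 3, the loop
   subtracts 0.5 (factors[0] = true, remainder 0.125), skips 0.25
   (factors[1] = false), and hits zero at 0.125 (factors[2] = true),
   recording deepest == 3 and num_mults == 1.  Thus pow (x, 0.625) can
   be synthesized as sqrt (x) * sqrt (sqrt (sqrt (x))), one multiply
   besides the square root calls.  */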
/* Return the tree corresponding to FN being applied
   to ARG N times at GSI and LOC.
   Look up previous results from CACHE if need be.
   cache[0] should contain just plain ARG i.e. FN applied to ARG 0 times.  */

static tree
get_fn_chain (tree arg, unsigned int n, gimple_stmt_iterator *gsi,
	      tree fn, location_t loc, tree *cache)
{
  tree res = cache[n];
  if (!res)
    {
      tree prev = get_fn_chain (arg, n - 1, gsi, fn, loc, cache);
      res = build_and_insert_call (gsi, loc, fn, prev);
      cache[n] = res;
    }

  return res;
}
/* Print to STREAM the repeated application of function FNAME to ARG
   N times.  So, for FNAME = "foo", ARG = "x", N = 2 it would print:
   "foo (foo (x))".  */

static void
print_nested_fn (FILE* stream, const char *fname, const char* arg,
		 unsigned int n)
{
  if (n == 0)
    fprintf (stream, "%s", arg);
  else
    {
      fprintf (stream, "%s (", fname);
      print_nested_fn (stream, fname, arg, n - 1);
      fprintf (stream, ")");
    }
}
/* Print to STREAM the fractional sequence of sqrt chains
   applied to ARG, described by INFO.  Used for the dump file.  */

static void
dump_fractional_sqrt_sequence (FILE *stream, const char *arg,
			       struct pow_synth_sqrt_info *info)
{
  for (unsigned int i = 0; i < info->deepest; i++)
    {
      bool is_set = info->factors[i];
      if (is_set)
	{
	  print_nested_fn (stream, "sqrt", arg, i + 1);
	  if (i != info->deepest - 1)
	    fprintf (stream, " * ");
	}
    }
}
/* Print to STREAM a representation of raising ARG to an integer
   power N.  Used for the dump file.  */

static void
dump_integer_part (FILE *stream, const char* arg, HOST_WIDE_INT n)
{
  if (n > 1)
    fprintf (stream, "powi (%s, " HOST_WIDE_INT_PRINT_DEC ")", arg, n);
  else if (n == 1)
    fprintf (stream, "%s", arg);
}
/* Attempt to synthesize a POW[F] (ARG0, ARG1) call using chains of
   square roots.  Place at GSI and LOC.  Limit the maximum depth
   of the sqrt chains to MAX_DEPTH.  Return the tree holding the
   result of the expanded sequence or NULL_TREE if the expansion failed.

   This routine assumes that ARG1 is a real number with a fractional part
   (the integer exponent case will have been handled earlier in
   gimple_expand_builtin_pow).

   For ARG1 > 0.0:
   * For ARG1 composed of a whole part WHOLE_PART and a fractional part
     FRAC_PART i.e. WHOLE_PART == floor (ARG1) and
     FRAC_PART == ARG1 - WHOLE_PART:
     Produce POWI (ARG0, WHOLE_PART) * POW (ARG0, FRAC_PART) where
     POW (ARG0, FRAC_PART) is expanded as a product of square root chains
     if it can be expressed as such, that is if FRAC_PART satisfies:
     FRAC_PART == <SUM from i = 1 until MAX_DEPTH> (a[i] * (0.5**i))
     where integer a[i] is either 0 or 1.

     Example:
     POW (x, 3.625) == POWI (x, 3) * POW (x, 0.625)
       --> POWI (x, 3) * SQRT (x) * SQRT (SQRT (SQRT (x)))

   For ARG1 < 0.0 there are two approaches:
   * (A) Expand to 1.0 / POW (ARG0, -ARG1) where POW (ARG0, -ARG1)
	 is calculated as above.

     Example:
     POW (x, -5.625) == 1.0 / POW (x, 5.625)
       --> 1.0 / (POWI (x, 5) * SQRT (x) * SQRT (SQRT (SQRT (x))))

   * (B) : WHOLE_PART := - ceil (abs (ARG1))
	   FRAC_PART := ARG1 - WHOLE_PART
	   and expand to POW (x, FRAC_PART) / POWI (x, WHOLE_PART).
     Example:
     POW (x, -5.875) == POW (x, 0.125) / POWI (X, 6)
       --> SQRT (SQRT (SQRT (x))) / (POWI (x, 6))

   For ARG1 < 0.0 we choose between (A) and (B) depending on
   how many multiplications we'd have to do.
   So, for the example in (B): POW (x, -5.875), if we were to
   follow algorithm (A) we would produce:
   1.0 / POWI (X, 5) * SQRT (X) * SQRT (SQRT (X)) * SQRT (SQRT (SQRT (X)))
   which contains more multiplications than approach (B).

   Hopefully, this approach will eliminate potentially expensive POW library
   calls when unsafe floating point math is enabled and allow the compiler to
   further optimise the multiplies, square roots and divides produced by this
   function.  */
static tree
expand_pow_as_sqrts (gimple_stmt_iterator *gsi, location_t loc,
		     tree arg0, tree arg1, HOST_WIDE_INT max_depth)
{
  tree type = TREE_TYPE (arg0);
  machine_mode mode = TYPE_MODE (type);
  tree sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT);
  bool one_over = true;

  if (!sqrtfn)
    return NULL_TREE;

  if (TREE_CODE (arg1) != REAL_CST)
    return NULL_TREE;

  REAL_VALUE_TYPE exp_init = TREE_REAL_CST (arg1);

  gcc_assert (max_depth > 0);
  tree *cache = XALLOCAVEC (tree, max_depth + 1);

  struct pow_synth_sqrt_info synth_info;
  synth_info.factors = XALLOCAVEC (bool, max_depth + 1);
  synth_info.deepest = 0;
  synth_info.num_mults = 0;

  bool neg_exp = REAL_VALUE_NEGATIVE (exp_init);
  REAL_VALUE_TYPE exp = real_value_abs (&exp_init);

  /* The whole and fractional parts of exp.  */
  REAL_VALUE_TYPE whole_part;
  REAL_VALUE_TYPE frac_part;

  real_floor (&whole_part, mode, &exp);
  real_arithmetic (&frac_part, MINUS_EXPR, &exp, &whole_part);

  REAL_VALUE_TYPE ceil_whole = dconst0;
  REAL_VALUE_TYPE ceil_fract = dconst0;

  if (neg_exp)
    {
      real_ceil (&ceil_whole, mode, &exp);
      real_arithmetic (&ceil_fract, MINUS_EXPR, &ceil_whole, &exp);
    }

  if (!representable_as_half_series_p (frac_part, max_depth, &synth_info))
    return NULL_TREE;

  /* Check whether it's more profitable to not use 1.0 / ...  */
  if (neg_exp)
    {
      struct pow_synth_sqrt_info alt_synth_info;
      alt_synth_info.factors = XALLOCAVEC (bool, max_depth + 1);
      alt_synth_info.deepest = 0;
      alt_synth_info.num_mults = 0;

      if (representable_as_half_series_p (ceil_fract, max_depth,
					  &alt_synth_info)
	  && alt_synth_info.deepest <= synth_info.deepest
	  && alt_synth_info.num_mults < synth_info.num_mults)
	{
	  whole_part = ceil_whole;
	  frac_part = ceil_fract;
	  synth_info.deepest = alt_synth_info.deepest;
	  synth_info.num_mults = alt_synth_info.num_mults;
	  memcpy (synth_info.factors, alt_synth_info.factors,
		  (max_depth + 1) * sizeof (bool));
	  one_over = false;
	}
    }

  HOST_WIDE_INT n = real_to_integer (&whole_part);
  REAL_VALUE_TYPE cint;
  real_from_integer (&cint, VOIDmode, n, SIGNED);

  if (!real_identical (&whole_part, &cint))
    return NULL_TREE;

  if (powi_cost (n) + synth_info.num_mults > POWI_MAX_MULTS)
    return NULL_TREE;

  memset (cache, 0, (max_depth + 1) * sizeof (tree));

  tree integer_res = n == 0 ? build_real (type, dconst1) : arg0;

  /* Calculate the integer part of the exponent.  */
  if (n > 1)
    {
      integer_res = gimple_expand_builtin_powi (gsi, loc, arg0, n);
      if (!integer_res)
	return NULL_TREE;
    }

  if (dump_file)
    {
      char string[64];

      real_to_decimal (string, &exp_init, sizeof (string), 0, 1);
      fprintf (dump_file, "synthesizing pow (x, %s) as:\n", string);

      if (neg_exp)
	{
	  if (one_over)
	    {
	      fprintf (dump_file, "1.0 / (");
	      dump_integer_part (dump_file, "x", n);
	      if (n > 0)
		fprintf (dump_file, " * ");
	      dump_fractional_sqrt_sequence (dump_file, "x", &synth_info);
	      fprintf (dump_file, ")");
	    }
	  else
	    {
	      dump_fractional_sqrt_sequence (dump_file, "x", &synth_info);
	      fprintf (dump_file, " / (");
	      dump_integer_part (dump_file, "x", n);
	      fprintf (dump_file, ")");
	    }
	}
      else
	{
	  dump_fractional_sqrt_sequence (dump_file, "x", &synth_info);
	  if (n > 0)
	    fprintf (dump_file, " * ");
	  dump_integer_part (dump_file, "x", n);
	}

      fprintf (dump_file, "\ndeepest sqrt chain: %d\n", synth_info.deepest);
    }

  tree fract_res = NULL_TREE;
  cache[0] = arg0;

  /* Calculate the fractional part of the exponent.  */
  for (unsigned i = 0; i < synth_info.deepest; i++)
    {
      if (synth_info.factors[i])
	{
	  tree sqrt_chain = get_fn_chain (arg0, i + 1, gsi, sqrtfn, loc, cache);

	  if (!fract_res)
	    fract_res = sqrt_chain;
	  else
	    fract_res = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
						fract_res, sqrt_chain);
	}
    }

  tree res = NULL_TREE;

  if (neg_exp)
    {
      if (one_over)
	{
	  if (n > 0)
	    res = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
					  fract_res, integer_res);
	  else
	    res = fract_res;

	  res = build_and_insert_binop (gsi, loc, "powrootrecip", RDIV_EXPR,
					build_real (type, dconst1), res);
	}
      else
	{
	  res = build_and_insert_binop (gsi, loc, "powroot", RDIV_EXPR,
					fract_res, integer_res);
	}
    }
  else
    res = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
				  fract_res, integer_res);
  return res;
}
/* ARG0 and ARG1 are the two arguments to a pow builtin call in GSI
   with location info LOC.  If possible, create an equivalent and
   less expensive sequence of statements prior to GSI, and return an
   expression holding the result.  */

static tree
gimple_expand_builtin_pow (gimple_stmt_iterator *gsi, location_t loc,
			   tree arg0, tree arg1)
{
  REAL_VALUE_TYPE c, cint, dconst1_3, dconst1_4, dconst1_6;
  REAL_VALUE_TYPE c2, dconst3;
  HOST_WIDE_INT n;
  tree type, sqrtfn, cbrtfn, sqrt_arg0, result, cbrt_x, powi_cbrt_x;
  machine_mode mode;
  bool speed_p = optimize_bb_for_speed_p (gsi_bb (*gsi));
  bool hw_sqrt_exists, c_is_int, c2_is_int;

  dconst1_4 = dconst1;
  SET_REAL_EXP (&dconst1_4, REAL_EXP (&dconst1_4) - 2);

  /* If the exponent isn't a constant, there's nothing of interest
     to be done.  */
  if (TREE_CODE (arg1) != REAL_CST)
    return NULL_TREE;

  /* Don't perform the operation if flag_signaling_nans is on
     and the operand is a signaling NaN.  */
  if (HONOR_SNANS (TYPE_MODE (TREE_TYPE (arg1)))
      && ((TREE_CODE (arg0) == REAL_CST
	   && REAL_VALUE_ISSIGNALING_NAN (TREE_REAL_CST (arg0)))
	  || REAL_VALUE_ISSIGNALING_NAN (TREE_REAL_CST (arg1))))
    return NULL_TREE;

  /* If the exponent is equivalent to an integer, expand to an optimal
     multiplication sequence when profitable.  */
  c = TREE_REAL_CST (arg1);
  n = real_to_integer (&c);
  real_from_integer (&cint, VOIDmode, n, SIGNED);
  c_is_int = real_identical (&c, &cint);

  if (c_is_int
      && ((n >= -1 && n <= 2)
	  || (flag_unsafe_math_optimizations
	      && speed_p
	      && powi_cost (n) <= POWI_MAX_MULTS)))
    return gimple_expand_builtin_powi (gsi, loc, arg0, n);

  /* Attempt various optimizations using sqrt and cbrt.  */
  type = TREE_TYPE (arg0);
  mode = TYPE_MODE (type);
  sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT);

  /* Optimize pow(x,0.5) = sqrt(x).  This replacement is always safe
     unless signed zeros must be maintained.  pow(-0,0.5) = +0, while
     sqrt(-0) = -0.  */
  if (sqrtfn
      && real_equal (&c, &dconsthalf)
      && !HONOR_SIGNED_ZEROS (mode))
    return build_and_insert_call (gsi, loc, sqrtfn, arg0);

  hw_sqrt_exists = optab_handler (sqrt_optab, mode) != CODE_FOR_nothing;

  /* Optimize pow(x,1./3.) = cbrt(x).  This requires unsafe math
     optimizations since 1./3. is not exactly representable.  If x
     is negative and finite, the correct value of pow(x,1./3.) is
     a NaN with the "invalid" exception raised, because the value
     of 1./3. actually has an even denominator.  The correct value
     of cbrt(x) is a negative real value.  */
  cbrtfn = mathfn_built_in (type, BUILT_IN_CBRT);
  dconst1_3 = real_value_truncate (mode, dconst_third ());

  if (flag_unsafe_math_optimizations
      && cbrtfn
      && (!HONOR_NANS (mode) || tree_expr_nonnegative_p (arg0))
      && real_equal (&c, &dconst1_3))
    return build_and_insert_call (gsi, loc, cbrtfn, arg0);

  /* Optimize pow(x,1./6.) = cbrt(sqrt(x)).  Don't do this optimization
     if we don't have a hardware sqrt insn.  */
  dconst1_6 = dconst1_3;
  SET_REAL_EXP (&dconst1_6, REAL_EXP (&dconst1_6) - 1);

  if (flag_unsafe_math_optimizations
      && sqrtfn
      && cbrtfn
      && (!HONOR_NANS (mode) || tree_expr_nonnegative_p (arg0))
      && speed_p
      && hw_sqrt_exists
      && real_equal (&c, &dconst1_6))
    {
      /* sqrt(x)  */
      sqrt_arg0 = build_and_insert_call (gsi, loc, sqrtfn, arg0);

      /* cbrt(sqrt(x))  */
      return build_and_insert_call (gsi, loc, cbrtfn, sqrt_arg0);
    }

  /* Attempt to expand the POW as a product of square root chains.
     Expand the 0.25 case even when optimising for size.  */
  if (flag_unsafe_math_optimizations
      && sqrtfn
      && hw_sqrt_exists
      && (speed_p || real_equal (&c, &dconst1_4))
      && !HONOR_SIGNED_ZEROS (mode))
    {
      unsigned int max_depth = speed_p
				? PARAM_VALUE (PARAM_MAX_POW_SQRT_DEPTH)
				: 2;

      tree expand_with_sqrts
	= expand_pow_as_sqrts (gsi, loc, arg0, arg1, max_depth);

      if (expand_with_sqrts)
	return expand_with_sqrts;
    }

  real_arithmetic (&c2, MULT_EXPR, &c, &dconst2);
  n = real_to_integer (&c2);
  real_from_integer (&cint, VOIDmode, n, SIGNED);
  c2_is_int = real_identical (&c2, &cint);

  /* Optimize pow(x,c), where 3c = n for some nonzero integer n, into

     powi(x, n/3) * powi(cbrt(x), n%3),                    n > 0;
     1.0 / (powi(x, abs(n)/3) * powi(cbrt(x), abs(n)%3)),  n < 0.

     Do not calculate the first factor when n/3 = 0.  As cbrt(x) is
     different from pow(x, 1./3.) due to rounding and behavior with
     negative x, we need to constrain this transformation to unsafe
     math and positive x or finite math.  */
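  /* For instance (an illustrative sketch): for pow (x, 5./3.) we get
     c2 == 5, n/3 == 1 and n%3 == 2, so the call becomes
     x * (cbrt (x) * cbrt (x)).  */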
  real_from_integer (&dconst3, VOIDmode, 3, SIGNED);
  real_arithmetic (&c2, MULT_EXPR, &c, &dconst3);
  real_round (&c2, mode, &c2);
  n = real_to_integer (&c2);
  real_from_integer (&cint, VOIDmode, n, SIGNED);
  real_arithmetic (&c2, RDIV_EXPR, &cint, &dconst3);
  real_convert (&c2, mode, &c2);

  if (flag_unsafe_math_optimizations
      && cbrtfn
      && (!HONOR_NANS (mode) || tree_expr_nonnegative_p (arg0))
      && real_identical (&c2, &c)
      && !c2_is_int
      && optimize_function_for_speed_p (cfun)
      && powi_cost (n / 3) <= POWI_MAX_MULTS)
    {
      tree powi_x_ndiv3 = NULL_TREE;

      /* Attempt to fold powi(arg0, abs(n/3)) into multiplies.  If not
	 possible or profitable, give up.  Skip the degenerate case when
	 abs(n) < 3, where the result is always 1.  */
      if (absu_hwi (n) >= 3)
	{
	  powi_x_ndiv3 = gimple_expand_builtin_powi (gsi, loc, arg0,
						     abs_hwi (n / 3));
	  if (!powi_x_ndiv3)
	    return NULL_TREE;
	}

      /* Calculate powi(cbrt(x), n%3).  Don't use gimple_expand_builtin_powi
	 as that creates an unnecessary variable.  Instead, just produce
	 either cbrt(x) or cbrt(x) * cbrt(x).  */
      cbrt_x = build_and_insert_call (gsi, loc, cbrtfn, arg0);

      if (absu_hwi (n) % 3 == 1)
	powi_cbrt_x = cbrt_x;
      else
	powi_cbrt_x = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
					      cbrt_x, cbrt_x);

      /* Multiply the two subexpressions, unless powi(x,abs(n)/3) = 1.  */
      if (absu_hwi (n) < 3)
	result = powi_cbrt_x;
      else
	result = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
					 powi_x_ndiv3, powi_cbrt_x);

      /* If n is negative, reciprocate the result.  */
      if (n < 0)
	result = build_and_insert_binop (gsi, loc, "powroot", RDIV_EXPR,
					 build_real (type, dconst1), result);

      return result;
    }

  /* No optimizations succeeded.  */
  return NULL_TREE;
}
/* ARG is the argument to a cabs builtin call in GSI with location info
   LOC.  Create a sequence of statements prior to GSI that calculates
   sqrt(R*R + I*I), where R and I are the real and imaginary components
   of ARG, respectively.  Return an expression holding the result.  */

static tree
gimple_expand_builtin_cabs (gimple_stmt_iterator *gsi, location_t loc, tree arg)
{
  tree real_part, imag_part, addend1, addend2, sum, result;
  tree type = TREE_TYPE (TREE_TYPE (arg));
  tree sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT);
  machine_mode mode = TYPE_MODE (type);

  if (!flag_unsafe_math_optimizations
      || !optimize_bb_for_speed_p (gimple_bb (gsi_stmt (*gsi)))
      || !sqrtfn
      || optab_handler (sqrt_optab, mode) == CODE_FOR_nothing)
    return NULL_TREE;

  real_part = build_and_insert_ref (gsi, loc, type, "cabs",
				    REALPART_EXPR, arg);
  addend1 = build_and_insert_binop (gsi, loc, "cabs", MULT_EXPR,
				    real_part, real_part);
  imag_part = build_and_insert_ref (gsi, loc, type, "cabs",
				    IMAGPART_EXPR, arg);
  addend2 = build_and_insert_binop (gsi, loc, "cabs", MULT_EXPR,
				    imag_part, imag_part);
  sum = build_and_insert_binop (gsi, loc, "cabs", PLUS_EXPR, addend1, addend2);
  result = build_and_insert_call (gsi, loc, sqrtfn, sum);

  return result;
}
1742 /* Go through all calls to sin, cos and cexpi and call execute_cse_sincos_1
1743 on the SSA_NAME argument of each of them. Also expand powi(x,n) into
1744 an optimal number of multiplies, when n is a constant. */
1746 namespace {
1748 const pass_data pass_data_cse_sincos =
1750 GIMPLE_PASS, /* type */
1751 "sincos", /* name */
1752 OPTGROUP_NONE, /* optinfo_flags */
1753 TV_NONE, /* tv_id */
1754 PROP_ssa, /* properties_required */
1755 PROP_gimple_opt_math, /* properties_provided */
1756 0, /* properties_destroyed */
1757 0, /* todo_flags_start */
1758 TODO_update_ssa, /* todo_flags_finish */
1761 class pass_cse_sincos : public gimple_opt_pass
1763 public:
1764 pass_cse_sincos (gcc::context *ctxt)
1765 : gimple_opt_pass (pass_data_cse_sincos, ctxt)
1768 /* opt_pass methods: */
1769 virtual bool gate (function *)
1771 /* We no longer require either sincos or cexp, since powi expansion
1772 piggybacks on this pass. */
1773 return optimize;
1776 virtual unsigned int execute (function *);
1778 }; // class pass_cse_sincos
1780 unsigned int
1781 pass_cse_sincos::execute (function *fun)
1783 basic_block bb;
1784 bool cfg_changed = false;
1786 calculate_dominance_info (CDI_DOMINATORS);
1787 memset (&sincos_stats, 0, sizeof (sincos_stats));
1789 FOR_EACH_BB_FN (bb, fun)
1791 gimple_stmt_iterator gsi;
1792 bool cleanup_eh = false;
1794 for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1796 gimple *stmt = gsi_stmt (gsi);
1798 /* Only the last stmt in a bb could throw, no need to call
1799 gimple_purge_dead_eh_edges if we change something in the middle
1800 of a basic block. */
1801 cleanup_eh = false;
1803 if (is_gimple_call (stmt)
1804 && gimple_call_lhs (stmt))
1806 tree arg, arg0, arg1, result;
1807 HOST_WIDE_INT n;
1808 location_t loc;
1810 switch (gimple_call_combined_fn (stmt))
1812 CASE_CFN_COS:
1813 CASE_CFN_SIN:
1814 CASE_CFN_CEXPI:
1815 /* Make sure we have either sincos or cexp. */
1816 if (!targetm.libc_has_function (function_c99_math_complex)
1817 && !targetm.libc_has_function (function_sincos))
1818 break;
1820 arg = gimple_call_arg (stmt, 0);
1821 if (TREE_CODE (arg) == SSA_NAME)
1822 cfg_changed |= execute_cse_sincos_1 (arg);
1823 break;
1825 CASE_CFN_POW:
1826 arg0 = gimple_call_arg (stmt, 0);
1827 arg1 = gimple_call_arg (stmt, 1);
1829 loc = gimple_location (stmt);
1830 result = gimple_expand_builtin_pow (&gsi, loc, arg0, arg1);
1832 if (result)
1834 tree lhs = gimple_get_lhs (stmt);
1835 gassign *new_stmt = gimple_build_assign (lhs, result);
1836 gimple_set_location (new_stmt, loc);
1837 unlink_stmt_vdef (stmt);
1838 gsi_replace (&gsi, new_stmt, true);
1839 cleanup_eh = true;
1840 if (gimple_vdef (stmt))
1841 release_ssa_name (gimple_vdef (stmt));
1843 break;
1845 CASE_CFN_POWI:
1846 arg0 = gimple_call_arg (stmt, 0);
1847 arg1 = gimple_call_arg (stmt, 1);
1848 loc = gimple_location (stmt);
1850 if (real_minus_onep (arg0))
1852 tree t0, t1, cond, one, minus_one;
1853 gassign *stmt;
1855 t0 = TREE_TYPE (arg0);
1856 t1 = TREE_TYPE (arg1);
1857 one = build_real (t0, dconst1);
1858 minus_one = build_real (t0, dconstm1);
1860 cond = make_temp_ssa_name (t1, NULL, "powi_cond");
1861 stmt = gimple_build_assign (cond, BIT_AND_EXPR,
1862 arg1, build_int_cst (t1, 1));
1863 gimple_set_location (stmt, loc);
1864 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
1866 result = make_temp_ssa_name (t0, NULL, "powi");
1867 stmt = gimple_build_assign (result, COND_EXPR, cond,
1868 minus_one, one);
1869 gimple_set_location (stmt, loc);
1870 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
1872 else
1874 if (!tree_fits_shwi_p (arg1))
1875 break;
1877 n = tree_to_shwi (arg1);
1878 result = gimple_expand_builtin_powi (&gsi, loc, arg0, n);
1881 if (result)
1883 tree lhs = gimple_get_lhs (stmt);
1884 gassign *new_stmt = gimple_build_assign (lhs, result);
1885 gimple_set_location (new_stmt, loc);
1886 unlink_stmt_vdef (stmt);
1887 gsi_replace (&gsi, new_stmt, true);
1888 cleanup_eh = true;
1889 if (gimple_vdef (stmt))
1890 release_ssa_name (gimple_vdef (stmt));
1892 break;
1894 CASE_CFN_CABS:
1895 arg0 = gimple_call_arg (stmt, 0);
1896 loc = gimple_location (stmt);
1897 result = gimple_expand_builtin_cabs (&gsi, loc, arg0);
1899 if (result)
1901 tree lhs = gimple_get_lhs (stmt);
1902 gassign *new_stmt = gimple_build_assign (lhs, result);
1903 gimple_set_location (new_stmt, loc);
1904 unlink_stmt_vdef (stmt);
1905 gsi_replace (&gsi, new_stmt, true);
1906 cleanup_eh = true;
1907 if (gimple_vdef (stmt))
1908 release_ssa_name (gimple_vdef (stmt));
1910 break;
1912 default:;
1916 if (cleanup_eh)
1917 cfg_changed |= gimple_purge_dead_eh_edges (bb);
1920 statistics_counter_event (fun, "sincos statements inserted",
1921 sincos_stats.inserted);
1923 return cfg_changed ? TODO_cleanup_cfg : 0;
1926 } // anon namespace
1928 gimple_opt_pass *
1929 make_pass_cse_sincos (gcc::context *ctxt)
1931 return new pass_cse_sincos (ctxt);
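/* For example (hypothetical user code), when libc provides sincos or the
   C99 complex functions, this pass rewrites

     s = sinf (x);
     c = cosf (x);

   into the equivalent of

     _1 = __builtin_cexpif (x);
     s = IMAGPART_EXPR <_1>;
     c = REALPART_EXPR <_1>;

   since cexpi (x) computes cos (x) + i*sin (x) in a single call.  */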
1934 /* A symbolic number structure is used to detect byte permutation and selection
1935 patterns of a source. To achieve that, its field N contains an artificial
1936 number consisting of BITS_PER_MARKER-sized markers tracking where each
1937 byte comes from in the source:
1939 0 - target byte has the value 0
1940 FF - target byte has an unknown value (e.g. due to sign extension)
1941 1..size - marker value is one plus the byte's index in the source (index 0 is the lsb).
1943 To detect permutations on memory sources (arrays and structures), a symbolic
1944 number also records:
1945 - a base address BASE_ADDR and an OFFSET giving the address of the source;
1946 - a range which gives the difference between the highest and lowest accessed
1947 memory location to make such a symbolic number;
1948 - the address SRC of the source element of lowest address as a convenience
1949 to easily get BASE_ADDR + offset + lowest bytepos.
1951 Note 1: the range is different from size as size reflects the size of the
1952 type of the current expression. For instance, for an array char a[],
1953 (short) a[0] | (short) a[3] would have a size of 2 but a range of 4 while
1954 (short) a[0] | ((short) a[0] << 1) would still have a size of 2 but this
1955 time a range of 1.
1957 Note 2: for non-memory sources, range holds the same value as size.
1959 Note 3: SRC points to the SSA_NAME in case of non-memory source. */
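/* A worked example (illustrative, little-endian target): after
   init_symbolic_number on a 4-byte source, N is 0x04030201, i.e.
   marker 1 for the lsb up to marker 4 for the msb.  A manual byte
   swap built from shifts and ORs permutes the markers into
   0x01020304 (CMPXCHG below), while an endianness-preserving
   reconstruction of the value leaves N at 0x04030201 (CMPNOP
   below).  */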
1961 struct symbolic_number {
1962 uint64_t n;
1963 tree type;
1964 tree base_addr;
1965 tree offset;
1966 HOST_WIDE_INT bytepos;
1967 tree src;
1968 tree alias_set;
1969 tree vuse;
1970 unsigned HOST_WIDE_INT range;
1973 #define BITS_PER_MARKER 8
1974 #define MARKER_MASK ((1 << BITS_PER_MARKER) - 1)
1975 #define MARKER_BYTE_UNKNOWN MARKER_MASK
1976 #define HEAD_MARKER(n, size) \
1977 ((n) & ((uint64_t) MARKER_MASK << (((size) - 1) * BITS_PER_MARKER)))
1979 /* The number which the find_bswap_or_nop_1 result should match in
1980 order to have a nop. The number is masked according to the size of
1981 the symbolic number before using it. */
1982 #define CMPNOP (sizeof (int64_t) < 8 ? 0 : \
1983 (uint64_t)0x08070605 << 32 | 0x04030201)
1985 /* The number which the find_bswap_or_nop_1 result should match in
1986 order to have a byte swap. The number is masked according to the
1987 size of the symbolic number before using it. */
1988 #define CMPXCHG (sizeof (int64_t) < 8 ? 0 : \
1989 (uint64_t)0x01020304 << 32 | 0x05060708)
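/* For example, masked for a 2-byte symbolic number these become
   CMPNOP & 0xffff == 0x0201 and CMPXCHG >> 48 == 0x0102; for a
   4-byte number they are 0x04030201 and 0x01020304 respectively.  */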
1991 /* Perform a SHIFT or ROTATE operation by COUNT bits on symbolic
1992 number N. Return false if the requested operation is not permitted
1993 on a symbolic number. */
1995 static inline bool
1996 do_shift_rotate (enum tree_code code,
1997 struct symbolic_number *n,
1998 int count)
2000 int i, size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
2001 unsigned head_marker;
2003 if (count % BITS_PER_UNIT != 0)
2004 return false;
2005 count = (count / BITS_PER_UNIT) * BITS_PER_MARKER;
2007 /* Zero out the extra bits of N in order to avoid them being shifted
2008 into the significant bits. */
2009 if (size < 64 / BITS_PER_MARKER)
2010 n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
2012 switch (code)
2014 case LSHIFT_EXPR:
2015 n->n <<= count;
2016 break;
2017 case RSHIFT_EXPR:
2018 head_marker = HEAD_MARKER (n->n, size);
2019 n->n >>= count;
2020 /* Arithmetic shift of signed type: result is dependent on the value. */
2021 if (!TYPE_UNSIGNED (n->type) && head_marker)
2022 for (i = 0; i < count / BITS_PER_MARKER; i++)
2023 n->n |= (uint64_t) MARKER_BYTE_UNKNOWN
2024 << ((size - 1 - i) * BITS_PER_MARKER);
2025 break;
2026 case LROTATE_EXPR:
2027 n->n = (n->n << count) | (n->n >> ((size * BITS_PER_MARKER) - count));
2028 break;
2029 case RROTATE_EXPR:
2030 n->n = (n->n >> count) | (n->n << ((size * BITS_PER_MARKER) - count));
2031 break;
2032 default:
2033 return false;
2035 /* Zero unused bits for size. */
2036 if (size < 64 / BITS_PER_MARKER)
2037 n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
2038 return true;
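/* A minimal self-contained sketch of the marker arithmetic above,
   assuming an 8-bit byte so BITS_PER_UNIT == BITS_PER_MARKER == 8
   (illustrative only; not used by this pass).  Shifting the 4-byte
   symbolic number 0x04030201 left by one byte gives 0x03020100:
   a zero marker enters at the low end and the top marker is dropped
   by the final mask.  */
#if 0
#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  uint64_t n = 0x04030201;   /* markers of a 4-byte identity read */
  int size = 4, count = 8;   /* shift left by 8 bits, i.e. one byte */
  count = (count / 8) * 8;   /* scale the bit count to marker bits */
  n <<= count;
  n &= ((uint64_t) 1 << (size * 8)) - 1;        /* zero unused bits */
  printf ("%#llx\n", (unsigned long long) n);   /* prints 0x3020100 */
  return 0;
}
#endif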
2041 /* Perform sanity checking for the symbolic number N and the gimple
2042 statement STMT. */
2044 static inline bool
2045 verify_symbolic_number_p (struct symbolic_number *n, gimple *stmt)
2047 tree lhs_type;
2049 lhs_type = gimple_expr_type (stmt);
2051 if (TREE_CODE (lhs_type) != INTEGER_TYPE)
2052 return false;
2054 if (TYPE_PRECISION (lhs_type) != TYPE_PRECISION (n->type))
2055 return false;
2057 return true;
2060 /* Initialize the symbolic number N for the bswap pass from the base element
2061 SRC manipulated by the bitwise OR expression. */
2063 static bool
2064 init_symbolic_number (struct symbolic_number *n, tree src)
2066 int size;
2068 if (! INTEGRAL_TYPE_P (TREE_TYPE (src)))
2069 return false;
2071 n->base_addr = n->offset = n->alias_set = n->vuse = NULL_TREE;
2072 n->src = src;
2074 /* Set up the symbolic number N by setting each byte to a value between 1 and
2075 the byte size of rhs1. The highest order byte is set to that size and the
2076 lowest order byte to 1. */
2077 n->type = TREE_TYPE (src);
2078 size = TYPE_PRECISION (n->type);
2079 if (size % BITS_PER_UNIT != 0)
2080 return false;
2081 size /= BITS_PER_UNIT;
2082 if (size > 64 / BITS_PER_MARKER)
2083 return false;
2084 n->range = size;
2085 n->n = CMPNOP;
2087 if (size < 64 / BITS_PER_MARKER)
2088 n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
2090 return true;
2093 /* Check whether STMT might be a byte swap or a nop from a memory source
2094 and return the answer. If so, REF is that memory source, and the base of
2095 the memory area accessed and the offset of the access from that base are
2095 recorded in N. */
2097 bool
2098 find_bswap_or_nop_load (gimple *stmt, tree ref, struct symbolic_number *n)
2100 /* Leaf node is an array or component ref. Memorize its base and
2101 offset from base to compare to other such leaf nodes. */
2102 HOST_WIDE_INT bitsize, bitpos;
2103 machine_mode mode;
2104 int unsignedp, reversep, volatilep;
2105 tree offset, base_addr;
2107 /* Not prepared to handle PDP endian. */
2108 if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
2109 return false;
2111 if (!gimple_assign_load_p (stmt) || gimple_has_volatile_ops (stmt))
2112 return false;
2114 base_addr = get_inner_reference (ref, &bitsize, &bitpos, &offset, &mode,
2115 &unsignedp, &reversep, &volatilep);
2117 if (TREE_CODE (base_addr) == MEM_REF)
2119 offset_int bit_offset = 0;
2120 tree off = TREE_OPERAND (base_addr, 1);
2122 if (!integer_zerop (off))
2124 offset_int boff, coff = mem_ref_offset (base_addr);
2125 boff = coff << LOG2_BITS_PER_UNIT;
2126 bit_offset += boff;
2129 base_addr = TREE_OPERAND (base_addr, 0);
2131 /* Avoid returning a negative bitpos as this may wreak havoc later. */
2132 if (wi::neg_p (bit_offset))
2134 offset_int mask = wi::mask <offset_int> (LOG2_BITS_PER_UNIT, false);
2135 offset_int tem = bit_offset.and_not (mask);
2136 /* TEM is the bitpos rounded to BITS_PER_UNIT towards -Inf.
2137 Subtract it from BIT_OFFSET and add it (scaled) to OFFSET. */
2138 bit_offset -= tem;
2139 tem >>= LOG2_BITS_PER_UNIT;
2140 if (offset)
2141 offset = size_binop (PLUS_EXPR, offset,
2142 wide_int_to_tree (sizetype, tem));
2143 else
2144 offset = wide_int_to_tree (sizetype, tem);
2147 bitpos += bit_offset.to_shwi ();
2150 if (bitpos % BITS_PER_UNIT)
2151 return false;
2152 if (bitsize % BITS_PER_UNIT)
2153 return false;
2154 if (reversep)
2155 return false;
2157 if (!init_symbolic_number (n, ref))
2158 return false;
2159 n->base_addr = base_addr;
2160 n->offset = offset;
2161 n->bytepos = bitpos / BITS_PER_UNIT;
2162 n->alias_set = reference_alias_ptr_type (ref);
2163 n->vuse = gimple_vuse (stmt);
2164 return true;
2167 /* Compute the symbolic number N representing the result of a bitwise OR on
2168 two symbolic numbers N1 and N2 whose source statements are respectively
2169 SOURCE_STMT1 and SOURCE_STMT2. */
2171 static gimple *
2172 perform_symbolic_merge (gimple *source_stmt1, struct symbolic_number *n1,
2173 gimple *source_stmt2, struct symbolic_number *n2,
2174 struct symbolic_number *n)
2176 int i, size;
2177 uint64_t mask;
2178 gimple *source_stmt;
2179 struct symbolic_number *n_start;
2181 tree rhs1 = gimple_assign_rhs1 (source_stmt1);
2182 if (TREE_CODE (rhs1) == BIT_FIELD_REF
2183 && TREE_CODE (TREE_OPERAND (rhs1, 0)) == SSA_NAME)
2184 rhs1 = TREE_OPERAND (rhs1, 0);
2185 tree rhs2 = gimple_assign_rhs1 (source_stmt2);
2186 if (TREE_CODE (rhs2) == BIT_FIELD_REF
2187 && TREE_CODE (TREE_OPERAND (rhs2, 0)) == SSA_NAME)
2188 rhs2 = TREE_OPERAND (rhs2, 0);
2190 /* The sources are different; cancel the bswap if they are not memory
2191 locations with the same base (array, structure, ...). */
2192 if (rhs1 != rhs2)
2194 uint64_t inc;
2195 HOST_WIDE_INT start_sub, end_sub, end1, end2, end;
2196 struct symbolic_number *toinc_n_ptr, *n_end;
2197 basic_block bb1, bb2;
2199 if (!n1->base_addr || !n2->base_addr
2200 || !operand_equal_p (n1->base_addr, n2->base_addr, 0))
2201 return NULL;
2203 if (!n1->offset != !n2->offset
2204 || (n1->offset && !operand_equal_p (n1->offset, n2->offset, 0)))
2205 return NULL;
2207 if (n1->bytepos < n2->bytepos)
2209 n_start = n1;
2210 start_sub = n2->bytepos - n1->bytepos;
2212 else
2214 n_start = n2;
2215 start_sub = n1->bytepos - n2->bytepos;
2218 bb1 = gimple_bb (source_stmt1);
2219 bb2 = gimple_bb (source_stmt2);
2220 if (dominated_by_p (CDI_DOMINATORS, bb1, bb2))
2221 source_stmt = source_stmt1;
2222 else
2223 source_stmt = source_stmt2;
2225 /* Find the highest address at which a load is performed and
2226 compute related info. */
2227 end1 = n1->bytepos + (n1->range - 1);
2228 end2 = n2->bytepos + (n2->range - 1);
2229 if (end1 < end2)
2231 end = end2;
2232 end_sub = end2 - end1;
2234 else
2236 end = end1;
2237 end_sub = end1 - end2;
2239 n_end = (end2 > end1) ? n2 : n1;
2241 /* Find the symbolic number whose lsb is the most significant. */
2242 if (BYTES_BIG_ENDIAN)
2243 toinc_n_ptr = (n_end == n1) ? n2 : n1;
2244 else
2245 toinc_n_ptr = (n_start == n1) ? n2 : n1;
2247 n->range = end - n_start->bytepos + 1;
2249 /* Check that the range of memory covered can be represented by
2250 a symbolic number. */
2251 if (n->range > 64 / BITS_PER_MARKER)
2252 return NULL;
2254 /* Reinterpret the byte marks in the symbolic number holding the value
2255 of bigger weight according to the target endianness. */
2256 inc = BYTES_BIG_ENDIAN ? end_sub : start_sub;
2257 size = TYPE_PRECISION (n1->type) / BITS_PER_UNIT;
2258 for (i = 0; i < size; i++, inc <<= BITS_PER_MARKER)
2260 unsigned marker
2261 = (toinc_n_ptr->n >> (i * BITS_PER_MARKER)) & MARKER_MASK;
2262 if (marker && marker != MARKER_BYTE_UNKNOWN)
2263 toinc_n_ptr->n += inc;
2266 else
2268 n->range = n1->range;
2269 n_start = n1;
2270 source_stmt = source_stmt1;
2273 if (!n1->alias_set
2274 || alias_ptr_types_compatible_p (n1->alias_set, n2->alias_set))
2275 n->alias_set = n1->alias_set;
2276 else
2277 n->alias_set = ptr_type_node;
2278 n->vuse = n_start->vuse;
2279 n->base_addr = n_start->base_addr;
2280 n->offset = n_start->offset;
2281 n->src = n_start->src;
2282 n->bytepos = n_start->bytepos;
2283 n->type = n_start->type;
2284 size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
2286 for (i = 0, mask = MARKER_MASK; i < size; i++, mask <<= BITS_PER_MARKER)
2288 uint64_t masked1, masked2;
2290 masked1 = n1->n & mask;
2291 masked2 = n2->n & mask;
2292 if (masked1 && masked2 && masked1 != masked2)
2293 return NULL;
2295 n->n = n1->n | n2->n;
2297 return source_stmt;
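/* Example (illustrative, little-endian target, hypothetical source
   "unsigned char a[]"): for a[0] | (a[1] << 8) computed in a 16-bit
   type, N1 covers a[0] (bytepos 0, n == 0x0001) and N2 covers the
   shifted a[1] (bytepos 1, n == 0x0100).  start_sub == 1, so a[1]'s
   marker is bumped from 1 to 2 before the OR, yielding n == 0x0201
   with range 2 -- CMPNOP, i.e. a plain 16-bit load.  */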
2300 /* find_bswap_or_nop_1 invokes itself recursively with N and tries to perform
2301 the operation given by the rhs of STMT on the result. If the operation
2302 can be executed successfully, the function returns a gimple stmt whose
2303 rhs's first tree is the expression of the source operand; otherwise it
2304 returns NULL. */
2306 static gimple *
2307 find_bswap_or_nop_1 (gimple *stmt, struct symbolic_number *n, int limit)
2309 enum tree_code code;
2310 tree rhs1, rhs2 = NULL;
2311 gimple *rhs1_stmt, *rhs2_stmt, *source_stmt1;
2312 enum gimple_rhs_class rhs_class;
2314 if (!limit || !is_gimple_assign (stmt))
2315 return NULL;
2317 rhs1 = gimple_assign_rhs1 (stmt);
2319 if (find_bswap_or_nop_load (stmt, rhs1, n))
2320 return stmt;
2322 /* Handle BIT_FIELD_REF. */
2323 if (TREE_CODE (rhs1) == BIT_FIELD_REF
2324 && TREE_CODE (TREE_OPERAND (rhs1, 0)) == SSA_NAME)
2326 unsigned HOST_WIDE_INT bitsize = tree_to_uhwi (TREE_OPERAND (rhs1, 1));
2327 unsigned HOST_WIDE_INT bitpos = tree_to_uhwi (TREE_OPERAND (rhs1, 2));
2328 if (bitpos % BITS_PER_UNIT == 0
2329 && bitsize % BITS_PER_UNIT == 0
2330 && init_symbolic_number (n, TREE_OPERAND (rhs1, 0)))
2332 /* Handle big-endian bit numbering in BIT_FIELD_REF. */
2333 if (BYTES_BIG_ENDIAN)
2334 bitpos = TYPE_PRECISION (n->type) - bitpos - bitsize;
2336 /* Shift. */
2337 if (!do_shift_rotate (RSHIFT_EXPR, n, bitpos))
2338 return NULL;
2340 /* Mask. */
2341 uint64_t mask = 0;
2342 uint64_t tmp = (1 << BITS_PER_UNIT) - 1;
2343 for (unsigned i = 0; i < bitsize / BITS_PER_UNIT;
2344 i++, tmp <<= BITS_PER_UNIT)
2345 mask |= (uint64_t) MARKER_MASK << (i * BITS_PER_MARKER);
2346 n->n &= mask;
2348 /* Convert. */
2349 n->type = TREE_TYPE (rhs1);
2350 if (!n->base_addr)
2351 n->range = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
2353 return verify_symbolic_number_p (n, stmt) ? stmt : NULL;
2356 return NULL;
2359 if (TREE_CODE (rhs1) != SSA_NAME)
2360 return NULL;
2362 code = gimple_assign_rhs_code (stmt);
2363 rhs_class = gimple_assign_rhs_class (stmt);
2364 rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
2366 if (rhs_class == GIMPLE_BINARY_RHS)
2367 rhs2 = gimple_assign_rhs2 (stmt);
2369 /* Handle unary rhs and binary rhs with integer constants as second
2370 operand. */
2372 if (rhs_class == GIMPLE_UNARY_RHS
2373 || (rhs_class == GIMPLE_BINARY_RHS
2374 && TREE_CODE (rhs2) == INTEGER_CST))
2376 if (code != BIT_AND_EXPR
2377 && code != LSHIFT_EXPR
2378 && code != RSHIFT_EXPR
2379 && code != LROTATE_EXPR
2380 && code != RROTATE_EXPR
2381 && !CONVERT_EXPR_CODE_P (code))
2382 return NULL;
2384 source_stmt1 = find_bswap_or_nop_1 (rhs1_stmt, n, limit - 1);
2386 /* If find_bswap_or_nop_1 returned NULL, STMT is a leaf node and
2387 we have to initialize the symbolic number. */
2388 if (!source_stmt1)
2390 if (gimple_assign_load_p (stmt)
2391 || !init_symbolic_number (n, rhs1))
2392 return NULL;
2393 source_stmt1 = stmt;
2396 switch (code)
2398 case BIT_AND_EXPR:
2400 int i, size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
2401 uint64_t val = int_cst_value (rhs2), mask = 0;
2402 uint64_t tmp = (1 << BITS_PER_UNIT) - 1;
2404 /* Only constants masking full bytes are allowed. */
2405 for (i = 0; i < size; i++, tmp <<= BITS_PER_UNIT)
2406 if ((val & tmp) != 0 && (val & tmp) != tmp)
2407 return NULL;
2408 else if (val & tmp)
2409 mask |= (uint64_t) MARKER_MASK << (i * BITS_PER_MARKER);
2411 n->n &= mask;
2413 break;
2414 case LSHIFT_EXPR:
2415 case RSHIFT_EXPR:
2416 case LROTATE_EXPR:
2417 case RROTATE_EXPR:
2418 if (!do_shift_rotate (code, n, (int) TREE_INT_CST_LOW (rhs2)))
2419 return NULL;
2420 break;
2421 CASE_CONVERT:
2423 int i, type_size, old_type_size;
2424 tree type;
2426 type = gimple_expr_type (stmt);
2427 type_size = TYPE_PRECISION (type);
2428 if (type_size % BITS_PER_UNIT != 0)
2429 return NULL;
2430 type_size /= BITS_PER_UNIT;
2431 if (type_size > 64 / BITS_PER_MARKER)
2432 return NULL;
2434 /* Sign extension: result is dependent on the value. */
2435 old_type_size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
2436 if (!TYPE_UNSIGNED (n->type) && type_size > old_type_size
2437 && HEAD_MARKER (n->n, old_type_size))
2438 for (i = 0; i < type_size - old_type_size; i++)
2439 n->n |= (uint64_t) MARKER_BYTE_UNKNOWN
2440 << ((type_size - 1 - i) * BITS_PER_MARKER);
2442 if (type_size < 64 / BITS_PER_MARKER)
2444 /* If STMT casts to a smaller type mask out the bits not
2445 belonging to the target type. */
2446 n->n &= ((uint64_t) 1 << (type_size * BITS_PER_MARKER)) - 1;
2448 n->type = type;
2449 if (!n->base_addr)
2450 n->range = type_size;
2452 break;
2453 default:
2454 return NULL;
2456 return verify_symbolic_number_p (n, stmt) ? source_stmt1 : NULL;
2459 /* Handle binary rhs. */
2461 if (rhs_class == GIMPLE_BINARY_RHS)
2463 struct symbolic_number n1, n2;
2464 gimple *source_stmt, *source_stmt2;
2466 if (code != BIT_IOR_EXPR)
2467 return NULL;
2469 if (TREE_CODE (rhs2) != SSA_NAME)
2470 return NULL;
2472 rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
2474 switch (code)
2476 case BIT_IOR_EXPR:
2477 source_stmt1 = find_bswap_or_nop_1 (rhs1_stmt, &n1, limit - 1);
2479 if (!source_stmt1)
2480 return NULL;
2482 source_stmt2 = find_bswap_or_nop_1 (rhs2_stmt, &n2, limit - 1);
2484 if (!source_stmt2)
2485 return NULL;
2487 if (TYPE_PRECISION (n1.type) != TYPE_PRECISION (n2.type))
2488 return NULL;
2490 if (!n1.vuse != !n2.vuse
2491 || (n1.vuse && !operand_equal_p (n1.vuse, n2.vuse, 0)))
2492 return NULL;
2494 source_stmt
2495 = perform_symbolic_merge (source_stmt1, &n1, source_stmt2, &n2, n);
2497 if (!source_stmt)
2498 return NULL;
2500 if (!verify_symbolic_number_p (n, stmt))
2501 return NULL;
2503 break;
2504 default:
2505 return NULL;
2507 return source_stmt;
2509 return NULL;
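/* As an illustration (hypothetical user code), the recursion above
   reduces a manual 32-bit byte swap such as

     uint32_t
     swap32 (uint32_t x)
     {
       return (x << 24)
              | ((x & 0xff00) << 8)
              | ((x >> 8) & 0xff00)
              | (x >> 24);
     }

   to the symbolic number 0x01020304, which find_bswap_or_nop below
   compares against CMPXCHG.  */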
2512 /* Check whether STMT completes a bswap implementation, or a read in a
2513 given endianness, consisting of ORs, SHIFTs and ANDs, and set *BSWAP
2514 accordingly. Also set N to describe the operations performed: the
2515 size of the resulting expression and whether it works on a memory
2516 source, and if so its alias set and vuse. Finally, return a stmt
2517 whose rhs's first tree is the source
2518 expression. */
2520 static gimple *
2521 find_bswap_or_nop (gimple *stmt, struct symbolic_number *n, bool *bswap)
2523 unsigned rsize;
2524 uint64_t tmpn, mask;
2525 /* The number which the find_bswap_or_nop_1 result should match in order
2526 to have a full byte swap. The number is shifted to the right
2527 according to the size of the symbolic number before using it. */
2528 uint64_t cmpxchg = CMPXCHG;
2529 uint64_t cmpnop = CMPNOP;
2531 gimple *ins_stmt;
2532 int limit;
2534 /* The last parameter determines the search depth limit. It usually
2535 correlates directly with the number n of bytes to be touched. We
2536 increase that number by log2(n) + 1 here in order to also
2537 cover signed -> unsigned conversions of the src operand, as can be seen
2538 in libgcc, and for the initial shift/and operation of the src operand. */
2539 limit = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (gimple_expr_type (stmt)));
2540 limit += 1 + (int) ceil_log2 ((unsigned HOST_WIDE_INT) limit);
2541 ins_stmt = find_bswap_or_nop_1 (stmt, n, limit);
2543 if (!ins_stmt)
2544 return NULL;
2546 /* Find real size of result (highest non-zero byte). */
2547 if (n->base_addr)
2548 for (tmpn = n->n, rsize = 0; tmpn; tmpn >>= BITS_PER_MARKER, rsize++);
2549 else
2550 rsize = n->range;
2552 /* Zero out the bits corresponding to untouched bytes in the original
2553 gimple expression. */
2554 if (n->range < (int) sizeof (int64_t))
2556 mask = ((uint64_t) 1 << (n->range * BITS_PER_MARKER)) - 1;
2557 cmpxchg >>= (64 / BITS_PER_MARKER - n->range) * BITS_PER_MARKER;
2558 cmpnop &= mask;
2561 /* Zero out the bits corresponding to unused bytes in the result of the
2562 gimple expression. */
2563 if (rsize < n->range)
2565 if (BYTES_BIG_ENDIAN)
2567 mask = ((uint64_t) 1 << (rsize * BITS_PER_MARKER)) - 1;
2568 cmpxchg &= mask;
2569 cmpnop >>= (n->range - rsize) * BITS_PER_MARKER;
2571 else
2573 mask = ((uint64_t) 1 << (rsize * BITS_PER_MARKER)) - 1;
2574 cmpxchg >>= (n->range - rsize) * BITS_PER_MARKER;
2575 cmpnop &= mask;
2577 n->range = rsize;
2580 /* A complete byte swap should make the symbolic number start with the
2581 largest digit in the highest order byte. An unchanged symbolic number
2582 indicates a read with the same endianness as the target architecture. */
2583 if (n->n == cmpnop)
2584 *bswap = false;
2585 else if (n->n == cmpxchg)
2586 *bswap = true;
2587 else
2588 return NULL;
2590 /* A nop on a non-memory source is just useless bit manipulation;
2590 there is nothing to replace. */
2591 if (!n->base_addr && n->n == cmpnop)
2592 return NULL;
2594 n->range *= BITS_PER_UNIT;
2595 return ins_stmt;
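/* For example, if a 32-bit expression only ever produces two non-zero
   low bytes (rsize == 2), cmpxchg and cmpnop are narrowed above to
   their 2-byte forms, so a 16-bit bswap hiding inside a wider
   computation is still detected.  */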
2598 namespace {
2600 const pass_data pass_data_optimize_bswap =
2602 GIMPLE_PASS, /* type */
2603 "bswap", /* name */
2604 OPTGROUP_NONE, /* optinfo_flags */
2605 TV_NONE, /* tv_id */
2606 PROP_ssa, /* properties_required */
2607 0, /* properties_provided */
2608 0, /* properties_destroyed */
2609 0, /* todo_flags_start */
2610 0, /* todo_flags_finish */
2613 class pass_optimize_bswap : public gimple_opt_pass
2615 public:
2616 pass_optimize_bswap (gcc::context *ctxt)
2617 : gimple_opt_pass (pass_data_optimize_bswap, ctxt)
2620 /* opt_pass methods: */
2621 virtual bool gate (function *)
2623 return flag_expensive_optimizations && optimize;
2626 virtual unsigned int execute (function *);
2628 }; // class pass_optimize_bswap
2630 /* Perform the bswap optimization: replace the expression computed in the rhs
2631 of CUR_STMT by an equivalent bswap, load or load + bswap expression.
2632 Which of these alternatives replaces the rhs is given by N->base_addr
2633 (non-null if a load is needed) and BSWAP. The type, VUSE and alias set of
2634 the load to perform are also given in N, while the builtin bswap invocation
2635 is given in FNDECL. Finally, if a load is involved, INS_STMT refers to one
2636 of the load statements used to construct the rhs in CUR_STMT, and N->range
2637 gives the size of the rhs expression for maintaining some statistics.
2639 Note that if the replacement involves a load, CUR_STMT is moved just after
2640 INS_STMT to do the load with the same VUSE, which can lead to CUR_STMT
2641 changing basic block. */
2643 static bool
2644 bswap_replace (gimple *cur_stmt, gimple *ins_stmt, tree fndecl,
2645 tree bswap_type, tree load_type, struct symbolic_number *n,
2646 bool bswap)
2648 gimple_stmt_iterator gsi;
2649 tree src, tmp, tgt;
2650 gimple *bswap_stmt;
2652 gsi = gsi_for_stmt (cur_stmt);
2653 src = n->src;
2654 tgt = gimple_assign_lhs (cur_stmt);
2656 /* Need to load the value from memory first. */
2657 if (n->base_addr)
2659 gimple_stmt_iterator gsi_ins = gsi_for_stmt (ins_stmt);
2660 tree addr_expr, addr_tmp, val_expr, val_tmp;
2661 tree load_offset_ptr, aligned_load_type;
2662 gimple *addr_stmt, *load_stmt;
2663 unsigned align;
2664 HOST_WIDE_INT load_offset = 0;
2665 basic_block ins_bb, cur_bb;
2667 ins_bb = gimple_bb (ins_stmt);
2668 cur_bb = gimple_bb (cur_stmt);
2669 if (!dominated_by_p (CDI_DOMINATORS, cur_bb, ins_bb))
2670 return false;
2672 align = get_object_alignment (src);
2674 /* Move cur_stmt just before one of the loads of the original
2675 to ensure it has the same VUSE. See PR61517 for what could
2676 go wrong. */
2677 if (gimple_bb (cur_stmt) != gimple_bb (ins_stmt))
2678 reset_flow_sensitive_info (gimple_assign_lhs (cur_stmt));
2679 gsi_move_before (&gsi, &gsi_ins);
2680 gsi = gsi_for_stmt (cur_stmt);
2682 /* Compute address to load from and cast according to the size
2683 of the load. */
2684 addr_expr = build_fold_addr_expr (unshare_expr (src));
2685 if (is_gimple_mem_ref_addr (addr_expr))
2686 addr_tmp = addr_expr;
2687 else
2689 addr_tmp = make_temp_ssa_name (TREE_TYPE (addr_expr), NULL,
2690 "load_src");
2691 addr_stmt = gimple_build_assign (addr_tmp, addr_expr);
2692 gsi_insert_before (&gsi, addr_stmt, GSI_SAME_STMT);
2695 /* Perform the load. */
2696 aligned_load_type = load_type;
2697 if (align < TYPE_ALIGN (load_type))
2698 aligned_load_type = build_aligned_type (load_type, align);
2699 load_offset_ptr = build_int_cst (n->alias_set, load_offset);
2700 val_expr = fold_build2 (MEM_REF, aligned_load_type, addr_tmp,
2701 load_offset_ptr);
2703 if (!bswap)
2705 if (n->range == 16)
2706 nop_stats.found_16bit++;
2707 else if (n->range == 32)
2708 nop_stats.found_32bit++;
2709 else
2711 gcc_assert (n->range == 64);
2712 nop_stats.found_64bit++;
2715 /* Convert the result of the load if necessary. */
2716 if (!useless_type_conversion_p (TREE_TYPE (tgt), load_type))
2718 val_tmp = make_temp_ssa_name (aligned_load_type, NULL,
2719 "load_dst");
2720 load_stmt = gimple_build_assign (val_tmp, val_expr);
2721 gimple_set_vuse (load_stmt, n->vuse);
2722 gsi_insert_before (&gsi, load_stmt, GSI_SAME_STMT);
2723 gimple_assign_set_rhs_with_ops (&gsi, NOP_EXPR, val_tmp);
2725 else
2727 gimple_assign_set_rhs_with_ops (&gsi, MEM_REF, val_expr);
2728 gimple_set_vuse (cur_stmt, n->vuse);
2730 update_stmt (cur_stmt);
2732 if (dump_file)
2734 fprintf (dump_file,
2735 "%d bit load in target endianness found at: ",
2736 (int) n->range);
2737 print_gimple_stmt (dump_file, cur_stmt, 0, 0);
2739 return true;
2741 else
2743 val_tmp = make_temp_ssa_name (aligned_load_type, NULL, "load_dst");
2744 load_stmt = gimple_build_assign (val_tmp, val_expr);
2745 gimple_set_vuse (load_stmt, n->vuse);
2746 gsi_insert_before (&gsi, load_stmt, GSI_SAME_STMT);
2748 src = val_tmp;
2750 else if (TREE_CODE (src) == BIT_FIELD_REF)
2751 src = TREE_OPERAND (src, 0);
2753 if (n->range == 16)
2754 bswap_stats.found_16bit++;
2755 else if (n->range == 32)
2756 bswap_stats.found_32bit++;
2757 else
2759 gcc_assert (n->range == 64);
2760 bswap_stats.found_64bit++;
2763 tmp = src;
2765 /* Convert the src expression if necessary. */
2766 if (!useless_type_conversion_p (TREE_TYPE (tmp), bswap_type))
2768 gimple *convert_stmt;
2770 tmp = make_temp_ssa_name (bswap_type, NULL, "bswapsrc");
2771 convert_stmt = gimple_build_assign (tmp, NOP_EXPR, src);
2772 gsi_insert_before (&gsi, convert_stmt, GSI_SAME_STMT);
2775 /* The canonical form for a 16-bit bswap is a rotate expression. Only 16-bit
2776 values are handled this way, as rotating a 2N-bit value by N bits is
2777 generally not equivalent to a bswap. Consider for instance 0x01020304 r>> 16,
2778 which gives 0x03040102, while a bswap of that value is 0x04030201. */
2779 if (bswap && n->range == 16)
2781 tree count = build_int_cst (NULL, BITS_PER_UNIT);
2782 src = fold_build2 (LROTATE_EXPR, bswap_type, tmp, count);
2783 bswap_stmt = gimple_build_assign (NULL, src);
2785 else
2786 bswap_stmt = gimple_build_call (fndecl, 1, tmp);
2788 tmp = tgt;
2790 /* Convert the result if necessary. */
2791 if (!useless_type_conversion_p (TREE_TYPE (tgt), bswap_type))
2793 gimple *convert_stmt;
2795 tmp = make_temp_ssa_name (bswap_type, NULL, "bswapdst");
2796 convert_stmt = gimple_build_assign (tgt, NOP_EXPR, tmp);
2797 gsi_insert_after (&gsi, convert_stmt, GSI_SAME_STMT);
2800 gimple_set_lhs (bswap_stmt, tmp);
2802 if (dump_file)
2804 fprintf (dump_file, "%d bit bswap implementation found at: ",
2805 (int) n->range);
2806 print_gimple_stmt (dump_file, cur_stmt, 0, 0);
2809 gsi_insert_after (&gsi, bswap_stmt, GSI_SAME_STMT);
2810 gsi_remove (&gsi, true);
2811 return true;
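/* For instance, a 16-bit swap such as (uint16_t) ((x >> 8) | (x << 8))
   is replaced above by a rotate by 8 rather than by a call to a bswap
   built-in.  */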
2814 /* Find manual byte swap implementations as well as loads in a given
2815 endianness. Byte swaps are turned into a bswap builtin invocation,
2816 while endian loads are converted to a bswap builtin invocation or a
2817 simple load according to the target endianness. */
2819 unsigned int
2820 pass_optimize_bswap::execute (function *fun)
2822 basic_block bb;
2823 bool bswap32_p, bswap64_p;
2824 bool changed = false;
2825 tree bswap32_type = NULL_TREE, bswap64_type = NULL_TREE;
2827 if (BITS_PER_UNIT != 8)
2828 return 0;
2830 bswap32_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP32)
2831 && optab_handler (bswap_optab, SImode) != CODE_FOR_nothing);
2832 bswap64_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP64)
2833 && (optab_handler (bswap_optab, DImode) != CODE_FOR_nothing
2834 || (bswap32_p && word_mode == SImode)));
2836 /* Determine the argument type of the builtins. The code later on
2837 assumes that the return and argument type are the same. */
2838 if (bswap32_p)
2840 tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
2841 bswap32_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
2844 if (bswap64_p)
2846 tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
2847 bswap64_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
2850 memset (&nop_stats, 0, sizeof (nop_stats));
2851 memset (&bswap_stats, 0, sizeof (bswap_stats));
2852 calculate_dominance_info (CDI_DOMINATORS);
2854 FOR_EACH_BB_FN (bb, fun)
2856 gimple_stmt_iterator gsi;
2858 /* We do a reverse scan for bswap patterns to make sure we get the
2859 widest match. As bswap pattern matching doesn't handle previously
2860 inserted smaller bswap replacements as sub-patterns, the wider
2861 variant wouldn't be detected. */
2862 for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi);)
2864 gimple *ins_stmt, *cur_stmt = gsi_stmt (gsi);
2865 tree fndecl = NULL_TREE, bswap_type = NULL_TREE, load_type;
2866 enum tree_code code;
2867 struct symbolic_number n;
2868 bool bswap;
2870 /* This gsi_prev (&gsi) is not part of the for loop because cur_stmt
2871 might be moved to a different basic block by bswap_replace and gsi
2872 must not point to it in that case. Doing the gsi_prev here makes
2873 sure that gsi points to the statement previous to cur_stmt while
2874 still ensuring that all statements in this basic block are
2875 considered. */
2876 gsi_prev (&gsi);
2878 if (!is_gimple_assign (cur_stmt))
2879 continue;
2881 code = gimple_assign_rhs_code (cur_stmt);
2882 switch (code)
2884 case LROTATE_EXPR:
2885 case RROTATE_EXPR:
2886 if (!tree_fits_uhwi_p (gimple_assign_rhs2 (cur_stmt))
2887 || tree_to_uhwi (gimple_assign_rhs2 (cur_stmt))
2888 % BITS_PER_UNIT)
2889 continue;
2890 /* Fall through. */
2891 case BIT_IOR_EXPR:
2892 break;
2893 default:
2894 continue;
2897 ins_stmt = find_bswap_or_nop (cur_stmt, &n, &bswap);
2899 if (!ins_stmt)
2900 continue;
2902 switch (n.range)
2904 case 16:
2905 /* Already in canonical form, nothing to do. */
2906 if (code == LROTATE_EXPR || code == RROTATE_EXPR)
2907 continue;
2908 load_type = bswap_type = uint16_type_node;
2909 break;
2910 case 32:
2911 load_type = uint32_type_node;
2912 if (bswap32_p)
2914 fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
2915 bswap_type = bswap32_type;
2917 break;
2918 case 64:
2919 load_type = uint64_type_node;
2920 if (bswap64_p)
2922 fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
2923 bswap_type = bswap64_type;
2925 break;
2926 default:
2927 continue;
2930 if (bswap && !fndecl && n.range != 16)
2931 continue;
2933 if (bswap_replace (cur_stmt, ins_stmt, fndecl, bswap_type, load_type,
2934 &n, bswap))
2935 changed = true;
2939 statistics_counter_event (fun, "16-bit nop implementations found",
2940 nop_stats.found_16bit);
2941 statistics_counter_event (fun, "32-bit nop implementations found",
2942 nop_stats.found_32bit);
2943 statistics_counter_event (fun, "64-bit nop implementations found",
2944 nop_stats.found_64bit);
2945 statistics_counter_event (fun, "16-bit bswap implementations found",
2946 bswap_stats.found_16bit);
2947 statistics_counter_event (fun, "32-bit bswap implementations found",
2948 bswap_stats.found_32bit);
2949 statistics_counter_event (fun, "64-bit bswap implementations found",
2950 bswap_stats.found_64bit);
2952 return (changed ? TODO_update_ssa : 0);
2955 } // anon namespace
2957 gimple_opt_pass *
2958 make_pass_optimize_bswap (gcc::context *ctxt)
2960 return new pass_optimize_bswap (ctxt);
2963 /* Return true if stmt is a type conversion operation that can be stripped
2964 when used in a widening multiply operation. */
2965 static bool
2966 widening_mult_conversion_strippable_p (tree result_type, gimple *stmt)
2968 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
2970 if (TREE_CODE (result_type) == INTEGER_TYPE)
2972 tree op_type;
2973 tree inner_op_type;
2975 if (!CONVERT_EXPR_CODE_P (rhs_code))
2976 return false;
2978 op_type = TREE_TYPE (gimple_assign_lhs (stmt));
2980 /* If the type of OP has the same precision as the result, then
2981 we can strip this conversion. The multiply operation will be
2982 selected to create the correct extension as a by-product. */
2983 if (TYPE_PRECISION (result_type) == TYPE_PRECISION (op_type))
2984 return true;
2986 /* We can also strip a conversion if it preserves the signed-ness of
2987 the operation and doesn't narrow the range. */
2988 inner_op_type = TREE_TYPE (gimple_assign_rhs1 (stmt));
2990 /* If the inner-most type is unsigned, then we can strip any
2991 intermediate widening operation. If it's signed, then the
2992 intermediate widening operation must also be signed. */
2993 if ((TYPE_UNSIGNED (inner_op_type)
2994 || TYPE_UNSIGNED (op_type) == TYPE_UNSIGNED (inner_op_type))
2995 && TYPE_PRECISION (op_type) > TYPE_PRECISION (inner_op_type))
2996 return true;
2998 return false;
3001 return rhs_code == FIXED_CONVERT_EXPR;
3004 /* Return true if RHS is a suitable operand for a widening multiplication,
3005 assuming a target type of TYPE.
3006 There are two cases:
3008 - RHS makes some value at least twice as wide. Store that value
3009 in *NEW_RHS_OUT if so, and store its type in *TYPE_OUT.
3011 - RHS is an integer constant. Store that value in *NEW_RHS_OUT if so,
3012 but leave *TYPE_OUT untouched. */
3014 static bool
3015 is_widening_mult_rhs_p (tree type, tree rhs, tree *type_out,
3016 tree *new_rhs_out)
3018 gimple *stmt;
3019 tree type1, rhs1;
3021 if (TREE_CODE (rhs) == SSA_NAME)
3023 stmt = SSA_NAME_DEF_STMT (rhs);
3024 if (is_gimple_assign (stmt))
3026 if (! widening_mult_conversion_strippable_p (type, stmt))
3027 rhs1 = rhs;
3028 else
3030 rhs1 = gimple_assign_rhs1 (stmt);
3032 if (TREE_CODE (rhs1) == INTEGER_CST)
3034 *new_rhs_out = rhs1;
3035 *type_out = NULL;
3036 return true;
3040 else
3041 rhs1 = rhs;
3043 type1 = TREE_TYPE (rhs1);
3045 if (TREE_CODE (type1) != TREE_CODE (type)
3046 || TYPE_PRECISION (type1) * 2 > TYPE_PRECISION (type))
3047 return false;
3049 *new_rhs_out = rhs1;
3050 *type_out = type1;
3051 return true;
3054 if (TREE_CODE (rhs) == INTEGER_CST)
3056 *new_rhs_out = rhs;
3057 *type_out = NULL;
3058 return true;
3061 return false;
3064 /* Return true if STMT performs a widening multiplication, assuming the
3065 output type is TYPE. If so, store the unwidened types of the operands
3066 in *TYPE1_OUT and *TYPE2_OUT respectively. Also fill *RHS1_OUT and
3067 *RHS2_OUT such that converting those operands to types *TYPE1_OUT
3068 and *TYPE2_OUT would give the operands of the multiplication. */
3070 static bool
3071 is_widening_mult_p (gimple *stmt,
3072 tree *type1_out, tree *rhs1_out,
3073 tree *type2_out, tree *rhs2_out)
3075 tree type = TREE_TYPE (gimple_assign_lhs (stmt));
3077 if (TREE_CODE (type) != INTEGER_TYPE
3078 && TREE_CODE (type) != FIXED_POINT_TYPE)
3079 return false;
3081 if (!is_widening_mult_rhs_p (type, gimple_assign_rhs1 (stmt), type1_out,
3082 rhs1_out))
3083 return false;
3085 if (!is_widening_mult_rhs_p (type, gimple_assign_rhs2 (stmt), type2_out,
3086 rhs2_out))
3087 return false;
3089 if (*type1_out == NULL)
3091 if (*type2_out == NULL || !int_fits_type_p (*rhs1_out, *type2_out))
3092 return false;
3093 *type1_out = *type2_out;
3096 if (*type2_out == NULL)
3098 if (!int_fits_type_p (*rhs2_out, *type1_out))
3099 return false;
3100 *type2_out = *type1_out;
3103 /* Ensure that the larger of the two operands comes first. */
3104 if (TYPE_PRECISION (*type1_out) < TYPE_PRECISION (*type2_out))
3106 std::swap (*type1_out, *type2_out);
3107 std::swap (*rhs1_out, *rhs2_out);
3110 return true;
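/* Example (hypothetical): for

     long long f (int a, int b) { return (long long) a * b; }

   the multiply's operands are (long long) a and (long long) b; both
   conversions are stripped, giving *TYPE1_OUT == *TYPE2_OUT == int,
   so convert_mult_to_widen below can emit a WIDEN_MULT_EXPR when the
   target has a 32x32->64 widening multiply.  */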
3113 /* Process a single gimple statement STMT, which has a MULT_EXPR as
3114 its rhs, and try to convert it into a WIDEN_MULT_EXPR. The return
3115 value is true iff we converted the statement. */
3117 static bool
3118 convert_mult_to_widen (gimple *stmt, gimple_stmt_iterator *gsi)
3120 tree lhs, rhs1, rhs2, type, type1, type2;
3121 enum insn_code handler;
3122 machine_mode to_mode, from_mode, actual_mode;
3123 optab op;
3124 int actual_precision;
3125 location_t loc = gimple_location (stmt);
3126 bool from_unsigned1, from_unsigned2;
3128 lhs = gimple_assign_lhs (stmt);
3129 type = TREE_TYPE (lhs);
3130 if (TREE_CODE (type) != INTEGER_TYPE)
3131 return false;
3133 if (!is_widening_mult_p (stmt, &type1, &rhs1, &type2, &rhs2))
3134 return false;
3136 to_mode = TYPE_MODE (type);
3137 from_mode = TYPE_MODE (type1);
3138 from_unsigned1 = TYPE_UNSIGNED (type1);
3139 from_unsigned2 = TYPE_UNSIGNED (type2);
3141 if (from_unsigned1 && from_unsigned2)
3142 op = umul_widen_optab;
3143 else if (!from_unsigned1 && !from_unsigned2)
3144 op = smul_widen_optab;
3145 else
3146 op = usmul_widen_optab;
3148 handler = find_widening_optab_handler_and_mode (op, to_mode, from_mode,
3149 0, &actual_mode);
3151 if (handler == CODE_FOR_nothing)
3153 if (op != smul_widen_optab)
3155 /* We can use a signed multiply with unsigned types as long as
3156 there is a wider mode to use, or it is the smaller of the two
3157 types that is unsigned. Note that type1 >= type2, always. */
3158 if ((TYPE_UNSIGNED (type1)
3159 && TYPE_PRECISION (type1) == GET_MODE_PRECISION (from_mode))
3160 || (TYPE_UNSIGNED (type2)
3161 && TYPE_PRECISION (type2) == GET_MODE_PRECISION (from_mode)))
3163 from_mode = GET_MODE_WIDER_MODE (from_mode);
3164 if (GET_MODE_SIZE (to_mode) <= GET_MODE_SIZE (from_mode))
3165 return false;
3168 op = smul_widen_optab;
3169 handler = find_widening_optab_handler_and_mode (op, to_mode,
3170 from_mode, 0,
3171 &actual_mode);
3173 if (handler == CODE_FOR_nothing)
3174 return false;
3176 from_unsigned1 = from_unsigned2 = false;
3178 else
3179 return false;
3182 /* Ensure that the inputs to the handler are in the correct precision
3183 for the opcode. This will be the full mode size. */
3184 actual_precision = GET_MODE_PRECISION (actual_mode);
3185 if (2 * actual_precision > TYPE_PRECISION (type))
3186 return false;
3187 if (actual_precision != TYPE_PRECISION (type1)
3188 || from_unsigned1 != TYPE_UNSIGNED (type1))
3189 rhs1 = build_and_insert_cast (gsi, loc,
3190 build_nonstandard_integer_type
3191 (actual_precision, from_unsigned1), rhs1);
3192 if (actual_precision != TYPE_PRECISION (type2)
3193 || from_unsigned2 != TYPE_UNSIGNED (type2))
3194 rhs2 = build_and_insert_cast (gsi, loc,
3195 build_nonstandard_integer_type
3196 (actual_precision, from_unsigned2), rhs2);
3198 /* Handle constants. */
3199 if (TREE_CODE (rhs1) == INTEGER_CST)
3200 rhs1 = fold_convert (type1, rhs1);
3201 if (TREE_CODE (rhs2) == INTEGER_CST)
3202 rhs2 = fold_convert (type2, rhs2);
3204 gimple_assign_set_rhs1 (stmt, rhs1);
3205 gimple_assign_set_rhs2 (stmt, rhs2);
3206 gimple_assign_set_rhs_code (stmt, WIDEN_MULT_EXPR);
3207 update_stmt (stmt);
3208 widen_mul_stats.widen_mults_inserted++;
3209 return true;
3212 /* Process a single gimple statement STMT, which is found at the
3213 iterator GSI and has either a PLUS_EXPR or a MINUS_EXPR as its
3214 rhs (given by CODE), and try to convert it into a
3215 WIDEN_MULT_PLUS_EXPR or a WIDEN_MULT_MINUS_EXPR. The return value
3216 is true iff we converted the statement. */
3218 static bool
3219 convert_plusminus_to_widen (gimple_stmt_iterator *gsi, gimple *stmt,
3220 enum tree_code code)
3222 gimple *rhs1_stmt = NULL, *rhs2_stmt = NULL;
3223 gimple *conv1_stmt = NULL, *conv2_stmt = NULL, *conv_stmt;
3224 tree type, type1, type2, optype;
3225 tree lhs, rhs1, rhs2, mult_rhs1, mult_rhs2, add_rhs;
3226 enum tree_code rhs1_code = ERROR_MARK, rhs2_code = ERROR_MARK;
3227 optab this_optab;
3228 enum tree_code wmult_code;
3229 enum insn_code handler;
3230 machine_mode to_mode, from_mode, actual_mode;
3231 location_t loc = gimple_location (stmt);
3232 int actual_precision;
3233 bool from_unsigned1, from_unsigned2;
3235 lhs = gimple_assign_lhs (stmt);
3236 type = TREE_TYPE (lhs);
3237 if (TREE_CODE (type) != INTEGER_TYPE
3238 && TREE_CODE (type) != FIXED_POINT_TYPE)
3239 return false;
3241 if (code == MINUS_EXPR)
3242 wmult_code = WIDEN_MULT_MINUS_EXPR;
3243 else
3244 wmult_code = WIDEN_MULT_PLUS_EXPR;
3246 rhs1 = gimple_assign_rhs1 (stmt);
3247 rhs2 = gimple_assign_rhs2 (stmt);
3249 if (TREE_CODE (rhs1) == SSA_NAME)
3251 rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
3252 if (is_gimple_assign (rhs1_stmt))
3253 rhs1_code = gimple_assign_rhs_code (rhs1_stmt);
3256 if (TREE_CODE (rhs2) == SSA_NAME)
3258 rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
3259 if (is_gimple_assign (rhs2_stmt))
3260 rhs2_code = gimple_assign_rhs_code (rhs2_stmt);
3263 /* Allow for one conversion statement between the multiply
3264 and the addition/subtraction statement. If there is more than
3265 one conversion then we assume they would invalidate this
3266 transformation. If that's not the case then they should have
3267 been folded before now. */
3268 if (CONVERT_EXPR_CODE_P (rhs1_code))
3270 conv1_stmt = rhs1_stmt;
3271 rhs1 = gimple_assign_rhs1 (rhs1_stmt);
3272 if (TREE_CODE (rhs1) == SSA_NAME)
3274 rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
3275 if (is_gimple_assign (rhs1_stmt))
3276 rhs1_code = gimple_assign_rhs_code (rhs1_stmt);
3278 else
3279 return false;
3281 if (CONVERT_EXPR_CODE_P (rhs2_code))
3283 conv2_stmt = rhs2_stmt;
3284 rhs2 = gimple_assign_rhs1 (rhs2_stmt);
3285 if (TREE_CODE (rhs2) == SSA_NAME)
3287 rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
3288 if (is_gimple_assign (rhs2_stmt))
3289 rhs2_code = gimple_assign_rhs_code (rhs2_stmt);
3291 else
3292 return false;
3295 /* If code is WIDEN_MULT_EXPR then it would seem unnecessary to call
3296 is_widening_mult_p, but we still need the returned rhs values.
3298 It might also appear that it would be sufficient to use the existing
3299 operands of the widening multiply, but that would limit the choice of
3300 multiply-and-accumulate instructions.
3302 If the widened-multiplication result has more than one use, it is
3303 probably wiser not to do the conversion. */
3304 if (code == PLUS_EXPR
3305 && (rhs1_code == MULT_EXPR || rhs1_code == WIDEN_MULT_EXPR))
3307 if (!has_single_use (rhs1)
3308 || !is_widening_mult_p (rhs1_stmt, &type1, &mult_rhs1,
3309 &type2, &mult_rhs2))
3310 return false;
3311 add_rhs = rhs2;
3312 conv_stmt = conv1_stmt;
3314 else if (rhs2_code == MULT_EXPR || rhs2_code == WIDEN_MULT_EXPR)
3316 if (!has_single_use (rhs2)
3317 || !is_widening_mult_p (rhs2_stmt, &type1, &mult_rhs1,
3318 &type2, &mult_rhs2))
3319 return false;
3320 add_rhs = rhs1;
3321 conv_stmt = conv2_stmt;
3323 else
3324 return false;
3326 to_mode = TYPE_MODE (type);
3327 from_mode = TYPE_MODE (type1);
3328 from_unsigned1 = TYPE_UNSIGNED (type1);
3329 from_unsigned2 = TYPE_UNSIGNED (type2);
3330 optype = type1;
3332 /* There's no such thing as a mixed sign madd yet, so use a wider mode. */
3333 if (from_unsigned1 != from_unsigned2)
3335 if (!INTEGRAL_TYPE_P (type))
3336 return false;
3337 /* We can use a signed multiply with unsigned types as long as
3338 there is a wider mode to use, or it is the smaller of the two
3339 types that is unsigned. Note that type1 >= type2, always. */
3340 if ((from_unsigned1
3341 && TYPE_PRECISION (type1) == GET_MODE_PRECISION (from_mode))
3342 || (from_unsigned2
3343 && TYPE_PRECISION (type2) == GET_MODE_PRECISION (from_mode)))
3345 from_mode = GET_MODE_WIDER_MODE (from_mode);
3346 if (GET_MODE_SIZE (from_mode) >= GET_MODE_SIZE (to_mode))
3347 return false;
3350 from_unsigned1 = from_unsigned2 = false;
3351 optype = build_nonstandard_integer_type (GET_MODE_PRECISION (from_mode),
3352 false);
3355 /* If there was a conversion between the multiply and addition
3356 then we need to make sure it fits a multiply-and-accumulate.
3357 There should be a single mode change which does not change the
3358 value. */
3359 if (conv_stmt)
3361 /* We use the original, unmodified data types for this. */
3362 tree from_type = TREE_TYPE (gimple_assign_rhs1 (conv_stmt));
3363 tree to_type = TREE_TYPE (gimple_assign_lhs (conv_stmt));
3364 int data_size = TYPE_PRECISION (type1) + TYPE_PRECISION (type2);
3365 bool is_unsigned = TYPE_UNSIGNED (type1) && TYPE_UNSIGNED (type2);
3367 if (TYPE_PRECISION (from_type) > TYPE_PRECISION (to_type))
3369 /* Conversion is a truncate. */
3370 if (TYPE_PRECISION (to_type) < data_size)
3371 return false;
3373 else if (TYPE_PRECISION (from_type) < TYPE_PRECISION (to_type))
3375 /* Conversion is an extend. Check it's the right sort. */
3376 if (TYPE_UNSIGNED (from_type) != is_unsigned
3377 && !(is_unsigned && TYPE_PRECISION (from_type) > data_size))
3378 return false;
3380 /* else convert is a no-op for our purposes. */
3383 /* Verify that the machine can perform a widening multiply
3384 accumulate in this mode/signedness combination, otherwise
3385 this transformation is likely to pessimize code. */
3386 this_optab = optab_for_tree_code (wmult_code, optype, optab_default);
3387 handler = find_widening_optab_handler_and_mode (this_optab, to_mode,
3388 from_mode, 0, &actual_mode);
3390 if (handler == CODE_FOR_nothing)
3391 return false;
3393 /* Ensure that the inputs to the handler are in the correct precision
3394 for the opcode. This will be the full mode size. */
3395 actual_precision = GET_MODE_PRECISION (actual_mode);
3396 if (actual_precision != TYPE_PRECISION (type1)
3397 || from_unsigned1 != TYPE_UNSIGNED (type1))
3398 mult_rhs1 = build_and_insert_cast (gsi, loc,
3399 build_nonstandard_integer_type
3400 (actual_precision, from_unsigned1),
3401 mult_rhs1);
3402 if (actual_precision != TYPE_PRECISION (type2)
3403 || from_unsigned2 != TYPE_UNSIGNED (type2))
3404 mult_rhs2 = build_and_insert_cast (gsi, loc,
3405 build_nonstandard_integer_type
3406 (actual_precision, from_unsigned2),
3407 mult_rhs2);
3409 if (!useless_type_conversion_p (type, TREE_TYPE (add_rhs)))
3410 add_rhs = build_and_insert_cast (gsi, loc, type, add_rhs);
3412 /* Handle constants. */
3413 if (TREE_CODE (mult_rhs1) == INTEGER_CST)
3414 mult_rhs1 = fold_convert (type1, mult_rhs1);
3415 if (TREE_CODE (mult_rhs2) == INTEGER_CST)
3416 mult_rhs2 = fold_convert (type2, mult_rhs2);
3418 gimple_assign_set_rhs_with_ops (gsi, wmult_code, mult_rhs1, mult_rhs2,
3419 add_rhs);
3420 update_stmt (gsi_stmt (*gsi));
3421 widen_mul_stats.maccs_inserted++;
3422 return true;
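/* Continuing the example above (hypothetical):

     long long f (int a, int b, long long c)
     { return (long long) a * b + c; }

   matches here and becomes a single WIDEN_MULT_PLUS_EXPR when the
   target implements a widening multiply-accumulate (e.g. via
   smadd_widen_optab).  */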
3425 /* Combine the multiplication at MUL_STMT with operands OP1 and OP2
3426 with uses in additions and subtractions to form fused multiply-add
3427 operations. Return true if successful and MUL_STMT should be removed. */
3429 static bool
3430 convert_mult_to_fma (gimple *mul_stmt, tree op1, tree op2)
3432 tree mul_result = gimple_get_lhs (mul_stmt);
3433 tree type = TREE_TYPE (mul_result);
3434 gimple *use_stmt, *neguse_stmt;
3435 gassign *fma_stmt;
3436 use_operand_p use_p;
3437 imm_use_iterator imm_iter;
3439 if (FLOAT_TYPE_P (type)
3440 && flag_fp_contract_mode == FP_CONTRACT_OFF)
3441 return false;
3443 /* We don't want to do bitfield reduction ops. */
3444 if (INTEGRAL_TYPE_P (type)
3445 && (TYPE_PRECISION (type)
3446 != GET_MODE_PRECISION (TYPE_MODE (type))))
3447 return false;
3449 /* If the target doesn't support it, don't generate it. We assume that
3450 if fma isn't available then fms, fnma or fnms are not either. */
3451 if (optab_handler (fma_optab, TYPE_MODE (type)) == CODE_FOR_nothing)
3452 return false;
3454 /* If the multiplication has zero uses, it is kept around probably because
3455 of -fnon-call-exceptions. Don't optimize it away in that case;
3456 that is DCE's job. */
3457 if (has_zero_uses (mul_result))
3458 return false;
3460 /* Make sure that the multiplication statement becomes dead after
3461 the transformation, i.e. that all uses are transformed to FMAs.
3462 This means we assume that an FMA operation has the same cost
3463 as an addition. */
3464 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, mul_result)
3466 enum tree_code use_code;
3467 tree result = mul_result;
3468 bool negate_p = false;
3470 use_stmt = USE_STMT (use_p);
3472 if (is_gimple_debug (use_stmt))
3473 continue;
3475 /* For now restrict this operation to single basic blocks. In theory
3476 we would want to support sinking the multiplication in
3477 m = a*b;
3478 if ()
3479 ma = m + c;
3480 else
3481 d = m;
3482 to form a fma in the then block and sink the multiplication to the
3483 else block. */
3484 if (gimple_bb (use_stmt) != gimple_bb (mul_stmt))
3485 return false;
3487 if (!is_gimple_assign (use_stmt))
3488 return false;
3490 use_code = gimple_assign_rhs_code (use_stmt);
3492 /* A negate on the multiplication leads to FNMA. */
3493 if (use_code == NEGATE_EXPR)
3495 ssa_op_iter iter;
3496 use_operand_p usep;
3498 result = gimple_assign_lhs (use_stmt);
3500 /* Make sure the negate statement becomes dead with this
3501 single transformation. */
3502 if (!single_imm_use (gimple_assign_lhs (use_stmt),
3503 &use_p, &neguse_stmt))
3504 return false;
3506 /* Make sure the multiplication isn't also used on that stmt. */
3507 FOR_EACH_PHI_OR_STMT_USE (usep, neguse_stmt, iter, SSA_OP_USE)
3508 if (USE_FROM_PTR (usep) == mul_result)
3509 return false;
3511 /* Re-validate. */
3512 use_stmt = neguse_stmt;
3513 if (gimple_bb (use_stmt) != gimple_bb (mul_stmt))
3514 return false;
3515 if (!is_gimple_assign (use_stmt))
3516 return false;
3518 use_code = gimple_assign_rhs_code (use_stmt);
3519 negate_p = true;
3522 switch (use_code)
3524 case MINUS_EXPR:
3525 if (gimple_assign_rhs2 (use_stmt) == result)
3526 negate_p = !negate_p;
3527 break;
3528 case PLUS_EXPR:
3529 break;
3530 default:
3531 /* FMA can only be formed from PLUS and MINUS. */
3532 return false;
3535 /* If the subtrahend (gimple_assign_rhs2 (use_stmt)) is computed
3536 by a MULT_EXPR that we'll visit later, we might be able to
3537 get a more profitable match with fnma.
3538 OTOH, if we don't, a negate / fma pair likely has lower latency
3539 than a mult / subtract pair. */
3540 if (use_code == MINUS_EXPR && !negate_p
3541 && gimple_assign_rhs1 (use_stmt) == result
3542 && optab_handler (fms_optab, TYPE_MODE (type)) == CODE_FOR_nothing
3543 && optab_handler (fnma_optab, TYPE_MODE (type)) != CODE_FOR_nothing)
3545 tree rhs2 = gimple_assign_rhs2 (use_stmt);
3547 if (TREE_CODE (rhs2) == SSA_NAME)
3549 gimple *stmt2 = SSA_NAME_DEF_STMT (rhs2);
3550 if (has_single_use (rhs2)
3551 && is_gimple_assign (stmt2)
3552 && gimple_assign_rhs_code (stmt2) == MULT_EXPR)
3553 return false;
3557 /* We can't handle a * b + a * b. */
3558 if (gimple_assign_rhs1 (use_stmt) == gimple_assign_rhs2 (use_stmt))
3559 return false;
3561 /* While it is possible to validate whether or not the exact form
3562 that we've recognized is available in the backend, the assumption
3563 is that the transformation is never a loss. For instance, suppose
3564 the target only has the plain FMA pattern available. Consider
3565 a*b-c -> fma(a,b,-c): we've exchanged MUL+SUB for FMA+NEG, which
3566 is still two operations. Consider -(a*b)-c -> fma(-a,b,-c): we
3567 still have 3 operations, but in the FMA form the two NEGs are
3568 independent and could be run in parallel. */
3571 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, mul_result)
3573 gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
3574 enum tree_code use_code;
3575 tree addop, mulop1 = op1, result = mul_result;
3576 bool negate_p = false;
3578 if (is_gimple_debug (use_stmt))
3579 continue;
3581 use_code = gimple_assign_rhs_code (use_stmt);
3582 if (use_code == NEGATE_EXPR)
3584 result = gimple_assign_lhs (use_stmt);
3585 single_imm_use (gimple_assign_lhs (use_stmt), &use_p, &neguse_stmt);
3586 gsi_remove (&gsi, true);
3587 release_defs (use_stmt);
3589 use_stmt = neguse_stmt;
3590 gsi = gsi_for_stmt (use_stmt);
3591 use_code = gimple_assign_rhs_code (use_stmt);
3592 negate_p = true;
3595 if (gimple_assign_rhs1 (use_stmt) == result)
3597 addop = gimple_assign_rhs2 (use_stmt);
3598 /* a * b - c -> a * b + (-c) */
3599 if (gimple_assign_rhs_code (use_stmt) == MINUS_EXPR)
3600 addop = force_gimple_operand_gsi (&gsi,
3601 build1 (NEGATE_EXPR,
3602 type, addop),
3603 true, NULL_TREE, true,
3604 GSI_SAME_STMT);
3606 else
3608 addop = gimple_assign_rhs1 (use_stmt);
3609 /* a - b * c -> (-b) * c + a */
3610 if (gimple_assign_rhs_code (use_stmt) == MINUS_EXPR)
3611 negate_p = !negate_p;
3614 if (negate_p)
3615 mulop1 = force_gimple_operand_gsi (&gsi,
3616 build1 (NEGATE_EXPR,
3617 type, mulop1),
3618 true, NULL_TREE, true,
3619 GSI_SAME_STMT);
3621 fma_stmt = gimple_build_assign (gimple_assign_lhs (use_stmt),
3622 FMA_EXPR, mulop1, op2, addop);
3623 gsi_replace (&gsi, fma_stmt, true);
3624 widen_mul_stats.fmas_inserted++;
3627 return true;
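/* Example (hypothetical, -ffp-contract=fast on a target with fma):

     d = a * b;
     e = d + c;    becomes    e = FMA <a, b, c>;

   and if the use is reached through a negate, t = -d; e = t + c, the
   negate is folded into the multiplication operand, giving
   e = FMA <-a, b, c> with the negate emitted as a separate stmt.  */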
3631 /* Helper function of match_uaddsub_overflow. Return 1
3632 if USE_STMT is an unsigned overflow check (ovf != 0) for
3633 STMT, -1 if USE_STMT is an unsigned overflow check (ovf == 0),
3634 and 0 otherwise. */
3636 static int
3637 uaddsub_overflow_check_p (gimple *stmt, gimple *use_stmt)
3639 enum tree_code ccode = ERROR_MARK;
3640 tree crhs1 = NULL_TREE, crhs2 = NULL_TREE;
3641 if (gimple_code (use_stmt) == GIMPLE_COND)
3643 ccode = gimple_cond_code (use_stmt);
3644 crhs1 = gimple_cond_lhs (use_stmt);
3645 crhs2 = gimple_cond_rhs (use_stmt);
3647 else if (is_gimple_assign (use_stmt))
3649 if (gimple_assign_rhs_class (use_stmt) == GIMPLE_BINARY_RHS)
3651 ccode = gimple_assign_rhs_code (use_stmt);
3652 crhs1 = gimple_assign_rhs1 (use_stmt);
3653 crhs2 = gimple_assign_rhs2 (use_stmt);
3655 else if (gimple_assign_rhs_code (use_stmt) == COND_EXPR)
3657 tree cond = gimple_assign_rhs1 (use_stmt);
3658 if (COMPARISON_CLASS_P (cond))
3660 ccode = TREE_CODE (cond);
3661 crhs1 = TREE_OPERAND (cond, 0);
3662 crhs2 = TREE_OPERAND (cond, 1);
3664 else
3665 return 0;
3667 else
3668 return 0;
3670 else
3671 return 0;
3673 if (TREE_CODE_CLASS (ccode) != tcc_comparison)
3674 return 0;
3676 enum tree_code code = gimple_assign_rhs_code (stmt);
3677 tree lhs = gimple_assign_lhs (stmt);
3678 tree rhs1 = gimple_assign_rhs1 (stmt);
3679 tree rhs2 = gimple_assign_rhs2 (stmt);
3681 switch (ccode)
3683 case GT_EXPR:
3684 case LE_EXPR:
3685 /* r = a - b; r > a or r <= a
3686 r = a + b; a > r or a <= r or b > r or b <= r. */
3687 if ((code == MINUS_EXPR && crhs1 == lhs && crhs2 == rhs1)
3688 || (code == PLUS_EXPR && (crhs1 == rhs1 || crhs1 == rhs2)
3689 && crhs2 == lhs))
3690 return ccode == GT_EXPR ? 1 : -1;
3691 break;
3692 case LT_EXPR:
3693 case GE_EXPR:
3694 /* r = a - b; a < r or a >= r
3695 r = a + b; r < a or r >= a or r < b or r >= b. */
3696 if ((code == MINUS_EXPR && crhs1 == rhs1 && crhs2 == lhs)
3697 || (code == PLUS_EXPR && crhs1 == lhs
3698 && (crhs2 == rhs1 || crhs2 == rhs2)))
3699 return ccode == LT_EXPR ? 1 : -1;
3700 break;
3701 default:
3702 break;
3704 return 0;
3707 /* Recognize for unsigned x
3708 x = y - z;
3709 if (x > y)
3710 where there are other uses of x and replace it with
3711 _7 = SUB_OVERFLOW (y, z);
3712 x = REALPART_EXPR <_7>;
3713 _8 = IMAGPART_EXPR <_7>;
3714 if (_8)
3715 and similarly for addition. */
3717 static bool
3718 match_uaddsub_overflow (gimple_stmt_iterator *gsi, gimple *stmt,
3719 enum tree_code code)
3721 tree lhs = gimple_assign_lhs (stmt);
3722 tree type = TREE_TYPE (lhs);
3723 use_operand_p use_p;
3724 imm_use_iterator iter;
3725 bool use_seen = false;
3726 bool ovf_use_seen = false;
3727 gimple *use_stmt;
3729 gcc_checking_assert (code == PLUS_EXPR || code == MINUS_EXPR);
3730 if (!INTEGRAL_TYPE_P (type)
3731 || !TYPE_UNSIGNED (type)
3732 || has_zero_uses (lhs)
3733 || has_single_use (lhs)
3734 || optab_handler (code == PLUS_EXPR ? uaddv4_optab : usubv4_optab,
3735 TYPE_MODE (type)) == CODE_FOR_nothing)
3736 return false;
3738 FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
3740 use_stmt = USE_STMT (use_p);
3741 if (is_gimple_debug (use_stmt))
3742 continue;
3744 if (uaddsub_overflow_check_p (stmt, use_stmt))
3745 ovf_use_seen = true;
3746 else
3747 use_seen = true;
3748 if (ovf_use_seen && use_seen)
3749 break;
3752 if (!ovf_use_seen || !use_seen)
3753 return false;
3755 tree ctype = build_complex_type (type);
3756 tree rhs1 = gimple_assign_rhs1 (stmt);
3757 tree rhs2 = gimple_assign_rhs2 (stmt);
3758 gcall *g = gimple_build_call_internal (code == PLUS_EXPR
3759 ? IFN_ADD_OVERFLOW : IFN_SUB_OVERFLOW,
3760 2, rhs1, rhs2);
3761 tree ctmp = make_ssa_name (ctype);
3762 gimple_call_set_lhs (g, ctmp);
3763 gsi_insert_before (gsi, g, GSI_SAME_STMT);
3764 gassign *g2 = gimple_build_assign (lhs, REALPART_EXPR,
3765 build1 (REALPART_EXPR, type, ctmp));
3766 gsi_replace (gsi, g2, true);
3767 tree ovf = make_ssa_name (type);
3768 g2 = gimple_build_assign (ovf, IMAGPART_EXPR,
3769 build1 (IMAGPART_EXPR, type, ctmp));
3770 gsi_insert_after (gsi, g2, GSI_NEW_STMT);
3772 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
3774 if (is_gimple_debug (use_stmt))
3775 continue;
3777 int ovf_use = uaddsub_overflow_check_p (stmt, use_stmt);
3778 if (ovf_use == 0)
3779 continue;
3780 if (gimple_code (use_stmt) == GIMPLE_COND)
3782 gcond *cond_stmt = as_a <gcond *> (use_stmt);
3783 gimple_cond_set_lhs (cond_stmt, ovf);
3784 gimple_cond_set_rhs (cond_stmt, build_int_cst (type, 0));
3785 gimple_cond_set_code (cond_stmt, ovf_use == 1 ? NE_EXPR : EQ_EXPR);
3787 else
3789 gcc_checking_assert (is_gimple_assign (use_stmt));
3790 if (gimple_assign_rhs_class (use_stmt) == GIMPLE_BINARY_RHS)
3792 gimple_assign_set_rhs1 (use_stmt, ovf);
3793 gimple_assign_set_rhs2 (use_stmt, build_int_cst (type, 0));
3794 gimple_assign_set_rhs_code (use_stmt,
3795 ovf_use == 1 ? NE_EXPR : EQ_EXPR);
3797 else
3799 gcc_checking_assert (gimple_assign_rhs_code (use_stmt)
3800 == COND_EXPR);
3801 tree cond = build2 (ovf_use == 1 ? NE_EXPR : EQ_EXPR,
3802 boolean_type_node, ovf,
3803 build_int_cst (type, 0));
3804 gimple_assign_set_rhs1 (use_stmt, cond);
3807 update_stmt (use_stmt);
3809 return true;
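/* For illustration (a sketch, not part of the pass): the rewrite just
   performed corresponds at the source level to turning

     unsigned int x = y - z;   /* x also has uses besides the check.  */
     if (x > y)
       handle_overflow ();     /* handle_overflow is a placeholder.  */

   into the equivalent of

     unsigned int x;
     if (__builtin_sub_overflow (y, z, &x))
       handle_overflow ();

   i.e. a single IFN_SUB_OVERFLOW call whose REALPART_EXPR is the
   difference and whose IMAGPART_EXPR is the overflow flag, which the
   usubv4 pattern can expand to one flag-setting subtraction.  */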
3812 /* Return true if the target supports divmod. */
3814 static bool
3815 target_supports_divmod_p (optab divmod_optab, optab div_optab, machine_mode mode)
3817 /* If the target supports a hardware divmod insn, use it. */
3818 if (optab_handler (divmod_optab, mode) != CODE_FOR_nothing)
3819 return true;
3821 /* Check if a libfunc for divmod is available. */
3822 rtx libfunc = optab_libfunc (divmod_optab, mode);
3823 if (libfunc != NULL_RTX)
3825 /* If an optab_handler exists for div_optab, perhaps in a wider mode,
3826 we don't want to use the libfunc even if it exists for the given mode. */
3827 for (machine_mode div_mode = mode;
3828 div_mode != VOIDmode;
3829 div_mode = GET_MODE_WIDER_MODE (div_mode))
3830 if (optab_handler (div_optab, div_mode) != CODE_FOR_nothing)
3831 return false;
3833 return targetm.expand_divmod_libfunc != NULL;
3836 return false;
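/* Example of the intent (an assumed target, for illustration only): on a
   32-bit target with no hardware division in DImode or wider, the pair

     unsigned long long q = a / b, r = a % b;

   would need two libcalls (__udivdi3 and __umoddi3); when this predicate
   holds, both results can instead come from one combined libfunc such as
   __udivmoddi4, expanded through the targetm.expand_divmod_libfunc
   hook.  */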
3839 /* Check if stmt is a candidate for the divmod transform. */
3841 static bool
3842 divmod_candidate_p (gassign *stmt)
3844 tree type = TREE_TYPE (gimple_assign_lhs (stmt));
3845 enum machine_mode mode = TYPE_MODE (type);
3846 optab divmod_optab, div_optab;
3848 if (TYPE_UNSIGNED (type))
3850 divmod_optab = udivmod_optab;
3851 div_optab = udiv_optab;
3853 else
3855 divmod_optab = sdivmod_optab;
3856 div_optab = sdiv_optab;
3859 tree op1 = gimple_assign_rhs1 (stmt);
3860 tree op2 = gimple_assign_rhs2 (stmt);
3862 /* Disable the transform if either is a constant, since division-by-constant
3863 may have specialized expansion. */
3864 if (CONSTANT_CLASS_P (op1) || CONSTANT_CLASS_P (op2))
3865 return false;
3867 /* Exclude the case where TYPE_OVERFLOW_TRAPS (type) as that should
3868 expand using the [su]divv optabs. */
3869 if (TYPE_OVERFLOW_TRAPS (type))
3870 return false;
3872 if (!target_supports_divmod_p (divmod_optab, div_optab, mode))
3873 return false;
3875 return true;
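/* A quick example of the guards above (sketch):

     q = a / b;  r = a % b;     /* candidate: both operands variable.  */

   qualifies, while

     q = a / 16;  r = a % 16;   /* rejected: constant divisor.  */

   does not, because division by a constant typically has a cheaper
   specialized expansion (shifts and multiplies) that a DIVMOD call would
   defeat.  */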
3878 /* This function looks for:
3879 t1 = a TRUNC_DIV_EXPR b;
3880 t2 = a TRUNC_MOD_EXPR b;
3881 and transforms it to the following sequence:
3882 complex_tmp = DIVMOD (a, b);
3883 t1 = REALPART_EXPR (complex_tmp);
3884 t2 = IMAGPART_EXPR (complex_tmp);
3885 For conditions enabling the transform see divmod_candidate_p().
3887 The pass has three parts:
3888 1) Find top_stmt which is trunc_div or trunc_mod stmt and dominates all
3889 other trunc_div_expr and trunc_mod_expr stmts.
3890 2) Add top_stmt and all trunc_div and trunc_mod stmts dominated by top_stmt
3891 to stmts vector.
3892 3) Insert DIVMOD call just before top_stmt and update entries in
3893 stmts vector to use return value of DIVMOD (REALPART_EXPR for div,
3894 IMAGPART_EXPR for mod). */
3896 static bool
3897 convert_to_divmod (gassign *stmt)
3899 if (stmt_can_throw_internal (stmt)
3900 || !divmod_candidate_p (stmt))
3901 return false;
3903 tree op1 = gimple_assign_rhs1 (stmt);
3904 tree op2 = gimple_assign_rhs2 (stmt);
3906 imm_use_iterator use_iter;
3907 gimple *use_stmt;
3908 auto_vec<gimple *> stmts;
3910 gimple *top_stmt = stmt;
3911 basic_block top_bb = gimple_bb (stmt);
3913 /* Part 1: Try to set top_stmt to the "topmost" stmt that dominates
3914 at least stmt and possibly other trunc_div/trunc_mod stmts
3915 having the same operands as stmt. */
3917 FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, op1)
3919 if (is_gimple_assign (use_stmt)
3920 && (gimple_assign_rhs_code (use_stmt) == TRUNC_DIV_EXPR
3921 || gimple_assign_rhs_code (use_stmt) == TRUNC_MOD_EXPR)
3922 && operand_equal_p (op1, gimple_assign_rhs1 (use_stmt), 0)
3923 && operand_equal_p (op2, gimple_assign_rhs2 (use_stmt), 0))
3925 if (stmt_can_throw_internal (use_stmt))
3926 continue;
3928 basic_block bb = gimple_bb (use_stmt);
3930 if (bb == top_bb)
3932 if (gimple_uid (use_stmt) < gimple_uid (top_stmt))
3933 top_stmt = use_stmt;
3935 else if (dominated_by_p (CDI_DOMINATORS, top_bb, bb))
3937 top_bb = bb;
3938 top_stmt = use_stmt;
3943 tree top_op1 = gimple_assign_rhs1 (top_stmt);
3944 tree top_op2 = gimple_assign_rhs2 (top_stmt);
3946 stmts.safe_push (top_stmt);
3947 bool div_seen = (gimple_assign_rhs_code (top_stmt) == TRUNC_DIV_EXPR);
3949 /* Part 2: Add all trunc_div/trunc_mod statements dominated by top_bb
3950 to stmts vector. The 2nd loop will always add stmt to stmts vector, since
3951 gimple_bb (top_stmt) dominates gimple_bb (stmt), so the
3952 2nd loop ends up adding at least a single trunc_mod_expr stmt. */
3954 FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, top_op1)
3956 if (is_gimple_assign (use_stmt)
3957 && (gimple_assign_rhs_code (use_stmt) == TRUNC_DIV_EXPR
3958 || gimple_assign_rhs_code (use_stmt) == TRUNC_MOD_EXPR)
3959 && operand_equal_p (top_op1, gimple_assign_rhs1 (use_stmt), 0)
3960 && operand_equal_p (top_op2, gimple_assign_rhs2 (use_stmt), 0))
3962 if (use_stmt == top_stmt
3963 || stmt_can_throw_internal (use_stmt)
3964 || !dominated_by_p (CDI_DOMINATORS, gimple_bb (use_stmt), top_bb))
3965 continue;
3967 stmts.safe_push (use_stmt);
3968 if (gimple_assign_rhs_code (use_stmt) == TRUNC_DIV_EXPR)
3969 div_seen = true;
3973 if (!div_seen)
3974 return false;
3976 /* Part 3: Create a call to the internal function DIVMOD:
3977 divmod_tmp = DIVMOD (op1, op2). */
3979 gcall *call_stmt = gimple_build_call_internal (IFN_DIVMOD, 2, op1, op2);
3980 tree res = make_temp_ssa_name (build_complex_type (TREE_TYPE (op1)),
3981 call_stmt, "divmod_tmp");
3982 gimple_call_set_lhs (call_stmt, res);
3984 /* Insert the call before top_stmt. */
3985 gimple_stmt_iterator top_stmt_gsi = gsi_for_stmt (top_stmt);
3986 gsi_insert_before (&top_stmt_gsi, call_stmt, GSI_SAME_STMT);
3988 widen_mul_stats.divmod_calls_inserted++;
3990 /* Update all statements in stmts vector:
3991 lhs = op1 TRUNC_DIV_EXPR op2 -> lhs = REALPART_EXPR<divmod_tmp>
3992 lhs = op1 TRUNC_MOD_EXPR op2 -> lhs = IMAGPART_EXPR<divmod_tmp>. */
3994 for (unsigned i = 0; stmts.iterate (i, &use_stmt); ++i)
3996 tree new_rhs;
3998 switch (gimple_assign_rhs_code (use_stmt))
4000 case TRUNC_DIV_EXPR:
4001 new_rhs = fold_build1 (REALPART_EXPR, TREE_TYPE (op1), res);
4002 break;
4004 case TRUNC_MOD_EXPR:
4005 new_rhs = fold_build1 (IMAGPART_EXPR, TREE_TYPE (op1), res);
4006 break;
4008 default:
4009 gcc_unreachable ();
4012 gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
4013 gimple_assign_set_rhs_from_tree (&gsi, new_rhs);
4014 update_stmt (use_stmt);
4017 return true;
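/* A worked example of the whole transform (a sketch; names invented):

     q = a / b;                 /* top_stmt, dominates the rest.  */
     if (cond)
       r = a % b;               /* same operands, dominated by q's block.  */

   becomes

     divmod_tmp = DIVMOD (a, b);
     q = REALPART_EXPR <divmod_tmp>;
     if (cond)
       r = IMAGPART_EXPR <divmod_tmp>;

   Since the call is inserted before top_stmt, the quotient and the
   remainder are both available at every rewritten statement.  */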
4020 /* Find integer multiplications where the operands are extended from
4021 smaller types, and replace the MULT_EXPR with a WIDEN_MULT_EXPR
4022 where appropriate. */
4024 namespace {
4026 const pass_data pass_data_optimize_widening_mul =
4028 GIMPLE_PASS, /* type */
4029 "widening_mul", /* name */
4030 OPTGROUP_NONE, /* optinfo_flags */
4031 TV_NONE, /* tv_id */
4032 PROP_ssa, /* properties_required */
4033 0, /* properties_provided */
4034 0, /* properties_destroyed */
4035 0, /* todo_flags_start */
4036 TODO_update_ssa, /* todo_flags_finish */
4039 class pass_optimize_widening_mul : public gimple_opt_pass
4041 public:
4042 pass_optimize_widening_mul (gcc::context *ctxt)
4043 : gimple_opt_pass (pass_data_optimize_widening_mul, ctxt)
4046 /* opt_pass methods: */
4047 virtual bool gate (function *)
4049 return flag_expensive_optimizations && optimize;
4052 virtual unsigned int execute (function *);
4054 }; // class pass_optimize_widening_mul
4056 unsigned int
4057 pass_optimize_widening_mul::execute (function *fun)
4059 basic_block bb;
4060 bool cfg_changed = false;
4062 memset (&widen_mul_stats, 0, sizeof (widen_mul_stats));
4063 calculate_dominance_info (CDI_DOMINATORS);
4064 renumber_gimple_stmt_uids ();
4066 FOR_EACH_BB_FN (bb, fun)
4068 gimple_stmt_iterator gsi;
4070 for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi);)
4072 gimple *stmt = gsi_stmt (gsi);
4073 enum tree_code code;
4075 if (is_gimple_assign (stmt))
4077 code = gimple_assign_rhs_code (stmt);
4078 switch (code)
4080 case MULT_EXPR:
4081 if (!convert_mult_to_widen (stmt, &gsi)
4082 && convert_mult_to_fma (stmt,
4083 gimple_assign_rhs1 (stmt),
4084 gimple_assign_rhs2 (stmt)))
4086 gsi_remove (&gsi, true);
4087 release_defs (stmt);
4088 continue;
4090 break;
4092 case PLUS_EXPR:
4093 case MINUS_EXPR:
4094 if (!convert_plusminus_to_widen (&gsi, stmt, code))
4095 match_uaddsub_overflow (&gsi, stmt, code);
4096 break;
4098 case TRUNC_MOD_EXPR:
4099 convert_to_divmod (as_a<gassign *> (stmt));
4100 break;
4102 default:;
4105 else if (is_gimple_call (stmt)
4106 && gimple_call_lhs (stmt))
4108 tree fndecl = gimple_call_fndecl (stmt);
4109 if (fndecl
4110 && gimple_call_builtin_p (stmt, BUILT_IN_NORMAL))
4112 switch (DECL_FUNCTION_CODE (fndecl))
4114 case BUILT_IN_POWF:
4115 case BUILT_IN_POW:
4116 case BUILT_IN_POWL:
4117 if (TREE_CODE (gimple_call_arg (stmt, 1)) == REAL_CST
4118 && real_equal
4119 (&TREE_REAL_CST (gimple_call_arg (stmt, 1)),
4120 &dconst2)
4121 && convert_mult_to_fma (stmt,
4122 gimple_call_arg (stmt, 0),
4123 gimple_call_arg (stmt, 0)))
4125 unlink_stmt_vdef (stmt);
4126 if (gsi_remove (&gsi, true)
4127 && gimple_purge_dead_eh_edges (bb))
4128 cfg_changed = true;
4129 release_defs (stmt);
4130 continue;
4132 break;
4134 default:;
4138 gsi_next (&gsi);
4142 statistics_counter_event (fun, "widening multiplications inserted",
4143 widen_mul_stats.widen_mults_inserted);
4144 statistics_counter_event (fun, "widening maccs inserted",
4145 widen_mul_stats.maccs_inserted);
4146 statistics_counter_event (fun, "fused multiply-adds inserted",
4147 widen_mul_stats.fmas_inserted);
4148 statistics_counter_event (fun, "divmod calls inserted",
4149 widen_mul_stats.divmod_calls_inserted);
4151 return cfg_changed ? TODO_cleanup_cfg : 0;
4154 } // anon namespace
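/* Illustrative summary of what one execution of the pass can do (a
   sketch, assuming the target provides the relevant optabs; a, b are
   ints and u, v, n, m are unsigned):

     long c = (long) a * (long) b;  /* MULT_EXPR -> WIDEN_MULT_EXPR.  */
     double d = pow (x, 2.0) + y;   /* -> d = fma (x, x, y).  */
     unsigned s = u + v;            /* s < u test -> IFN_ADD_OVERFLOW.  */
     q = n / m;  r = n % m;         /* -> one IFN_DIVMOD call.  */
*/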
4156 gimple_opt_pass *
4157 make_pass_optimize_widening_mul (gcc::context *ctxt)
4159 return new pass_optimize_widening_mul (ctxt);