gcc: libquadmath/printf/mul.c
/* mpn_mul -- Multiply two natural numbers.

Copyright (C) 1991, 1993, 1994, 1996 Free Software Foundation, Inc.

This file is part of the GNU MP Library.

The GNU MP Library is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version.

The GNU MP Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
License for more details.

You should have received a copy of the GNU Lesser General Public License
along with the GNU MP Library; see the file COPYING.LIB.  If not, write to
the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
MA 02111-1307, USA. */
#include <config.h>
#include "gmp-impl.h"
/* Multiply the natural numbers u (pointed to by UP, with USIZE limbs)
   and v (pointed to by VP, with VSIZE limbs), and store the result at
   PRODP.  USIZE + VSIZE limbs are always stored, even if the most
   significant limb of the product is zero.  Return the most significant
   limb of the result.

   NOTE: The space pointed to by PRODP is overwritten before U and V have
   been fully read, so overlap is an error.

   Argument constraints:
   1. USIZE >= VSIZE.
   2. PRODP != UP and PRODP != VP, i.e. the destination
      must be distinct from the multiplier and the multiplicand.  */
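/* Illustrative sketch of a call, with hypothetical caller arrays u and v
   of 4 and 2 limbs respectively:

       mp_limb_t prod[4 + 2];
       mp_limb_t msl = mpn_mul (prod, u, 4, v, 2);

   PROD must provide USIZE + VSIZE limbs and must not overlap U or V; the
   returned value equals prod[5], the most significant limb (possibly 0).  */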
/* If KARATSUBA_THRESHOLD is not already defined, define it to a
   value which is good on most machines.  */
#ifndef KARATSUBA_THRESHOLD
#define KARATSUBA_THRESHOLD 32
#endif
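/* Because of the #ifndef guard above, a port can tune the crossover point
   at build time, e.g. by passing -DKARATSUBA_THRESHOLD=16 to the compiler;
   32 is only a generic default.  Below the threshold the schoolbook loop
   is used, at or above it the Karatsuba recursion.  */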
mp_limb_t
#if __STDC__
mpn_mul (mp_ptr prodp,
         mp_srcptr up, mp_size_t usize,
         mp_srcptr vp, mp_size_t vsize)
#else
mpn_mul (prodp, up, usize, vp, vsize)
     mp_ptr prodp;
     mp_srcptr up;
     mp_size_t usize;
     mp_srcptr vp;
     mp_size_t vsize;
#endif
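/* The #if __STDC__ branch declares an ISO C prototype; the #else branch is
   the equivalent old-style (K&R) definition for pre-ANSI compilers.  */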
{
  mp_ptr prod_endp = prodp + usize + vsize - 1;
  mp_limb_t cy;
  mp_ptr tspace;
  if (vsize < KARATSUBA_THRESHOLD)
    {
      /* Handle simple cases with traditional multiplication.

         This is the most critical code of the entire function.  All
         multiplies rely on this, both small and huge.  Small ones arrive
         here immediately.  Huge ones arrive here as this is the base case
         for Karatsuba's recursive algorithm below.  */
      mp_size_t i;
      mp_limb_t cy_limb;
      mp_limb_t v_limb;
      if (vsize == 0)
        return 0;
      /* Multiply by the first limb in V separately, as the result can be
         stored (not added) to PROD.  We also avoid a loop for zeroing.  */
      v_limb = vp[0];
      if (v_limb <= 1)
        {
          if (v_limb == 1)
            MPN_COPY (prodp, up, usize);
          else
            MPN_ZERO (prodp, usize);
          cy_limb = 0;
        }
      else
        cy_limb = mpn_mul_1 (prodp, up, usize, v_limb);

      prodp[usize] = cy_limb;
      prodp++;
      /* For each iteration in the outer loop, multiply one limb from
         U with one limb from V, and add it to PROD.  */
      for (i = 1; i < vsize; i++)
        {
          v_limb = vp[i];
          if (v_limb <= 1)
            {
              cy_limb = 0;
              if (v_limb == 1)
                cy_limb = mpn_add_n (prodp, prodp, up, usize);
            }
          else
            cy_limb = mpn_addmul_1 (prodp, up, usize, v_limb);

          prodp[usize] = cy_limb;
          prodp++;
        }
      return cy_limb;
    }
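  /* Karatsuba case: VSIZE >= KARATSUBA_THRESHOLD.  U is consumed in
     VSIZE-limb chunks; each chunk is multiplied by the whole of V with
     MPN_MUL_N_RECURSE (which multiplies two VSIZE-limb operands, using
     TSPACE as scratch), and the partial product is accumulated into PROD
     at the corresponding limb offset.  */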
  tspace = (mp_ptr) alloca (2 * vsize * BYTES_PER_MP_LIMB);
  MPN_MUL_N_RECURSE (prodp, up, vp, vsize, tspace);

  prodp += vsize;
  up += vsize;
  usize -= vsize;
  if (usize >= vsize)
    {
      mp_ptr tp = (mp_ptr) alloca (2 * vsize * BYTES_PER_MP_LIMB);
      do
        {
          MPN_MUL_N_RECURSE (tp, up, vp, vsize, tspace);
          cy = mpn_add_n (prodp, prodp, tp, vsize);
          mpn_add_1 (prodp + vsize, tp + vsize, vsize, cy);
          prodp += vsize;
          up += vsize;
          usize -= vsize;
        }
      while (usize >= vsize);
    }
  /* True: usize < vsize.  */

  /* Make life simple: Recurse.  */
  if (usize != 0)
    {
      mpn_mul (tspace, vp, vsize, up, usize);
      cy = mpn_add_n (prodp, prodp, tspace, vsize);
      mpn_add_1 (prodp + vsize, tspace + vsize, usize, cy);
    }
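  /* PROD_ENDP was computed from the original PRODP, USIZE and VSIZE, so it
     still points at the most significant limb of the complete product even
     though the local pointer and size variables have been advanced.  */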
  return *prod_endp;
}