/* mpn_mul_n -- Multiply two natural numbers of length n.

Copyright (C) 1991, 1992, 1993, 1994, 1996 Free Software Foundation, Inc.

This file is part of the GNU MP Library.

The GNU MP Library is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version.

The GNU MP Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
License for more details.

You should have received a copy of the GNU Lesser General Public License
along with the GNU MP Library; see the file COPYING.LIB.  If not, write to
the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
MA 02111-1307, USA. */

#include "gmp.h"
#include "gmp-impl.h"
/* Multiply the natural numbers u (pointed to by UP) and v (pointed to by VP),
   both with SIZE limbs, and store the result at PRODP.  2 * SIZE limbs are
   always stored.  Return the most significant limb.

   Argument constraints:
   1. PRODP != UP and PRODP != VP, i.e. the destination
      must be distinct from the multiplier and the multiplicand.  */
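
/* Example use (an illustrative sketch, not part of the library):

     mp_limb_t u[4] = {1, 0, 0, 0};
     mp_limb_t v[4] = {2, 0, 0, 0};
     mp_limb_t prod[8];

     mpn_mul_n (prod, u, v, 4);

   All 2 * 4 = 8 product limbs are stored at prod; here prod[0] == 2 and
   the higher limbs are zero.  Limbs are least significant first.  */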
/* If KARATSUBA_THRESHOLD is not already defined, define it to a
   value which is good on most machines.  */
#ifndef KARATSUBA_THRESHOLD
#define KARATSUBA_THRESHOLD 32
#endif

/* The code can't handle KARATSUBA_THRESHOLD smaller than 2.  */
#if KARATSUBA_THRESHOLD < 2
#undef KARATSUBA_THRESHOLD
#define KARATSUBA_THRESHOLD 2
#endif
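
/* The crossover point is machine dependent; it can be overridden at
   build time, e.g. (an assumed compiler invocation, for illustration
   only):

     cc -DKARATSUBA_THRESHOLD=16 -c mul_n.c
*/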
/* Handle simple cases with traditional multiplication.

   This is the most critical code of multiplication.  All multiplies rely
   on this, both small and huge.  Small ones arrive here immediately.  Huge
   ones arrive here as this is the base case for Karatsuba's recursive
   algorithm below.  */
void
#if __STDC__
impn_mul_n_basecase (mp_ptr prodp, mp_srcptr up, mp_srcptr vp, mp_size_t size)
#else
impn_mul_n_basecase (prodp, up, vp, size)
     mp_ptr prodp;
     mp_srcptr up;
     mp_srcptr vp;
     mp_size_t size;
#endif
{
  mp_size_t i;
  mp_limb_t cy_limb;
  mp_limb_t v_limb;
  /* Multiply by the first limb in V separately, as the result can be
     stored (not added) to PROD.  We also avoid a loop for zeroing.  */
  v_limb = vp[0];
  if (v_limb <= 1)
    {
      if (v_limb == 1)
        MPN_COPY (prodp, up, size);
      else
        MPN_ZERO (prodp, size);
      cy_limb = 0;
    }
  else
    cy_limb = mpn_mul_1 (prodp, up, size, v_limb);

  prodp[size] = cy_limb;
  prodp++;
  /* For each iteration in the outer loop, multiply one limb from
     U with one limb from V, and add it to PROD.  */
  for (i = 1; i < size; i++)
    {
      v_limb = vp[i];
      if (v_limb <= 1)
        {
          cy_limb = 0;
          if (v_limb == 1)
            cy_limb = mpn_add_n (prodp, prodp, up, size);
        }
      else
        cy_limb = mpn_addmul_1 (prodp, up, size, v_limb);

      prodp[size] = cy_limb;
      prodp++;
    }
}
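
/* Cost note (informal): the basecase performs about size**2 limb
   products, so doubling SIZE quadruples the work.  Karatsuba (below)
   trades one size-n multiply for three n/2 multiplies plus O(n)
   additions, roughly O(n**1.585) overall, which wins for sizes above
   KARATSUBA_THRESHOLD.  */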
void
#if __STDC__
impn_mul_n (mp_ptr prodp,
            mp_srcptr up, mp_srcptr vp, mp_size_t size, mp_ptr tspace)
#else
impn_mul_n (prodp, up, vp, size, tspace)
     mp_ptr prodp;
     mp_srcptr up;
     mp_srcptr vp;
     mp_size_t size;
     mp_ptr tspace;
#endif
{
  if ((size & 1) != 0)
    {
      /* The size is odd, the code below doesn't handle that.
         Multiply the least significant (size - 1) limbs with a recursive
         call, and handle the most significant limb of S1 and S2
         separately.  */
      /* A slightly faster way to do this would be to make the Karatsuba
         code below behave as if the size were even, and let it check for
         odd size in the end.  I.e., in essence move this code to the end.
         Doing so would save us a recursive call, and potentially make the
         stack grow a lot less.  */
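      /* Spelled out (informal): writing U = U' + up[esize]*B**esize and
         V = V' + vp[esize]*B**esize, the code computes U'*V', adds
         vp[esize]*U' at offset esize, then adds up[esize]*V at offset
         esize; the last term already covers the up[esize]*vp[esize]
         cross product, so together these form the full 2*size result.  */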
      mp_size_t esize = size - 1;	/* even size */
      mp_limb_t cy_limb;

      MPN_MUL_N_RECURSE (prodp, up, vp, esize, tspace);
      cy_limb = mpn_addmul_1 (prodp + esize, up, esize, vp[esize]);
      prodp[esize + esize] = cy_limb;
      cy_limb = mpn_addmul_1 (prodp + esize, vp, size, up[esize]);
      prodp[esize + size] = cy_limb;
    }
  else
    {
      /* Anatolij Alekseevich Karatsuba's divide-and-conquer algorithm.

         Split U in two pieces, U1 and U0, such that
         U = U1*B**n + U0,
         and V in V1 and V0, such that
         V = V1*B**n + V0.

         UV is then computed recursively using the identity

         UV = (B**(2n) + B**n)*U1*V1 + B**n*(U1-U0)*(V0-V1)
              + (B**n + 1)*U0*V0

         where B = 2**BITS_PER_MP_LIMB and n = hsize = size/2.  */
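      /* Worked instance of the identity (illustration only, with B = 10
         and n = 1):  U = 47, V = 36, so U1 = 4, U0 = 7, V1 = 3, V0 = 6.
         Then U1*V1 = 12, U0*V0 = 42, (U1-U0)*(V0-V1) = (-3)*3 = -9, and
         UV = (100 + 10)*12 + 10*(-9) + 11*42 = 1320 - 90 + 462 = 1692
         = 47*36.  The middle product can be negative; NEGFLG below
         tracks its sign so only nonnegative differences are formed.  */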
      mp_size_t hsize = size >> 1;
      mp_limb_t cy;
      int negflg;
      /*** Product H.    ________________  ________________
                        |_____U1 x V1____||____U0 x V0_____|  */
      /* Put result in upper part of PROD and pass low part of TSPACE
         as new TSPACE.  */
      MPN_MUL_N_RECURSE (prodp + size, up + hsize, vp + hsize, hsize, tspace);
      /*** Product M.    ________________
                        |_(U1-U0)(V0-V1)_|  */
      if (mpn_cmp (up + hsize, up, hsize) >= 0)
        {
          mpn_sub_n (prodp, up + hsize, up, hsize);
          negflg = 0;
        }
      else
        {
          mpn_sub_n (prodp, up, up + hsize, hsize);
          negflg = 1;
        }
      if (mpn_cmp (vp + hsize, vp, hsize) >= 0)
        {
          mpn_sub_n (prodp + hsize, vp + hsize, vp, hsize);
          negflg ^= 1;
        }
      else
        {
          mpn_sub_n (prodp + hsize, vp, vp + hsize, hsize);
          /* No change of NEGFLG.  */
        }
      /* Read temporary operands from low part of PROD.
         Put result in low part of TSPACE using upper part of TSPACE
         as new TSPACE.  */
      MPN_MUL_N_RECURSE (tspace, prodp, prodp + hsize, hsize, tspace + size);
      /*** Add/copy product H.  */
      MPN_COPY (prodp + hsize, prodp + size, hsize);
      cy = mpn_add_n (prodp + size, prodp + size,
                      prodp + size + hsize, hsize);
      /*** Add product M (if NEGFLG, M is a negative number).  */
      if (negflg)
        cy -= mpn_sub_n (prodp + hsize, prodp + hsize, tspace, size);
      else
        cy += mpn_add_n (prodp + hsize, prodp + hsize, tspace, size);
      /*** Product L.    ________________  ________________
                        |________________||____U0 x V0_____|  */
      /* Read operands directly from U and V.
         Put result in low part of TSPACE using upper part of TSPACE
         as new TSPACE.  */
      MPN_MUL_N_RECURSE (tspace, up, vp, hsize, tspace + size);
      /*** Add/copy Product L (twice).  */
      cy += mpn_add_n (prodp + hsize, prodp + hsize, tspace, size);
      if (cy)
        mpn_add_1 (prodp + hsize + size, prodp + hsize + size, hsize, cy);

      MPN_COPY (prodp, tspace, hsize);
      cy = mpn_add_n (prodp + hsize, prodp + hsize, tspace + hsize, hsize);
      if (cy)
        mpn_add_1 (prodp + size, prodp + size, size, 1);
    }
}
void
#if __STDC__
impn_sqr_n_basecase (mp_ptr prodp, mp_srcptr up, mp_size_t size)
#else
impn_sqr_n_basecase (prodp, up, size)
     mp_ptr prodp;
     mp_srcptr up;
     mp_size_t size;
#endif
{
  mp_size_t i;
  mp_limb_t cy_limb;
  mp_limb_t v_limb;
  /* Multiply by the first limb of U separately, as the result can be
     stored (not added) to PROD.  We also avoid a loop for zeroing.  */
  v_limb = up[0];
  if (v_limb <= 1)
    {
      if (v_limb == 1)
        MPN_COPY (prodp, up, size);
      else
        MPN_ZERO (prodp, size);
      cy_limb = 0;
    }
  else
    cy_limb = mpn_mul_1 (prodp, up, size, v_limb);

  prodp[size] = cy_limb;
  prodp++;
  /* For each iteration in the outer loop, multiply one limb from
     U with one limb from U, and add it to PROD.  */
  for (i = 1; i < size; i++)
    {
      v_limb = up[i];
      if (v_limb <= 1)
        {
          cy_limb = 0;
          if (v_limb == 1)
            cy_limb = mpn_add_n (prodp, prodp, up, size);
        }
      else
        cy_limb = mpn_addmul_1 (prodp, up, size, v_limb);

      prodp[size] = cy_limb;
      prodp++;
    }
}
void
#if __STDC__
impn_sqr_n (mp_ptr prodp,
            mp_srcptr up, mp_size_t size, mp_ptr tspace)
#else
impn_sqr_n (prodp, up, size, tspace)
     mp_ptr prodp;
     mp_srcptr up;
     mp_size_t size;
     mp_ptr tspace;
#endif
{
  if ((size & 1) != 0)
    {
      /* The size is odd, the code below doesn't handle that.
         Multiply the least significant (size - 1) limbs with a recursive
         call, and handle the most significant limb of S1 and S2
         separately.  */
      /* A slightly faster way to do this would be to make the Karatsuba
         code below behave as if the size were even, and let it check for
         odd size in the end.  I.e., in essence move this code to the end.
         Doing so would save us a recursive call, and potentially make the
         stack grow a lot less.  */
      mp_size_t esize = size - 1;	/* even size */
      mp_limb_t cy_limb;

      MPN_SQR_N_RECURSE (prodp, up, esize, tspace);
      cy_limb = mpn_addmul_1 (prodp + esize, up, esize, up[esize]);
      prodp[esize + esize] = cy_limb;
      cy_limb = mpn_addmul_1 (prodp + esize, up, size, up[esize]);
      prodp[esize + size] = cy_limb;
    }
  else
    {
      mp_size_t hsize = size >> 1;
      mp_limb_t cy;
      /*** Product H.    ________________  ________________
                        |_____U1 x U1____||____U0 x U0_____|  */
      /* Put result in upper part of PROD and pass low part of TSPACE
         as new TSPACE.  */
      MPN_SQR_N_RECURSE (prodp + size, up + hsize, hsize, tspace);
      /*** Product M.    ________________
                        |_(U1-U0)(U0-U1)_|  */
      /* (U1-U0)*(U0-U1) = -(U1-U0)**2 is never positive, so unlike the
         general case no sign flag is needed; only the magnitude is
         formed here, from whichever difference is nonnegative.  */
      if (mpn_cmp (up + hsize, up, hsize) >= 0)
        {
          mpn_sub_n (prodp, up + hsize, up, hsize);
        }
      else
        {
          mpn_sub_n (prodp, up, up + hsize, hsize);
        }
      /* Read temporary operands from low part of PROD.
         Put result in low part of TSPACE using upper part of TSPACE
         as new TSPACE.  */
      MPN_SQR_N_RECURSE (tspace, prodp, hsize, tspace + size);
      /*** Add/copy product H.  */
      MPN_COPY (prodp + hsize, prodp + size, hsize);
      cy = mpn_add_n (prodp + size, prodp + size,
                      prodp + size + hsize, hsize);
      /*** Add product M (for squaring, M is always non-positive, so it
         is always subtracted).  */
      cy -= mpn_sub_n (prodp + hsize, prodp + hsize, tspace, size);
      /*** Product L.    ________________  ________________
                        |________________||____U0 x U0_____|  */
      /* Read operands directly from U.
         Put result in low part of TSPACE using upper part of TSPACE
         as new TSPACE.  */
      MPN_SQR_N_RECURSE (tspace, up, hsize, tspace + size);
      /*** Add/copy Product L (twice).  */
      cy += mpn_add_n (prodp + hsize, prodp + hsize, tspace, size);
      if (cy)
        mpn_add_1 (prodp + hsize + size, prodp + hsize + size, hsize, cy);

      MPN_COPY (prodp, tspace, hsize);
      cy = mpn_add_n (prodp + hsize, prodp + hsize, tspace + hsize, hsize);
      if (cy)
        mpn_add_1 (prodp + size, prodp + size, size, 1);
    }
}
/* This should be made into an inline function in gmp.h.  */
void
#if __STDC__
mpn_mul_n (mp_ptr prodp, mp_srcptr up, mp_srcptr vp, mp_size_t size)
#else
mpn_mul_n (prodp, up, vp, size)
     mp_ptr prodp;
     mp_srcptr up;
     mp_srcptr vp;
     mp_size_t size;
#endif
{
  TMP_DECL (marker);

  TMP_MARK (marker);
  if (up == vp)
    {
      if (size < KARATSUBA_THRESHOLD)
        {
          impn_sqr_n_basecase (prodp, up, size);
        }
      else
        {
          mp_ptr tspace;
          tspace = (mp_ptr) TMP_ALLOC (2 * size * BYTES_PER_MP_LIMB);
          impn_sqr_n (prodp, up, size, tspace);
        }
    }
  else
    {
      if (size < KARATSUBA_THRESHOLD)
        {
          impn_mul_n_basecase (prodp, up, vp, size);
        }
      else
        {
          mp_ptr tspace;
          tspace = (mp_ptr) TMP_ALLOC (2 * size * BYTES_PER_MP_LIMB);
          impn_mul_n (prodp, up, vp, size, tspace);
        }
    }
  TMP_FREE (marker);
}