1 /* mpn_mul_n -- Multiply two natural numbers of length n.
3 Copyright (C) 1991-2017 Free Software Foundation, Inc.
5 This file is part of the GNU MP Library.
7 The GNU MP Library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published by
9 the Free Software Foundation; either version 2.1 of the License, or (at your
10 option) any later version.
12 The GNU MP Library is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
15 License for more details.
17 You should have received a copy of the GNU Lesser General Public License
18 along with the GNU MP Library; see the file COPYING.LIB. If not, see
19 <http://www.gnu.org/licenses/>. */
24 /* Multiply the natural numbers u (pointed to by UP) and v (pointed to by VP),
25 both with SIZE limbs, and store the result at PRODP. 2 * SIZE limbs are
26 always stored. Return the most significant limb.
 Argument constraints:
 1. PRODP != UP and PRODP != VP, i.e. the destination
30 must be distinct from the multiplier and the multiplicand. */
/* If KARATSUBA_THRESHOLD is not already defined, define it to a
   value which is good on most machines.  */
#ifndef KARATSUBA_THRESHOLD
#define KARATSUBA_THRESHOLD 32
#endif

/* The code can't handle KARATSUBA_THRESHOLD smaller than 2.  */
#if KARATSUBA_THRESHOLD < 2
#undef KARATSUBA_THRESHOLD
#define KARATSUBA_THRESHOLD 2
#endif
44 /* Handle simple cases with traditional multiplication.
46 This is the most critical code of multiplication. All multiplies rely
47 on this, both small and huge. Small ones arrive here immediately. Huge
48 ones arrive here as this is the base case for Karatsuba's recursive
52 impn_mul_n_basecase (mp_ptr prodp
, mp_srcptr up
, mp_srcptr vp
, mp_size_t size
)
58 /* Multiply by the first limb in V separately, as the result can be
59 stored (not added) to PROD. We also avoid a loop for zeroing. */
64 MPN_COPY (prodp
, up
, size
);
66 MPN_ZERO (prodp
, size
);
70 cy_limb
= mpn_mul_1 (prodp
, up
, size
, v_limb
);
72 prodp
[size
] = cy_limb
;
75 /* For each iteration in the outer loop, multiply one limb from
76 U with one limb from V, and add it to PROD. */
77 for (i
= 1; i
< size
; i
++)
84 cy_limb
= mpn_add_n (prodp
, prodp
, up
, size
);
87 cy_limb
= mpn_addmul_1 (prodp
, up
, size
, v_limb
);
89 prodp
[size
] = cy_limb
;
95 impn_mul_n (mp_ptr prodp
,
96 mp_srcptr up
, mp_srcptr vp
, mp_size_t size
, mp_ptr tspace
)
100 /* The size is odd, the code code below doesn't handle that.
101 Multiply the least significant (size - 1) limbs with a recursive
102 call, and handle the most significant limb of S1 and S2
104 /* A slightly faster way to do this would be to make the Karatsuba
105 code below behave as if the size were even, and let it check for
106 odd size in the end. I.e., in essence move this code to the end.
107 Doing so would save us a recursive call, and potentially make the
108 stack grow a lot less. */
110 mp_size_t esize
= size
- 1; /* even size */
113 MPN_MUL_N_RECURSE (prodp
, up
, vp
, esize
, tspace
);
114 cy_limb
= mpn_addmul_1 (prodp
+ esize
, up
, esize
, vp
[esize
]);
115 prodp
[esize
+ esize
] = cy_limb
;
116 cy_limb
= mpn_addmul_1 (prodp
+ esize
, vp
, size
, up
[esize
]);
118 prodp
[esize
+ size
] = cy_limb
;
122 /* Anatolij Alekseevich Karatsuba's divide-and-conquer algorithm.
124 Split U in two pieces, U1 and U0, such that
126 and V in V1 and V0, such that
129 UV is then computed recursively using the identity
132 UV = (B + B )U V + B (U -U )(V -V ) + (B + 1)U V
135 Where B = 2**BITS_PER_MP_LIMB. */
137 mp_size_t hsize
= size
>> 1;
141 /*** Product H. ________________ ________________
142 |_____U1 x V1____||____U0 x V0_____| */
143 /* Put result in upper part of PROD and pass low part of TSPACE
145 MPN_MUL_N_RECURSE (prodp
+ size
, up
+ hsize
, vp
+ hsize
, hsize
, tspace
);
147 /*** Product M. ________________
148 |_(U1-U0)(V0-V1)_| */
149 if (mpn_cmp (up
+ hsize
, up
, hsize
) >= 0)
151 mpn_sub_n (prodp
, up
+ hsize
, up
, hsize
);
156 mpn_sub_n (prodp
, up
, up
+ hsize
, hsize
);
159 if (mpn_cmp (vp
+ hsize
, vp
, hsize
) >= 0)
161 mpn_sub_n (prodp
+ hsize
, vp
+ hsize
, vp
, hsize
);
166 mpn_sub_n (prodp
+ hsize
, vp
, vp
+ hsize
, hsize
);
167 /* No change of NEGFLG. */
169 /* Read temporary operands from low part of PROD.
170 Put result in low part of TSPACE using upper part of TSPACE
172 MPN_MUL_N_RECURSE (tspace
, prodp
, prodp
+ hsize
, hsize
, tspace
+ size
);
174 /*** Add/copy product H. */
175 MPN_COPY (prodp
+ hsize
, prodp
+ size
, hsize
);
176 cy
= mpn_add_n (prodp
+ size
, prodp
+ size
, prodp
+ size
+ hsize
, hsize
);
178 /*** Add product M (if NEGFLG M is a negative number). */
180 cy
-= mpn_sub_n (prodp
+ hsize
, prodp
+ hsize
, tspace
, size
);
182 cy
+= mpn_add_n (prodp
+ hsize
, prodp
+ hsize
, tspace
, size
);
184 /*** Product L. ________________ ________________
185 |________________||____U0 x V0_____| */
186 /* Read temporary operands from low part of PROD.
187 Put result in low part of TSPACE using upper part of TSPACE
189 MPN_MUL_N_RECURSE (tspace
, up
, vp
, hsize
, tspace
+ size
);
191 /*** Add/copy Product L (twice). */
193 cy
+= mpn_add_n (prodp
+ hsize
, prodp
+ hsize
, tspace
, size
);
195 mpn_add_1 (prodp
+ hsize
+ size
, prodp
+ hsize
+ size
, hsize
, cy
);
197 MPN_COPY (prodp
, tspace
, hsize
);
198 cy
= mpn_add_n (prodp
+ hsize
, prodp
+ hsize
, tspace
+ hsize
, hsize
);
200 mpn_add_1 (prodp
+ size
, prodp
+ size
, size
, 1);
205 impn_sqr_n_basecase (mp_ptr prodp
, mp_srcptr up
, mp_size_t size
)
211 /* Multiply by the first limb in V separately, as the result can be
212 stored (not added) to PROD. We also avoid a loop for zeroing. */
217 MPN_COPY (prodp
, up
, size
);
219 MPN_ZERO (prodp
, size
);
223 cy_limb
= mpn_mul_1 (prodp
, up
, size
, v_limb
);
225 prodp
[size
] = cy_limb
;
228 /* For each iteration in the outer loop, multiply one limb from
229 U with one limb from V, and add it to PROD. */
230 for (i
= 1; i
< size
; i
++)
237 cy_limb
= mpn_add_n (prodp
, prodp
, up
, size
);
240 cy_limb
= mpn_addmul_1 (prodp
, up
, size
, v_limb
);
242 prodp
[size
] = cy_limb
;
248 impn_sqr_n (mp_ptr prodp
,
249 mp_srcptr up
, mp_size_t size
, mp_ptr tspace
)
253 /* The size is odd, the code code below doesn't handle that.
254 Multiply the least significant (size - 1) limbs with a recursive
255 call, and handle the most significant limb of S1 and S2
257 /* A slightly faster way to do this would be to make the Karatsuba
258 code below behave as if the size were even, and let it check for
259 odd size in the end. I.e., in essence move this code to the end.
260 Doing so would save us a recursive call, and potentially make the
261 stack grow a lot less. */
263 mp_size_t esize
= size
- 1; /* even size */
266 MPN_SQR_N_RECURSE (prodp
, up
, esize
, tspace
);
267 cy_limb
= mpn_addmul_1 (prodp
+ esize
, up
, esize
, up
[esize
]);
268 prodp
[esize
+ esize
] = cy_limb
;
269 cy_limb
= mpn_addmul_1 (prodp
+ esize
, up
, size
, up
[esize
]);
271 prodp
[esize
+ size
] = cy_limb
;
275 mp_size_t hsize
= size
>> 1;
278 /*** Product H. ________________ ________________
279 |_____U1 x U1____||____U0 x U0_____| */
280 /* Put result in upper part of PROD and pass low part of TSPACE
282 MPN_SQR_N_RECURSE (prodp
+ size
, up
+ hsize
, hsize
, tspace
);
284 /*** Product M. ________________
285 |_(U1-U0)(U0-U1)_| */
286 if (mpn_cmp (up
+ hsize
, up
, hsize
) >= 0)
288 mpn_sub_n (prodp
, up
+ hsize
, up
, hsize
);
292 mpn_sub_n (prodp
, up
, up
+ hsize
, hsize
);
295 /* Read temporary operands from low part of PROD.
296 Put result in low part of TSPACE using upper part of TSPACE
298 MPN_SQR_N_RECURSE (tspace
, prodp
, hsize
, tspace
+ size
);
300 /*** Add/copy product H. */
301 MPN_COPY (prodp
+ hsize
, prodp
+ size
, hsize
);
302 cy
= mpn_add_n (prodp
+ size
, prodp
+ size
, prodp
+ size
+ hsize
, hsize
);
304 /*** Add product M (if NEGFLG M is a negative number). */
305 cy
-= mpn_sub_n (prodp
+ hsize
, prodp
+ hsize
, tspace
, size
);
307 /*** Product L. ________________ ________________
308 |________________||____U0 x U0_____| */
309 /* Read temporary operands from low part of PROD.
310 Put result in low part of TSPACE using upper part of TSPACE
312 MPN_SQR_N_RECURSE (tspace
, up
, hsize
, tspace
+ size
);
314 /*** Add/copy Product L (twice). */
316 cy
+= mpn_add_n (prodp
+ hsize
, prodp
+ hsize
, tspace
, size
);
318 mpn_add_1 (prodp
+ hsize
+ size
, prodp
+ hsize
+ size
, hsize
, cy
);
320 MPN_COPY (prodp
, tspace
, hsize
);
321 cy
= mpn_add_n (prodp
+ hsize
, prodp
+ hsize
, tspace
+ hsize
, hsize
);
323 mpn_add_1 (prodp
+ size
, prodp
+ size
, size
, 1);
327 /* This should be made into an inline function in gmp.h. */
329 mpn_mul_n (mp_ptr prodp
, mp_srcptr up
, mp_srcptr vp
, mp_size_t size
)
335 if (size
< KARATSUBA_THRESHOLD
)
337 impn_sqr_n_basecase (prodp
, up
, size
);
342 tspace
= (mp_ptr
) TMP_ALLOC (2 * size
* BYTES_PER_MP_LIMB
);
343 impn_sqr_n (prodp
, up
, size
, tspace
);
348 if (size
< KARATSUBA_THRESHOLD
)
350 impn_mul_n_basecase (prodp
, up
, vp
, size
);
355 tspace
= (mp_ptr
) TMP_ALLOC (2 * size
* BYTES_PER_MP_LIMB
);
356 impn_mul_n (prodp
, up
, vp
, size
, tspace
);