1 /* mpn_mul -- Multiply two natural numbers.
3 Copyright (C) 1991-2014 Free Software Foundation, Inc.
5 This file is part of the GNU MP Library.
7 The GNU MP Library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published by
9 the Free Software Foundation; either version 2.1 of the License, or (at your
10 option) any later version.
12 The GNU MP Library is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
15 License for more details.
17 You should have received a copy of the GNU Lesser General Public License
18 along with the GNU MP Library; see the file COPYING.LIB. If not, see
19 <http://www.gnu.org/licenses/>. */
24 /* Multiply the natural numbers u (pointed to by UP, with USIZE limbs)
25 and v (pointed to by VP, with VSIZE limbs), and store the result at
PRODP.  USIZE + VSIZE limbs are always stored, even if the input
operands are normalized (the most significant limb may then be zero).
Return the most significant limb of the result.
30 NOTE: The space pointed to by PRODP is overwritten before finished
31 with U and V, so overlap is an error.
Argument constraints:
1. USIZE >= VSIZE.
2. PRODP != UP and PRODP != VP, i.e. the destination
36 must be distinct from the multiplier and the multiplicand. */
38 /* If KARATSUBA_THRESHOLD is not already defined, define it to a
39 value which is good on most machines. */
40 #ifndef KARATSUBA_THRESHOLD
41 #define KARATSUBA_THRESHOLD 32
46 mpn_mul (mp_ptr prodp
,
47 mp_srcptr up
, mp_size_t usize
,
48 mp_srcptr vp
, mp_size_t vsize
)
50 mpn_mul (prodp
, up
, usize
, vp
, vsize
)
58 mp_ptr prod_endp
= prodp
+ usize
+ vsize
- 1;
63 if (vsize
< KARATSUBA_THRESHOLD
)
65 /* Handle simple cases with traditional multiplication.
67 This is the most critical code of the entire function. All
68 multiplies rely on this, both small and huge. Small ones arrive
69 here immediately. Huge ones arrive here as this is the base case
70 for Karatsuba's recursive algorithm below. */
78 /* Multiply by the first limb in V separately, as the result can be
79 stored (not added) to PROD. We also avoid a loop for zeroing. */
84 MPN_COPY (prodp
, up
, usize
);
86 MPN_ZERO (prodp
, usize
);
90 cy_limb
= mpn_mul_1 (prodp
, up
, usize
, v_limb
);
92 prodp
[usize
] = cy_limb
;
95 /* For each iteration in the outer loop, multiply one limb from
96 U with one limb from V, and add it to PROD. */
97 for (i
= 1; i
< vsize
; i
++)
104 cy_limb
= mpn_add_n (prodp
, prodp
, up
, usize
);
107 cy_limb
= mpn_addmul_1 (prodp
, up
, usize
, v_limb
);
109 prodp
[usize
] = cy_limb
;
117 tspace
= (mp_ptr
) TMP_ALLOC (2 * vsize
* BYTES_PER_MP_LIMB
);
118 MPN_MUL_N_RECURSE (prodp
, up
, vp
, vsize
, tspace
);
125 mp_ptr tp
= (mp_ptr
) TMP_ALLOC (2 * vsize
* BYTES_PER_MP_LIMB
);
128 MPN_MUL_N_RECURSE (tp
, up
, vp
, vsize
, tspace
);
129 cy
= mpn_add_n (prodp
, prodp
, tp
, vsize
);
130 mpn_add_1 (prodp
+ vsize
, tp
+ vsize
, vsize
, cy
);
135 while (usize
>= vsize
);
138 /* True: usize < vsize. */
140 /* Make life simple: Recurse. */
144 mpn_mul (tspace
, vp
, vsize
, up
, usize
);
145 cy
= mpn_add_n (prodp
, prodp
, tspace
, vsize
);
146 mpn_add_1 (prodp
+ vsize
, tspace
+ vsize
, usize
, cy
);