/* mpn_toom42_mul -- Multiply {ap,an} and {bp,bn} where an is nominally twice
   as large as bn.  Or more accurately, (3/2)bn < an < 4bn.

   Contributed to the GNU project by Torbjorn Granlund.
   Additional improvements by Marco Bodrato.

   The idea of applying toom to unbalanced multiplication is due to Marco
   Bodrato and Alberto Zanoni.

   THE FUNCTION IN THIS FILE IS INTERNAL WITH A MUTABLE INTERFACE.  IT IS ONLY
   SAFE TO REACH IT THROUGH DOCUMENTED INTERFACES.  IN FACT, IT IS ALMOST
   GUARANTEED THAT IT WILL CHANGE OR DISAPPEAR IN A FUTURE GNU MP RELEASE.

Copyright 2006-2008, 2012, 2014 Free Software Foundation, Inc.

This file is part of the GNU MP Library.
The GNU MP Library is free software; you can redistribute it and/or modify
it under the terms of either:

  * the GNU Lesser General Public License as published by the Free
    Software Foundation; either version 3 of the License, or (at your
    option) any later version.

or

  * the GNU General Public License as published by the Free Software
    Foundation; either version 2 of the License, or (at your option) any
    later version.

or both in parallel, as here.

The GNU MP Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License and
the GNU Lesser General Public License for more details.

You should have received copies of the GNU General Public License and the
GNU Lesser General Public License along with the GNU MP Library.  If not,
see https://www.gnu.org/licenses/.  */

#include "gmp-impl.h"
/* Evaluate in: -1, 0, +1, +2, +inf

  <-s-><--n--><--n--><--n-->
   ___ ______ ______ ______
  |a3_|___a2_|___a1_|___a0_|
               |_b1_|___b0_|
               <-t--><--n-->

  v0  =  a0             * b0      #   A(0)*B(0)
  v1  = (a0+ a1+ a2+ a3)*(b0+ b1) #   A(1)*B(1)      ah  <= 3  bh <= 1
  vm1 = (a0- a1+ a2- a3)*(b0- b1) #  A(-1)*B(-1)    |ah| <= 1  bh  = 0
  v2  = (a0+2a1+4a2+8a3)*(b0+2b1) #   A(2)*B(2)      ah  <= 14 bh <= 2
  vinf=              a3 *     b1  # A(inf)*B(inf)
*/
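/* As a toy illustration only: with single-digit coefficients A(x) = 4x^3 +
   3x^2 + 2x + 1 and B(x) = 6x + 5, the five values are v0 = 1*5 = 5,
   v1 = 10*11 = 110, vm1 = (-2)*(-1) = 2, v2 = 49*17 = 833 and
   vinf = 4*6 = 24, which determine the five coefficients of the degree-4
   product C(x) = 24x^4 + 38x^3 + 27x^2 + 16x + 5 (check: C(1) = 110,
   C(-1) = 2).  */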
#define TOOM42_MUL_N_REC(p, a, b, n, ws)				\
  do {									\
    mpn_mul_n (p, a, b, n);						\
  } while (0)
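/* All four recursive products below (v0, v1, vm1, v2) go through this macro,
   so the recursive multiply could be swapped without touching the call
   sites; as written it simply calls mpn_mul_n.  */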
void
mpn_toom42_mul (mp_ptr pp,
                mp_srcptr ap, mp_size_t an,
                mp_srcptr bp, mp_size_t bn,
                mp_ptr scratch)
{
  mp_size_t n, s, t;
  int vm1_neg;
  mp_limb_t cy, vinf0;
  mp_ptr a0_a2;
  mp_ptr as1, asm1, as2;
  mp_ptr bs1, bsm1, bs2;
  mp_ptr tmp;
  TMP_DECL;

#define a0  ap
#define a1  (ap + n)
#define a2  (ap + 2*n)
#define a3  (ap + 3*n)
#define b0  bp
#define b1  (bp + n)
  n = an >= 2 * bn ? (an + 3) >> 2 : (bn + 1) >> 1;

  s = an - 3 * n;
  t = bn - n;
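  /* n is the block size: ceil(an/4) when an >= 2*bn, otherwise ceil(bn/2);
     s and t are the sizes of the high parts a3 and b1.  */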
  ASSERT (0 < s && s <= n);
  ASSERT (0 < t && t <= n);
  TMP_MARK;

  tmp = TMP_ALLOC_LIMBS (6 * n + 5);
  as1  = tmp; tmp += n + 1;
  asm1 = tmp; tmp += n + 1;
  as2  = tmp; tmp += n + 1;
  bs1  = tmp; tmp += n + 1;
  bsm1 = tmp; tmp += n;
  bs2  = tmp; tmp += n + 1;

  a0_a2 = pp;
  /* Compute as1 and asm1.  */
  vm1_neg = mpn_toom_eval_dgr3_pm1 (as1, asm1, ap, n, s, a0_a2) & 1;
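  /* mpn_toom_eval_dgr3_pm1 writes A(1) to as1 and |A(-1)| to asm1, using
     a0_a2 (aliased to the still-unused product area pp) as scratch; the low
     bit of its return value is set when A(-1) is negative.  */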
  /* Compute as2.  */
#if HAVE_NATIVE_mpn_addlsh1_n
  cy  = mpn_addlsh1_n (as2, a2, a3, s);
  if (s != n)
    cy = mpn_add_1 (as2 + s, a2 + s, n - s, cy);
  cy = 2 * cy + mpn_addlsh1_n (as2, a1, as2, n);
  cy = 2 * cy + mpn_addlsh1_n (as2, a0, as2, n);
#else
  cy  = mpn_lshift (as2, a3, s, 1);
  cy += mpn_add_n (as2, a2, as2, s);
  if (s != n)
    cy = mpn_add_1 (as2 + s, a2 + s, n - s, cy);
  cy = 2 * cy + mpn_lshift (as2, as2, n, 1);
  cy += mpn_add_n (as2, a1, as2, n);
  cy = 2 * cy + mpn_lshift (as2, as2, n, 1);
  cy += mpn_add_n (as2, a0, as2, n);
#endif
  as2[n] = cy;
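  /* Either branch evaluates A(2) by Horner's rule,
     as2 = a0 + 2*(a1 + 2*(a2 + 2*a3)), with the carry out of the low n
     limbs stored in as2[n].  */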
  /* Compute bs1 and bsm1.  */
  if (t == n)
    {
#if HAVE_NATIVE_mpn_add_n_sub_n
      if (mpn_cmp (b0, b1, n) < 0)
        {
          cy = mpn_add_n_sub_n (bs1, bsm1, b1, b0, n);
          vm1_neg ^= 1;
        }
      else
        {
          cy = mpn_add_n_sub_n (bs1, bsm1, b0, b1, n);
        }
      bs1[n] = cy >> 1;
#else
      bs1[n] = mpn_add_n (bs1, b0, b1, n);

      if (mpn_cmp (b0, b1, n) < 0)
        {
          mpn_sub_n (bsm1, b1, b0, n);
          vm1_neg ^= 1;
        }
      else
        {
          mpn_sub_n (bsm1, b0, b1, n);
        }
#endif
    }
  else
    {
      bs1[n] = mpn_add (bs1, b0, n, b1, t);

      if (mpn_zero_p (b0 + t, n - t) && mpn_cmp (b0, b1, t) < 0)
        {
          mpn_sub_n (bsm1, b1, b0, t);
          MPN_ZERO (bsm1 + t, n - t);
          vm1_neg ^= 1;
        }
      else
        {
          mpn_sub (bsm1, b0, n, b1, t);
        }
    }
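  /* bsm1 holds |b0 - b1|; when b0 - b1 is negative, the sign is folded into
     vm1_neg, so vm1 is later formed as |A(-1)| * |B(-1)| with the overall
     sign tracked separately.  */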
  /* Compute bs2, recycling bs1. bs2=bs1+b1  */
  mpn_add (bs2, bs1, n + 1, b1, t);
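  /* Since bs1 already holds b0 + b1, adding b1 once more gives
     bs2 = b0 + 2*b1 = B(2).  */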
  ASSERT (as1[n] <= 3);
  ASSERT (bs1[n] <= 1);
  ASSERT (asm1[n] <= 1);
/*ASSERT (bsm1[n] == 0);*/
  ASSERT (as2[n] <= 14);
  ASSERT (bs2[n] <= 2);
#define v0    pp				/* 2n */
#define v1    (pp + 2 * n)			/* 2n+1 */
#define vinf  (pp + 4 * n)			/* s+t */
#define vm1   scratch				/* 2n+1 */
#define v2    (scratch + 2 * n + 1)		/* 2n+2 */
#define scratch_out  scratch + 4 * n + 4	/* Currently unused. */
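  /* v0, v1 and vinf live in the result area pp; vm1 and v2 live in scratch.
     v1 is 2n+1 limbs, so its top limb overlaps vinf[0]; that limb is saved
     in vinf0 below before v1 is computed.  */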
  /* vm1, 2n+1 limbs */
  TOOM42_MUL_N_REC (vm1, asm1, bsm1, n, scratch_out);
  cy = 0;
  if (asm1[n] != 0)
    cy = mpn_add_n (vm1 + n, vm1 + n, bsm1, n);
  vm1[2 * n] = cy;
  TOOM42_MUL_N_REC (v2, as2, bs2, n + 1, scratch_out);  /* v2, 2n+1 limbs */
  /* vinf, s+t limbs */
  if (s > t)  mpn_mul (vinf, a3, s, b1, t);
  else        mpn_mul (vinf, b1, t, a3, s);
  vinf0 = vinf[0];				/* v1 overlaps with this */
  /* v1, 2n+1 limbs */
  TOOM42_MUL_N_REC (v1, as1, bs1, n, scratch_out);
  if (as1[n] == 1)
    {
      cy = bs1[n] + mpn_add_n (v1 + n, v1 + n, bs1, n);
    }
  else if (as1[n] == 2)
    {
#if HAVE_NATIVE_mpn_addlsh1_n
      cy = 2 * bs1[n] + mpn_addlsh1_n (v1 + n, v1 + n, bs1, n);
#else
      cy = 2 * bs1[n] + mpn_addmul_1 (v1 + n, bs1, n, CNST_LIMB(2));
#endif
    }
  else if (as1[n] == 3)
    {
      cy = 3 * bs1[n] + mpn_addmul_1 (v1 + n, bs1, n, CNST_LIMB(3));
    }
  else
    cy = 0;
  if (bs1[n] != 0)
    cy += mpn_add_n (v1 + n, v1 + n, as1, n);
  v1[2 * n] = cy;
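  /* The corrections above account for the high limbs of the evaluations:
     (as1 + ha*B^n) * (bs1 + hb*B^n) contributes ha*bs1 + hb*as1 at offset n
     and ha*hb at offset 2n, where B is the limb base and ha = as1[n],
     hb = bs1[n].  */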
  TOOM42_MUL_N_REC (v0, ap, bp, n, scratch_out);  /* v0, 2n limbs */
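  /* mpn_toom_interpolate_5pts recovers the coefficients of the degree-4
     product polynomial from v0, v1, vm1, v2 and vinf, assembling the full
     4n+s+t = an+bn limb product in pp.  */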
  mpn_toom_interpolate_5pts (pp, v2, vm1, n, s + t, vm1_neg, vinf0);

  TMP_FREE;
}