3 THE FUNCTIONS IN THIS FILE ARE INTERNAL WITH MUTABLE INTERFACES. IT IS ONLY
4 SAFE TO REACH THEM THROUGH DOCUMENTED INTERFACES. IN FACT, IT IS ALMOST
5 GUARANTEED THAT THEY'LL CHANGE OR DISAPPEAR IN A FUTURE GNU MP RELEASE.
7 Copyright 2011, 2012 Free Software Foundation, Inc.
9 This file is part of the GNU MP Library.
11 The GNU MP Library is free software; you can redistribute it and/or modify
12 it under the terms of either:
14 * the GNU Lesser General Public License as published by the Free
15 Software Foundation; either version 3 of the License, or (at your
16 option) any later version.
20 * the GNU General Public License as published by the Free Software
21 Foundation; either version 2 of the License, or (at your option) any
24 or both in parallel, as here.
26 The GNU MP Library is distributed in the hope that it will be useful, but
27 WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
28 or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.
31 You should have received copies of the GNU General Public License and the
32 GNU Lesser General Public License along with the GNU MP Library. If not,
33 see https://www.gnu.org/licenses/. */
39 /* Computes R -= A * B. Result must be non-negative. Normalized down
40 to size an, and resulting size is returned. */
42 submul (mp_ptr rp
, mp_size_t rn
,
43 mp_srcptr ap
, mp_size_t an
, mp_srcptr bp
, mp_size_t bn
)
51 ASSERT (an
+ bn
<= rn
+ 1);
54 tp
= TMP_ALLOC_LIMBS (an
+ bn
);
56 mpn_mul (tp
, ap
, an
, bp
, bn
);
57 ASSERT ((an
+ bn
<= rn
) || (tp
[rn
] == 0));
58 ASSERT_NOCARRY (mpn_sub (rp
, rp
, rn
, tp
, an
+ bn
- (an
+ bn
> rn
)));
61 while (rn
> an
&& (rp
[rn
-1] == 0))
67 /* Computes (a, b) <-- M^{-1} (a; b) */
69 x Take scratch parameter, and figure out scratch need.
71 x Use some fallback for small M->n?
74 hgcd_matrix_apply (const struct hgcd_matrix
*M
,
78 mp_size_t an
, bn
, un
, vn
, nn
;
81 mp_ptr tp
, sp
, scratch
;
87 ASSERT ( (ap
[n
-1] | bp
[n
-1]) > 0);
90 MPN_NORMALIZE (ap
, an
);
92 MPN_NORMALIZE (bp
, bn
);
94 for (i
= 0; i
< 2; i
++)
95 for (j
= 0; j
< 2; j
++)
99 MPN_NORMALIZE (M
->p
[i
][j
], k
);
103 ASSERT (mn
[0][0] > 0);
104 ASSERT (mn
[1][1] > 0);
105 ASSERT ( (mn
[0][1] | mn
[1][0]) > 0);
111 /* A unchanged, M = (1, 0; q, 1) */
112 ASSERT (mn
[0][0] == 1);
113 ASSERT (M
->p
[0][0][0] == 1);
114 ASSERT (mn
[1][1] == 1);
115 ASSERT (M
->p
[1][1][0] == 1);
117 /* Put B <-- B - q A */
118 nn
= submul (bp
, bn
, ap
, an
, M
->p
[1][0], mn
[1][0]);
120 else if (mn
[1][0] == 0)
122 /* B unchanged, M = (1, q; 0, 1) */
123 ASSERT (mn
[0][0] == 1);
124 ASSERT (M
->p
[0][0][0] == 1);
125 ASSERT (mn
[1][1] == 1);
126 ASSERT (M
->p
[1][1][0] == 1);
128 /* Put A <-- A - q * B */
129 nn
= submul (ap
, an
, bp
, bn
, M
->p
[0][1], mn
[0][1]);
133 /* A = m00 a + m01 b ==> a <= A / m00, b <= A / m01.
134 B = m10 a + m11 b ==> a <= B / m10, b <= B / m11. */
135 un
= MIN (an
- mn
[0][0], bn
- mn
[1][0]) + 1;
136 vn
= MIN (an
- mn
[0][1], bn
- mn
[1][1]) + 1;
139 /* In the range of interest, mulmod_bnm1 should always beat mullo. */
140 modn
= mpn_mulmod_bnm1_next_size (nn
+ 1);
142 TMP_ALLOC_LIMBS_3 (tp
, modn
,
144 scratch
, mpn_mulmod_bnm1_itch (modn
, modn
, M
->n
));
146 ASSERT (n
<= 2*modn
);
150 cy
= mpn_add (ap
, ap
, modn
, ap
+ modn
, n
- modn
);
151 MPN_INCR_U (ap
, modn
, cy
);
153 cy
= mpn_add (bp
, bp
, modn
, bp
+ modn
, n
- modn
);
154 MPN_INCR_U (bp
, modn
, cy
);
159 mpn_mulmod_bnm1 (tp
, modn
, ap
, n
, M
->p
[1][1], mn
[1][1], scratch
);
160 mpn_mulmod_bnm1 (sp
, modn
, bp
, n
, M
->p
[0][1], mn
[0][1], scratch
);
162 /* FIXME: Handle the small n case in some better way. */
163 if (n
+ mn
[1][1] < modn
)
164 MPN_ZERO (tp
+ n
+ mn
[1][1], modn
- n
- mn
[1][1]);
165 if (n
+ mn
[0][1] < modn
)
166 MPN_ZERO (sp
+ n
+ mn
[0][1], modn
- n
- mn
[0][1]);
168 cy
= mpn_sub_n (tp
, tp
, sp
, modn
);
169 MPN_DECR_U (tp
, modn
, cy
);
171 ASSERT (mpn_zero_p (tp
+ nn
, modn
- nn
));
173 mpn_mulmod_bnm1 (sp
, modn
, ap
, n
, M
->p
[1][0], mn
[1][0], scratch
);
174 MPN_COPY (ap
, tp
, nn
);
175 mpn_mulmod_bnm1 (tp
, modn
, bp
, n
, M
->p
[0][0], mn
[0][0], scratch
);
177 if (n
+ mn
[1][0] < modn
)
178 MPN_ZERO (sp
+ n
+ mn
[1][0], modn
- n
- mn
[1][0]);
179 if (n
+ mn
[0][0] < modn
)
180 MPN_ZERO (tp
+ n
+ mn
[0][0], modn
- n
- mn
[0][0]);
182 cy
= mpn_sub_n (tp
, tp
, sp
, modn
);
183 MPN_DECR_U (tp
, modn
, cy
);
185 ASSERT (mpn_zero_p (tp
+ nn
, modn
- nn
));
186 MPN_COPY (bp
, tp
, nn
);
188 while ( (ap
[nn
-1] | bp
[nn
-1]) == 0)
200 mpn_hgcd_reduce_itch (mp_size_t n
, mp_size_t p
)
203 if (BELOW_THRESHOLD (n
, HGCD_REDUCE_THRESHOLD
))
205 itch
= mpn_hgcd_itch (n
-p
);
207 /* For arbitrary p, the storage for _adjust is 2*(p + M->n) = 2 *
208 (p + ceil((n-p)/2) - 1 <= n + p - 1 */
209 if (itch
< n
+ p
- 1)
214 itch
= 2*(n
-p
) + mpn_hgcd_itch (n
-p
);
215 /* Currently, hgcd_matrix_apply allocates its own storage. */
220 /* FIXME: Document storage need. */
222 mpn_hgcd_reduce (struct hgcd_matrix
*M
,
223 mp_ptr ap
, mp_ptr bp
, mp_size_t n
, mp_size_t p
,
227 if (BELOW_THRESHOLD (n
, HGCD_REDUCE_THRESHOLD
))
229 nn
= mpn_hgcd (ap
+ p
, bp
+ p
, n
- p
, M
, tp
);
231 /* Needs 2*(p + M->n) <= 2*(floor(n/2) + ceil(n/2) - 1)
233 return mpn_hgcd_matrix_adjust (M
, p
+ nn
, ap
, bp
, p
, tp
);
237 MPN_COPY (tp
, ap
+ p
, n
- p
);
238 MPN_COPY (tp
+ n
- p
, bp
+ p
, n
- p
);
239 if (mpn_hgcd_appr (tp
, tp
+ n
- p
, n
- p
, M
, tp
+ 2*(n
-p
)))
240 return hgcd_matrix_apply (M
, ap
, bp
, n
);