beta-0.89.2
[luatex.git] / source / libs / gmp / gmp-src / mini-gmp / mini-gmp.c
blob7300614dd170a96357aa97e45f5652ed590d2187
1 /* mini-gmp, a minimalistic implementation of a GNU GMP subset.
3 Contributed to the GNU project by Niels Möller
5 Copyright 1991-1997, 1999-2015 Free Software Foundation, Inc.
7 This file is part of the GNU MP Library.
9 The GNU MP Library is free software; you can redistribute it and/or modify
10 it under the terms of either:
12 * the GNU Lesser General Public License as published by the Free
13 Software Foundation; either version 3 of the License, or (at your
14 option) any later version.
18 * the GNU General Public License as published by the Free Software
19 Foundation; either version 2 of the License, or (at your option) any
20 later version.
22 or both in parallel, as here.
24 The GNU MP Library is distributed in the hope that it will be useful, but
25 WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
26 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
27 for more details.
29 You should have received copies of the GNU General Public License and the
30 GNU Lesser General Public License along with the GNU MP Library. If not,
31 see https://www.gnu.org/licenses/. */
33 /* NOTE: All functions in this file which are not declared in
34 mini-gmp.h are internal, and are not intended to be compatible
35 neither with GMP nor with future versions of mini-gmp. */
37 /* Much of the material copied from GMP files, including: gmp-impl.h,
38 longlong.h, mpn/generic/add_n.c, mpn/generic/addmul_1.c,
39 mpn/generic/lshift.c, mpn/generic/mul_1.c,
40 mpn/generic/mul_basecase.c, mpn/generic/rshift.c,
41 mpn/generic/sbpi1_div_qr.c, mpn/generic/sub_n.c,
42 mpn/generic/submul_1.c. */
44 #include <assert.h>
45 #include <ctype.h>
46 #include <limits.h>
47 #include <stdio.h>
48 #include <stdlib.h>
49 #include <string.h>
51 #include "mini-gmp.h"
/* Macros */

/* Width in bits of one limb and of unsigned long. */
#define GMP_LIMB_BITS (sizeof(mp_limb_t) * CHAR_BIT)

#define GMP_LIMB_MAX (~ (mp_limb_t) 0)
#define GMP_LIMB_HIGHBIT ((mp_limb_t) 1 << (GMP_LIMB_BITS - 1))

/* Half-limb boundary: bit just above the low half, and low-half mask. */
#define GMP_HLIMB_BIT ((mp_limb_t) 1 << (GMP_LIMB_BITS / 2))
#define GMP_LLIMB_MASK (GMP_HLIMB_BIT - 1)

#define GMP_ULONG_BITS (sizeof(unsigned long) * CHAR_BIT)
#define GMP_ULONG_HIGHBIT ((unsigned long) 1 << (GMP_ULONG_BITS - 1))

#define GMP_ABS(x) ((x) >= 0 ? (x) : -(x))
/* Negate x into unsigned type T without overflowing on the most
   negative value (computes -(x+1) in T, then adds the 1 back). */
#define GMP_NEG_CAST(T,x) (-((T)((x) + 1) - 1))

#define GMP_MIN(a, b) ((a) < (b) ? (a) : (b))
#define GMP_MAX(a, b) ((a) > (b) ? (a) : (b))

/* Evaluate x, assert the produced carry limb is zero. */
#define gmp_assert_nocarry(x) do { \
    mp_limb_t __cy = x;		   \
    assert (__cy == 0);		   \
  } while (0)

/* Count leading zeros of nonzero x: first skip whole zero bytes,
   then single bits up to the high bit. */
#define gmp_clz(count, x) do {						\
    mp_limb_t __clz_x = (x);						\
    unsigned __clz_c;							\
    for (__clz_c = 0;							\
	 (__clz_x & ((mp_limb_t) 0xff << (GMP_LIMB_BITS - 8))) == 0;	\
	 __clz_c += 8)							\
      __clz_x <<= 8;							\
    for (; (__clz_x & GMP_LIMB_HIGHBIT) == 0; __clz_c++)		\
      __clz_x <<= 1;							\
    (count) = __clz_c;							\
  } while (0)

/* Count trailing zeros of nonzero x, via clz of the isolated
   lowest set bit (x & -x). */
#define gmp_ctz(count, x) do {						\
    mp_limb_t __ctz_x = (x);						\
    unsigned __ctz_c = 0;						\
    gmp_clz (__ctz_c, __ctz_x & - __ctz_x);				\
    (count) = GMP_LIMB_BITS - 1 - __ctz_c;				\
  } while (0)
/* Double-limb addition: (sh,sl) = (ah,al) + (bh,bl); carry out of the
   low limb is detected via unsigned wraparound (__x < al). */
#define gmp_add_ssaaaa(sh, sl, ah, al, bh, bl) \
  do {									\
    mp_limb_t __x;							\
    __x = (al) + (bl);							\
    (sh) = (ah) + (bh) + (__x < (al));					\
    (sl) = __x;								\
  } while (0)

/* Double-limb subtraction: (sh,sl) = (ah,al) - (bh,bl), with borrow
   propagated from the low limb. */
#define gmp_sub_ddmmss(sh, sl, ah, al, bh, bl) \
  do {									\
    mp_limb_t __x;							\
    __x = (al) - (bl);							\
    (sh) = (ah) - (bh) - ((al) < (bl));					\
    (sl) = __x;								\
  } while (0)

/* Full limb multiply: (w1,w0) = u * v, built from four half-limb
   products (schoolbook on half limbs). */
#define gmp_umul_ppmm(w1, w0, u, v)					\
  do {									\
    mp_limb_t __x0, __x1, __x2, __x3;					\
    unsigned __ul, __vl, __uh, __vh;					\
    mp_limb_t __u = (u), __v = (v);					\
									\
    __ul = __u & GMP_LLIMB_MASK;					\
    __uh = __u >> (GMP_LIMB_BITS / 2);					\
    __vl = __v & GMP_LLIMB_MASK;					\
    __vh = __v >> (GMP_LIMB_BITS / 2);					\
									\
    __x0 = (mp_limb_t) __ul * __vl;					\
    __x1 = (mp_limb_t) __ul * __vh;					\
    __x2 = (mp_limb_t) __uh * __vl;					\
    __x3 = (mp_limb_t) __uh * __vh;					\
									\
    __x1 += __x0 >> (GMP_LIMB_BITS / 2);/* this can't give carry */	\
    __x1 += __x2;		/* but this indeed can */		\
    if (__x1 < __x2)		/* did we get it? */			\
      __x3 += GMP_HLIMB_BIT;	/* yes, add it in the proper pos. */	\
									\
    (w1) = __x3 + (__x1 >> (GMP_LIMB_BITS / 2));			\
    (w0) = (__x1 << (GMP_LIMB_BITS / 2)) + (__x0 & GMP_LLIMB_MASK);	\
  } while (0)
/* 2/1 division with precomputed inverse di of the normalized divisor d:
   q = floor((nh*B + nl) / d), r = remainder.  At most two adjustment
   steps are needed (Granlund-Moller division by invariant integers). */
#define gmp_udiv_qrnnd_preinv(q, r, nh, nl, d, di)			\
  do {									\
    mp_limb_t _qh, _ql, _r, _mask;					\
    gmp_umul_ppmm (_qh, _ql, (nh), (di));				\
    gmp_add_ssaaaa (_qh, _ql, _qh, _ql, (nh) + 1, (nl));		\
									\
    /* Candidate remainder; _mask is all-ones when we must step q down. */ \
    _r = (nl) - _qh * (d);						\
    _mask = -(mp_limb_t) (_r > _ql); /* both > and >= are OK */		\
    _qh += _mask;							\
    _r += _mask & (d);							\
    if (_r >= (d))							\
      {									\
	_r -= (d);							\
	_qh++;								\
      }									\
									\
    (r) = _r;								\
    (q) = _qh;								\
  } while (0)

/* 3/2 division: divide <n2,n1,n0> by the normalized <d1,d0> using the
   precomputed 3/2 inverse dinv; q is the quotient limb, (r1,r0) the
   two-limb remainder. */
#define gmp_udiv_qr_3by2(q, r1, r0, n2, n1, n0, d1, d0, dinv)		\
  do {									\
    mp_limb_t _q0, _t1, _t0, _mask;					\
    gmp_umul_ppmm ((q), _q0, (n2), (dinv));				\
    gmp_add_ssaaaa ((q), _q0, (q), _q0, (n2), (n1));			\
									\
    /* Compute the two most significant limbs of n - q'd */		\
    (r1) = (n1) - (d1) * (q);						\
    gmp_sub_ddmmss ((r1), (r0), (r1), (n0), (d1), (d0));		\
    gmp_umul_ppmm (_t1, _t0, (d0), (q));				\
    gmp_sub_ddmmss ((r1), (r0), (r1), (r0), _t1, _t0);			\
    (q)++;								\
									\
    /* Conditionally adjust q and the remainders */			\
    _mask = - (mp_limb_t) ((r1) >= _q0);				\
    (q) += _mask;							\
    gmp_add_ssaaaa ((r1), (r0), (r1), (r0), _mask & (d1), _mask & (d0)); \
    if ((r1) >= (d1))							\
      {									\
	if ((r1) > (d1) || (r0) >= (d0))				\
	  {								\
	    (q)++;							\
	    gmp_sub_ddmmss ((r1), (r0), (r1), (r0), (d1), (d0));	\
	  }								\
      }									\
  } while (0)
183 /* Swap macros. */
184 #define MP_LIMB_T_SWAP(x, y) \
185 do { \
186 mp_limb_t __mp_limb_t_swap__tmp = (x); \
187 (x) = (y); \
188 (y) = __mp_limb_t_swap__tmp; \
189 } while (0)
190 #define MP_SIZE_T_SWAP(x, y) \
191 do { \
192 mp_size_t __mp_size_t_swap__tmp = (x); \
193 (x) = (y); \
194 (y) = __mp_size_t_swap__tmp; \
195 } while (0)
196 #define MP_BITCNT_T_SWAP(x,y) \
197 do { \
198 mp_bitcnt_t __mp_bitcnt_t_swap__tmp = (x); \
199 (x) = (y); \
200 (y) = __mp_bitcnt_t_swap__tmp; \
201 } while (0)
202 #define MP_PTR_SWAP(x, y) \
203 do { \
204 mp_ptr __mp_ptr_swap__tmp = (x); \
205 (x) = (y); \
206 (y) = __mp_ptr_swap__tmp; \
207 } while (0)
208 #define MP_SRCPTR_SWAP(x, y) \
209 do { \
210 mp_srcptr __mp_srcptr_swap__tmp = (x); \
211 (x) = (y); \
212 (y) = __mp_srcptr_swap__tmp; \
213 } while (0)
215 #define MPN_PTR_SWAP(xp,xs, yp,ys) \
216 do { \
217 MP_PTR_SWAP (xp, yp); \
218 MP_SIZE_T_SWAP (xs, ys); \
219 } while(0)
220 #define MPN_SRCPTR_SWAP(xp,xs, yp,ys) \
221 do { \
222 MP_SRCPTR_SWAP (xp, yp); \
223 MP_SIZE_T_SWAP (xs, ys); \
224 } while(0)
226 #define MPZ_PTR_SWAP(x, y) \
227 do { \
228 mpz_ptr __mpz_ptr_swap__tmp = (x); \
229 (x) = (y); \
230 (y) = __mpz_ptr_swap__tmp; \
231 } while (0)
232 #define MPZ_SRCPTR_SWAP(x, y) \
233 do { \
234 mpz_srcptr __mpz_srcptr_swap__tmp = (x); \
235 (x) = (y); \
236 (y) = __mpz_srcptr_swap__tmp; \
237 } while (0)
239 const int mp_bits_per_limb = GMP_LIMB_BITS;
242 /* Memory allocation and other helper functions. */
243 static void
244 gmp_die (const char *msg)
246 fprintf (stderr, "%s\n", msg);
247 abort();
250 static void *
251 gmp_default_alloc (size_t size)
253 void *p;
255 assert (size > 0);
257 p = malloc (size);
258 if (!p)
259 gmp_die("gmp_default_alloc: Virtual memory exhausted.");
261 return p;
264 static void *
265 gmp_default_realloc (void *old, size_t old_size, size_t new_size)
267 void * p;
269 p = realloc (old, new_size);
271 if (!p)
272 gmp_die("gmp_default_realloc: Virtual memory exhausted.");
274 return p;
277 static void
278 gmp_default_free (void *p, size_t size)
280 free (p);
283 static void * (*gmp_allocate_func) (size_t) = gmp_default_alloc;
284 static void * (*gmp_reallocate_func) (void *, size_t, size_t) = gmp_default_realloc;
285 static void (*gmp_free_func) (void *, size_t) = gmp_default_free;
287 void
288 mp_get_memory_functions (void *(**alloc_func) (size_t),
289 void *(**realloc_func) (void *, size_t, size_t),
290 void (**free_func) (void *, size_t))
292 if (alloc_func)
293 *alloc_func = gmp_allocate_func;
295 if (realloc_func)
296 *realloc_func = gmp_reallocate_func;
298 if (free_func)
299 *free_func = gmp_free_func;
302 void
303 mp_set_memory_functions (void *(*alloc_func) (size_t),
304 void *(*realloc_func) (void *, size_t, size_t),
305 void (*free_func) (void *, size_t))
307 if (!alloc_func)
308 alloc_func = gmp_default_alloc;
309 if (!realloc_func)
310 realloc_func = gmp_default_realloc;
311 if (!free_func)
312 free_func = gmp_default_free;
314 gmp_allocate_func = alloc_func;
315 gmp_reallocate_func = realloc_func;
316 gmp_free_func = free_func;
319 #define gmp_xalloc(size) ((*gmp_allocate_func)((size)))
320 #define gmp_free(p) ((*gmp_free_func) ((p), 0))
322 static mp_ptr
323 gmp_xalloc_limbs (mp_size_t size)
325 return (mp_ptr) gmp_xalloc (size * sizeof (mp_limb_t));
328 static mp_ptr
329 gmp_xrealloc_limbs (mp_ptr old, mp_size_t size)
331 assert (size > 0);
332 return (mp_ptr) (*gmp_reallocate_func) (old, 0, size * sizeof (mp_limb_t));
336 /* MPN interface */
338 void
339 mpn_copyi (mp_ptr d, mp_srcptr s, mp_size_t n)
341 mp_size_t i;
342 for (i = 0; i < n; i++)
343 d[i] = s[i];
346 void
347 mpn_copyd (mp_ptr d, mp_srcptr s, mp_size_t n)
349 while (--n >= 0)
350 d[n] = s[n];
354 mpn_cmp (mp_srcptr ap, mp_srcptr bp, mp_size_t n)
356 while (--n >= 0)
358 if (ap[n] != bp[n])
359 return ap[n] > bp[n] ? 1 : -1;
361 return 0;
364 static int
365 mpn_cmp4 (mp_srcptr ap, mp_size_t an, mp_srcptr bp, mp_size_t bn)
367 if (an != bn)
368 return an < bn ? -1 : 1;
369 else
370 return mpn_cmp (ap, bp, an);
373 static mp_size_t
374 mpn_normalized_size (mp_srcptr xp, mp_size_t n)
376 while (n > 0 && xp[n-1] == 0)
377 --n;
378 return n;
382 mpn_zero_p(mp_srcptr rp, mp_size_t n)
384 return mpn_normalized_size (rp, n) == 0;
387 void
388 mpn_zero (mp_ptr rp, mp_size_t n)
390 while (--n >= 0)
391 rp[n] = 0;
394 mp_limb_t
395 mpn_add_1 (mp_ptr rp, mp_srcptr ap, mp_size_t n, mp_limb_t b)
397 mp_size_t i;
399 assert (n > 0);
400 i = 0;
403 mp_limb_t r = ap[i] + b;
404 /* Carry out */
405 b = (r < b);
406 rp[i] = r;
408 while (++i < n);
410 return b;
413 mp_limb_t
414 mpn_add_n (mp_ptr rp, mp_srcptr ap, mp_srcptr bp, mp_size_t n)
416 mp_size_t i;
417 mp_limb_t cy;
419 for (i = 0, cy = 0; i < n; i++)
421 mp_limb_t a, b, r;
422 a = ap[i]; b = bp[i];
423 r = a + cy;
424 cy = (r < cy);
425 r += b;
426 cy += (r < b);
427 rp[i] = r;
429 return cy;
432 mp_limb_t
433 mpn_add (mp_ptr rp, mp_srcptr ap, mp_size_t an, mp_srcptr bp, mp_size_t bn)
435 mp_limb_t cy;
437 assert (an >= bn);
439 cy = mpn_add_n (rp, ap, bp, bn);
440 if (an > bn)
441 cy = mpn_add_1 (rp + bn, ap + bn, an - bn, cy);
442 return cy;
445 mp_limb_t
446 mpn_sub_1 (mp_ptr rp, mp_srcptr ap, mp_size_t n, mp_limb_t b)
448 mp_size_t i;
450 assert (n > 0);
452 i = 0;
455 mp_limb_t a = ap[i];
456 /* Carry out */
457 mp_limb_t cy = a < b;;
458 rp[i] = a - b;
459 b = cy;
461 while (++i < n);
463 return b;
466 mp_limb_t
467 mpn_sub_n (mp_ptr rp, mp_srcptr ap, mp_srcptr bp, mp_size_t n)
469 mp_size_t i;
470 mp_limb_t cy;
472 for (i = 0, cy = 0; i < n; i++)
474 mp_limb_t a, b;
475 a = ap[i]; b = bp[i];
476 b += cy;
477 cy = (b < cy);
478 cy += (a < b);
479 rp[i] = a - b;
481 return cy;
484 mp_limb_t
485 mpn_sub (mp_ptr rp, mp_srcptr ap, mp_size_t an, mp_srcptr bp, mp_size_t bn)
487 mp_limb_t cy;
489 assert (an >= bn);
491 cy = mpn_sub_n (rp, ap, bp, bn);
492 if (an > bn)
493 cy = mpn_sub_1 (rp + bn, ap + bn, an - bn, cy);
494 return cy;
497 mp_limb_t
498 mpn_mul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl)
500 mp_limb_t ul, cl, hpl, lpl;
502 assert (n >= 1);
504 cl = 0;
507 ul = *up++;
508 gmp_umul_ppmm (hpl, lpl, ul, vl);
510 lpl += cl;
511 cl = (lpl < cl) + hpl;
513 *rp++ = lpl;
515 while (--n != 0);
517 return cl;
520 mp_limb_t
521 mpn_addmul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl)
523 mp_limb_t ul, cl, hpl, lpl, rl;
525 assert (n >= 1);
527 cl = 0;
530 ul = *up++;
531 gmp_umul_ppmm (hpl, lpl, ul, vl);
533 lpl += cl;
534 cl = (lpl < cl) + hpl;
536 rl = *rp;
537 lpl = rl + lpl;
538 cl += lpl < rl;
539 *rp++ = lpl;
541 while (--n != 0);
543 return cl;
546 mp_limb_t
547 mpn_submul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl)
549 mp_limb_t ul, cl, hpl, lpl, rl;
551 assert (n >= 1);
553 cl = 0;
556 ul = *up++;
557 gmp_umul_ppmm (hpl, lpl, ul, vl);
559 lpl += cl;
560 cl = (lpl < cl) + hpl;
562 rl = *rp;
563 lpl = rl - lpl;
564 cl += lpl > rl;
565 *rp++ = lpl;
567 while (--n != 0);
569 return cl;
572 mp_limb_t
573 mpn_mul (mp_ptr rp, mp_srcptr up, mp_size_t un, mp_srcptr vp, mp_size_t vn)
575 assert (un >= vn);
576 assert (vn >= 1);
578 /* We first multiply by the low order limb. This result can be
579 stored, not added, to rp. We also avoid a loop for zeroing this
580 way. */
582 rp[un] = mpn_mul_1 (rp, up, un, vp[0]);
584 /* Now accumulate the product of up[] and the next higher limb from
585 vp[]. */
587 while (--vn >= 1)
589 rp += 1, vp += 1;
590 rp[un] = mpn_addmul_1 (rp, up, un, vp[0]);
592 return rp[un];
595 void
596 mpn_mul_n (mp_ptr rp, mp_srcptr ap, mp_srcptr bp, mp_size_t n)
598 mpn_mul (rp, ap, n, bp, n);
601 void
602 mpn_sqr (mp_ptr rp, mp_srcptr ap, mp_size_t n)
604 mpn_mul (rp, ap, n, ap, n);
607 mp_limb_t
608 mpn_lshift (mp_ptr rp, mp_srcptr up, mp_size_t n, unsigned int cnt)
610 mp_limb_t high_limb, low_limb;
611 unsigned int tnc;
612 mp_limb_t retval;
614 assert (n >= 1);
615 assert (cnt >= 1);
616 assert (cnt < GMP_LIMB_BITS);
618 up += n;
619 rp += n;
621 tnc = GMP_LIMB_BITS - cnt;
622 low_limb = *--up;
623 retval = low_limb >> tnc;
624 high_limb = (low_limb << cnt);
626 while (--n != 0)
628 low_limb = *--up;
629 *--rp = high_limb | (low_limb >> tnc);
630 high_limb = (low_limb << cnt);
632 *--rp = high_limb;
634 return retval;
637 mp_limb_t
638 mpn_rshift (mp_ptr rp, mp_srcptr up, mp_size_t n, unsigned int cnt)
640 mp_limb_t high_limb, low_limb;
641 unsigned int tnc;
642 mp_limb_t retval;
644 assert (n >= 1);
645 assert (cnt >= 1);
646 assert (cnt < GMP_LIMB_BITS);
648 tnc = GMP_LIMB_BITS - cnt;
649 high_limb = *up++;
650 retval = (high_limb << tnc);
651 low_limb = high_limb >> cnt;
653 while (--n != 0)
655 high_limb = *up++;
656 *rp++ = low_limb | (high_limb << tnc);
657 low_limb = high_limb >> cnt;
659 *rp = low_limb;
661 return retval;
664 static mp_bitcnt_t
665 mpn_common_scan (mp_limb_t limb, mp_size_t i, mp_srcptr up, mp_size_t un,
666 mp_limb_t ux)
668 unsigned cnt;
670 assert (ux == 0 || ux == GMP_LIMB_MAX);
671 assert (0 <= i && i <= un );
673 while (limb == 0)
675 i++;
676 if (i == un)
677 return (ux == 0 ? ~(mp_bitcnt_t) 0 : un * GMP_LIMB_BITS);
678 limb = ux ^ up[i];
680 gmp_ctz (cnt, limb);
681 return (mp_bitcnt_t) i * GMP_LIMB_BITS + cnt;
684 mp_bitcnt_t
685 mpn_scan1 (mp_srcptr ptr, mp_bitcnt_t bit)
687 mp_size_t i;
688 i = bit / GMP_LIMB_BITS;
690 return mpn_common_scan ( ptr[i] & (GMP_LIMB_MAX << (bit % GMP_LIMB_BITS)),
691 i, ptr, i, 0);
694 mp_bitcnt_t
695 mpn_scan0 (mp_srcptr ptr, mp_bitcnt_t bit)
697 mp_size_t i;
698 i = bit / GMP_LIMB_BITS;
700 return mpn_common_scan (~ptr[i] & (GMP_LIMB_MAX << (bit % GMP_LIMB_BITS)),
701 i, ptr, i, GMP_LIMB_MAX);
705 /* MPN division interface. */
706 mp_limb_t
707 mpn_invert_3by2 (mp_limb_t u1, mp_limb_t u0)
709 mp_limb_t r, p, m;
710 unsigned ul, uh;
711 unsigned ql, qh;
713 /* First, do a 2/1 inverse. */
714 /* The inverse m is defined as floor( (B^2 - 1 - u1)/u1 ), so that 0 <
715 * B^2 - (B + m) u1 <= u1 */
716 assert (u1 >= GMP_LIMB_HIGHBIT);
718 ul = u1 & GMP_LLIMB_MASK;
719 uh = u1 >> (GMP_LIMB_BITS / 2);
721 qh = ~u1 / uh;
722 r = ((~u1 - (mp_limb_t) qh * uh) << (GMP_LIMB_BITS / 2)) | GMP_LLIMB_MASK;
724 p = (mp_limb_t) qh * ul;
725 /* Adjustment steps taken from udiv_qrnnd_c */
726 if (r < p)
728 qh--;
729 r += u1;
730 if (r >= u1) /* i.e. we didn't get carry when adding to r */
731 if (r < p)
733 qh--;
734 r += u1;
737 r -= p;
739 /* Do a 3/2 division (with half limb size) */
740 p = (r >> (GMP_LIMB_BITS / 2)) * qh + r;
741 ql = (p >> (GMP_LIMB_BITS / 2)) + 1;
743 /* By the 3/2 method, we don't need the high half limb. */
744 r = (r << (GMP_LIMB_BITS / 2)) + GMP_LLIMB_MASK - ql * u1;
746 if (r >= (p << (GMP_LIMB_BITS / 2)))
748 ql--;
749 r += u1;
751 m = ((mp_limb_t) qh << (GMP_LIMB_BITS / 2)) + ql;
752 if (r >= u1)
754 m++;
755 r -= u1;
758 if (u0 > 0)
760 mp_limb_t th, tl;
761 r = ~r;
762 r += u0;
763 if (r < u0)
765 m--;
766 if (r >= u1)
768 m--;
769 r -= u1;
771 r -= u1;
773 gmp_umul_ppmm (th, tl, u0, m);
774 r += th;
775 if (r < th)
777 m--;
778 m -= ((r > u1) | ((r == u1) & (tl > u0)));
782 return m;
785 struct gmp_div_inverse
787 /* Normalization shift count. */
788 unsigned shift;
789 /* Normalized divisor (d0 unused for mpn_div_qr_1) */
790 mp_limb_t d1, d0;
791 /* Inverse, for 2/1 or 3/2. */
792 mp_limb_t di;
795 static void
796 mpn_div_qr_1_invert (struct gmp_div_inverse *inv, mp_limb_t d)
798 unsigned shift;
800 assert (d > 0);
801 gmp_clz (shift, d);
802 inv->shift = shift;
803 inv->d1 = d << shift;
804 inv->di = mpn_invert_limb (inv->d1);
807 static void
808 mpn_div_qr_2_invert (struct gmp_div_inverse *inv,
809 mp_limb_t d1, mp_limb_t d0)
811 unsigned shift;
813 assert (d1 > 0);
814 gmp_clz (shift, d1);
815 inv->shift = shift;
816 if (shift > 0)
818 d1 = (d1 << shift) | (d0 >> (GMP_LIMB_BITS - shift));
819 d0 <<= shift;
821 inv->d1 = d1;
822 inv->d0 = d0;
823 inv->di = mpn_invert_3by2 (d1, d0);
826 static void
827 mpn_div_qr_invert (struct gmp_div_inverse *inv,
828 mp_srcptr dp, mp_size_t dn)
830 assert (dn > 0);
832 if (dn == 1)
833 mpn_div_qr_1_invert (inv, dp[0]);
834 else if (dn == 2)
835 mpn_div_qr_2_invert (inv, dp[1], dp[0]);
836 else
838 unsigned shift;
839 mp_limb_t d1, d0;
841 d1 = dp[dn-1];
842 d0 = dp[dn-2];
843 assert (d1 > 0);
844 gmp_clz (shift, d1);
845 inv->shift = shift;
846 if (shift > 0)
848 d1 = (d1 << shift) | (d0 >> (GMP_LIMB_BITS - shift));
849 d0 = (d0 << shift) | (dp[dn-3] >> (GMP_LIMB_BITS - shift));
851 inv->d1 = d1;
852 inv->d0 = d0;
853 inv->di = mpn_invert_3by2 (d1, d0);
857 /* Not matching current public gmp interface, rather corresponding to
858 the sbpi1_div_* functions. */
859 static mp_limb_t
860 mpn_div_qr_1_preinv (mp_ptr qp, mp_srcptr np, mp_size_t nn,
861 const struct gmp_div_inverse *inv)
863 mp_limb_t d, di;
864 mp_limb_t r;
865 mp_ptr tp = NULL;
867 if (inv->shift > 0)
869 tp = gmp_xalloc_limbs (nn);
870 r = mpn_lshift (tp, np, nn, inv->shift);
871 np = tp;
873 else
874 r = 0;
876 d = inv->d1;
877 di = inv->di;
878 while (--nn >= 0)
880 mp_limb_t q;
882 gmp_udiv_qrnnd_preinv (q, r, r, np[nn], d, di);
883 if (qp)
884 qp[nn] = q;
886 if (inv->shift > 0)
887 gmp_free (tp);
889 return r >> inv->shift;
892 static mp_limb_t
893 mpn_div_qr_1 (mp_ptr qp, mp_srcptr np, mp_size_t nn, mp_limb_t d)
895 assert (d > 0);
897 /* Special case for powers of two. */
898 if ((d & (d-1)) == 0)
900 mp_limb_t r = np[0] & (d-1);
901 if (qp)
903 if (d <= 1)
904 mpn_copyi (qp, np, nn);
905 else
907 unsigned shift;
908 gmp_ctz (shift, d);
909 mpn_rshift (qp, np, nn, shift);
912 return r;
914 else
916 struct gmp_div_inverse inv;
917 mpn_div_qr_1_invert (&inv, d);
918 return mpn_div_qr_1_preinv (qp, np, nn, &inv);
922 static void
923 mpn_div_qr_2_preinv (mp_ptr qp, mp_ptr rp, mp_srcptr np, mp_size_t nn,
924 const struct gmp_div_inverse *inv)
926 unsigned shift;
927 mp_size_t i;
928 mp_limb_t d1, d0, di, r1, r0;
929 mp_ptr tp;
931 assert (nn >= 2);
932 shift = inv->shift;
933 d1 = inv->d1;
934 d0 = inv->d0;
935 di = inv->di;
937 if (shift > 0)
939 tp = gmp_xalloc_limbs (nn);
940 r1 = mpn_lshift (tp, np, nn, shift);
941 np = tp;
943 else
944 r1 = 0;
946 r0 = np[nn - 1];
948 i = nn - 2;
951 mp_limb_t n0, q;
952 n0 = np[i];
953 gmp_udiv_qr_3by2 (q, r1, r0, r1, r0, n0, d1, d0, di);
955 if (qp)
956 qp[i] = q;
958 while (--i >= 0);
960 if (shift > 0)
962 assert ((r0 << (GMP_LIMB_BITS - shift)) == 0);
963 r0 = (r0 >> shift) | (r1 << (GMP_LIMB_BITS - shift));
964 r1 >>= shift;
966 gmp_free (tp);
969 rp[1] = r1;
970 rp[0] = r0;
973 #if 0
974 static void
975 mpn_div_qr_2 (mp_ptr qp, mp_ptr rp, mp_srcptr np, mp_size_t nn,
976 mp_limb_t d1, mp_limb_t d0)
978 struct gmp_div_inverse inv;
979 assert (nn >= 2);
981 mpn_div_qr_2_invert (&inv, d1, d0);
982 mpn_div_qr_2_preinv (qp, rp, np, nn, &inv);
984 #endif
986 static void
987 mpn_div_qr_pi1 (mp_ptr qp,
988 mp_ptr np, mp_size_t nn, mp_limb_t n1,
989 mp_srcptr dp, mp_size_t dn,
990 mp_limb_t dinv)
992 mp_size_t i;
994 mp_limb_t d1, d0;
995 mp_limb_t cy, cy1;
996 mp_limb_t q;
998 assert (dn > 2);
999 assert (nn >= dn);
1001 d1 = dp[dn - 1];
1002 d0 = dp[dn - 2];
1004 assert ((d1 & GMP_LIMB_HIGHBIT) != 0);
1005 /* Iteration variable is the index of the q limb.
1007 * We divide <n1, np[dn-1+i], np[dn-2+i], np[dn-3+i],..., np[i]>
1008 * by <d1, d0, dp[dn-3], ..., dp[0] >
1011 i = nn - dn;
1014 mp_limb_t n0 = np[dn-1+i];
1016 if (n1 == d1 && n0 == d0)
1018 q = GMP_LIMB_MAX;
1019 mpn_submul_1 (np+i, dp, dn, q);
1020 n1 = np[dn-1+i]; /* update n1, last loop's value will now be invalid */
1022 else
1024 gmp_udiv_qr_3by2 (q, n1, n0, n1, n0, np[dn-2+i], d1, d0, dinv);
1026 cy = mpn_submul_1 (np + i, dp, dn-2, q);
1028 cy1 = n0 < cy;
1029 n0 = n0 - cy;
1030 cy = n1 < cy1;
1031 n1 = n1 - cy1;
1032 np[dn-2+i] = n0;
1034 if (cy != 0)
1036 n1 += d1 + mpn_add_n (np + i, np + i, dp, dn - 1);
1037 q--;
1041 if (qp)
1042 qp[i] = q;
1044 while (--i >= 0);
1046 np[dn - 1] = n1;
1049 static void
1050 mpn_div_qr_preinv (mp_ptr qp, mp_ptr np, mp_size_t nn,
1051 mp_srcptr dp, mp_size_t dn,
1052 const struct gmp_div_inverse *inv)
1054 assert (dn > 0);
1055 assert (nn >= dn);
1057 if (dn == 1)
1058 np[0] = mpn_div_qr_1_preinv (qp, np, nn, inv);
1059 else if (dn == 2)
1060 mpn_div_qr_2_preinv (qp, np, np, nn, inv);
1061 else
1063 mp_limb_t nh;
1064 unsigned shift;
1066 assert (inv->d1 == dp[dn-1]);
1067 assert (inv->d0 == dp[dn-2]);
1068 assert ((inv->d1 & GMP_LIMB_HIGHBIT) != 0);
1070 shift = inv->shift;
1071 if (shift > 0)
1072 nh = mpn_lshift (np, np, nn, shift);
1073 else
1074 nh = 0;
1076 mpn_div_qr_pi1 (qp, np, nn, nh, dp, dn, inv->di);
1078 if (shift > 0)
1079 gmp_assert_nocarry (mpn_rshift (np, np, dn, shift));
1083 static void
1084 mpn_div_qr (mp_ptr qp, mp_ptr np, mp_size_t nn, mp_srcptr dp, mp_size_t dn)
1086 struct gmp_div_inverse inv;
1087 mp_ptr tp = NULL;
1089 assert (dn > 0);
1090 assert (nn >= dn);
1092 mpn_div_qr_invert (&inv, dp, dn);
1093 if (dn > 2 && inv.shift > 0)
1095 tp = gmp_xalloc_limbs (dn);
1096 gmp_assert_nocarry (mpn_lshift (tp, dp, dn, inv.shift));
1097 dp = tp;
1099 mpn_div_qr_preinv (qp, np, nn, dp, dn, &inv);
1100 if (tp)
1101 gmp_free (tp);
/* MPN base conversion. */

/* If b is a power of two in [2,256], return log2(b); otherwise 0.
   Restyled from a switch to a loop over the eight candidate powers. */
static unsigned
mpn_base_power_of_two_p (unsigned b)
{
  unsigned bits;
  for (bits = 1; bits <= 8; bits++)
    if (b == (1U << bits))
      return bits;
  return 0;
}
1123 struct mpn_base_info
1125 /* bb is the largest power of the base which fits in one limb, and
1126 exp is the corresponding exponent. */
1127 unsigned exp;
1128 mp_limb_t bb;
1131 static void
1132 mpn_get_base_info (struct mpn_base_info *info, mp_limb_t b)
1134 mp_limb_t m;
1135 mp_limb_t p;
1136 unsigned exp;
1138 m = GMP_LIMB_MAX / b;
1139 for (exp = 1, p = b; p <= m; exp++)
1140 p *= b;
1142 info->exp = exp;
1143 info->bb = p;
1146 static mp_bitcnt_t
1147 mpn_limb_size_in_base_2 (mp_limb_t u)
1149 unsigned shift;
1151 assert (u > 0);
1152 gmp_clz (shift, u);
1153 return GMP_LIMB_BITS - shift;
1156 static size_t
1157 mpn_get_str_bits (unsigned char *sp, unsigned bits, mp_srcptr up, mp_size_t un)
1159 unsigned char mask;
1160 size_t sn, j;
1161 mp_size_t i;
1162 int shift;
1164 sn = ((un - 1) * GMP_LIMB_BITS + mpn_limb_size_in_base_2 (up[un-1])
1165 + bits - 1) / bits;
1167 mask = (1U << bits) - 1;
1169 for (i = 0, j = sn, shift = 0; j-- > 0;)
1171 unsigned char digit = up[i] >> shift;
1173 shift += bits;
1175 if (shift >= GMP_LIMB_BITS && ++i < un)
1177 shift -= GMP_LIMB_BITS;
1178 digit |= up[i] << (bits - shift);
1180 sp[j] = digit & mask;
1182 return sn;
1185 /* We generate digits from the least significant end, and reverse at
1186 the end. */
1187 static size_t
1188 mpn_limb_get_str (unsigned char *sp, mp_limb_t w,
1189 const struct gmp_div_inverse *binv)
1191 mp_size_t i;
1192 for (i = 0; w > 0; i++)
1194 mp_limb_t h, l, r;
1196 h = w >> (GMP_LIMB_BITS - binv->shift);
1197 l = w << binv->shift;
1199 gmp_udiv_qrnnd_preinv (w, r, h, l, binv->d1, binv->di);
1200 assert ( (r << (GMP_LIMB_BITS - binv->shift)) == 0);
1201 r >>= binv->shift;
1203 sp[i] = r;
1205 return i;
1208 static size_t
1209 mpn_get_str_other (unsigned char *sp,
1210 int base, const struct mpn_base_info *info,
1211 mp_ptr up, mp_size_t un)
1213 struct gmp_div_inverse binv;
1214 size_t sn;
1215 size_t i;
1217 mpn_div_qr_1_invert (&binv, base);
1219 sn = 0;
1221 if (un > 1)
1223 struct gmp_div_inverse bbinv;
1224 mpn_div_qr_1_invert (&bbinv, info->bb);
1228 mp_limb_t w;
1229 size_t done;
1230 w = mpn_div_qr_1_preinv (up, up, un, &bbinv);
1231 un -= (up[un-1] == 0);
1232 done = mpn_limb_get_str (sp + sn, w, &binv);
1234 for (sn += done; done < info->exp; done++)
1235 sp[sn++] = 0;
1237 while (un > 1);
1239 sn += mpn_limb_get_str (sp + sn, up[0], &binv);
1241 /* Reverse order */
1242 for (i = 0; 2*i + 1 < sn; i++)
1244 unsigned char t = sp[i];
1245 sp[i] = sp[sn - i - 1];
1246 sp[sn - i - 1] = t;
1249 return sn;
1252 size_t
1253 mpn_get_str (unsigned char *sp, int base, mp_ptr up, mp_size_t un)
1255 unsigned bits;
1257 assert (un > 0);
1258 assert (up[un-1] > 0);
1260 bits = mpn_base_power_of_two_p (base);
1261 if (bits)
1262 return mpn_get_str_bits (sp, bits, up, un);
1263 else
1265 struct mpn_base_info info;
1267 mpn_get_base_info (&info, base);
1268 return mpn_get_str_other (sp, base, &info, up, un);
1272 static mp_size_t
1273 mpn_set_str_bits (mp_ptr rp, const unsigned char *sp, size_t sn,
1274 unsigned bits)
1276 mp_size_t rn;
1277 size_t j;
1278 unsigned shift;
1280 for (j = sn, rn = 0, shift = 0; j-- > 0; )
1282 if (shift == 0)
1284 rp[rn++] = sp[j];
1285 shift += bits;
1287 else
1289 rp[rn-1] |= (mp_limb_t) sp[j] << shift;
1290 shift += bits;
1291 if (shift >= GMP_LIMB_BITS)
1293 shift -= GMP_LIMB_BITS;
1294 if (shift > 0)
1295 rp[rn++] = (mp_limb_t) sp[j] >> (bits - shift);
1299 rn = mpn_normalized_size (rp, rn);
1300 return rn;
1303 static mp_size_t
1304 mpn_set_str_other (mp_ptr rp, const unsigned char *sp, size_t sn,
1305 mp_limb_t b, const struct mpn_base_info *info)
1307 mp_size_t rn;
1308 mp_limb_t w;
1309 unsigned k;
1310 size_t j;
1312 k = 1 + (sn - 1) % info->exp;
1314 j = 0;
1315 w = sp[j++];
1316 while (--k != 0)
1317 w = w * b + sp[j++];
1319 rp[0] = w;
1321 for (rn = (w > 0); j < sn;)
1323 mp_limb_t cy;
1325 w = sp[j++];
1326 for (k = 1; k < info->exp; k++)
1327 w = w * b + sp[j++];
1329 cy = mpn_mul_1 (rp, rp, rn, info->bb);
1330 cy += mpn_add_1 (rp, rp, rn, w);
1331 if (cy > 0)
1332 rp[rn++] = cy;
1334 assert (j == sn);
1336 return rn;
1339 mp_size_t
1340 mpn_set_str (mp_ptr rp, const unsigned char *sp, size_t sn, int base)
1342 unsigned bits;
1344 if (sn == 0)
1345 return 0;
1347 bits = mpn_base_power_of_two_p (base);
1348 if (bits)
1349 return mpn_set_str_bits (rp, sp, sn, bits);
1350 else
1352 struct mpn_base_info info;
1354 mpn_get_base_info (&info, base);
1355 return mpn_set_str_other (rp, sp, sn, base, &info);
1360 /* MPZ interface */
1361 void
1362 mpz_init (mpz_t r)
1364 r->_mp_alloc = 1;
1365 r->_mp_size = 0;
1366 r->_mp_d = gmp_xalloc_limbs (1);
1369 /* The utility of this function is a bit limited, since many functions
1370 assigns the result variable using mpz_swap. */
1371 void
1372 mpz_init2 (mpz_t r, mp_bitcnt_t bits)
1374 mp_size_t rn;
1376 bits -= (bits != 0); /* Round down, except if 0 */
1377 rn = 1 + bits / GMP_LIMB_BITS;
1379 r->_mp_alloc = rn;
1380 r->_mp_size = 0;
1381 r->_mp_d = gmp_xalloc_limbs (rn);
1384 void
1385 mpz_clear (mpz_t r)
1387 gmp_free (r->_mp_d);
1390 static mp_ptr
1391 mpz_realloc (mpz_t r, mp_size_t size)
1393 size = GMP_MAX (size, 1);
1395 r->_mp_d = gmp_xrealloc_limbs (r->_mp_d, size);
1396 r->_mp_alloc = size;
1398 if (GMP_ABS (r->_mp_size) > size)
1399 r->_mp_size = 0;
1401 return r->_mp_d;
1404 /* Realloc for an mpz_t WHAT if it has less than NEEDED limbs. */
1405 #define MPZ_REALLOC(z,n) ((n) > (z)->_mp_alloc \
1406 ? mpz_realloc(z,n) \
1407 : (z)->_mp_d)
1409 /* MPZ assignment and basic conversions. */
1410 void
1411 mpz_set_si (mpz_t r, signed long int x)
1413 if (x >= 0)
1414 mpz_set_ui (r, x);
1415 else /* (x < 0) */
1417 r->_mp_size = -1;
1418 r->_mp_d[0] = GMP_NEG_CAST (unsigned long int, x);
1422 void
1423 mpz_set_ui (mpz_t r, unsigned long int x)
1425 if (x > 0)
1427 r->_mp_size = 1;
1428 r->_mp_d[0] = x;
1430 else
1431 r->_mp_size = 0;
1434 void
1435 mpz_set (mpz_t r, const mpz_t x)
1437 /* Allow the NOP r == x */
1438 if (r != x)
1440 mp_size_t n;
1441 mp_ptr rp;
1443 n = GMP_ABS (x->_mp_size);
1444 rp = MPZ_REALLOC (r, n);
1446 mpn_copyi (rp, x->_mp_d, n);
1447 r->_mp_size = x->_mp_size;
1451 void
1452 mpz_init_set_si (mpz_t r, signed long int x)
1454 mpz_init (r);
1455 mpz_set_si (r, x);
1458 void
1459 mpz_init_set_ui (mpz_t r, unsigned long int x)
1461 mpz_init (r);
1462 mpz_set_ui (r, x);
1465 void
1466 mpz_init_set (mpz_t r, const mpz_t x)
1468 mpz_init (r);
1469 mpz_set (r, x);
1473 mpz_fits_slong_p (const mpz_t u)
1475 mp_size_t us = u->_mp_size;
1477 if (us == 1)
1478 return u->_mp_d[0] < GMP_LIMB_HIGHBIT;
1479 else if (us == -1)
1480 return u->_mp_d[0] <= GMP_LIMB_HIGHBIT;
1481 else
1482 return (us == 0);
1486 mpz_fits_ulong_p (const mpz_t u)
1488 mp_size_t us = u->_mp_size;
1490 return (us == (us > 0));
1493 long int
1494 mpz_get_si (const mpz_t u)
1496 mp_size_t us = u->_mp_size;
1498 if (us > 0)
1499 return (long) (u->_mp_d[0] & ~GMP_LIMB_HIGHBIT);
1500 else if (us < 0)
1501 return (long) (- u->_mp_d[0] | GMP_LIMB_HIGHBIT);
1502 else
1503 return 0;
1506 unsigned long int
1507 mpz_get_ui (const mpz_t u)
1509 return u->_mp_size == 0 ? 0 : u->_mp_d[0];
1512 size_t
1513 mpz_size (const mpz_t u)
1515 return GMP_ABS (u->_mp_size);
1518 mp_limb_t
1519 mpz_getlimbn (const mpz_t u, mp_size_t n)
1521 if (n >= 0 && n < GMP_ABS (u->_mp_size))
1522 return u->_mp_d[n];
1523 else
1524 return 0;
1527 void
1528 mpz_realloc2 (mpz_t x, mp_bitcnt_t n)
1530 mpz_realloc (x, 1 + (n - (n != 0)) / GMP_LIMB_BITS);
1533 mp_srcptr
1534 mpz_limbs_read (mpz_srcptr x)
1536 return x->_mp_d;;
1539 mp_ptr
1540 mpz_limbs_modify (mpz_t x, mp_size_t n)
1542 assert (n > 0);
1543 return MPZ_REALLOC (x, n);
1546 mp_ptr
1547 mpz_limbs_write (mpz_t x, mp_size_t n)
1549 return mpz_limbs_modify (x, n);
1552 void
1553 mpz_limbs_finish (mpz_t x, mp_size_t xs)
1555 mp_size_t xn;
1556 xn = mpn_normalized_size (x->_mp_d, GMP_ABS (xs));
1557 x->_mp_size = xs < 0 ? -xn : xn;
1560 mpz_srcptr
1561 mpz_roinit_n (mpz_t x, mp_srcptr xp, mp_size_t xs)
1563 x->_mp_alloc = 0;
1564 x->_mp_d = (mp_ptr) xp;
1565 mpz_limbs_finish (x, xs);
1566 return x;
1570 /* Conversions and comparison to double. */
1571 void
1572 mpz_set_d (mpz_t r, double x)
1574 int sign;
1575 mp_ptr rp;
1576 mp_size_t rn, i;
1577 double B;
1578 double Bi;
1579 mp_limb_t f;
1581 /* x != x is true when x is a NaN, and x == x * 0.5 is true when x is
1582 zero or infinity. */
1583 if (x != x || x == x * 0.5)
1585 r->_mp_size = 0;
1586 return;
1589 sign = x < 0.0 ;
1590 if (sign)
1591 x = - x;
1593 if (x < 1.0)
1595 r->_mp_size = 0;
1596 return;
1598 B = 2.0 * (double) GMP_LIMB_HIGHBIT;
1599 Bi = 1.0 / B;
1600 for (rn = 1; x >= B; rn++)
1601 x *= Bi;
1603 rp = MPZ_REALLOC (r, rn);
1605 f = (mp_limb_t) x;
1606 x -= f;
1607 assert (x < 1.0);
1608 i = rn-1;
1609 rp[i] = f;
1610 while (--i >= 0)
1612 x = B * x;
1613 f = (mp_limb_t) x;
1614 x -= f;
1615 assert (x < 1.0);
1616 rp[i] = f;
1619 r->_mp_size = sign ? - rn : rn;
/* Initialize r and set it to trunc(x). */
1622 void
1623 mpz_init_set_d (mpz_t r, double x)
1625 mpz_init (r);
1626 mpz_set_d (r, x);
/* Convert u to double via Horner evaluation over the limbs, most
   significant first.  Rounding of excess precision follows the
   platform's double arithmetic. */
1629 double
1630 mpz_get_d (const mpz_t u)
1632 mp_size_t un;
1633 double x;
1634 double B = 2.0 * (double) GMP_LIMB_HIGHBIT;
1636 un = GMP_ABS (u->_mp_size);
1638 if (un == 0)
1639 return 0.0;
1641 x = u->_mp_d[--un];
1642 while (un > 0)
1643 x = B*x + u->_mp_d[--un];
1645 if (u->_mp_size < 0)
1646 x = -x;
1648 return x;
/* Compare |x| with |d|; returns <0, 0 or >0.  d is assumed finite
   (NaN comparison behavior is unspecified here).  Works limb by limb
   against a scaled copy of d to avoid converting x to double. */
1652 mpz_cmpabs_d (const mpz_t x, double d)
1654 mp_size_t xn;
1655 double B, Bi;
1656 mp_size_t i;
1658 xn = x->_mp_size;
1659 d = GMP_ABS (d);
1661 if (xn != 0)
1663 xn = GMP_ABS (xn);
1665 B = 2.0 * (double) GMP_LIMB_HIGHBIT;
1666 Bi = 1.0 / B;
1668 /* Scale d so it can be compared with the top limb. */
1669 for (i = 1; i < xn; i++)
1670 d *= Bi;
/* After scaling, d >= B means d has more limbs than x. */
1672 if (d >= B)
1673 return -1;
1675 /* Compare floor(d) to top limb, subtract and cancel when equal. */
1676 for (i = xn; i-- > 0;)
1678 mp_limb_t f, xl;
1680 f = (mp_limb_t) d;
1681 xl = x->_mp_d[i];
1682 if (xl > f)
1683 return 1;
1684 else if (xl < f)
1685 return -1;
/* Limbs equal: shift the remaining fraction of d up for the next
   iteration. */
1686 d = B * (d - f);
/* All limbs matched; any leftover fraction of d makes |x| < |d|. */
1689 return - (d > 0.0);
/* Signed comparison of x with d: <0, 0 or >0.  When signs agree the
   comparison is delegated to mpz_cmpabs_d (negated for negatives). */
1693 mpz_cmp_d (const mpz_t x, double d)
1695 if (x->_mp_size < 0)
1697 if (d >= 0.0)
1698 return -1;
1699 else
1700 return -mpz_cmpabs_d (x, d);
1702 else
1704 if (d < 0.0)
1705 return 1;
1706 else
1707 return mpz_cmpabs_d (x, d);
1712 /* MPZ comparisons and the like. */
/* Sign of u: -1, 0 or 1, taken from the sign of _mp_size. */
1714 mpz_sgn (const mpz_t u)
1716 mp_size_t usize = u->_mp_size;
1718 return (usize > 0) - (usize < 0);
/* Compare u with the signed long v: <0, 0 or >0.  A negative u wider
   than one limb is smaller than any long; the v >= 0 case reuses
   mpz_cmp_ui. */
1722 mpz_cmp_si (const mpz_t u, long v)
1724 mp_size_t usize = u->_mp_size;
1726 if (usize < -1)
1727 return -1;
1728 else if (v >= 0)
1729 return mpz_cmp_ui (u, v);
1730 else if (usize >= 0)
1731 return 1;
1732 else /* usize == -1 */
/* Both negative and u fits in one limb: compare magnitudes, reversed.
   GMP_NEG_CAST avoids signed overflow for v == LONG_MIN. */
1734 mp_limb_t ul = u->_mp_d[0];
1735 if ((mp_limb_t)GMP_NEG_CAST (unsigned long int, v) < ul)
1736 return -1;
1737 else
1738 return (mp_limb_t)GMP_NEG_CAST (unsigned long int, v) > ul;
/* Compare u with the unsigned long v: <0, 0 or >0. */
1743 mpz_cmp_ui (const mpz_t u, unsigned long v)
1745 mp_size_t usize = u->_mp_size;
1747 if (usize > 1)
1748 return 1;
1749 else if (usize < 0)
1750 return -1;
1751 else
1753 mp_limb_t ul = (usize > 0) ? u->_mp_d[0] : 0;
1754 return (ul > v) - (ul < v);
/* Compare a and b: <0, 0 or >0.  Different sizes (signed) decide
   immediately; equal sizes compare limbs, with operands swapped for
   negatives so the magnitude comparison yields the signed answer. */
1759 mpz_cmp (const mpz_t a, const mpz_t b)
1761 mp_size_t asize = a->_mp_size;
1762 mp_size_t bsize = b->_mp_size;
1764 if (asize != bsize)
1765 return (asize < bsize) ? -1 : 1;
1766 else if (asize >= 0)
1767 return mpn_cmp (a->_mp_d, b->_mp_d, asize);
1768 else
1769 return mpn_cmp (b->_mp_d, a->_mp_d, -asize);
/* Compare |u| with v: <0, 0 or >0. */
1773 mpz_cmpabs_ui (const mpz_t u, unsigned long v)
1775 mp_size_t un = GMP_ABS (u->_mp_size);
1776 mp_limb_t ul;
1778 if (un > 1)
1779 return 1;
1781 ul = (un == 1) ? u->_mp_d[0] : 0;
1783 return (ul > v) - (ul < v);
/* Compare |u| with |v|: <0, 0 or >0. */
1787 mpz_cmpabs (const mpz_t u, const mpz_t v)
1789 return mpn_cmp4 (u->_mp_d, GMP_ABS (u->_mp_size),
1790 v->_mp_d, GMP_ABS (v->_mp_size));
/* r = |u|.  Copy first, then force the size field non-negative. */
1793 void
1794 mpz_abs (mpz_t r, const mpz_t u)
1796 mpz_set (r, u);
1797 r->_mp_size = GMP_ABS (r->_mp_size);
/* r = -u.  Copy first, then flip the sign carried by _mp_size. */
1800 void
1801 mpz_neg (mpz_t r, const mpz_t u)
1803 mpz_set (r, u);
1804 r->_mp_size = -r->_mp_size;
/* Exchange u and v in O(1) by swapping the three struct fields. */
1807 void
1808 mpz_swap (mpz_t u, mpz_t v)
1810 MP_SIZE_T_SWAP (u->_mp_size, v->_mp_size);
1811 MP_SIZE_T_SWAP (u->_mp_alloc, v->_mp_alloc);
1812 MP_PTR_SWAP (u->_mp_d, v->_mp_d);
1816 /* MPZ addition and subtraction */
1818 /* Adds to the absolute value. Returns new size, but doesn't store it. */
1819 static mp_size_t
1820 mpz_abs_add_ui (mpz_t r, const mpz_t a, unsigned long b)
1822 mp_size_t an;
1823 mp_ptr rp;
1824 mp_limb_t cy;
1826 an = GMP_ABS (a->_mp_size);
1827 if (an == 0)
/* a == 0: store b directly.  NOTE(review): relies on the invariant
   that every mpz has at least one limb allocated — confirm against
   mpz_init/mpz_realloc. */
1829 r->_mp_d[0] = b;
1830 return b > 0;
1833 rp = MPZ_REALLOC (r, an + 1);
1835 cy = mpn_add_1 (rp, a->_mp_d, an, b);
/* Carry out extends the result by one limb. */
1836 rp[an] = cy;
1837 an += cy;
1839 return an;
1842 /* Subtract from the absolute value. Returns new size, (or -1 on underflow),
1843 but doesn't store it. */
1844 static mp_size_t
1845 mpz_abs_sub_ui (mpz_t r, const mpz_t a, unsigned long b)
1847 mp_size_t an = GMP_ABS (a->_mp_size);
1848 mp_ptr rp = MPZ_REALLOC (r, an);
1850 if (an == 0)
1852 rp[0] = b;
1853 return -(b > 0);
/* |a| < b: result magnitude is b - a, with flipped sign (-1). */
1855 else if (an == 1 && a->_mp_d[0] < b)
1857 rp[0] = b - a->_mp_d[0];
1858 return -1;
1860 else
1862 gmp_assert_nocarry (mpn_sub_1 (rp, a->_mp_d, an, b));
1863 return mpn_normalized_size (rp, an);
/* r = a + b.  Sign of a selects whether b adds to or subtracts from
   the magnitude; the helpers return the signed size to store. */
1867 void
1868 mpz_add_ui (mpz_t r, const mpz_t a, unsigned long b)
1870 if (a->_mp_size >= 0)
1871 r->_mp_size = mpz_abs_add_ui (r, a, b)
1872 else
1873 r->_mp_size = -mpz_abs_sub_ui (r, a, b);
/* r = a - b, by the symmetric case analysis. */
1876 void
1877 mpz_sub_ui (mpz_t r, const mpz_t a, unsigned long b)
1879 if (a->_mp_size < 0)
1880 r->_mp_size = -mpz_abs_add_ui (r, a, b);
1881 else
1882 r->_mp_size = mpz_abs_sub_ui (r, a, b);
/* r = a - b with the mpz operand second: negate the roles of the
   helpers relative to mpz_sub_ui. */
1885 void
1886 mpz_ui_sub (mpz_t r, unsigned long a, const mpz_t b)
1888 if (b->_mp_size < 0)
1889 r->_mp_size = mpz_abs_add_ui (r, b, a);
1890 else
1891 r->_mp_size = -mpz_abs_sub_ui (r, b, a);
/* |r| = |a| + |b|.  Returns the (unsigned) result size without
   storing it; caller applies the sign. */
1894 static mp_size_t
1895 mpz_abs_add (mpz_t r, const mpz_t a, const mpz_t b)
1897 mp_size_t an = GMP_ABS (a->_mp_size);
1898 mp_size_t bn = GMP_ABS (b->_mp_size);
1899 mp_ptr rp;
1900 mp_limb_t cy;
/* Ensure a is the longer operand, as mpn_add requires. */
1902 if (an < bn)
1904 MPZ_SRCPTR_SWAP (a, b);
1905 MP_SIZE_T_SWAP (an, bn);
1908 rp = MPZ_REALLOC (r, an + 1);
1909 cy = mpn_add (rp, a->_mp_d, an, b->_mp_d, bn);
1911 rp[an] = cy;
1913 return an + cy;
/* |r| = ||a| - |b||.  Returns the size, negated when |a| < |b|, 0 on
   equality; caller combines this with the operand signs. */
1916 static mp_size_t
1917 mpz_abs_sub (mpz_t r, const mpz_t a, const mpz_t b)
1919 mp_size_t an = GMP_ABS (a->_mp_size);
1920 mp_size_t bn = GMP_ABS (b->_mp_size);
1921 int cmp;
1922 mp_ptr rp;
1924 cmp = mpn_cmp4 (a->_mp_d, an, b->_mp_d, bn);
1925 if (cmp > 0)
1927 rp = MPZ_REALLOC (r, an);
1928 gmp_assert_nocarry (mpn_sub (rp, a->_mp_d, an, b->_mp_d, bn));
1929 return mpn_normalized_size (rp, an);
1931 else if (cmp < 0)
1933 rp = MPZ_REALLOC (r, bn);
1934 gmp_assert_nocarry (mpn_sub (rp, b->_mp_d, bn, a->_mp_d, an));
1935 return -mpn_normalized_size (rp, bn);
1937 else
1938 return 0;
/* r = a + b.  Same signs add magnitudes, opposite signs subtract;
   the xor of the size fields tests sign agreement without branching
   on zero operands. */
1941 void
1942 mpz_add (mpz_t r, const mpz_t a, const mpz_t b)
1944 mp_size_t rn;
1946 if ( (a->_mp_size ^ b->_mp_size) >= 0)
1947 rn = mpz_abs_add (r, a, b);
1948 else
1949 rn = mpz_abs_sub (r, a, b);
1951 r->_mp_size = a->_mp_size >= 0 ? rn : - rn;
/* r = a - b, mirroring mpz_add with the helper roles exchanged. */
1954 void
1955 mpz_sub (mpz_t r, const mpz_t a, const mpz_t b)
1957 mp_size_t rn;
1959 if ( (a->_mp_size ^ b->_mp_size) >= 0)
1960 rn = mpz_abs_sub (r, a, b);
1961 else
1962 rn = mpz_abs_add (r, a, b);
1964 r->_mp_size = a->_mp_size >= 0 ? rn : - rn;
1968 /* MPZ multiplication */
/* r = u * v for signed v: multiply by |v| and negate if needed.
   GMP_NEG_CAST handles v == LONG_MIN without signed overflow. */
1969 void
1970 mpz_mul_si (mpz_t r, const mpz_t u, long int v)
1972 if (v < 0)
1974 mpz_mul_ui (r, u, GMP_NEG_CAST (unsigned long int, v));
1975 mpz_neg (r, r);
1977 else
1978 mpz_mul_ui (r, u, (unsigned long int) v);
/* r = u * v for unsigned v, via a single mpn_mul_1 pass. */
1981 void
1982 mpz_mul_ui (mpz_t r, const mpz_t u, unsigned long int v)
1984 mp_size_t un, us;
1985 mp_ptr tp;
1986 mp_limb_t cy;
1988 us = u->_mp_size;
1990 if (us == 0 || v == 0)
1992 r->_mp_size = 0;
1993 return;
1996 un = GMP_ABS (us);
1998 tp = MPZ_REALLOC (r, un + 1);
1999 cy = mpn_mul_1 (tp, u->_mp_d, un, v);
2000 tp[un] = cy;
/* Extend by the carry limb; result sign follows u. */
2002 un += (cy > 0);
2003 r->_mp_size = (us < 0) ? - un : un;
/* r = u * v.  A temporary holds the product so r may alias u or v;
   mpn_mul requires its first operand to be the longer one. */
2006 void
2007 mpz_mul (mpz_t r, const mpz_t u, const mpz_t v)
2009 int sign;
2010 mp_size_t un, vn, rn;
2011 mpz_t t;
2012 mp_ptr tp;
2014 un = u->_mp_size;
2015 vn = v->_mp_size;
2017 if (un == 0 || vn == 0)
2019 r->_mp_size = 0;
2020 return;
/* Result is negative iff exactly one operand is. */
2023 sign = (un ^ vn) < 0;
2025 un = GMP_ABS (un);
2026 vn = GMP_ABS (vn);
2028 mpz_init2 (t, (un + vn) * GMP_LIMB_BITS);
2030 tp = t->_mp_d;
2031 if (un >= vn)
2032 mpn_mul (tp, u->_mp_d, un, v->_mp_d, vn);
2033 else
2034 mpn_mul (tp, v->_mp_d, vn, u->_mp_d, un);
/* The product occupies un + vn limbs, minus at most one zero top
   limb. */
2036 rn = un + vn;
2037 rn -= tp[rn-1] == 0;
2039 t->_mp_size = sign ? - rn : rn;
2040 mpz_swap (r, t);
2041 mpz_clear (t);
/* r = u * 2^bits: shift left by whole limbs plus a sub-limb shift. */
2044 void
2045 mpz_mul_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t bits)
2047 mp_size_t un, rn;
2048 mp_size_t limbs;
2049 unsigned shift;
2050 mp_ptr rp;
2052 un = GMP_ABS (u->_mp_size);
2053 if (un == 0)
2055 r->_mp_size = 0;
2056 return;
2059 limbs = bits / GMP_LIMB_BITS;
2060 shift = bits % GMP_LIMB_BITS;
/* One extra limb is needed only when a sub-limb shift may carry out. */
2062 rn = un + limbs + (shift > 0);
2063 rp = MPZ_REALLOC (r, rn);
2064 if (shift > 0)
2066 mp_limb_t cy = mpn_lshift (rp + limbs, u->_mp_d, un, shift);
2067 rp[rn-1] = cy;
2068 rn -= (cy == 0);
2070 else
/* Pure limb shift: copy high-to-low since regions may overlap. */
2071 mpn_copyd (rp + limbs, u->_mp_d, un);
2073 mpn_zero (rp, limbs);
2075 r->_mp_size = (u->_mp_size < 0) ? - rn : rn;
2078 void
2079 mpz_addmul_ui (mpz_t r, const mpz_t u, unsigned long int v)
2081 mpz_t t;
2082 mpz_init (t);
2083 mpz_mul_ui (t, u, v);
2084 mpz_add (r, r, t);
2085 mpz_clear (t);
2088 void
2089 mpz_submul_ui (mpz_t r, const mpz_t u, unsigned long int v)
2091 mpz_t t;
2092 mpz_init (t);
2093 mpz_mul_ui (t, u, v);
2094 mpz_sub (r, r, t);
2095 mpz_clear (t);
2098 void
2099 mpz_addmul (mpz_t r, const mpz_t u, const mpz_t v)
2101 mpz_t t;
2102 mpz_init (t);
2103 mpz_mul (t, u, v);
2104 mpz_add (r, r, t);
2105 mpz_clear (t);
2108 void
2109 mpz_submul (mpz_t r, const mpz_t u, const mpz_t v)
2111 mpz_t t;
2112 mpz_init (t);
2113 mpz_mul (t, u, v);
2114 mpz_sub (r, r, t);
2115 mpz_clear (t);
2119 /* MPZ division */
/* Rounding modes: floor (toward -inf), ceil (toward +inf), trunc
   (toward zero). */
2120 enum mpz_div_round_mode { GMP_DIV_FLOOR, GMP_DIV_CEIL, GMP_DIV_TRUNC };
2122 /* Allows q or r to be zero. Returns 1 iff remainder is non-zero. */
2123 static int
2124 mpz_div_qr (mpz_t q, mpz_t r,
2125 const mpz_t n, const mpz_t d, enum mpz_div_round_mode mode)
2127 mp_size_t ns, ds, nn, dn, qs;
2128 ns = n->_mp_size;
2129 ds = d->_mp_size;
2131 if (ds == 0)
2132 gmp_die("mpz_div_qr: Divide by zero.");
2134 if (ns == 0)
2136 if (q)
2137 q->_mp_size = 0;
2138 if (r)
2139 r->_mp_size = 0;
2140 return 0;
2143 nn = GMP_ABS (ns);
2144 dn = GMP_ABS (ds);
/* qs < 0 iff quotient sign is negative (signs of n and d differ). */
2146 qs = ds ^ ns;
/* |n| < |d|: truncated quotient is 0; adjust for the rounding mode. */
2148 if (nn < dn)
2150 if (mode == GMP_DIV_CEIL && qs >= 0)
2152 /* q = 1, r = n - d */
2153 if (r)
2154 mpz_sub (r, n, d);
2155 if (q)
2156 mpz_set_ui (q, 1);
2158 else if (mode == GMP_DIV_FLOOR && qs < 0)
2160 /* q = -1, r = n + d */
2161 if (r)
2162 mpz_add (r, n, d);
2163 if (q)
2164 mpz_set_si (q, -1);
2166 else
2168 /* q = 0, r = n */
2169 if (r)
2170 mpz_set (r, n);
2171 if (q)
2172 q->_mp_size = 0;
2174 return 1;
2176 else
2178 mp_ptr np, qp;
2179 mp_size_t qn, rn;
2180 mpz_t tq, tr;
/* Work on copies so q/r may alias n/d; tr holds the dividend and is
   overwritten in place with the remainder by mpn_div_qr. */
2182 mpz_init_set (tr, n);
2183 np = tr->_mp_d;
2185 qn = nn - dn + 1;
2187 if (q)
2189 mpz_init2 (tq, qn * GMP_LIMB_BITS);
2190 qp = tq->_mp_d;
2192 else
2193 qp = NULL;
2195 mpn_div_qr (qp, np, nn, d->_mp_d, dn);
2197 if (qp)
2199 qn -= (qp[qn-1] == 0);
2201 tq->_mp_size = qs < 0 ? -qn : qn;
2203 rn = mpn_normalized_size (np, dn);
2204 tr->_mp_size = ns < 0 ? - rn : rn;
/* Truncation gave q, r; floor/ceil need a one-step adjustment when a
   non-zero remainder rounds the quotient the other way. */
2206 if (mode == GMP_DIV_FLOOR && qs < 0 && rn != 0)
2208 if (q)
2209 mpz_sub_ui (tq, tq, 1);
2210 if (r)
2211 mpz_add (tr, tr, d);
2213 else if (mode == GMP_DIV_CEIL && qs >= 0 && rn != 0)
2215 if (q)
2216 mpz_add_ui (tq, tq, 1);
2217 if (r)
2218 mpz_sub (tr, tr, d);
2221 if (q)
2223 mpz_swap (tq, q);
2224 mpz_clear (tq);
2226 if (r)
2227 mpz_swap (tr, r);
2229 mpz_clear (tr);
2231 return rn != 0;
/* Public division entry points: thin wrappers selecting the rounding
   mode for mpz_div_qr.  c = ceiling, f = floor, t = truncate; the
   suffix names which results are produced (qr, q, or r). */
2235 void
2236 mpz_cdiv_qr (mpz_t q, mpz_t r, const mpz_t n, const mpz_t d)
2238 mpz_div_qr (q, r, n, d, GMP_DIV_CEIL);
2241 void
2242 mpz_fdiv_qr (mpz_t q, mpz_t r, const mpz_t n, const mpz_t d)
2244 mpz_div_qr (q, r, n, d, GMP_DIV_FLOOR);
2247 void
2248 mpz_tdiv_qr (mpz_t q, mpz_t r, const mpz_t n, const mpz_t d)
2250 mpz_div_qr (q, r, n, d, GMP_DIV_TRUNC);
2253 void
2254 mpz_cdiv_q (mpz_t q, const mpz_t n, const mpz_t d)
2256 mpz_div_qr (q, NULL, n, d, GMP_DIV_CEIL);
2259 void
2260 mpz_fdiv_q (mpz_t q, const mpz_t n, const mpz_t d)
2262 mpz_div_qr (q, NULL, n, d, GMP_DIV_FLOOR);
2265 void
2266 mpz_tdiv_q (mpz_t q, const mpz_t n, const mpz_t d)
2268 mpz_div_qr (q, NULL, n, d, GMP_DIV_TRUNC);
2271 void
2272 mpz_cdiv_r (mpz_t r, const mpz_t n, const mpz_t d)
2274 mpz_div_qr (NULL, r, n, d, GMP_DIV_CEIL);
2277 void
2278 mpz_fdiv_r (mpz_t r, const mpz_t n, const mpz_t d)
2280 mpz_div_qr (NULL, r, n, d, GMP_DIV_FLOOR);
2283 void
2284 mpz_tdiv_r (mpz_t r, const mpz_t n, const mpz_t d)
2286 mpz_div_qr (NULL, r, n, d, GMP_DIV_TRUNC);
/* r = n mod d with 0 <= |r| < |d| and r's sign following d: floor
   division for positive d, ceiling for negative d. */
2289 void
2290 mpz_mod (mpz_t r, const mpz_t n, const mpz_t d)
2292 mpz_div_qr (NULL, r, n, d, d->_mp_size >= 0 ? GMP_DIV_FLOOR : GMP_DIV_CEIL);
/* q = u / 2^bit_index, rounded per mode.  The shift drops whole limbs
   then a sub-limb shift; floor/ceil adjust by one when any discarded
   bit was set. */
2295 static void
2296 mpz_div_q_2exp (mpz_t q, const mpz_t u, mp_bitcnt_t bit_index,
2297 enum mpz_div_round_mode mode)
2299 mp_size_t un, qn;
2300 mp_size_t limb_cnt;
2301 mp_ptr qp;
2302 int adjust;
2304 un = u->_mp_size;
2305 if (un == 0)
2307 q->_mp_size = 0;
2308 return;
2310 limb_cnt = bit_index / GMP_LIMB_BITS;
2311 qn = GMP_ABS (un) - limb_cnt;
2312 bit_index %= GMP_LIMB_BITS;
/* Rounding away from zero happens for ceil on positive u, floor on
   negative u; it applies iff some discarded bit is non-zero. */
2314 if (mode == ((un > 0) ? GMP_DIV_CEIL : GMP_DIV_FLOOR)) /* un != 0 here. */
2315 /* Note: Below, the final indexing at limb_cnt is valid because at
2316 that point we have qn > 0. */
2317 adjust = (qn <= 0
2318 || !mpn_zero_p (u->_mp_d, limb_cnt)
2319 || (u->_mp_d[limb_cnt]
2320 & (((mp_limb_t) 1 << bit_index) - 1)));
2321 else
2322 adjust = 0;
2324 if (qn <= 0)
2325 qn = 0;
2327 else
2329 qp = MPZ_REALLOC (q, qn);
2331 if (bit_index != 0)
2333 mpn_rshift (qp, u->_mp_d + limb_cnt, qn, bit_index);
2334 qn -= qp[qn - 1] == 0;
2336 else
2338 mpn_copyi (qp, u->_mp_d + limb_cnt, qn);
2342 q->_mp_size = qn;
/* Adjust magnitude first, then restore the sign of u. */
2344 if (adjust)
2345 mpz_add_ui (q, q, 1);
2346 if (un < 0)
2347 mpz_neg (q, q);
/* r = u mod 2^bit_index, rounded per mode.  Keeps the low bit_index
   bits of u; for the away-from-zero modes the kept value is replaced
   by its complement 2^bit_index - r and the sign flipped. */
2350 static void
2351 mpz_div_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t bit_index,
2352 enum mpz_div_round_mode mode)
2354 mp_size_t us, un, rn;
2355 mp_ptr rp;
2356 mp_limb_t mask;
2358 us = u->_mp_size;
2359 if (us == 0 || bit_index == 0)
2361 r->_mp_size = 0;
2362 return;
2364 rn = (bit_index + GMP_LIMB_BITS - 1) / GMP_LIMB_BITS;
2365 assert (rn > 0);
2367 rp = MPZ_REALLOC (r, rn);
2368 un = GMP_ABS (us);
/* Mask for the valid bits in the top result limb. */
2370 mask = GMP_LIMB_MAX >> (rn * GMP_LIMB_BITS - bit_index);
2372 if (rn > un)
2374 /* Quotient (with truncation) is zero, and remainder is
2375 non-zero */
2376 if (mode == ((us > 0) ? GMP_DIV_CEIL : GMP_DIV_FLOOR)) /* us != 0 here. */
2378 /* Have to negate and sign extend. */
2379 mp_size_t i;
2380 mp_limb_t cy;
/* Two's-complement negation of u's limbs, limb by limb. */
2382 for (cy = 1, i = 0; i < un; i++)
2384 mp_limb_t s = ~u->_mp_d[i] + cy;
2385 cy = s < cy;
2386 rp[i] = s;
2388 assert (cy == 0);
2389 for (; i < rn - 1; i++)
2390 rp[i] = GMP_LIMB_MAX;
2392 rp[rn-1] = mask;
2393 us = -us;
2395 else
2397 /* Just copy */
2398 if (r != u)
2399 mpn_copyi (rp, u->_mp_d, un);
2401 rn = un;
2404 else
2406 if (r != u)
2407 mpn_copyi (rp, u->_mp_d, rn - 1);
2409 rp[rn-1] = u->_mp_d[rn-1] & mask;
2411 if (mode == ((us > 0) ? GMP_DIV_CEIL : GMP_DIV_FLOOR)) /* us != 0 here. */
2413 /* If r != 0, compute 2^{bit_count} - r. */
2414 mp_size_t i;
/* Skip low zero limbs; negation leaves them untouched. */
2416 for (i = 0; i < rn && rp[i] == 0; i++)
2418 if (i < rn)
2420 /* r > 0, need to flip sign. */
2421 rp[i] = ~rp[i] + 1;
2422 while (++i < rn)
2423 rp[i] = ~rp[i];
2425 rp[rn-1] &= mask;
2427 /* us is not used for anything else, so we can modify it
2428 here to indicate flipped sign. */
2429 us = -us;
2433 rn = mpn_normalized_size (rp, rn);
2434 r->_mp_size = us < 0 ? -rn : rn;
/* Power-of-two division entry points: mode-selecting wrappers. */
2437 void
2438 mpz_cdiv_q_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt)
2440 mpz_div_q_2exp (r, u, cnt, GMP_DIV_CEIL);
2443 void
2444 mpz_fdiv_q_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt)
2446 mpz_div_q_2exp (r, u, cnt, GMP_DIV_FLOOR);
2449 void
2450 mpz_tdiv_q_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt)
2452 mpz_div_q_2exp (r, u, cnt, GMP_DIV_TRUNC);
2455 void
2456 mpz_cdiv_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt)
2458 mpz_div_r_2exp (r, u, cnt, GMP_DIV_CEIL);
2461 void
2462 mpz_fdiv_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt)
2464 mpz_div_r_2exp (r, u, cnt, GMP_DIV_FLOOR);
2467 void
2468 mpz_tdiv_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt)
2470 mpz_div_r_2exp (r, u, cnt, GMP_DIV_TRUNC);
/* q = n / d, asserting that the division is exact (no remainder). */
2473 void
2474 mpz_divexact (mpz_t q, const mpz_t n, const mpz_t d)
2476 gmp_assert_nocarry (mpz_div_qr (q, NULL, n, d, GMP_DIV_TRUNC));
/* Non-zero iff d divides n (mpz_div_qr returns 1 iff remainder != 0). */
2480 mpz_divisible_p (const mpz_t n, const mpz_t d)
2482 return mpz_div_qr (NULL, NULL, n, d, GMP_DIV_TRUNC) == 0;
/* Non-zero iff a == b (mod m), i.e. m divides a - b. */
2486 mpz_congruent_p (const mpz_t a, const mpz_t b, const mpz_t m)
2488 mpz_t t;
2489 int res;
2491 /* a == b (mod 0) iff a == b */
2492 if (mpz_sgn (m) == 0)
2493 return (mpz_cmp (a, b) == 0);
2495 mpz_init (t);
2496 mpz_sub (t, a, b);
2497 res = mpz_divisible_p (t, m);
2498 mpz_clear (t);
2500 return res;
/* Divide n by the single-limb d, rounding per mode.  q and/or r may
   be NULL.  Returns the remainder's absolute value as an unsigned
   long. */
2503 static unsigned long
2504 mpz_div_qr_ui (mpz_t q, mpz_t r,
2505 const mpz_t n, unsigned long d, enum mpz_div_round_mode mode)
2507 mp_size_t ns, qn;
2508 mp_ptr qp;
2509 mp_limb_t rl;
2510 mp_size_t rs;
2512 ns = n->_mp_size;
2513 if (ns == 0)
2515 if (q)
2516 q->_mp_size = 0;
2517 if (r)
2518 r->_mp_size = 0;
2519 return 0;
2522 qn = GMP_ABS (ns);
2523 if (q)
2524 qp = MPZ_REALLOC (q, qn);
2525 else
2526 qp = NULL;
2528 rl = mpn_div_qr_1 (qp, n->_mp_d, qn, d);
2529 assert (rl < d);
/* rs is the remainder's signed size (0 or +/-1), sign following n
   until the rounding adjustment below. */
2531 rs = rl > 0;
2532 rs = (ns < 0) ? -rs : rs;
/* Floor of a negative, or ceil of a positive, quotient: bump the
   quotient by one and complement the remainder. */
2534 if (rl > 0 && ( (mode == GMP_DIV_FLOOR && ns < 0)
2535 || (mode == GMP_DIV_CEIL && ns >= 0)))
2537 if (q)
2538 gmp_assert_nocarry (mpn_add_1 (qp, qp, qn, 1));
2539 rl = d - rl;
2540 rs = -rs;
2543 if (r)
/* NOTE(review): writes r->_mp_d[0] without realloc — relies on every
   mpz having at least one limb allocated; confirm against mpz_init. */
2545 r->_mp_d[0] = rl;
2546 r->_mp_size = rs;
2548 if (q)
2550 qn -= (qp[qn-1] == 0);
2551 assert (qn == 0 || qp[qn-1] > 0);
2553 q->_mp_size = (ns < 0) ? - qn : qn;
2556 return rl;
/* Single-limb division entry points: wrappers over mpz_div_qr_ui
   selecting rounding mode and which results to store.  All return
   |remainder| as an unsigned long. */
2559 unsigned long
2560 mpz_cdiv_qr_ui (mpz_t q, mpz_t r, const mpz_t n, unsigned long d)
2562 return mpz_div_qr_ui (q, r, n, d, GMP_DIV_CEIL);
2565 unsigned long
2566 mpz_fdiv_qr_ui (mpz_t q, mpz_t r, const mpz_t n, unsigned long d)
2568 return mpz_div_qr_ui (q, r, n, d, GMP_DIV_FLOOR);
2571 unsigned long
2572 mpz_tdiv_qr_ui (mpz_t q, mpz_t r, const mpz_t n, unsigned long d)
2574 return mpz_div_qr_ui (q, r, n, d, GMP_DIV_TRUNC);
2577 unsigned long
2578 mpz_cdiv_q_ui (mpz_t q, const mpz_t n, unsigned long d)
2580 return mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_CEIL);
2583 unsigned long
2584 mpz_fdiv_q_ui (mpz_t q, const mpz_t n, unsigned long d)
2586 return mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_FLOOR);
2589 unsigned long
2590 mpz_tdiv_q_ui (mpz_t q, const mpz_t n, unsigned long d)
2592 return mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_TRUNC);
2595 unsigned long
2596 mpz_cdiv_r_ui (mpz_t r, const mpz_t n, unsigned long d)
2598 return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_CEIL);
2600 unsigned long
2601 mpz_fdiv_r_ui (mpz_t r, const mpz_t n, unsigned long d)
2603 return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_FLOOR);
2605 unsigned long
2606 mpz_tdiv_r_ui (mpz_t r, const mpz_t n, unsigned long d)
2608 return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_TRUNC);
/* Remainder-only variants: nothing stored, only the return value. */
2611 unsigned long
2612 mpz_cdiv_ui (const mpz_t n, unsigned long d)
2614 return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_CEIL);
2617 unsigned long
2618 mpz_fdiv_ui (const mpz_t n, unsigned long d)
2620 return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_FLOOR);
2623 unsigned long
2624 mpz_tdiv_ui (const mpz_t n, unsigned long d)
2626 return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_TRUNC);
/* r = n mod d, non-negative (floor semantics for unsigned d). */
2629 unsigned long
2630 mpz_mod_ui (mpz_t r, const mpz_t n, unsigned long d)
2632 return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_FLOOR);
/* q = n / d, asserting exactness. */
2635 void
2636 mpz_divexact_ui (mpz_t q, const mpz_t n, unsigned long d)
2638 gmp_assert_nocarry (mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_TRUNC));
/* Non-zero iff d divides n. */
2642 mpz_divisible_ui_p (const mpz_t n, unsigned long d)
2644 return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_TRUNC) == 0;
2648 /* GCD */
/* Single-limb GCD by the binary (Stein) algorithm: strip common
   factors of two, then repeatedly subtract the smaller odd value and
   strip twos again.  Requires u | v != 0. */
2649 static mp_limb_t
2650 mpn_gcd_11 (mp_limb_t u, mp_limb_t v)
2652 unsigned shift;
2654 assert ( (u | v) > 0);
2656 if (u == 0)
2657 return v;
2658 else if (v == 0)
2659 return u;
/* Common power of two, restored on the final result. */
2661 gmp_ctz (shift, u | v);
2663 u >>= shift;
2664 v >>= shift;
/* Keep u odd; make v odd too before the subtraction loop. */
2666 if ( (u & 1) == 0)
2667 MP_LIMB_T_SWAP (u, v);
2669 while ( (v & 1) == 0)
2670 v >>= 1;
2672 while (u != v)
2674 if (u > v)
2676 u -= v;
2678 u >>= 1;
2679 while ( (u & 1) == 0);
2681 else
2683 v -= u;
2685 v >>= 1;
2686 while ( (v & 1) == 0);
2689 return u << shift;
/* g = gcd(|u|, v); also returned (as an unsigned long) when v != 0.
   For v == 0 the result is |u| stored in g, and the return value is 0.
   g may be NULL when only the return value is wanted. */
2692 unsigned long
2693 mpz_gcd_ui (mpz_t g, const mpz_t u, unsigned long v)
2695 mp_size_t un;
2697 if (v == 0)
2699 if (g)
2700 mpz_abs (g, u);
2702 else
2704 un = GMP_ABS (u->_mp_size);
/* gcd(u, v) == gcd(u mod v, v); the mod is one mpn division. */
2705 if (un != 0)
2706 v = mpn_gcd_11 (mpn_div_qr_1 (NULL, u->_mp_d, un, v), v);
2708 if (g)
2709 mpz_set_ui (g, v);
2712 return v;
/* Divide r (> 0) by its largest power of two; returns the number of
   bits removed. */
2715 static mp_bitcnt_t
2716 mpz_make_odd (mpz_t r)
2718 mp_bitcnt_t shift;
2720 assert (r->_mp_size > 0);
2721 /* Count trailing zeros, equivalent to mpn_scan1, because we know that there is a 1 */
2722 shift = mpn_common_scan (r->_mp_d[0], 0, r->_mp_d, 0, 0);
2723 mpz_tdiv_q_2exp (r, r, shift);
2725 return shift;
/* g = gcd(|u|, |v|) by the binary GCD on mpz values: factor out the
   shared power of two (gz), reduce once by division, then loop on
   subtraction of odd values. */
2728 void
2729 mpz_gcd (mpz_t g, const mpz_t u, const mpz_t v)
2731 mpz_t tu, tv;
2732 mp_bitcnt_t uz, vz, gz;
2734 if (u->_mp_size == 0)
2736 mpz_abs (g, v);
2737 return;
2739 if (v->_mp_size == 0)
2741 mpz_abs (g, u);
2742 return;
2745 mpz_init (tu);
2746 mpz_init (tv);
2748 mpz_abs (tu, u);
2749 uz = mpz_make_odd (tu);
2750 mpz_abs (tv, v);
2751 vz = mpz_make_odd (tv);
/* gz = shared power of two in the gcd, reapplied at the end. */
2752 gz = GMP_MIN (uz, vz);
2754 if (tu->_mp_size < tv->_mp_size)
2755 mpz_swap (tu, tv);
/* One initial division cuts the larger operand down to size. */
2757 mpz_tdiv_r (tu, tu, tv);
2758 if (tu->_mp_size == 0)
2760 mpz_swap (g, tv);
2762 else
2763 for (;;)
2765 int c;
2767 mpz_make_odd (tu);
2768 c = mpz_cmp (tu, tv);
2769 if (c == 0)
2771 mpz_swap (g, tu);
2772 break;
2774 if (c < 0)
2775 mpz_swap (tu, tv);
/* Once the smaller value fits in one limb, finish with the
   single-limb routine. */
2777 if (tv->_mp_size == 1)
2779 mp_limb_t vl = tv->_mp_d[0];
2780 mp_limb_t ul = mpz_tdiv_ui (tu, vl);
2781 mpz_set_ui (g, mpn_gcd_11 (ul, vl));
2782 break;
2784 mpz_sub (tu, tu, tv);
2786 mpz_clear (tu);
2787 mpz_clear (tv);
2788 mpz_mul_2exp (g, g, gz);
/* Extended GCD: g = gcd(u, v) and cofactors s, t with g = s u + t v.
   s and/or t may be NULL.  Uses the binary GCD while maintaining
   cofactor pairs scaled by a power of two (see the invariant comments
   below), which is divided out at the end. */
2791 void
2792 mpz_gcdext (mpz_t g, mpz_t s, mpz_t t, const mpz_t u, const mpz_t v)
2794 mpz_t tu, tv, s0, s1, t0, t1;
2795 mp_bitcnt_t uz, vz, gz;
2796 mp_bitcnt_t power;
2798 if (u->_mp_size == 0)
2800 /* g = 0 u + sgn(v) v */
2801 signed long sign = mpz_sgn (v);
2802 mpz_abs (g, v);
2803 if (s)
2804 mpz_set_ui (s, 0);
2805 if (t)
2806 mpz_set_si (t, sign);
2807 return;
2810 if (v->_mp_size == 0)
2812 /* g = sgn(u) u + 0 v */
2813 signed long sign = mpz_sgn (u);
2814 mpz_abs (g, u);
2815 if (s)
2816 mpz_set_si (s, sign);
2817 if (t)
2818 mpz_set_ui (t, 0);
2819 return;
2822 mpz_init (tu);
2823 mpz_init (tv);
2824 mpz_init (s0);
2825 mpz_init (s1);
2826 mpz_init (t0);
2827 mpz_init (t1);
2829 mpz_abs (tu, u);
2830 uz = mpz_make_odd (tu);
2831 mpz_abs (tv, v);
2832 vz = mpz_make_odd (tv);
2833 gz = GMP_MIN (uz, vz);
2835 uz -= gz;
2836 vz -= gz;
2838 /* Cofactors corresponding to odd gcd. gz handled later. */
2839 if (tu->_mp_size < tv->_mp_size)
2841 mpz_swap (tu, tv);
2842 MPZ_SRCPTR_SWAP (u, v);
2843 MPZ_PTR_SWAP (s, t);
2844 MP_BITCNT_T_SWAP (uz, vz);
2847 /* Maintain
2849 * u = t0 tu + t1 tv
2850 * v = s0 tu + s1 tv
2852 * where u and v denote the inputs with common factors of two
2853 * eliminated, and det (s0, t0; s1, t1) = 2^p. Then
2855 * 2^p tu = s1 u - t1 v
2856 * 2^p tv = -s0 u + t0 v
2859 /* After initial division, tu = q tv + tu', we have
2861 * u = 2^uz (tu' + q tv)
2862 * v = 2^vz tv
2864 * or
2866 * t0 = 2^uz, t1 = 2^uz q
2867 * s0 = 0, s1 = 2^vz
2870 mpz_setbit (t0, uz);
2871 mpz_tdiv_qr (t1, tu, tu, tv);
2872 mpz_mul_2exp (t1, t1, uz);
2874 mpz_setbit (s1, vz);
2875 power = uz + vz;
2877 if (tu->_mp_size > 0)
2879 mp_bitcnt_t shift;
2880 shift = mpz_make_odd (tu);
2881 mpz_mul_2exp (t0, t0, shift);
2882 mpz_mul_2exp (s0, s0, shift);
2883 power += shift;
2885 for (;;)
2887 int c;
2888 c = mpz_cmp (tu, tv);
2889 if (c == 0)
2890 break;
2892 if (c < 0)
2894 /* tv = tv' + tu
2896 * u = t0 tu + t1 (tv' + tu) = (t0 + t1) tu + t1 tv'
2897 * v = s0 tu + s1 (tv' + tu) = (s0 + s1) tu + s1 tv' */
2899 mpz_sub (tv, tv, tu);
2900 mpz_add (t0, t0, t1);
2901 mpz_add (s0, s0, s1);
2903 shift = mpz_make_odd (tv);
2904 mpz_mul_2exp (t1, t1, shift);
2905 mpz_mul_2exp (s1, s1, shift);
2907 else
2909 mpz_sub (tu, tu, tv);
2910 mpz_add (t1, t0, t1);
2911 mpz_add (s1, s0, s1);
2913 shift = mpz_make_odd (tu);
2914 mpz_mul_2exp (t0, t0, shift);
2915 mpz_mul_2exp (s0, s0, shift);
2917 power += shift;
2921 /* Now tv = odd part of gcd, and -s0 and t0 are corresponding
2922 cofactors. */
2924 mpz_mul_2exp (tv, tv, gz);
2925 mpz_neg (s0, s0);
2927 /* 2^p g = s0 u + t0 v. Eliminate one factor of two at a time. To
2928 adjust cofactors, we need u / g and v / g */
2930 mpz_divexact (s1, v, tv);
2931 mpz_abs (s1, s1);
2932 mpz_divexact (t1, u, tv);
2933 mpz_abs (t1, t1);
2935 while (power-- > 0)
2937 /* s0 u + t0 v = (s0 - v/g) u - (t0 + u/g) v */
2938 if (mpz_odd_p (s0) || mpz_odd_p (t0))
2940 mpz_sub (s0, s0, s1);
2941 mpz_add (t0, t0, t1);
2943 mpz_divexact_ui (s0, s0, 2);
2944 mpz_divexact_ui (t0, t0, 2);
2947 /* Arrange so that |s| < |u| / 2g */
2948 mpz_add (s1, s0, s1);
2949 if (mpz_cmpabs (s0, s1) > 0)
2951 mpz_swap (s0, s1);
2952 mpz_sub (t0, t0, t1);
/* Cofactor signs follow the signs of the original inputs. */
2954 if (u->_mp_size < 0)
2955 mpz_neg (s0, s0);
2956 if (v->_mp_size < 0)
2957 mpz_neg (t0, t0);
2959 mpz_swap (g, tv);
2960 if (s)
2961 mpz_swap (s, s0);
2962 if (t)
2963 mpz_swap (t, t0);
2965 mpz_clear (tu);
2966 mpz_clear (tv);
2967 mpz_clear (s0);
2968 mpz_clear (s1);
2969 mpz_clear (t0);
2970 mpz_clear (t1);
/* r = lcm(u, v) = |u / gcd(u,v) * v|; zero when either input is 0. */
2973 void
2974 mpz_lcm (mpz_t r, const mpz_t u, const mpz_t v)
2976 mpz_t g;
2978 if (u->_mp_size == 0 || v->_mp_size == 0)
2980 r->_mp_size = 0;
2981 return;
2984 mpz_init (g);
2986 mpz_gcd (g, u, v);
/* Divide before multiplying to keep the intermediate small. */
2987 mpz_divexact (g, u, g);
2988 mpz_mul (r, g, v);
2990 mpz_clear (g);
2991 mpz_abs (r, r);
/* r = lcm(u, v) for single-limb v; zero when either input is 0. */
2994 void
2995 mpz_lcm_ui (mpz_t r, const mpz_t u, unsigned long v)
2997 if (v == 0 || u->_mp_size == 0)
2999 r->_mp_size = 0;
3000 return;
3003 v /= mpz_gcd_ui (NULL, u, v);
3004 mpz_mul_ui (r, u, v);
3006 mpz_abs (r, r);
/* r = u^{-1} mod m via the extended GCD.  Returns non-zero iff the
   inverse exists (gcd(u, m) == 1); r is untouched on failure. */
3010 mpz_invert (mpz_t r, const mpz_t u, const mpz_t m)
3012 mpz_t g, tr;
3013 int invertible;
/* No inverse for u == 0 or |m| <= 1. */
3015 if (u->_mp_size == 0 || mpz_cmpabs_ui (m, 1) <= 0)
3016 return 0;
3018 mpz_init (g);
3019 mpz_init (tr);
3021 mpz_gcdext (g, tr, NULL, u, m);
3022 invertible = (mpz_cmp_ui (g, 1) == 0);
3024 if (invertible)
/* Normalize a negative cofactor into the range [0, |m|). */
3026 if (tr->_mp_size < 0)
3028 if (m->_mp_size >= 0)
3029 mpz_add (tr, tr, m);
3030 else
3031 mpz_sub (tr, tr, m);
3033 mpz_swap (r, tr);
3036 mpz_clear (g);
3037 mpz_clear (tr);
3038 return invertible;
3042 /* Higher level operations (sqrt, pow and root) */
/* r = b^e by left-to-right binary exponentiation: square every step,
   multiply in b on set exponent bits.  b^0 == 1 for any b. */
3044 void
3045 mpz_pow_ui (mpz_t r, const mpz_t b, unsigned long e)
3047 unsigned long bit;
3048 mpz_t tr;
3049 mpz_init_set_ui (tr, 1);
3051 bit = GMP_ULONG_HIGHBIT;
3054 mpz_mul (tr, tr, tr);
3055 if (e & bit)
3056 mpz_mul (tr, tr, b);
3057 bit >>= 1;
3059 while (bit > 0);
3061 mpz_swap (r, tr);
3062 mpz_clear (tr);
/* r = blimb^e, wrapping the single limb in a read-only mpz. */
3065 void
3066 mpz_ui_pow_ui (mpz_t r, unsigned long blimb, unsigned long e)
3068 mpz_t b;
3069 mpz_pow_ui (r, mpz_roinit_n (b, &blimb, 1), e);
/* r = b^e mod m, by binary exponentiation with reduction after each
   step.  Negative e requires b invertible mod m.  Intermediate
   reductions use a pre-shifted ("normalized") copy of m so each
   reduction avoids shifting; only the final reduction uses the real
   shift. */
3072 void
3073 mpz_powm (mpz_t r, const mpz_t b, const mpz_t e, const mpz_t m)
3075 mpz_t tr;
3076 mpz_t base;
3077 mp_size_t en, mn;
3078 mp_srcptr mp;
3079 struct gmp_div_inverse minv;
3080 unsigned shift;
3081 mp_ptr tp = NULL;
3083 en = GMP_ABS (e->_mp_size);
3084 mn = GMP_ABS (m->_mp_size);
3085 if (mn == 0)
3086 gmp_die ("mpz_powm: Zero modulo.");
3088 if (en == 0)
3090 mpz_set_ui (r, 1);
3091 return;
3094 mp = m->_mp_d;
3095 mpn_div_qr_invert (&minv, mp, mn);
3096 shift = minv.shift;
3098 if (shift > 0)
3100 /* To avoid shifts, we do all our reductions, except the final
3101 one, using a *normalized* m. */
3102 minv.shift = 0;
3104 tp = gmp_xalloc_limbs (mn);
3105 gmp_assert_nocarry (mpn_lshift (tp, mp, mn, shift));
3106 mp = tp;
3109 mpz_init (base);
3111 if (e->_mp_size < 0)
3113 if (!mpz_invert (base, b, m))
3114 gmp_die ("mpz_powm: Negative exponent and non-invertible base.");
3116 else
3118 mp_size_t bn;
3119 mpz_abs (base, b);
3121 bn = base->_mp_size;
3122 if (bn >= mn)
3124 mpn_div_qr_preinv (NULL, base->_mp_d, base->_mp_size, mp, mn, &minv);
3125 bn = mn;
3128 /* We have reduced the absolute value. Now take care of the
3129 sign. Note that we get zero represented non-canonically as
3130 m. */
3131 if (b->_mp_size < 0)
3133 mp_ptr bp = MPZ_REALLOC (base, mn);
3134 gmp_assert_nocarry (mpn_sub (bp, mp, mn, bp, bn));
3135 bn = mn;
3137 base->_mp_size = mpn_normalized_size (base->_mp_d, bn);
3139 mpz_init_set_ui (tr, 1);
/* Scan exponent limbs from most to least significant. */
3141 while (--en >= 0)
3143 mp_limb_t w = e->_mp_d[en];
3144 mp_limb_t bit;
3146 bit = GMP_LIMB_HIGHBIT;
3149 mpz_mul (tr, tr, tr);
3150 if (w & bit)
3151 mpz_mul (tr, tr, base);
/* Reduce whenever the accumulator outgrows m's limb count. */
3152 if (tr->_mp_size > mn)
3154 mpn_div_qr_preinv (NULL, tr->_mp_d, tr->_mp_size, mp, mn, &minv);
3155 tr->_mp_size = mpn_normalized_size (tr->_mp_d, mn);
3157 bit >>= 1;
3159 while (bit > 0);
3162 /* Final reduction */
3163 if (tr->_mp_size >= mn)
3165 minv.shift = shift;
3166 mpn_div_qr_preinv (NULL, tr->_mp_d, tr->_mp_size, mp, mn, &minv);
3167 tr->_mp_size = mpn_normalized_size (tr->_mp_d, mn);
3169 if (tp)
3170 gmp_free (tp);
3172 mpz_swap (r, tr);
3173 mpz_clear (tr);
3174 mpz_clear (base);
/* r = b^elimb mod m, wrapping the limb exponent in a read-only mpz. */
3177 void
3178 mpz_powm_ui (mpz_t r, const mpz_t b, unsigned long elimb, const mpz_t m)
3180 mpz_t e;
3181 mpz_powm (r, b, mpz_roinit_n (e, &elimb, 1), m);
3184 /* x=trunc(y^(1/z)), r=y-x^z */
/* Newton's method for integer z-th root, starting from a power-of-two
   overestimate; iteration stops when the estimate stops decreasing.
   Dies on even root of a negative y or z == 0.  x and/or r may be
   NULL. */
3185 void
3186 mpz_rootrem (mpz_t x, mpz_t r, const mpz_t y, unsigned long z)
3188 int sgn;
3189 mpz_t t, u;
3191 sgn = y->_mp_size < 0;
/* Even z with negative y: (~z & sgn) has bit 0 set exactly then. */
3192 if ((~z & sgn) != 0)
3193 gmp_die ("mpz_rootrem: Negative argument, with even root.");
3194 if (z == 0)
3195 gmp_die ("mpz_rootrem: Zeroth root.");
/* |y| <= 1: the root is y itself and the remainder is zero. */
3197 if (mpz_cmpabs_ui (y, 1) <= 0) {
3198 if (x)
3199 mpz_set (x, y);
3200 if (r)
3201 r->_mp_size = 0;
3202 return;
3205 mpz_init (u);
/* Initial estimate t = 2^(bits(y)/z + 1) >= true root. */
3207 mp_bitcnt_t tb;
3208 tb = mpz_sizeinbase (y, 2) / z + 1;
3209 mpz_init2 (t, tb + 1);
3210 mpz_setbit (t, tb);
3213 if (z == 2) /* simplify sqrt loop: z-1 == 1 */
3214 do {
3215 mpz_swap (u, t); /* u = x */
3216 mpz_tdiv_q (t, y, u); /* t = y/x */
3217 mpz_add (t, t, u); /* t = y/x + x */
3218 mpz_tdiv_q_2exp (t, t, 1); /* x'= (y/x + x)/2 */
3219 } while (mpz_cmpabs (t, u) < 0); /* |x'| < |x| */
3220 else /* z != 2 */ {
3221 mpz_t v;
3223 mpz_init (v);
3224 if (sgn)
3225 mpz_neg (t, t);
3227 do {
3228 mpz_swap (u, t); /* u = x */
3229 mpz_pow_ui (t, u, z - 1); /* t = x^(z-1) */
3230 mpz_tdiv_q (t, y, t); /* t = y/x^(z-1) */
3231 mpz_mul_ui (v, u, z - 1); /* v = x*(z-1) */
3232 mpz_add (t, t, v); /* t = y/x^(z-1) + x*(z-1) */
3233 mpz_tdiv_q_ui (t, t, z); /* x'=(y/x^(z-1) + x*(z-1))/z */
3234 } while (mpz_cmpabs (t, u) < 0); /* |x'| < |x| */
3236 mpz_clear (v);
3239 if (r) {
3240 mpz_pow_ui (t, u, z);
3241 mpz_sub (r, y, t);
3243 if (x)
3244 mpz_swap (x, u);
3245 mpz_clear (u);
3246 mpz_clear (t);
/* Non-zero iff y is a perfect z-th power; x (if non-NULL) gets the
   root. */
3250 mpz_root (mpz_t x, const mpz_t y, unsigned long z)
3252 int res;
3253 mpz_t r;
3255 mpz_init (r);
3256 mpz_rootrem (x, r, y, z);
3257 res = r->_mp_size == 0;
3258 mpz_clear (r);
3260 return res;
3263 /* Compute s = floor(sqrt(u)) and r = u - s^2. Allows r == NULL */
3264 void
3265 mpz_sqrtrem (mpz_t s, mpz_t r, const mpz_t u)
3267 mpz_rootrem (s, r, u, 2);
/* s = floor(sqrt(u)), discarding the remainder. */
3270 void
3271 mpz_sqrt (mpz_t s, const mpz_t u)
3273 mpz_rootrem (s, NULL, u, 2);
/* Non-zero iff u is a perfect square; negative u never is, zero is. */
3277 mpz_perfect_square_p (const mpz_t u)
3279 if (u->_mp_size <= 0)
3280 return (u->_mp_size == 0);
3281 else
3282 return mpz_root (NULL, u, 2);
/* mpn-level perfect-square test on a normalized limb array p[n]. */
3286 mpn_perfect_square_p (mp_srcptr p, mp_size_t n)
3288 mpz_t t;
3290 assert (n > 0);
3291 assert (p [n-1] != 0);
3292 return mpz_root (NULL, mpz_roinit_n (t, p, n), 2);
/* mpn-level sqrt with remainder: sp gets floor(sqrt(p)) ((n+1)/2
   limbs), rp (optional) the remainder; returns the remainder's limb
   count. */
3295 mp_size_t
3296 mpn_sqrtrem (mp_ptr sp, mp_ptr rp, mp_srcptr p, mp_size_t n)
3298 mpz_t s, r, u;
3299 mp_size_t res;
3301 assert (n > 0);
3302 assert (p [n-1] != 0);
3304 mpz_init (r);
3305 mpz_init (s);
3306 mpz_rootrem (s, r, mpz_roinit_n (u, p, n), 2);
3308 assert (s->_mp_size == (n+1)/2);
3309 mpn_copyd (sp, s->_mp_d, s->_mp_size);
3310 mpz_clear (s);
3311 res = r->_mp_size;
3312 if (rp)
3313 mpn_copyd (rp, r->_mp_d, res);
3314 mpz_clear (r);
3315 return res;
3318 /* Combinatorics */
/* x = n!, with 0! == 1 handled by the (n == 0) seed. */
3320 void
3321 mpz_fac_ui (mpz_t x, unsigned long n)
3323 mpz_set_ui (x, n + (n == 0));
3324 while (n > 2)
3325 mpz_mul_ui (x, x, --n);
/* r = binomial(n, k) = n (n-1) ... (n-k+1) / k!; zero when k > n. */
3328 void
3329 mpz_bin_uiui (mpz_t r, unsigned long n, unsigned long k)
3331 mpz_t t;
/* Seed with 1, or 0 when k > n (every later multiply keeps it 0). */
3333 mpz_set_ui (r, k <= n);
/* Use the symmetry binomial(n, k) == binomial(n, n-k). */
3335 if (k > (n >> 1))
3336 k = (k <= n) ? n - k : 0;
3338 mpz_init (t);
3339 mpz_fac_ui (t, k);
3341 for (; k > 0; k--)
3342 mpz_mul_ui (r, r, n--);
3344 mpz_divexact (r, r, t);
3345 mpz_clear (t);
3349 /* Primality testing */
3350 static int
3351 gmp_millerrabin (const mpz_t n, const mpz_t nm1, mpz_t y,
3352 const mpz_t q, mp_bitcnt_t k)
3354 assert (k > 0);
3356 /* Caller must initialize y to the base. */
3357 mpz_powm (y, y, q, n);
3359 if (mpz_cmp_ui (y, 1) == 0 || mpz_cmp (y, nm1) == 0)
3360 return 1;
3362 while (--k > 0)
3364 mpz_powm_ui (y, y, 2, n);
3365 if (mpz_cmp (y, nm1) == 0)
3366 return 1;
3367 /* y == 1 means that the previous y was a non-trivial square root
3368 of 1 (mod n). y == 0 means that n is a power of the base.
3369 In either case, n is not prime. */
3370 if (mpz_cmp_ui (y, 1) <= 0)
3371 return 0;
3373 return 0;
/* This product is 0xc0cfd797, and fits in 32 bits. */
#define GMP_PRIME_PRODUCT \
  (3UL*5UL*7UL*11UL*13UL*17UL*19UL*23UL*29UL)

/* Bit (p+1)/2 is set, for each odd prime <= 61 */
#define GMP_PRIME_MASK 0xc96996dcUL
3384 mpz_probab_prime_p (const mpz_t n, int reps)
3386 mpz_t nm1;
3387 mpz_t q;
3388 mpz_t y;
3389 mp_bitcnt_t k;
3390 int is_prime;
3391 int j;
3393 /* Note that we use the absolute value of n only, for compatibility
3394 with the real GMP. */
3395 if (mpz_even_p (n))
3396 return (mpz_cmpabs_ui (n, 2) == 0) ? 2 : 0;
3398 /* Above test excludes n == 0 */
3399 assert (n->_mp_size != 0);
3401 if (mpz_cmpabs_ui (n, 64) < 0)
3402 return (GMP_PRIME_MASK >> (n->_mp_d[0] >> 1)) & 2;
3404 if (mpz_gcd_ui (NULL, n, GMP_PRIME_PRODUCT) != 1)
3405 return 0;
3407 /* All prime factors are >= 31. */
3408 if (mpz_cmpabs_ui (n, 31*31) < 0)
3409 return 2;
3411 /* Use Miller-Rabin, with a deterministic sequence of bases, a[j] =
3412 j^2 + j + 41 using Euler's polynomial. We potentially stop early,
3413 if a[j] >= n - 1. Since n >= 31*31, this can happen only if reps >
3414 30 (a[30] == 971 > 31*31 == 961). */
3416 mpz_init (nm1);
3417 mpz_init (q);
3418 mpz_init (y);
3420 /* Find q and k, where q is odd and n = 1 + 2**k * q. */
3421 nm1->_mp_size = mpz_abs_sub_ui (nm1, n, 1);
3422 k = mpz_scan1 (nm1, 0);
3423 mpz_tdiv_q_2exp (q, nm1, k);
3425 for (j = 0, is_prime = 1; is_prime & (j < reps); j++)
3427 mpz_set_ui (y, (unsigned long) j*j+j+41);
3428 if (mpz_cmp (y, nm1) >= 0)
3430 /* Don't try any further bases. This "early" break does not affect
3431 the result for any reasonable reps value (<=5000 was tested) */
3432 assert (j >= 30);
3433 break;
3435 is_prime = gmp_millerrabin (n, nm1, y, q, k);
3437 mpz_clear (nm1);
3438 mpz_clear (q);
3439 mpz_clear (y);
3441 return is_prime;
3445 /* Logical operations and bit manipulation. */
3447 /* Numbers are treated as if represented in two's complement (and
3448 infinitely sign extended). For a negative values we get the two's
3449 complement from -x = ~x + 1, where ~ is bitwise complement.
3450 Negation transforms
3452 xxxx10...0
3454 into
3456 yyyy10...0
3458 where yyyy is the bitwise complement of xxxx. So least significant
3459 bits, up to and including the first one bit, are unchanged, and
3460 the more significant bits are all complemented.
3462 To change a bit from zero to one in a negative number, subtract the
3463 corresponding power of two from the absolute value. This can never
3464 underflow. To change a bit from one to zero, add the corresponding
3465 power of two, and this might overflow. E.g., if x = -001111, the
3466 two's complement is 110001. Clearing the least significant bit, we
3467 get two's complement 110000, and -010000. */
3470 mpz_tstbit (const mpz_t d, mp_bitcnt_t bit_index)
3472 mp_size_t limb_index;
3473 unsigned shift;
3474 mp_size_t ds;
3475 mp_size_t dn;
3476 mp_limb_t w;
3477 int bit;
3479 ds = d->_mp_size;
3480 dn = GMP_ABS (ds);
3481 limb_index = bit_index / GMP_LIMB_BITS;
3482 if (limb_index >= dn)
3483 return ds < 0;
3485 shift = bit_index % GMP_LIMB_BITS;
3486 w = d->_mp_d[limb_index];
3487 bit = (w >> shift) & 1;
3489 if (ds < 0)
3491 /* d < 0. Check if any of the bits below is set: If so, our bit
3492 must be complemented. */
3493 if (shift > 0 && (w << (GMP_LIMB_BITS - shift)) > 0)
3494 return bit ^ 1;
3495 while (--limb_index >= 0)
3496 if (d->_mp_d[limb_index] > 0)
3497 return bit ^ 1;
3499 return bit;
3502 static void
3503 mpz_abs_add_bit (mpz_t d, mp_bitcnt_t bit_index)
3505 mp_size_t dn, limb_index;
3506 mp_limb_t bit;
3507 mp_ptr dp;
3509 dn = GMP_ABS (d->_mp_size);
3511 limb_index = bit_index / GMP_LIMB_BITS;
3512 bit = (mp_limb_t) 1 << (bit_index % GMP_LIMB_BITS);
3514 if (limb_index >= dn)
3516 mp_size_t i;
3517 /* The bit should be set outside of the end of the number.
3518 We have to increase the size of the number. */
3519 dp = MPZ_REALLOC (d, limb_index + 1);
3521 dp[limb_index] = bit;
3522 for (i = dn; i < limb_index; i++)
3523 dp[i] = 0;
3524 dn = limb_index + 1;
3526 else
3528 mp_limb_t cy;
3530 dp = d->_mp_d;
3532 cy = mpn_add_1 (dp + limb_index, dp + limb_index, dn - limb_index, bit);
3533 if (cy > 0)
3535 dp = MPZ_REALLOC (d, dn + 1);
3536 dp[dn++] = cy;
3540 d->_mp_size = (d->_mp_size < 0) ? - dn : dn;
3543 static void
3544 mpz_abs_sub_bit (mpz_t d, mp_bitcnt_t bit_index)
3546 mp_size_t dn, limb_index;
3547 mp_ptr dp;
3548 mp_limb_t bit;
3550 dn = GMP_ABS (d->_mp_size);
3551 dp = d->_mp_d;
3553 limb_index = bit_index / GMP_LIMB_BITS;
3554 bit = (mp_limb_t) 1 << (bit_index % GMP_LIMB_BITS);
3556 assert (limb_index < dn);
3558 gmp_assert_nocarry (mpn_sub_1 (dp + limb_index, dp + limb_index,
3559 dn - limb_index, bit));
3560 dn = mpn_normalized_size (dp, dn);
3561 d->_mp_size = (d->_mp_size < 0) ? - dn : dn;
3564 void
3565 mpz_setbit (mpz_t d, mp_bitcnt_t bit_index)
3567 if (!mpz_tstbit (d, bit_index))
3569 if (d->_mp_size >= 0)
3570 mpz_abs_add_bit (d, bit_index);
3571 else
3572 mpz_abs_sub_bit (d, bit_index);
3576 void
3577 mpz_clrbit (mpz_t d, mp_bitcnt_t bit_index)
3579 if (mpz_tstbit (d, bit_index))
3581 if (d->_mp_size >= 0)
3582 mpz_abs_sub_bit (d, bit_index);
3583 else
3584 mpz_abs_add_bit (d, bit_index);
3588 void
3589 mpz_combit (mpz_t d, mp_bitcnt_t bit_index)
3591 if (mpz_tstbit (d, bit_index) ^ (d->_mp_size < 0))
3592 mpz_abs_sub_bit (d, bit_index);
3593 else
3594 mpz_abs_add_bit (d, bit_index);
3597 void
3598 mpz_com (mpz_t r, const mpz_t u)
3600 mpz_neg (r, u);
3601 mpz_sub_ui (r, r, 1);
3604 void
3605 mpz_and (mpz_t r, const mpz_t u, const mpz_t v)
3607 mp_size_t un, vn, rn, i;
3608 mp_ptr up, vp, rp;
3610 mp_limb_t ux, vx, rx;
3611 mp_limb_t uc, vc, rc;
3612 mp_limb_t ul, vl, rl;
3614 un = GMP_ABS (u->_mp_size);
3615 vn = GMP_ABS (v->_mp_size);
3616 if (un < vn)
3618 MPZ_SRCPTR_SWAP (u, v);
3619 MP_SIZE_T_SWAP (un, vn);
3621 if (vn == 0)
3623 r->_mp_size = 0;
3624 return;
3627 uc = u->_mp_size < 0;
3628 vc = v->_mp_size < 0;
3629 rc = uc & vc;
3631 ux = -uc;
3632 vx = -vc;
3633 rx = -rc;
3635 /* If the smaller input is positive, higher limbs don't matter. */
3636 rn = vx ? un : vn;
3638 rp = MPZ_REALLOC (r, rn + rc);
3640 up = u->_mp_d;
3641 vp = v->_mp_d;
3643 i = 0;
3646 ul = (up[i] ^ ux) + uc;
3647 uc = ul < uc;
3649 vl = (vp[i] ^ vx) + vc;
3650 vc = vl < vc;
3652 rl = ( (ul & vl) ^ rx) + rc;
3653 rc = rl < rc;
3654 rp[i] = rl;
3656 while (++i < vn);
3657 assert (vc == 0);
3659 for (; i < rn; i++)
3661 ul = (up[i] ^ ux) + uc;
3662 uc = ul < uc;
3664 rl = ( (ul & vx) ^ rx) + rc;
3665 rc = rl < rc;
3666 rp[i] = rl;
3668 if (rc)
3669 rp[rn++] = rc;
3670 else
3671 rn = mpn_normalized_size (rp, rn);
3673 r->_mp_size = rx ? -rn : rn;
3676 void
3677 mpz_ior (mpz_t r, const mpz_t u, const mpz_t v)
3679 mp_size_t un, vn, rn, i;
3680 mp_ptr up, vp, rp;
3682 mp_limb_t ux, vx, rx;
3683 mp_limb_t uc, vc, rc;
3684 mp_limb_t ul, vl, rl;
3686 un = GMP_ABS (u->_mp_size);
3687 vn = GMP_ABS (v->_mp_size);
3688 if (un < vn)
3690 MPZ_SRCPTR_SWAP (u, v);
3691 MP_SIZE_T_SWAP (un, vn);
3693 if (vn == 0)
3695 mpz_set (r, u);
3696 return;
3699 uc = u->_mp_size < 0;
3700 vc = v->_mp_size < 0;
3701 rc = uc | vc;
3703 ux = -uc;
3704 vx = -vc;
3705 rx = -rc;
3707 /* If the smaller input is negative, by sign extension higher limbs
3708 don't matter. */
3709 rn = vx ? vn : un;
3711 rp = MPZ_REALLOC (r, rn + rc);
3713 up = u->_mp_d;
3714 vp = v->_mp_d;
3716 i = 0;
3719 ul = (up[i] ^ ux) + uc;
3720 uc = ul < uc;
3722 vl = (vp[i] ^ vx) + vc;
3723 vc = vl < vc;
3725 rl = ( (ul | vl) ^ rx) + rc;
3726 rc = rl < rc;
3727 rp[i] = rl;
3729 while (++i < vn);
3730 assert (vc == 0);
3732 for (; i < rn; i++)
3734 ul = (up[i] ^ ux) + uc;
3735 uc = ul < uc;
3737 rl = ( (ul | vx) ^ rx) + rc;
3738 rc = rl < rc;
3739 rp[i] = rl;
3741 if (rc)
3742 rp[rn++] = rc;
3743 else
3744 rn = mpn_normalized_size (rp, rn);
3746 r->_mp_size = rx ? -rn : rn;
3749 void
3750 mpz_xor (mpz_t r, const mpz_t u, const mpz_t v)
3752 mp_size_t un, vn, i;
3753 mp_ptr up, vp, rp;
3755 mp_limb_t ux, vx, rx;
3756 mp_limb_t uc, vc, rc;
3757 mp_limb_t ul, vl, rl;
3759 un = GMP_ABS (u->_mp_size);
3760 vn = GMP_ABS (v->_mp_size);
3761 if (un < vn)
3763 MPZ_SRCPTR_SWAP (u, v);
3764 MP_SIZE_T_SWAP (un, vn);
3766 if (vn == 0)
3768 mpz_set (r, u);
3769 return;
3772 uc = u->_mp_size < 0;
3773 vc = v->_mp_size < 0;
3774 rc = uc ^ vc;
3776 ux = -uc;
3777 vx = -vc;
3778 rx = -rc;
3780 rp = MPZ_REALLOC (r, un + rc);
3782 up = u->_mp_d;
3783 vp = v->_mp_d;
3785 i = 0;
3788 ul = (up[i] ^ ux) + uc;
3789 uc = ul < uc;
3791 vl = (vp[i] ^ vx) + vc;
3792 vc = vl < vc;
3794 rl = (ul ^ vl ^ rx) + rc;
3795 rc = rl < rc;
3796 rp[i] = rl;
3798 while (++i < vn);
3799 assert (vc == 0);
3801 for (; i < un; i++)
3803 ul = (up[i] ^ ux) + uc;
3804 uc = ul < uc;
3806 rl = (ul ^ ux) + rc;
3807 rc = rl < rc;
3808 rp[i] = rl;
3810 if (rc)
3811 rp[un++] = rc;
3812 else
3813 un = mpn_normalized_size (rp, un);
3815 r->_mp_size = rx ? -un : un;
3818 static unsigned
3819 gmp_popcount_limb (mp_limb_t x)
3821 unsigned c;
3823 /* Do 16 bits at a time, to avoid limb-sized constants. */
3824 for (c = 0; x > 0; x >>= 16)
3826 unsigned w = ((x >> 1) & 0x5555) + (x & 0x5555);
3827 w = ((w >> 2) & 0x3333) + (w & 0x3333);
3828 w = ((w >> 4) & 0x0f0f) + (w & 0x0f0f);
3829 w = (w >> 8) + (w & 0x00ff);
3830 c += w;
3832 return c;
3835 mp_bitcnt_t
3836 mpn_popcount (mp_srcptr p, mp_size_t n)
3838 mp_size_t i;
3839 mp_bitcnt_t c;
3841 for (c = 0, i = 0; i < n; i++)
3842 c += gmp_popcount_limb (p[i]);
3844 return c;
3847 mp_bitcnt_t
3848 mpz_popcount (const mpz_t u)
3850 mp_size_t un;
3852 un = u->_mp_size;
3854 if (un < 0)
3855 return ~(mp_bitcnt_t) 0;
3857 return mpn_popcount (u->_mp_d, un);
3860 mp_bitcnt_t
3861 mpz_hamdist (const mpz_t u, const mpz_t v)
3863 mp_size_t un, vn, i;
3864 mp_limb_t uc, vc, ul, vl, comp;
3865 mp_srcptr up, vp;
3866 mp_bitcnt_t c;
3868 un = u->_mp_size;
3869 vn = v->_mp_size;
3871 if ( (un ^ vn) < 0)
3872 return ~(mp_bitcnt_t) 0;
3874 comp = - (uc = vc = (un < 0));
3875 if (uc)
3877 assert (vn < 0);
3878 un = -un;
3879 vn = -vn;
3882 up = u->_mp_d;
3883 vp = v->_mp_d;
3885 if (un < vn)
3886 MPN_SRCPTR_SWAP (up, un, vp, vn);
3888 for (i = 0, c = 0; i < vn; i++)
3890 ul = (up[i] ^ comp) + uc;
3891 uc = ul < uc;
3893 vl = (vp[i] ^ comp) + vc;
3894 vc = vl < vc;
3896 c += gmp_popcount_limb (ul ^ vl);
3898 assert (vc == 0);
3900 for (; i < un; i++)
3902 ul = (up[i] ^ comp) + uc;
3903 uc = ul < uc;
3905 c += gmp_popcount_limb (ul ^ comp);
3908 return c;
3911 mp_bitcnt_t
3912 mpz_scan1 (const mpz_t u, mp_bitcnt_t starting_bit)
3914 mp_ptr up;
3915 mp_size_t us, un, i;
3916 mp_limb_t limb, ux;
3918 us = u->_mp_size;
3919 un = GMP_ABS (us);
3920 i = starting_bit / GMP_LIMB_BITS;
3922 /* Past the end there's no 1 bits for u>=0, or an immediate 1 bit
3923 for u<0. Notice this test picks up any u==0 too. */
3924 if (i >= un)
3925 return (us >= 0 ? ~(mp_bitcnt_t) 0 : starting_bit);
3927 up = u->_mp_d;
3928 ux = 0;
3929 limb = up[i];
3931 if (starting_bit != 0)
3933 if (us < 0)
3935 ux = mpn_zero_p (up, i);
3936 limb = ~ limb + ux;
3937 ux = - (mp_limb_t) (limb >= ux);
3940 /* Mask to 0 all bits before starting_bit, thus ignoring them. */
3941 limb &= (GMP_LIMB_MAX << (starting_bit % GMP_LIMB_BITS));
3944 return mpn_common_scan (limb, i, up, un, ux);
3947 mp_bitcnt_t
3948 mpz_scan0 (const mpz_t u, mp_bitcnt_t starting_bit)
3950 mp_ptr up;
3951 mp_size_t us, un, i;
3952 mp_limb_t limb, ux;
3954 us = u->_mp_size;
3955 ux = - (mp_limb_t) (us >= 0);
3956 un = GMP_ABS (us);
3957 i = starting_bit / GMP_LIMB_BITS;
3959 /* When past end, there's an immediate 0 bit for u>=0, or no 0 bits for
3960 u<0. Notice this test picks up all cases of u==0 too. */
3961 if (i >= un)
3962 return (ux ? starting_bit : ~(mp_bitcnt_t) 0);
3964 up = u->_mp_d;
3965 limb = up[i] ^ ux;
3967 if (ux == 0)
3968 limb -= mpn_zero_p (up, i); /* limb = ~(~limb + zero_p) */
3970 /* Mask all bits before starting_bit, thus ignoring them. */
3971 limb &= (GMP_LIMB_MAX << (starting_bit % GMP_LIMB_BITS));
3973 return mpn_common_scan (limb, i, up, un, ux);
3977 /* MPZ base conversion. */
3979 size_t
3980 mpz_sizeinbase (const mpz_t u, int base)
3982 mp_size_t un;
3983 mp_srcptr up;
3984 mp_ptr tp;
3985 mp_bitcnt_t bits;
3986 struct gmp_div_inverse bi;
3987 size_t ndigits;
3989 assert (base >= 2);
3990 assert (base <= 36);
3992 un = GMP_ABS (u->_mp_size);
3993 if (un == 0)
3994 return 1;
3996 up = u->_mp_d;
3998 bits = (un - 1) * GMP_LIMB_BITS + mpn_limb_size_in_base_2 (up[un-1]);
3999 switch (base)
4001 case 2:
4002 return bits;
4003 case 4:
4004 return (bits + 1) / 2;
4005 case 8:
4006 return (bits + 2) / 3;
4007 case 16:
4008 return (bits + 3) / 4;
4009 case 32:
4010 return (bits + 4) / 5;
4011 /* FIXME: Do something more clever for the common case of base
4012 10. */
4015 tp = gmp_xalloc_limbs (un);
4016 mpn_copyi (tp, up, un);
4017 mpn_div_qr_1_invert (&bi, base);
4019 ndigits = 0;
4022 ndigits++;
4023 mpn_div_qr_1_preinv (tp, tp, un, &bi);
4024 un -= (tp[un-1] == 0);
4026 while (un > 0);
4028 gmp_free (tp);
4029 return ndigits;
4032 char *
4033 mpz_get_str (char *sp, int base, const mpz_t u)
4035 unsigned bits;
4036 const char *digits;
4037 mp_size_t un;
4038 size_t i, sn;
4040 if (base >= 0)
4042 digits = "0123456789abcdefghijklmnopqrstuvwxyz";
4044 else
4046 base = -base;
4047 digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
4049 if (base <= 1)
4050 base = 10;
4051 if (base > 36)
4052 return NULL;
4054 sn = 1 + mpz_sizeinbase (u, base);
4055 if (!sp)
4056 sp = (char *) gmp_xalloc (1 + sn);
4058 un = GMP_ABS (u->_mp_size);
4060 if (un == 0)
4062 sp[0] = '0';
4063 sp[1] = '\0';
4064 return sp;
4067 i = 0;
4069 if (u->_mp_size < 0)
4070 sp[i++] = '-';
4072 bits = mpn_base_power_of_two_p (base);
4074 if (bits)
4075 /* Not modified in this case. */
4076 sn = i + mpn_get_str_bits ((unsigned char *) sp + i, bits, u->_mp_d, un);
4077 else
4079 struct mpn_base_info info;
4080 mp_ptr tp;
4082 mpn_get_base_info (&info, base);
4083 tp = gmp_xalloc_limbs (un);
4084 mpn_copyi (tp, u->_mp_d, un);
4086 sn = i + mpn_get_str_other ((unsigned char *) sp + i, base, &info, tp, un);
4087 gmp_free (tp);
4090 for (; i < sn; i++)
4091 sp[i] = digits[(unsigned char) sp[i]];
4093 sp[sn] = '\0';
4094 return sp;
4098 mpz_set_str (mpz_t r, const char *sp, int base)
4100 unsigned bits;
4101 mp_size_t rn, alloc;
4102 mp_ptr rp;
4103 size_t sn;
4104 int sign;
4105 unsigned char *dp;
4107 assert (base == 0 || (base >= 2 && base <= 36));
4109 while (isspace( (unsigned char) *sp))
4110 sp++;
4112 sign = (*sp == '-');
4113 sp += sign;
4115 if (base == 0)
4117 if (*sp == '0')
4119 sp++;
4120 if (*sp == 'x' || *sp == 'X')
4122 base = 16;
4123 sp++;
4125 else if (*sp == 'b' || *sp == 'B')
4127 base = 2;
4128 sp++;
4130 else
4131 base = 8;
4133 else
4134 base = 10;
4137 sn = strlen (sp);
4138 dp = (unsigned char *) gmp_xalloc (sn + (sn == 0));
4140 for (sn = 0; *sp; sp++)
4142 unsigned digit;
4144 if (isspace ((unsigned char) *sp))
4145 continue;
4146 if (*sp >= '0' && *sp <= '9')
4147 digit = *sp - '0';
4148 else if (*sp >= 'a' && *sp <= 'z')
4149 digit = *sp - 'a' + 10;
4150 else if (*sp >= 'A' && *sp <= 'Z')
4151 digit = *sp - 'A' + 10;
4152 else
4153 digit = base; /* fail */
4155 if (digit >= base)
4157 gmp_free (dp);
4158 r->_mp_size = 0;
4159 return -1;
4162 dp[sn++] = digit;
4165 bits = mpn_base_power_of_two_p (base);
4167 if (bits > 0)
4169 alloc = (sn * bits + GMP_LIMB_BITS - 1) / GMP_LIMB_BITS;
4170 rp = MPZ_REALLOC (r, alloc);
4171 rn = mpn_set_str_bits (rp, dp, sn, bits);
4173 else
4175 struct mpn_base_info info;
4176 mpn_get_base_info (&info, base);
4177 alloc = (sn + info.exp - 1) / info.exp;
4178 rp = MPZ_REALLOC (r, alloc);
4179 rn = mpn_set_str_other (rp, dp, sn, base, &info);
4181 assert (rn <= alloc);
4182 gmp_free (dp);
4184 r->_mp_size = sign ? - rn : rn;
4186 return 0;
4190 mpz_init_set_str (mpz_t r, const char *sp, int base)
4192 mpz_init (r);
4193 return mpz_set_str (r, sp, base);
4196 size_t
4197 mpz_out_str (FILE *stream, int base, const mpz_t x)
4199 char *str;
4200 size_t len;
4202 str = mpz_get_str (NULL, base, x);
4203 len = strlen (str);
4204 len = fwrite (str, 1, len, stream);
4205 gmp_free (str);
4206 return len;
4210 static int
4211 gmp_detect_endian (void)
4213 static const int i = 2;
4214 const unsigned char *p = (const unsigned char *) &i;
4215 return 1 - *p;
4218 /* Import and export. Does not support nails. */
4219 void
4220 mpz_import (mpz_t r, size_t count, int order, size_t size, int endian,
4221 size_t nails, const void *src)
4223 const unsigned char *p;
4224 ptrdiff_t word_step;
4225 mp_ptr rp;
4226 mp_size_t rn;
4228 /* The current (partial) limb. */
4229 mp_limb_t limb;
4230 /* The number of bytes already copied to this limb (starting from
4231 the low end). */
4232 size_t bytes;
4233 /* The index where the limb should be stored, when completed. */
4234 mp_size_t i;
4236 if (nails != 0)
4237 gmp_die ("mpz_import: Nails not supported.");
4239 assert (order == 1 || order == -1);
4240 assert (endian >= -1 && endian <= 1);
4242 if (endian == 0)
4243 endian = gmp_detect_endian ();
4245 p = (unsigned char *) src;
4247 word_step = (order != endian) ? 2 * size : 0;
4249 /* Process bytes from the least significant end, so point p at the
4250 least significant word. */
4251 if (order == 1)
4253 p += size * (count - 1);
4254 word_step = - word_step;
4257 /* And at least significant byte of that word. */
4258 if (endian == 1)
4259 p += (size - 1);
4261 rn = (size * count + sizeof(mp_limb_t) - 1) / sizeof(mp_limb_t);
4262 rp = MPZ_REALLOC (r, rn);
4264 for (limb = 0, bytes = 0, i = 0; count > 0; count--, p += word_step)
4266 size_t j;
4267 for (j = 0; j < size; j++, p -= (ptrdiff_t) endian)
4269 limb |= (mp_limb_t) *p << (bytes++ * CHAR_BIT);
4270 if (bytes == sizeof(mp_limb_t))
4272 rp[i++] = limb;
4273 bytes = 0;
4274 limb = 0;
4278 assert (i + (bytes > 0) == rn);
4279 if (limb != 0)
4280 rp[i++] = limb;
4281 else
4282 i = mpn_normalized_size (rp, i);
4284 r->_mp_size = i;
4287 void *
4288 mpz_export (void *r, size_t *countp, int order, size_t size, int endian,
4289 size_t nails, const mpz_t u)
4291 size_t count;
4292 mp_size_t un;
4294 if (nails != 0)
4295 gmp_die ("mpz_import: Nails not supported.");
4297 assert (order == 1 || order == -1);
4298 assert (endian >= -1 && endian <= 1);
4299 assert (size > 0 || u->_mp_size == 0);
4301 un = u->_mp_size;
4302 count = 0;
4303 if (un != 0)
4305 size_t k;
4306 unsigned char *p;
4307 ptrdiff_t word_step;
4308 /* The current (partial) limb. */
4309 mp_limb_t limb;
4310 /* The number of bytes left to to in this limb. */
4311 size_t bytes;
4312 /* The index where the limb was read. */
4313 mp_size_t i;
4315 un = GMP_ABS (un);
4317 /* Count bytes in top limb. */
4318 limb = u->_mp_d[un-1];
4319 assert (limb != 0);
4321 k = 0;
4322 do {
4323 k++; limb >>= CHAR_BIT;
4324 } while (limb != 0);
4326 count = (k + (un-1) * sizeof (mp_limb_t) + size - 1) / size;
4328 if (!r)
4329 r = gmp_xalloc (count * size);
4331 if (endian == 0)
4332 endian = gmp_detect_endian ();
4334 p = (unsigned char *) r;
4336 word_step = (order != endian) ? 2 * size : 0;
4338 /* Process bytes from the least significant end, so point p at the
4339 least significant word. */
4340 if (order == 1)
4342 p += size * (count - 1);
4343 word_step = - word_step;
4346 /* And at least significant byte of that word. */
4347 if (endian == 1)
4348 p += (size - 1);
4350 for (bytes = 0, i = 0, k = 0; k < count; k++, p += word_step)
4352 size_t j;
4353 for (j = 0; j < size; j++, p -= (ptrdiff_t) endian)
4355 if (bytes == 0)
4357 if (i < un)
4358 limb = u->_mp_d[i++];
4359 bytes = sizeof (mp_limb_t);
4361 *p = limb;
4362 limb >>= CHAR_BIT;
4363 bytes--;
4366 assert (i == un);
4367 assert (k == count);
4370 if (countp)
4371 *countp = count;
4373 return r;