/* mini-gmp, a minimalistic implementation of a GNU GMP subset.

   Contributed to the GNU project by Niels Möller

Copyright 1991-1997, 1999-2020 Free Software Foundation, Inc.

This file is part of the GNU MP Library.

The GNU MP Library is free software; you can redistribute it and/or modify
it under the terms of either:

  * the GNU Lesser General Public License as published by the Free
    Software Foundation; either version 3 of the License, or (at your
    option) any later version.

or

  * the GNU General Public License as published by the Free Software
    Foundation; either version 2 of the License, or (at your option) any
    later version.

or both in parallel, as here.

The GNU MP Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received copies of the GNU General Public License and the
GNU Lesser General Public License along with the GNU MP Library.  If not,
see https://www.gnu.org/licenses/.  */
/* NOTE: All functions in this file which are not declared in
   mini-gmp.h are internal, and are not intended to be compatible
   with GMP or with future versions of mini-gmp. */
/* Much of the material copied from GMP files, including: gmp-impl.h,
   longlong.h, mpn/generic/add_n.c, mpn/generic/addmul_1.c,
   mpn/generic/lshift.c, mpn/generic/mul_1.c,
   mpn/generic/mul_basecase.c, mpn/generic/rshift.c,
   mpn/generic/sbpi1_div_qr.c, mpn/generic/sub_n.c,
   mpn/generic/submul_1.c. */
#include <assert.h>
#include <ctype.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "mini-gmp.h"

#if !defined(MINI_GMP_DONT_USE_FLOAT_H)
#include <float.h>
#endif
#define GMP_LIMB_BITS (sizeof(mp_limb_t) * CHAR_BIT)

#define GMP_LIMB_MAX ((mp_limb_t) ~ (mp_limb_t) 0)
#define GMP_LIMB_HIGHBIT ((mp_limb_t) 1 << (GMP_LIMB_BITS - 1))

#define GMP_HLIMB_BIT ((mp_limb_t) 1 << (GMP_LIMB_BITS / 2))
#define GMP_LLIMB_MASK (GMP_HLIMB_BIT - 1)

#define GMP_ULONG_BITS (sizeof(unsigned long) * CHAR_BIT)
#define GMP_ULONG_HIGHBIT ((unsigned long) 1 << (GMP_ULONG_BITS - 1))

#define GMP_ABS(x) ((x) >= 0 ? (x) : -(x))
#define GMP_NEG_CAST(T,x) (-((T)((x) + 1) - 1))

#define GMP_MIN(a, b) ((a) < (b) ? (a) : (b))
#define GMP_MAX(a, b) ((a) > (b) ? (a) : (b))

#define GMP_CMP(a,b) (((a) > (b)) - ((a) < (b)))
#if defined(DBL_MANT_DIG) && FLT_RADIX == 2
#define GMP_DBL_MANT_BITS DBL_MANT_DIG
#else
#define GMP_DBL_MANT_BITS (53)
#endif
/* Return non-zero if xp,xsize and yp,ysize overlap.
   If xp+xsize<=yp there's no overlap, or if yp+ysize<=xp there's no
   overlap.  If both these are false, there's an overlap. */
#define GMP_MPN_OVERLAP_P(xp, xsize, yp, ysize) \
  ((xp) + (xsize) > (yp) && (yp) + (ysize) > (xp))
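/* Illustrative note (not part of the original mini-gmp sources): with
   mp_limb_t a[6], the ranges (a, 3) and (a + 3, 3) do not overlap, since
   a + 3 > a + 3 is false; the ranges (a, 4) and (a + 3, 3) do overlap,
   since a + 4 > a + 3 and a + 3 + 3 > a. */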
#define gmp_assert_nocarry(x) do { \
    mp_limb_t __cy = (x); \
    assert (__cy == 0); \
    (void) (__cy); \
  } while (0)
#define gmp_clz(count, x) do { \
    mp_limb_t __clz_x = (x); \
    unsigned __clz_c = 0; \
    int LOCAL_SHIFT_BITS = 8; \
    if (GMP_LIMB_BITS > LOCAL_SHIFT_BITS) \
      for (; \
	   (__clz_x & ((mp_limb_t) 0xff << (GMP_LIMB_BITS - 8))) == 0; \
	   __clz_c += 8) \
	{ __clz_x <<= LOCAL_SHIFT_BITS; } \
    for (; (__clz_x & GMP_LIMB_HIGHBIT) == 0; __clz_c++) \
      __clz_x <<= 1; \
    (count) = __clz_c; \
  } while (0)
#define gmp_ctz(count, x) do { \
    mp_limb_t __ctz_x = (x); \
    unsigned __ctz_c = 0; \
    gmp_clz (__ctz_c, __ctz_x & - __ctz_x); \
    (count) = GMP_LIMB_BITS - 1 - __ctz_c; \
  } while (0)
#define gmp_add_ssaaaa(sh, sl, ah, al, bh, bl) \
  do { \
    mp_limb_t __x; \
    __x = (al) + (bl); \
    (sh) = (ah) + (bh) + (__x < (al)); \
    (sl) = __x; \
  } while (0)
#define gmp_sub_ddmmss(sh, sl, ah, al, bh, bl) \
  do { \
    mp_limb_t __x; \
    __x = (al) - (bl); \
    (sh) = (ah) - (bh) - ((al) < (bl)); \
    (sl) = __x; \
  } while (0)
#define gmp_umul_ppmm(w1, w0, u, v) \
  do { \
    int LOCAL_GMP_LIMB_BITS = GMP_LIMB_BITS; \
    if (sizeof(unsigned int) * CHAR_BIT >= 2 * GMP_LIMB_BITS) \
      { \
	unsigned int __ww = (unsigned int) (u) * (v); \
	w0 = (mp_limb_t) __ww; \
	w1 = (mp_limb_t) (__ww >> LOCAL_GMP_LIMB_BITS); \
      } \
    else if (GMP_ULONG_BITS >= 2 * GMP_LIMB_BITS) \
      { \
	unsigned long int __ww = (unsigned long int) (u) * (v); \
	w0 = (mp_limb_t) __ww; \
	w1 = (mp_limb_t) (__ww >> LOCAL_GMP_LIMB_BITS); \
      } \
    else { \
      mp_limb_t __x0, __x1, __x2, __x3; \
      unsigned __ul, __vl, __uh, __vh; \
      mp_limb_t __u = (u), __v = (v); \
      assert (sizeof (unsigned) * 2 >= sizeof (mp_limb_t)); \
      \
      __ul = __u & GMP_LLIMB_MASK; \
      __uh = __u >> (GMP_LIMB_BITS / 2); \
      __vl = __v & GMP_LLIMB_MASK; \
      __vh = __v >> (GMP_LIMB_BITS / 2); \
      \
      __x0 = (mp_limb_t) __ul * __vl; \
      __x1 = (mp_limb_t) __ul * __vh; \
      __x2 = (mp_limb_t) __uh * __vl; \
      __x3 = (mp_limb_t) __uh * __vh; \
      \
      __x1 += __x0 >> (GMP_LIMB_BITS / 2); /* this can't give carry */ \
      __x1 += __x2;             /* but this indeed can */ \
      if (__x1 < __x2)          /* did we get it? */ \
	__x3 += GMP_HLIMB_BIT;  /* yes, add it in the proper pos. */ \
      \
      (w1) = __x3 + (__x1 >> (GMP_LIMB_BITS / 2)); \
      (w0) = (__x1 << (GMP_LIMB_BITS / 2)) + (__x0 & GMP_LLIMB_MASK); \
    } \
  } while (0)
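/* Illustrative note (not part of the original mini-gmp sources): a worked
   instance of the half-limb schoolbook split used above, written with 8-bit
   limbs for readability.  For u = 0xAB, v = 0xCD we get __uh = 0xA,
   __ul = 0xB, __vh = 0xC, __vl = 0xD, so __x0 = 0x8F, __x1 = 0x84,
   __x2 = 0x82, __x3 = 0x78.  Folding: __x1 += 0x8 gives 0x8C; __x1 += __x2
   wraps to 0x0E and the detected carry bumps __x3 to 0x88.  The result is
   w1 = 0x88, w0 = 0xEF, and indeed 0xAB * 0xCD = 0x88EF. */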
#define gmp_udiv_qrnnd_preinv(q, r, nh, nl, d, di) \
  do { \
    mp_limb_t _qh, _ql, _r, _mask; \
    gmp_umul_ppmm (_qh, _ql, (nh), (di)); \
    gmp_add_ssaaaa (_qh, _ql, _qh, _ql, (nh) + 1, (nl)); \
    _r = (nl) - _qh * (d); \
    _mask = -(mp_limb_t) (_r > _ql); /* both > and >= are OK */ \
    _qh += _mask; \
    _r += _mask & (d); \
    if (_r >= (d)) \
      { \
	_r -= (d); \
	_qh++; \
      } \
    \
    (r) = _r; \
    (q) = _qh; \
  } while (0)
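/* Note (added for clarity, not part of the original sources): the macro
   above assumes d has its high bit set and di = mpn_invert_limb (d); it
   then yields q = floor ((nh*B + nl) / d) and r = nh*B + nl - q*d, where
   B = 2^GMP_LIMB_BITS.  This is the 2/1 division-by-invariant-integers
   technique of Möller and Granlund. */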
#define gmp_udiv_qr_3by2(q, r1, r0, n2, n1, n0, d1, d0, dinv) \
  do { \
    mp_limb_t _q0, _t1, _t0, _mask; \
    gmp_umul_ppmm ((q), _q0, (n2), (dinv)); \
    gmp_add_ssaaaa ((q), _q0, (q), _q0, (n2), (n1)); \
    \
    /* Compute the two most significant limbs of n - q'd */ \
    (r1) = (n1) - (d1) * (q); \
    gmp_sub_ddmmss ((r1), (r0), (r1), (n0), (d1), (d0)); \
    gmp_umul_ppmm (_t1, _t0, (d0), (q)); \
    gmp_sub_ddmmss ((r1), (r0), (r1), (r0), _t1, _t0); \
    (q)++; \
    \
    /* Conditionally adjust q and the remainders */ \
    _mask = - (mp_limb_t) ((r1) >= _q0); \
    (q) += _mask; \
    gmp_add_ssaaaa ((r1), (r0), (r1), (r0), _mask & (d1), _mask & (d0)); \
    if ((r1) >= (d1)) \
      { \
	if ((r1) > (d1) || (r0) >= (d0)) \
	  { \
	    (q)++; \
	    gmp_sub_ddmmss ((r1), (r0), (r1), (r0), (d1), (d0)); \
	  } \
      } \
  } while (0)
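/* Note (added for clarity, not part of the original sources): this macro
   expects a divisor <d1, d0> normalized so that the high bit of d1 is set,
   and dinv equal to the 3/2 inverse computed by mpn_invert_3by2 (d1, d0)
   below; it then produces the quotient limb q and the two-limb remainder
   <r1, r0> of <n2, n1, n0> divided by <d1, d0>. */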
#define MP_LIMB_T_SWAP(x, y) \
  do { \
    mp_limb_t __mp_limb_t_swap__tmp = (x); \
    (x) = (y); \
    (y) = __mp_limb_t_swap__tmp; \
  } while (0)
#define MP_SIZE_T_SWAP(x, y) \
  do { \
    mp_size_t __mp_size_t_swap__tmp = (x); \
    (x) = (y); \
    (y) = __mp_size_t_swap__tmp; \
  } while (0)
#define MP_BITCNT_T_SWAP(x,y) \
  do { \
    mp_bitcnt_t __mp_bitcnt_t_swap__tmp = (x); \
    (x) = (y); \
    (y) = __mp_bitcnt_t_swap__tmp; \
  } while (0)
#define MP_PTR_SWAP(x, y) \
  do { \
    mp_ptr __mp_ptr_swap__tmp = (x); \
    (x) = (y); \
    (y) = __mp_ptr_swap__tmp; \
  } while (0)
#define MP_SRCPTR_SWAP(x, y) \
  do { \
    mp_srcptr __mp_srcptr_swap__tmp = (x); \
    (x) = (y); \
    (y) = __mp_srcptr_swap__tmp; \
  } while (0)

#define MPN_PTR_SWAP(xp,xs, yp,ys) \
  do { \
    MP_PTR_SWAP (xp, yp); \
    MP_SIZE_T_SWAP (xs, ys); \
  } while (0)
#define MPN_SRCPTR_SWAP(xp,xs, yp,ys) \
  do { \
    MP_SRCPTR_SWAP (xp, yp); \
    MP_SIZE_T_SWAP (xs, ys); \
  } while (0)

#define MPZ_PTR_SWAP(x, y) \
  do { \
    mpz_ptr __mpz_ptr_swap__tmp = (x); \
    (x) = (y); \
    (y) = __mpz_ptr_swap__tmp; \
  } while (0)
#define MPZ_SRCPTR_SWAP(x, y) \
  do { \
    mpz_srcptr __mpz_srcptr_swap__tmp = (x); \
    (x) = (y); \
    (y) = __mpz_srcptr_swap__tmp; \
  } while (0)
const int mp_bits_per_limb = GMP_LIMB_BITS;
/* Memory allocation and other helper functions. */
static void
gmp_die (const char *msg)
{
  fprintf (stderr, "%s\n", msg);
  abort ();
}

static void *
gmp_default_alloc (size_t size)
{
  void *p;

  assert (size > 0);

  p = malloc (size);
  if (!p)
    gmp_die("gmp_default_alloc: Virtual memory exhausted.");

  return p;
}

static void *
gmp_default_realloc (void *old, size_t unused_old_size, size_t new_size)
{
  void * p;

  p = realloc (old, new_size);

  if (!p)
    gmp_die("gmp_default_realloc: Virtual memory exhausted.");

  return p;
}

static void
gmp_default_free (void *p, size_t unused_size)
{
  free (p);
}
static void * (*gmp_allocate_func) (size_t) = gmp_default_alloc;
static void * (*gmp_reallocate_func) (void *, size_t, size_t) = gmp_default_realloc;
static void (*gmp_free_func) (void *, size_t) = gmp_default_free;
void
mp_get_memory_functions (void *(**alloc_func) (size_t),
			 void *(**realloc_func) (void *, size_t, size_t),
			 void (**free_func) (void *, size_t))
{
  if (alloc_func)
    *alloc_func = gmp_allocate_func;

  if (realloc_func)
    *realloc_func = gmp_reallocate_func;

  if (free_func)
    *free_func = gmp_free_func;
}
void
mp_set_memory_functions (void *(*alloc_func) (size_t),
			 void *(*realloc_func) (void *, size_t, size_t),
			 void (*free_func) (void *, size_t))
{
  if (!alloc_func)
    alloc_func = gmp_default_alloc;
  if (!realloc_func)
    realloc_func = gmp_default_realloc;
  if (!free_func)
    free_func = gmp_default_free;

  gmp_allocate_func = alloc_func;
  gmp_reallocate_func = realloc_func;
  gmp_free_func = free_func;
}
#define gmp_alloc(size) ((*gmp_allocate_func)((size)))
#define gmp_free(p, size) ((*gmp_free_func) ((p), (size)))
#define gmp_realloc(ptr, old_size, size) ((*gmp_reallocate_func)(ptr, old_size, size))
static mp_ptr
gmp_alloc_limbs (mp_size_t size)
{
  return (mp_ptr) gmp_alloc (size * sizeof (mp_limb_t));
}

static mp_ptr
gmp_realloc_limbs (mp_ptr old, mp_size_t old_size, mp_size_t size)
{
  return (mp_ptr) gmp_realloc (old, old_size * sizeof (mp_limb_t), size * sizeof (mp_limb_t));
}

static void
gmp_free_limbs (mp_ptr old, mp_size_t size)
{
  gmp_free (old, size * sizeof (mp_limb_t));
}
void
mpn_copyi (mp_ptr d, mp_srcptr s, mp_size_t n)
{
  mp_size_t i;
  for (i = 0; i < n; i++)
    d[i] = s[i];
}

void
mpn_copyd (mp_ptr d, mp_srcptr s, mp_size_t n)
{
  while (--n >= 0)
    d[n] = s[n];
}
int
mpn_cmp (mp_srcptr ap, mp_srcptr bp, mp_size_t n)
{
  while (--n >= 0)
    {
      if (ap[n] != bp[n])
	return ap[n] > bp[n] ? 1 : -1;
    }
  return 0;
}
static int
mpn_cmp4 (mp_srcptr ap, mp_size_t an, mp_srcptr bp, mp_size_t bn)
{
  if (an != bn)
    return an < bn ? -1 : 1;
  else
    return mpn_cmp (ap, bp, an);
}
static mp_size_t
mpn_normalized_size (mp_srcptr xp, mp_size_t n)
{
  while (n > 0 && xp[n-1] == 0)
    --n;
  return n;
}
int
mpn_zero_p(mp_srcptr rp, mp_size_t n)
{
  return mpn_normalized_size (rp, n) == 0;
}

void
mpn_zero (mp_ptr rp, mp_size_t n)
{
  while (--n >= 0)
    rp[n] = 0;
}
437 mpn_add_1 (mp_ptr rp
, mp_srcptr ap
, mp_size_t n
, mp_limb_t b
)
445 mp_limb_t r
= ap
[i
] + b
;
456 mpn_add_n (mp_ptr rp
, mp_srcptr ap
, mp_srcptr bp
, mp_size_t n
)
461 for (i
= 0, cy
= 0; i
< n
; i
++)
464 a
= ap
[i
]; b
= bp
[i
];
475 mpn_add (mp_ptr rp
, mp_srcptr ap
, mp_size_t an
, mp_srcptr bp
, mp_size_t bn
)
481 cy
= mpn_add_n (rp
, ap
, bp
, bn
);
483 cy
= mpn_add_1 (rp
+ bn
, ap
+ bn
, an
- bn
, cy
);
488 mpn_sub_1 (mp_ptr rp
, mp_srcptr ap
, mp_size_t n
, mp_limb_t b
)
499 mp_limb_t cy
= a
< b
;
509 mpn_sub_n (mp_ptr rp
, mp_srcptr ap
, mp_srcptr bp
, mp_size_t n
)
514 for (i
= 0, cy
= 0; i
< n
; i
++)
517 a
= ap
[i
]; b
= bp
[i
];
527 mpn_sub (mp_ptr rp
, mp_srcptr ap
, mp_size_t an
, mp_srcptr bp
, mp_size_t bn
)
533 cy
= mpn_sub_n (rp
, ap
, bp
, bn
);
535 cy
= mpn_sub_1 (rp
+ bn
, ap
+ bn
, an
- bn
, cy
);
540 mpn_mul_1 (mp_ptr rp
, mp_srcptr up
, mp_size_t n
, mp_limb_t vl
)
542 mp_limb_t ul
, cl
, hpl
, lpl
;
550 gmp_umul_ppmm (hpl
, lpl
, ul
, vl
);
553 cl
= (lpl
< cl
) + hpl
;
563 mpn_addmul_1 (mp_ptr rp
, mp_srcptr up
, mp_size_t n
, mp_limb_t vl
)
565 mp_limb_t ul
, cl
, hpl
, lpl
, rl
;
573 gmp_umul_ppmm (hpl
, lpl
, ul
, vl
);
576 cl
= (lpl
< cl
) + hpl
;
589 mpn_submul_1 (mp_ptr rp
, mp_srcptr up
, mp_size_t n
, mp_limb_t vl
)
591 mp_limb_t ul
, cl
, hpl
, lpl
, rl
;
599 gmp_umul_ppmm (hpl
, lpl
, ul
, vl
);
602 cl
= (lpl
< cl
) + hpl
;
615 mpn_mul (mp_ptr rp
, mp_srcptr up
, mp_size_t un
, mp_srcptr vp
, mp_size_t vn
)
619 assert (!GMP_MPN_OVERLAP_P(rp
, un
+ vn
, up
, un
));
620 assert (!GMP_MPN_OVERLAP_P(rp
, un
+ vn
, vp
, vn
));
622 /* We first multiply by the low order limb. This result can be
623 stored, not added, to rp. We also avoid a loop for zeroing this
626 rp
[un
] = mpn_mul_1 (rp
, up
, un
, vp
[0]);
628 /* Now accumulate the product of up[] and the next higher limb from
634 rp
[un
] = mpn_addmul_1 (rp
, up
, un
, vp
[0]);
640 mpn_mul_n (mp_ptr rp
, mp_srcptr ap
, mp_srcptr bp
, mp_size_t n
)
642 mpn_mul (rp
, ap
, n
, bp
, n
);
646 mpn_sqr (mp_ptr rp
, mp_srcptr ap
, mp_size_t n
)
648 mpn_mul (rp
, ap
, n
, ap
, n
);
652 mpn_lshift (mp_ptr rp
, mp_srcptr up
, mp_size_t n
, unsigned int cnt
)
654 mp_limb_t high_limb
, low_limb
;
660 assert (cnt
< GMP_LIMB_BITS
);
665 tnc
= GMP_LIMB_BITS
- cnt
;
667 retval
= low_limb
>> tnc
;
668 high_limb
= (low_limb
<< cnt
);
673 *--rp
= high_limb
| (low_limb
>> tnc
);
674 high_limb
= (low_limb
<< cnt
);
682 mpn_rshift (mp_ptr rp
, mp_srcptr up
, mp_size_t n
, unsigned int cnt
)
684 mp_limb_t high_limb
, low_limb
;
690 assert (cnt
< GMP_LIMB_BITS
);
692 tnc
= GMP_LIMB_BITS
- cnt
;
694 retval
= (high_limb
<< tnc
);
695 low_limb
= high_limb
>> cnt
;
700 *rp
++ = low_limb
| (high_limb
<< tnc
);
701 low_limb
= high_limb
>> cnt
;
709 mpn_common_scan (mp_limb_t limb
, mp_size_t i
, mp_srcptr up
, mp_size_t un
,
714 assert (ux
== 0 || ux
== GMP_LIMB_MAX
);
715 assert (0 <= i
&& i
<= un
);
721 return (ux
== 0 ? ~(mp_bitcnt_t
) 0 : un
* GMP_LIMB_BITS
);
725 return (mp_bitcnt_t
) i
* GMP_LIMB_BITS
+ cnt
;
729 mpn_scan1 (mp_srcptr ptr
, mp_bitcnt_t bit
)
732 i
= bit
/ GMP_LIMB_BITS
;
734 return mpn_common_scan ( ptr
[i
] & (GMP_LIMB_MAX
<< (bit
% GMP_LIMB_BITS
)),
739 mpn_scan0 (mp_srcptr ptr
, mp_bitcnt_t bit
)
742 i
= bit
/ GMP_LIMB_BITS
;
744 return mpn_common_scan (~ptr
[i
] & (GMP_LIMB_MAX
<< (bit
% GMP_LIMB_BITS
)),
745 i
, ptr
, i
, GMP_LIMB_MAX
);
749 mpn_com (mp_ptr rp
, mp_srcptr up
, mp_size_t n
)
756 mpn_neg (mp_ptr rp
, mp_srcptr up
, mp_size_t n
)
766 mpn_com (++rp
, ++up
, --n
);
/* MPN division interface. */

/* The 3/2 inverse is defined as

     m = floor( (B^3-1) / (B u1 + u0)) - B  */
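/* Note (added for clarity, not part of the original sources): here B is the
   limb base 2^GMP_LIMB_BITS and <u1, u0> is the normalized two-limb divisor,
   so m is the extra-precision reciprocal consumed by gmp_udiv_qr_3by2 above.
   In the special case u0 = 0 this reduces to the familiar 2/1 inverse
   m = floor ((B^2 - 1) / u1) - B used for single-limb divisors. */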
778 mpn_invert_3by2 (mp_limb_t u1
, mp_limb_t u0
)
786 /* For notation, let b denote the half-limb base, so that B = b^2.
787 Split u1 = b uh + ul. */
788 ul
= u1
& GMP_LLIMB_MASK
;
789 uh
= u1
>> (GMP_LIMB_BITS
/ 2);
791 /* Approximation of the high half of quotient. Differs from the 2/1
792 inverse of the half limb uh, since we have already subtracted
794 qh
= (u1
^ GMP_LIMB_MAX
) / uh
;
796 /* Adjust to get a half-limb 3/2 inverse, i.e., we want
798 qh' = floor( (b^3 - 1) / u) - b = floor ((b^3 - b u - 1) / u
799 = floor( (b (~u) + b-1) / u),
803 r = b (~u) + b-1 - qh (b uh + ul)
804 = b (~u - qh uh) + b-1 - qh ul
806 Subtraction of qh ul may underflow, which implies adjustments.
807 But by normalization, 2 u >= B > qh ul, so we need to adjust by
811 r
= ((~u1
- (mp_limb_t
) qh
* uh
) << (GMP_LIMB_BITS
/ 2)) | GMP_LLIMB_MASK
;
813 p
= (mp_limb_t
) qh
* ul
;
814 /* Adjustment steps taken from udiv_qrnnd_c */
819 if (r
>= u1
) /* i.e. we didn't get carry when adding to r */
828 /* Low half of the quotient is
830 ql = floor ( (b r + b-1) / u1).
832 This is a 3/2 division (on half-limbs), for which qh is a
835 p
= (r
>> (GMP_LIMB_BITS
/ 2)) * qh
+ r
;
836 /* Unlike full-limb 3/2, we can add 1 without overflow. For this to
837 work, it is essential that ql is a full mp_limb_t. */
838 ql
= (p
>> (GMP_LIMB_BITS
/ 2)) + 1;
840 /* By the 3/2 trick, we don't need the high half limb. */
841 r
= (r
<< (GMP_LIMB_BITS
/ 2)) + GMP_LLIMB_MASK
- ql
* u1
;
843 if (r
>= (GMP_LIMB_MAX
& (p
<< (GMP_LIMB_BITS
/ 2))))
848 m
= ((mp_limb_t
) qh
<< (GMP_LIMB_BITS
/ 2)) + ql
;
856 /* Now m is the 2/1 inverse of u1. If u0 > 0, adjust it to become a
873 gmp_umul_ppmm (th
, tl
, u0
, m
);
878 m
-= ((r
> u1
) | ((r
== u1
) & (tl
> u0
)));
885 struct gmp_div_inverse
887 /* Normalization shift count. */
889 /* Normalized divisor (d0 unused for mpn_div_qr_1) */
891 /* Inverse, for 2/1 or 3/2. */
896 mpn_div_qr_1_invert (struct gmp_div_inverse
*inv
, mp_limb_t d
)
903 inv
->d1
= d
<< shift
;
904 inv
->di
= mpn_invert_limb (inv
->d1
);
908 mpn_div_qr_2_invert (struct gmp_div_inverse
*inv
,
909 mp_limb_t d1
, mp_limb_t d0
)
918 d1
= (d1
<< shift
) | (d0
>> (GMP_LIMB_BITS
- shift
));
923 inv
->di
= mpn_invert_3by2 (d1
, d0
);
927 mpn_div_qr_invert (struct gmp_div_inverse
*inv
,
928 mp_srcptr dp
, mp_size_t dn
)
933 mpn_div_qr_1_invert (inv
, dp
[0]);
935 mpn_div_qr_2_invert (inv
, dp
[1], dp
[0]);
948 d1
= (d1
<< shift
) | (d0
>> (GMP_LIMB_BITS
- shift
));
949 d0
= (d0
<< shift
) | (dp
[dn
-3] >> (GMP_LIMB_BITS
- shift
));
953 inv
->di
= mpn_invert_3by2 (d1
, d0
);
957 /* Not matching current public gmp interface, rather corresponding to
958 the sbpi1_div_* functions. */
960 mpn_div_qr_1_preinv (mp_ptr qp
, mp_srcptr np
, mp_size_t nn
,
961 const struct gmp_div_inverse
*inv
)
970 /* Shift, reusing qp area if possible. In-place shift if qp == np. */
975 tp
= gmp_alloc_limbs (tn
);
977 r
= mpn_lshift (tp
, np
, nn
, inv
->shift
);
989 gmp_udiv_qrnnd_preinv (q
, r
, r
, np
[nn
], d
, di
);
994 gmp_free_limbs (tp
, tn
);
996 return r
>> inv
->shift
;
1000 mpn_div_qr_2_preinv (mp_ptr qp
, mp_ptr np
, mp_size_t nn
,
1001 const struct gmp_div_inverse
*inv
)
1005 mp_limb_t d1
, d0
, di
, r1
, r0
;
1014 r1
= mpn_lshift (np
, np
, nn
, shift
);
1025 gmp_udiv_qr_3by2 (q
, r1
, r0
, r1
, r0
, n0
, d1
, d0
, di
);
1034 assert ((r0
& (GMP_LIMB_MAX
>> (GMP_LIMB_BITS
- shift
))) == 0);
1035 r0
= (r0
>> shift
) | (r1
<< (GMP_LIMB_BITS
- shift
));
1044 mpn_div_qr_pi1 (mp_ptr qp
,
1045 mp_ptr np
, mp_size_t nn
, mp_limb_t n1
,
1046 mp_srcptr dp
, mp_size_t dn
,
1061 assert ((d1
& GMP_LIMB_HIGHBIT
) != 0);
1062 /* Iteration variable is the index of the q limb.
1064 * We divide <n1, np[dn-1+i], np[dn-2+i], np[dn-3+i],..., np[i]>
1065 * by <d1, d0, dp[dn-3], ..., dp[0] >
1071 mp_limb_t n0
= np
[dn
-1+i
];
1073 if (n1
== d1
&& n0
== d0
)
1076 mpn_submul_1 (np
+i
, dp
, dn
, q
);
1077 n1
= np
[dn
-1+i
]; /* update n1, last loop's value will now be invalid */
1081 gmp_udiv_qr_3by2 (q
, n1
, n0
, n1
, n0
, np
[dn
-2+i
], d1
, d0
, dinv
);
1083 cy
= mpn_submul_1 (np
+ i
, dp
, dn
-2, q
);
1093 n1
+= d1
+ mpn_add_n (np
+ i
, np
+ i
, dp
, dn
- 1);
1107 mpn_div_qr_preinv (mp_ptr qp
, mp_ptr np
, mp_size_t nn
,
1108 mp_srcptr dp
, mp_size_t dn
,
1109 const struct gmp_div_inverse
*inv
)
1115 np
[0] = mpn_div_qr_1_preinv (qp
, np
, nn
, inv
);
1117 mpn_div_qr_2_preinv (qp
, np
, nn
, inv
);
1123 assert (inv
->d1
== dp
[dn
-1]);
1124 assert (inv
->d0
== dp
[dn
-2]);
1125 assert ((inv
->d1
& GMP_LIMB_HIGHBIT
) != 0);
1129 nh
= mpn_lshift (np
, np
, nn
, shift
);
1133 mpn_div_qr_pi1 (qp
, np
, nn
, nh
, dp
, dn
, inv
->di
);
1136 gmp_assert_nocarry (mpn_rshift (np
, np
, dn
, shift
));
1141 mpn_div_qr (mp_ptr qp
, mp_ptr np
, mp_size_t nn
, mp_srcptr dp
, mp_size_t dn
)
1143 struct gmp_div_inverse inv
;
1149 mpn_div_qr_invert (&inv
, dp
, dn
);
1150 if (dn
> 2 && inv
.shift
> 0)
1152 tp
= gmp_alloc_limbs (dn
);
1153 gmp_assert_nocarry (mpn_lshift (tp
, dp
, dn
, inv
.shift
));
1156 mpn_div_qr_preinv (qp
, np
, nn
, dp
, dn
, &inv
);
1158 gmp_free_limbs (tp
, dn
);
1162 /* MPN base conversion. */
1164 mpn_base_power_of_two_p (unsigned b
)
struct mpn_base_info
{
  /* bb is the largest power of the base which fits in one limb, and
     exp is the corresponding exponent. */
  unsigned exp;
  mp_limb_t bb;
};
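/* Illustrative note (not part of the original sources): for base b = 10,
   mpn_get_base_info below yields bb = 10^19 and exp = 19 with 64-bit limbs
   (10^19 < 2^64 <= 10^20), or bb = 10^9 and exp = 9 with 32-bit limbs. */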
1189 mpn_get_base_info (struct mpn_base_info
*info
, mp_limb_t b
)
1195 m
= GMP_LIMB_MAX
/ b
;
1196 for (exp
= 1, p
= b
; p
<= m
; exp
++)
1204 mpn_limb_size_in_base_2 (mp_limb_t u
)
1210 return GMP_LIMB_BITS
- shift
;
1214 mpn_get_str_bits (unsigned char *sp
, unsigned bits
, mp_srcptr up
, mp_size_t un
)
1221 sn
= ((un
- 1) * GMP_LIMB_BITS
+ mpn_limb_size_in_base_2 (up
[un
-1])
1224 mask
= (1U << bits
) - 1;
1226 for (i
= 0, j
= sn
, shift
= 0; j
-- > 0;)
1228 unsigned char digit
= up
[i
] >> shift
;
1232 if (shift
>= GMP_LIMB_BITS
&& ++i
< un
)
1234 shift
-= GMP_LIMB_BITS
;
1235 digit
|= up
[i
] << (bits
- shift
);
1237 sp
[j
] = digit
& mask
;
1242 /* We generate digits from the least significant end, and reverse at
1245 mpn_limb_get_str (unsigned char *sp
, mp_limb_t w
,
1246 const struct gmp_div_inverse
*binv
)
1249 for (i
= 0; w
> 0; i
++)
1253 h
= w
>> (GMP_LIMB_BITS
- binv
->shift
);
1254 l
= w
<< binv
->shift
;
1256 gmp_udiv_qrnnd_preinv (w
, r
, h
, l
, binv
->d1
, binv
->di
);
1257 assert ((r
& (GMP_LIMB_MAX
>> (GMP_LIMB_BITS
- binv
->shift
))) == 0);
1266 mpn_get_str_other (unsigned char *sp
,
1267 int base
, const struct mpn_base_info
*info
,
1268 mp_ptr up
, mp_size_t un
)
1270 struct gmp_div_inverse binv
;
1274 mpn_div_qr_1_invert (&binv
, base
);
1280 struct gmp_div_inverse bbinv
;
1281 mpn_div_qr_1_invert (&bbinv
, info
->bb
);
1287 w
= mpn_div_qr_1_preinv (up
, up
, un
, &bbinv
);
1288 un
-= (up
[un
-1] == 0);
1289 done
= mpn_limb_get_str (sp
+ sn
, w
, &binv
);
1291 for (sn
+= done
; done
< info
->exp
; done
++)
1296 sn
+= mpn_limb_get_str (sp
+ sn
, up
[0], &binv
);
1299 for (i
= 0; 2*i
+ 1 < sn
; i
++)
1301 unsigned char t
= sp
[i
];
1302 sp
[i
] = sp
[sn
- i
- 1];
1310 mpn_get_str (unsigned char *sp
, int base
, mp_ptr up
, mp_size_t un
)
1315 assert (up
[un
-1] > 0);
1317 bits
= mpn_base_power_of_two_p (base
);
1319 return mpn_get_str_bits (sp
, bits
, up
, un
);
1322 struct mpn_base_info info
;
1324 mpn_get_base_info (&info
, base
);
1325 return mpn_get_str_other (sp
, base
, &info
, up
, un
);
1330 mpn_set_str_bits (mp_ptr rp
, const unsigned char *sp
, size_t sn
,
1337 for (limb
= 0, rn
= 0, shift
= 0; sn
-- > 0; )
1339 limb
|= (mp_limb_t
) sp
[sn
] << shift
;
1341 if (shift
>= GMP_LIMB_BITS
)
1343 shift
-= GMP_LIMB_BITS
;
1345 /* Next line is correct also if shift == 0,
1346 bits == 8, and mp_limb_t == unsigned char. */
1347 limb
= (unsigned int) sp
[sn
] >> (bits
- shift
);
1353 rn
= mpn_normalized_size (rp
, rn
);
1357 /* Result is usually normalized, except for all-zero input, in which
1358 case a single zero limb is written at *RP, and 1 is returned. */
1360 mpn_set_str_other (mp_ptr rp
, const unsigned char *sp
, size_t sn
,
1361 mp_limb_t b
, const struct mpn_base_info
*info
)
1370 k
= 1 + (sn
- 1) % info
->exp
;
1375 w
= w
* b
+ sp
[j
++];
1379 for (rn
= 1; j
< sn
;)
1384 for (k
= 1; k
< info
->exp
; k
++)
1385 w
= w
* b
+ sp
[j
++];
1387 cy
= mpn_mul_1 (rp
, rp
, rn
, info
->bb
);
1388 cy
+= mpn_add_1 (rp
, rp
, rn
, w
);
1398 mpn_set_str (mp_ptr rp
, const unsigned char *sp
, size_t sn
, int base
)
1405 bits
= mpn_base_power_of_two_p (base
);
1407 return mpn_set_str_bits (rp
, sp
, sn
, bits
);
1410 struct mpn_base_info info
;
1412 mpn_get_base_info (&info
, base
);
1413 return mpn_set_str_other (rp
, sp
, sn
, base
, &info
);
1422 static const mp_limb_t dummy_limb
= GMP_LIMB_MAX
& 0xc1a0;
1426 r
->_mp_d
= (mp_ptr
) &dummy_limb
;
1429 /* The utility of this function is a bit limited, since many functions
1430 assigns the result variable using mpz_swap. */
1432 mpz_init2 (mpz_t r
, mp_bitcnt_t bits
)
1436 bits
-= (bits
!= 0); /* Round down, except if 0 */
1437 rn
= 1 + bits
/ GMP_LIMB_BITS
;
1441 r
->_mp_d
= gmp_alloc_limbs (rn
);
1448 gmp_free_limbs (r
->_mp_d
, r
->_mp_alloc
);
1452 mpz_realloc (mpz_t r
, mp_size_t size
)
1454 size
= GMP_MAX (size
, 1);
1457 r
->_mp_d
= gmp_realloc_limbs (r
->_mp_d
, r
->_mp_alloc
, size
);
1459 r
->_mp_d
= gmp_alloc_limbs (size
);
1460 r
->_mp_alloc
= size
;
1462 if (GMP_ABS (r
->_mp_size
) > size
)
/* Realloc for an mpz_t WHAT if it has less than NEEDED limbs. */
#define MPZ_REALLOC(z,n) ((n) > (z)->_mp_alloc \
			  ? mpz_realloc(z,n) \
			  : (z)->_mp_d)
1473 /* MPZ assignment and basic conversions. */
1475 mpz_set_si (mpz_t r
, signed long int x
)
1480 if (GMP_LIMB_BITS
< GMP_ULONG_BITS
)
1482 mpz_set_ui (r
, GMP_NEG_CAST (unsigned long int, x
));
1488 MPZ_REALLOC (r
, 1)[0] = GMP_NEG_CAST (unsigned long int, x
);
1493 mpz_set_ui (mpz_t r
, unsigned long int x
)
1498 MPZ_REALLOC (r
, 1)[0] = x
;
1499 if (GMP_LIMB_BITS
< GMP_ULONG_BITS
)
1501 int LOCAL_GMP_LIMB_BITS
= GMP_LIMB_BITS
;
1502 while (x
>>= LOCAL_GMP_LIMB_BITS
)
1505 MPZ_REALLOC (r
, r
->_mp_size
)[r
->_mp_size
- 1] = x
;
1514 mpz_set (mpz_t r
, const mpz_t x
)
1516 /* Allow the NOP r == x */
1522 n
= GMP_ABS (x
->_mp_size
);
1523 rp
= MPZ_REALLOC (r
, n
);
1525 mpn_copyi (rp
, x
->_mp_d
, n
);
1526 r
->_mp_size
= x
->_mp_size
;
1531 mpz_init_set_si (mpz_t r
, signed long int x
)
1538 mpz_init_set_ui (mpz_t r
, unsigned long int x
)
1545 mpz_init_set (mpz_t r
, const mpz_t x
)
1552 mpz_fits_slong_p (const mpz_t u
)
1554 return mpz_cmp_si (u
, LONG_MAX
) <= 0 && mpz_cmp_si (u
, LONG_MIN
) >= 0;
1558 mpn_absfits_ulong_p (mp_srcptr up
, mp_size_t un
)
1560 int ulongsize
= GMP_ULONG_BITS
/ GMP_LIMB_BITS
;
1561 mp_limb_t ulongrem
= 0;
1563 if (GMP_ULONG_BITS
% GMP_LIMB_BITS
!= 0)
1564 ulongrem
= (mp_limb_t
) (ULONG_MAX
>> GMP_LIMB_BITS
* ulongsize
) + 1;
1566 return un
<= ulongsize
|| (up
[ulongsize
] < ulongrem
&& un
== ulongsize
+ 1);
1570 mpz_fits_ulong_p (const mpz_t u
)
1572 mp_size_t us
= u
->_mp_size
;
1574 return us
>= 0 && mpn_absfits_ulong_p (u
->_mp_d
, us
);
1578 mpz_fits_sint_p (const mpz_t u
)
1580 return mpz_cmp_si (u
, INT_MAX
) <= 0 && mpz_cmp_si (u
, INT_MIN
) >= 0;
1584 mpz_fits_uint_p (const mpz_t u
)
1586 return u
->_mp_size
>= 0 && mpz_cmpabs_ui (u
, UINT_MAX
) <= 0;
1590 mpz_fits_sshort_p (const mpz_t u
)
1592 return mpz_cmp_si (u
, SHRT_MAX
) <= 0 && mpz_cmp_si (u
, SHRT_MIN
) >= 0;
1596 mpz_fits_ushort_p (const mpz_t u
)
1598 return u
->_mp_size
>= 0 && mpz_cmpabs_ui (u
, USHRT_MAX
) <= 0;
1602 mpz_get_si (const mpz_t u
)
1604 unsigned long r
= mpz_get_ui (u
);
1605 unsigned long c
= -LONG_MAX
- LONG_MIN
;
1607 if (u
->_mp_size
< 0)
1608 /* This expression is necessary to properly handle -LONG_MIN */
1609 return -(long) c
- (long) ((r
- c
) & LONG_MAX
);
1611 return (long) (r
& LONG_MAX
);
1615 mpz_get_ui (const mpz_t u
)
1617 if (GMP_LIMB_BITS
< GMP_ULONG_BITS
)
1619 int LOCAL_GMP_LIMB_BITS
= GMP_LIMB_BITS
;
1620 unsigned long r
= 0;
1621 mp_size_t n
= GMP_ABS (u
->_mp_size
);
1622 n
= GMP_MIN (n
, 1 + (mp_size_t
) (GMP_ULONG_BITS
- 1) / GMP_LIMB_BITS
);
1624 r
= (r
<< LOCAL_GMP_LIMB_BITS
) + u
->_mp_d
[n
];
1628 return u
->_mp_size
== 0 ? 0 : u
->_mp_d
[0];
1632 mpz_size (const mpz_t u
)
1634 return GMP_ABS (u
->_mp_size
);
1638 mpz_getlimbn (const mpz_t u
, mp_size_t n
)
1640 if (n
>= 0 && n
< GMP_ABS (u
->_mp_size
))
1647 mpz_realloc2 (mpz_t x
, mp_bitcnt_t n
)
1649 mpz_realloc (x
, 1 + (n
- (n
!= 0)) / GMP_LIMB_BITS
);
1653 mpz_limbs_read (mpz_srcptr x
)
1659 mpz_limbs_modify (mpz_t x
, mp_size_t n
)
1662 return MPZ_REALLOC (x
, n
);
1666 mpz_limbs_write (mpz_t x
, mp_size_t n
)
1668 return mpz_limbs_modify (x
, n
);
1672 mpz_limbs_finish (mpz_t x
, mp_size_t xs
)
1675 xn
= mpn_normalized_size (x
->_mp_d
, GMP_ABS (xs
));
1676 x
->_mp_size
= xs
< 0 ? -xn
: xn
;
1680 mpz_roinit_normal_n (mpz_t x
, mp_srcptr xp
, mp_size_t xs
)
1683 x
->_mp_d
= (mp_ptr
) xp
;
1689 mpz_roinit_n (mpz_t x
, mp_srcptr xp
, mp_size_t xs
)
1691 mpz_roinit_normal_n (x
, xp
, xs
);
1692 mpz_limbs_finish (x
, xs
);
1697 /* Conversions and comparison to double. */
1699 mpz_set_d (mpz_t r
, double x
)
1708 /* x != x is true when x is a NaN, and x == x * 0.5 is true when x is
1709 zero or infinity. */
1710 if (x
!= x
|| x
== x
* 0.5)
1725 B
= 4.0 * (double) (GMP_LIMB_HIGHBIT
>> 1);
1727 for (rn
= 1; x
>= B
; rn
++)
1730 rp
= MPZ_REALLOC (r
, rn
);
1746 r
->_mp_size
= sign
? - rn
: rn
;
1750 mpz_init_set_d (mpz_t r
, double x
)
1757 mpz_get_d (const mpz_t u
)
1763 double B
= 4.0 * (double) (GMP_LIMB_HIGHBIT
>> 1);
1765 un
= GMP_ABS (u
->_mp_size
);
1772 m
= m
+ GMP_DBL_MANT_BITS
- GMP_LIMB_BITS
;
1774 l
&= GMP_LIMB_MAX
<< -m
;
1776 for (x
= l
; --un
>= 0;)
1783 l
&= GMP_LIMB_MAX
<< -m
;
1788 if (u
->_mp_size
< 0)
1795 mpz_cmpabs_d (const mpz_t x
, double d
)
1808 B
= 4.0 * (double) (GMP_LIMB_HIGHBIT
>> 1);
1811 /* Scale d so it can be compared with the top limb. */
1812 for (i
= 1; i
< xn
; i
++)
1818 /* Compare floor(d) to top limb, subtract and cancel when equal. */
1819 for (i
= xn
; i
-- > 0;)
1836 mpz_cmp_d (const mpz_t x
, double d
)
1838 if (x
->_mp_size
< 0)
1843 return -mpz_cmpabs_d (x
, d
);
1850 return mpz_cmpabs_d (x
, d
);
1855 /* MPZ comparisons and the like. */
1857 mpz_sgn (const mpz_t u
)
1859 return GMP_CMP (u
->_mp_size
, 0);
1863 mpz_cmp_si (const mpz_t u
, long v
)
1865 mp_size_t usize
= u
->_mp_size
;
1868 return mpz_cmp_ui (u
, v
);
1869 else if (usize
>= 0)
1872 return - mpz_cmpabs_ui (u
, GMP_NEG_CAST (unsigned long int, v
));
1876 mpz_cmp_ui (const mpz_t u
, unsigned long v
)
1878 mp_size_t usize
= u
->_mp_size
;
1883 return mpz_cmpabs_ui (u
, v
);
1887 mpz_cmp (const mpz_t a
, const mpz_t b
)
1889 mp_size_t asize
= a
->_mp_size
;
1890 mp_size_t bsize
= b
->_mp_size
;
1893 return (asize
< bsize
) ? -1 : 1;
1894 else if (asize
>= 0)
1895 return mpn_cmp (a
->_mp_d
, b
->_mp_d
, asize
);
1897 return mpn_cmp (b
->_mp_d
, a
->_mp_d
, -asize
);
1901 mpz_cmpabs_ui (const mpz_t u
, unsigned long v
)
1903 mp_size_t un
= GMP_ABS (u
->_mp_size
);
1905 if (! mpn_absfits_ulong_p (u
->_mp_d
, un
))
1909 unsigned long uu
= mpz_get_ui (u
);
1910 return GMP_CMP(uu
, v
);
1915 mpz_cmpabs (const mpz_t u
, const mpz_t v
)
1917 return mpn_cmp4 (u
->_mp_d
, GMP_ABS (u
->_mp_size
),
1918 v
->_mp_d
, GMP_ABS (v
->_mp_size
));
1922 mpz_abs (mpz_t r
, const mpz_t u
)
1925 r
->_mp_size
= GMP_ABS (r
->_mp_size
);
1929 mpz_neg (mpz_t r
, const mpz_t u
)
1932 r
->_mp_size
= -r
->_mp_size
;
1936 mpz_swap (mpz_t u
, mpz_t v
)
1938 MP_SIZE_T_SWAP (u
->_mp_size
, v
->_mp_size
);
1939 MP_SIZE_T_SWAP (u
->_mp_alloc
, v
->_mp_alloc
);
1940 MP_PTR_SWAP (u
->_mp_d
, v
->_mp_d
);
1944 /* MPZ addition and subtraction */
1948 mpz_add_ui (mpz_t r
, const mpz_t a
, unsigned long b
)
1951 mpz_init_set_ui (bb
, b
);
1957 mpz_sub_ui (mpz_t r
, const mpz_t a
, unsigned long b
)
1959 mpz_ui_sub (r
, b
, a
);
1964 mpz_ui_sub (mpz_t r
, unsigned long a
, const mpz_t b
)
1967 mpz_add_ui (r
, r
, a
);
1971 mpz_abs_add (mpz_t r
, const mpz_t a
, const mpz_t b
)
1973 mp_size_t an
= GMP_ABS (a
->_mp_size
);
1974 mp_size_t bn
= GMP_ABS (b
->_mp_size
);
1980 MPZ_SRCPTR_SWAP (a
, b
);
1981 MP_SIZE_T_SWAP (an
, bn
);
1984 rp
= MPZ_REALLOC (r
, an
+ 1);
1985 cy
= mpn_add (rp
, a
->_mp_d
, an
, b
->_mp_d
, bn
);
1993 mpz_abs_sub (mpz_t r
, const mpz_t a
, const mpz_t b
)
1995 mp_size_t an
= GMP_ABS (a
->_mp_size
);
1996 mp_size_t bn
= GMP_ABS (b
->_mp_size
);
2000 cmp
= mpn_cmp4 (a
->_mp_d
, an
, b
->_mp_d
, bn
);
2003 rp
= MPZ_REALLOC (r
, an
);
2004 gmp_assert_nocarry (mpn_sub (rp
, a
->_mp_d
, an
, b
->_mp_d
, bn
));
2005 return mpn_normalized_size (rp
, an
);
2009 rp
= MPZ_REALLOC (r
, bn
);
2010 gmp_assert_nocarry (mpn_sub (rp
, b
->_mp_d
, bn
, a
->_mp_d
, an
));
2011 return -mpn_normalized_size (rp
, bn
);
2018 mpz_add (mpz_t r
, const mpz_t a
, const mpz_t b
)
2022 if ( (a
->_mp_size
^ b
->_mp_size
) >= 0)
2023 rn
= mpz_abs_add (r
, a
, b
);
2025 rn
= mpz_abs_sub (r
, a
, b
);
2027 r
->_mp_size
= a
->_mp_size
>= 0 ? rn
: - rn
;
2031 mpz_sub (mpz_t r
, const mpz_t a
, const mpz_t b
)
2035 if ( (a
->_mp_size
^ b
->_mp_size
) >= 0)
2036 rn
= mpz_abs_sub (r
, a
, b
);
2038 rn
= mpz_abs_add (r
, a
, b
);
2040 r
->_mp_size
= a
->_mp_size
>= 0 ? rn
: - rn
;
2044 /* MPZ multiplication */
2046 mpz_mul_si (mpz_t r
, const mpz_t u
, long int v
)
2050 mpz_mul_ui (r
, u
, GMP_NEG_CAST (unsigned long int, v
));
2054 mpz_mul_ui (r
, u
, v
);
2058 mpz_mul_ui (mpz_t r
, const mpz_t u
, unsigned long int v
)
2061 mpz_init_set_ui (vv
, v
);
2068 mpz_mul (mpz_t r
, const mpz_t u
, const mpz_t v
)
2071 mp_size_t un
, vn
, rn
;
2078 if (un
== 0 || vn
== 0)
2084 sign
= (un
^ vn
) < 0;
2089 mpz_init2 (t
, (un
+ vn
) * GMP_LIMB_BITS
);
2093 mpn_mul (tp
, u
->_mp_d
, un
, v
->_mp_d
, vn
);
2095 mpn_mul (tp
, v
->_mp_d
, vn
, u
->_mp_d
, un
);
2098 rn
-= tp
[rn
-1] == 0;
2100 t
->_mp_size
= sign
? - rn
: rn
;
2106 mpz_mul_2exp (mpz_t r
, const mpz_t u
, mp_bitcnt_t bits
)
2113 un
= GMP_ABS (u
->_mp_size
);
2120 limbs
= bits
/ GMP_LIMB_BITS
;
2121 shift
= bits
% GMP_LIMB_BITS
;
2123 rn
= un
+ limbs
+ (shift
> 0);
2124 rp
= MPZ_REALLOC (r
, rn
);
2127 mp_limb_t cy
= mpn_lshift (rp
+ limbs
, u
->_mp_d
, un
, shift
);
2132 mpn_copyd (rp
+ limbs
, u
->_mp_d
, un
);
2134 mpn_zero (rp
, limbs
);
2136 r
->_mp_size
= (u
->_mp_size
< 0) ? - rn
: rn
;
2140 mpz_addmul_ui (mpz_t r
, const mpz_t u
, unsigned long int v
)
2143 mpz_init_set_ui (t
, v
);
2150 mpz_submul_ui (mpz_t r
, const mpz_t u
, unsigned long int v
)
2153 mpz_init_set_ui (t
, v
);
2160 mpz_addmul (mpz_t r
, const mpz_t u
, const mpz_t v
)
2170 mpz_submul (mpz_t r
, const mpz_t u
, const mpz_t v
)
2181 enum mpz_div_round_mode
{ GMP_DIV_FLOOR
, GMP_DIV_CEIL
, GMP_DIV_TRUNC
};
2183 /* Allows q or r to be zero. Returns 1 iff remainder is non-zero. */
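#if 0
/* Illustrative sketch (not part of the original sources), using the public
   wrappers defined further below: how the three rounding modes differ for
   n = -7, d = 2.  Floor division gives q = -4, r = 1; ceiling gives q = -3,
   r = -1; truncation gives q = -3, r = -1.  The function name exists only
   for this example. */
static void
example_div_round_modes (void)
{
  mpz_t q, r, n, d;
  mpz_init (q);
  mpz_init (r);
  mpz_init_set_si (n, -7);
  mpz_init_set_si (d, 2);

  mpz_fdiv_qr (q, r, n, d);   /* q = -4, r =  1 */
  mpz_cdiv_qr (q, r, n, d);   /* q = -3, r = -1 */
  mpz_tdiv_qr (q, r, n, d);   /* q = -3, r = -1 */

  mpz_clear (q);
  mpz_clear (r);
  mpz_clear (n);
  mpz_clear (d);
}
#endif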
2185 mpz_div_qr (mpz_t q
, mpz_t r
,
2186 const mpz_t n
, const mpz_t d
, enum mpz_div_round_mode mode
)
2188 mp_size_t ns
, ds
, nn
, dn
, qs
;
2193 gmp_die("mpz_div_qr: Divide by zero.");
2211 if (mode
== GMP_DIV_CEIL
&& qs
>= 0)
2213 /* q = 1, r = n - d */
2219 else if (mode
== GMP_DIV_FLOOR
&& qs
< 0)
2221 /* q = -1, r = n + d */
2243 mpz_init_set (tr
, n
);
2250 mpz_init2 (tq
, qn
* GMP_LIMB_BITS
);
2256 mpn_div_qr (qp
, np
, nn
, d
->_mp_d
, dn
);
2260 qn
-= (qp
[qn
-1] == 0);
2262 tq
->_mp_size
= qs
< 0 ? -qn
: qn
;
2264 rn
= mpn_normalized_size (np
, dn
);
2265 tr
->_mp_size
= ns
< 0 ? - rn
: rn
;
2267 if (mode
== GMP_DIV_FLOOR
&& qs
< 0 && rn
!= 0)
2270 mpz_sub_ui (tq
, tq
, 1);
2272 mpz_add (tr
, tr
, d
);
2274 else if (mode
== GMP_DIV_CEIL
&& qs
>= 0 && rn
!= 0)
2277 mpz_add_ui (tq
, tq
, 1);
2279 mpz_sub (tr
, tr
, d
);
2297 mpz_cdiv_qr (mpz_t q
, mpz_t r
, const mpz_t n
, const mpz_t d
)
2299 mpz_div_qr (q
, r
, n
, d
, GMP_DIV_CEIL
);
2303 mpz_fdiv_qr (mpz_t q
, mpz_t r
, const mpz_t n
, const mpz_t d
)
2305 mpz_div_qr (q
, r
, n
, d
, GMP_DIV_FLOOR
);
2309 mpz_tdiv_qr (mpz_t q
, mpz_t r
, const mpz_t n
, const mpz_t d
)
2311 mpz_div_qr (q
, r
, n
, d
, GMP_DIV_TRUNC
);
2315 mpz_cdiv_q (mpz_t q
, const mpz_t n
, const mpz_t d
)
2317 mpz_div_qr (q
, NULL
, n
, d
, GMP_DIV_CEIL
);
2321 mpz_fdiv_q (mpz_t q
, const mpz_t n
, const mpz_t d
)
2323 mpz_div_qr (q
, NULL
, n
, d
, GMP_DIV_FLOOR
);
2327 mpz_tdiv_q (mpz_t q
, const mpz_t n
, const mpz_t d
)
2329 mpz_div_qr (q
, NULL
, n
, d
, GMP_DIV_TRUNC
);
2333 mpz_cdiv_r (mpz_t r
, const mpz_t n
, const mpz_t d
)
2335 mpz_div_qr (NULL
, r
, n
, d
, GMP_DIV_CEIL
);
2339 mpz_fdiv_r (mpz_t r
, const mpz_t n
, const mpz_t d
)
2341 mpz_div_qr (NULL
, r
, n
, d
, GMP_DIV_FLOOR
);
2345 mpz_tdiv_r (mpz_t r
, const mpz_t n
, const mpz_t d
)
2347 mpz_div_qr (NULL
, r
, n
, d
, GMP_DIV_TRUNC
);
2351 mpz_mod (mpz_t r
, const mpz_t n
, const mpz_t d
)
2353 mpz_div_qr (NULL
, r
, n
, d
, d
->_mp_size
>= 0 ? GMP_DIV_FLOOR
: GMP_DIV_CEIL
);
2357 mpz_div_q_2exp (mpz_t q
, const mpz_t u
, mp_bitcnt_t bit_index
,
2358 enum mpz_div_round_mode mode
)
2371 limb_cnt
= bit_index
/ GMP_LIMB_BITS
;
2372 qn
= GMP_ABS (un
) - limb_cnt
;
2373 bit_index
%= GMP_LIMB_BITS
;
2375 if (mode
== ((un
> 0) ? GMP_DIV_CEIL
: GMP_DIV_FLOOR
)) /* un != 0 here. */
2376 /* Note: Below, the final indexing at limb_cnt is valid because at
2377 that point we have qn > 0. */
2379 || !mpn_zero_p (u
->_mp_d
, limb_cnt
)
2380 || (u
->_mp_d
[limb_cnt
]
2381 & (((mp_limb_t
) 1 << bit_index
) - 1)));
2389 qp
= MPZ_REALLOC (q
, qn
);
2393 mpn_rshift (qp
, u
->_mp_d
+ limb_cnt
, qn
, bit_index
);
2394 qn
-= qp
[qn
- 1] == 0;
2398 mpn_copyi (qp
, u
->_mp_d
+ limb_cnt
, qn
);
2405 mpz_add_ui (q
, q
, 1);
2411 mpz_div_r_2exp (mpz_t r
, const mpz_t u
, mp_bitcnt_t bit_index
,
2412 enum mpz_div_round_mode mode
)
2414 mp_size_t us
, un
, rn
;
2419 if (us
== 0 || bit_index
== 0)
2424 rn
= (bit_index
+ GMP_LIMB_BITS
- 1) / GMP_LIMB_BITS
;
2427 rp
= MPZ_REALLOC (r
, rn
);
2430 mask
= GMP_LIMB_MAX
>> (rn
* GMP_LIMB_BITS
- bit_index
);
2434 /* Quotient (with truncation) is zero, and remainder is
2436 if (mode
== ((us
> 0) ? GMP_DIV_CEIL
: GMP_DIV_FLOOR
)) /* us != 0 here. */
2438 /* Have to negate and sign extend. */
2441 gmp_assert_nocarry (! mpn_neg (rp
, u
->_mp_d
, un
));
2442 for (i
= un
; i
< rn
- 1; i
++)
2443 rp
[i
] = GMP_LIMB_MAX
;
2452 mpn_copyi (rp
, u
->_mp_d
, un
);
2460 mpn_copyi (rp
, u
->_mp_d
, rn
- 1);
2462 rp
[rn
-1] = u
->_mp_d
[rn
-1] & mask
;
2464 if (mode
== ((us
> 0) ? GMP_DIV_CEIL
: GMP_DIV_FLOOR
)) /* us != 0 here. */
2466 /* If r != 0, compute 2^{bit_count} - r. */
2467 mpn_neg (rp
, rp
, rn
);
2471 /* us is not used for anything else, so we can modify it
2472 here to indicate flipped sign. */
2476 rn
= mpn_normalized_size (rp
, rn
);
2477 r
->_mp_size
= us
< 0 ? -rn
: rn
;
2481 mpz_cdiv_q_2exp (mpz_t r
, const mpz_t u
, mp_bitcnt_t cnt
)
2483 mpz_div_q_2exp (r
, u
, cnt
, GMP_DIV_CEIL
);
2487 mpz_fdiv_q_2exp (mpz_t r
, const mpz_t u
, mp_bitcnt_t cnt
)
2489 mpz_div_q_2exp (r
, u
, cnt
, GMP_DIV_FLOOR
);
2493 mpz_tdiv_q_2exp (mpz_t r
, const mpz_t u
, mp_bitcnt_t cnt
)
2495 mpz_div_q_2exp (r
, u
, cnt
, GMP_DIV_TRUNC
);
2499 mpz_cdiv_r_2exp (mpz_t r
, const mpz_t u
, mp_bitcnt_t cnt
)
2501 mpz_div_r_2exp (r
, u
, cnt
, GMP_DIV_CEIL
);
2505 mpz_fdiv_r_2exp (mpz_t r
, const mpz_t u
, mp_bitcnt_t cnt
)
2507 mpz_div_r_2exp (r
, u
, cnt
, GMP_DIV_FLOOR
);
2511 mpz_tdiv_r_2exp (mpz_t r
, const mpz_t u
, mp_bitcnt_t cnt
)
2513 mpz_div_r_2exp (r
, u
, cnt
, GMP_DIV_TRUNC
);
2517 mpz_divexact (mpz_t q
, const mpz_t n
, const mpz_t d
)
2519 gmp_assert_nocarry (mpz_div_qr (q
, NULL
, n
, d
, GMP_DIV_TRUNC
));
2523 mpz_divisible_p (const mpz_t n
, const mpz_t d
)
2525 return mpz_div_qr (NULL
, NULL
, n
, d
, GMP_DIV_TRUNC
) == 0;
2529 mpz_congruent_p (const mpz_t a
, const mpz_t b
, const mpz_t m
)
2534 /* a == b (mod 0) iff a == b */
2535 if (mpz_sgn (m
) == 0)
2536 return (mpz_cmp (a
, b
) == 0);
2540 res
= mpz_divisible_p (t
, m
);
2546 static unsigned long
2547 mpz_div_qr_ui (mpz_t q
, mpz_t r
,
2548 const mpz_t n
, unsigned long d
, enum mpz_div_round_mode mode
)
2554 mpz_init_set_ui (dd
, d
);
2555 mpz_div_qr (q
, rr
, n
, dd
, mode
);
2557 ret
= mpz_get_ui (rr
);
2567 mpz_cdiv_qr_ui (mpz_t q
, mpz_t r
, const mpz_t n
, unsigned long d
)
2569 return mpz_div_qr_ui (q
, r
, n
, d
, GMP_DIV_CEIL
);
2573 mpz_fdiv_qr_ui (mpz_t q
, mpz_t r
, const mpz_t n
, unsigned long d
)
2575 return mpz_div_qr_ui (q
, r
, n
, d
, GMP_DIV_FLOOR
);
2579 mpz_tdiv_qr_ui (mpz_t q
, mpz_t r
, const mpz_t n
, unsigned long d
)
2581 return mpz_div_qr_ui (q
, r
, n
, d
, GMP_DIV_TRUNC
);
2585 mpz_cdiv_q_ui (mpz_t q
, const mpz_t n
, unsigned long d
)
2587 return mpz_div_qr_ui (q
, NULL
, n
, d
, GMP_DIV_CEIL
);
2591 mpz_fdiv_q_ui (mpz_t q
, const mpz_t n
, unsigned long d
)
2593 return mpz_div_qr_ui (q
, NULL
, n
, d
, GMP_DIV_FLOOR
);
2597 mpz_tdiv_q_ui (mpz_t q
, const mpz_t n
, unsigned long d
)
2599 return mpz_div_qr_ui (q
, NULL
, n
, d
, GMP_DIV_TRUNC
);
2603 mpz_cdiv_r_ui (mpz_t r
, const mpz_t n
, unsigned long d
)
2605 return mpz_div_qr_ui (NULL
, r
, n
, d
, GMP_DIV_CEIL
);
2608 mpz_fdiv_r_ui (mpz_t r
, const mpz_t n
, unsigned long d
)
2610 return mpz_div_qr_ui (NULL
, r
, n
, d
, GMP_DIV_FLOOR
);
2613 mpz_tdiv_r_ui (mpz_t r
, const mpz_t n
, unsigned long d
)
2615 return mpz_div_qr_ui (NULL
, r
, n
, d
, GMP_DIV_TRUNC
);
2619 mpz_cdiv_ui (const mpz_t n
, unsigned long d
)
2621 return mpz_div_qr_ui (NULL
, NULL
, n
, d
, GMP_DIV_CEIL
);
2625 mpz_fdiv_ui (const mpz_t n
, unsigned long d
)
2627 return mpz_div_qr_ui (NULL
, NULL
, n
, d
, GMP_DIV_FLOOR
);
2631 mpz_tdiv_ui (const mpz_t n
, unsigned long d
)
2633 return mpz_div_qr_ui (NULL
, NULL
, n
, d
, GMP_DIV_TRUNC
);
2637 mpz_mod_ui (mpz_t r
, const mpz_t n
, unsigned long d
)
2639 return mpz_div_qr_ui (NULL
, r
, n
, d
, GMP_DIV_FLOOR
);
2643 mpz_divexact_ui (mpz_t q
, const mpz_t n
, unsigned long d
)
2645 gmp_assert_nocarry (mpz_div_qr_ui (q
, NULL
, n
, d
, GMP_DIV_TRUNC
));
2649 mpz_divisible_ui_p (const mpz_t n
, unsigned long d
)
2651 return mpz_div_qr_ui (NULL
, NULL
, n
, d
, GMP_DIV_TRUNC
) == 0;
2657 mpn_gcd_11 (mp_limb_t u
, mp_limb_t v
)
2661 assert ( (u
| v
) > 0);
2668 gmp_ctz (shift
, u
| v
);
2674 MP_LIMB_T_SWAP (u
, v
);
2676 while ( (v
& 1) == 0)
2686 while ( (u
& 1) == 0);
2693 while ( (v
& 1) == 0);
2700 mpz_gcd_ui (mpz_t g
, const mpz_t u
, unsigned long v
)
2703 mpz_init_set_ui(t
, v
);
2717 mpz_make_odd (mpz_t r
)
2721 assert (r
->_mp_size
> 0);
2722 /* Count trailing zeros, equivalent to mpn_scan1, because we know that there is a 1 */
2723 shift
= mpn_scan1 (r
->_mp_d
, 0);
2724 mpz_tdiv_q_2exp (r
, r
, shift
);
2730 mpz_gcd (mpz_t g
, const mpz_t u
, const mpz_t v
)
2733 mp_bitcnt_t uz
, vz
, gz
;
2735 if (u
->_mp_size
== 0)
2740 if (v
->_mp_size
== 0)
2750 uz
= mpz_make_odd (tu
);
2752 vz
= mpz_make_odd (tv
);
2753 gz
= GMP_MIN (uz
, vz
);
2755 if (tu
->_mp_size
< tv
->_mp_size
)
2758 mpz_tdiv_r (tu
, tu
, tv
);
2759 if (tu
->_mp_size
== 0)
2769 c
= mpz_cmp (tu
, tv
);
2778 if (tv
->_mp_size
== 1)
2782 mpz_tdiv_r (tu
, tu
, tv
);
2783 gp
= MPZ_REALLOC (g
, 1); /* gp = mpz_limbs_modify (g, 1); */
2784 *gp
= mpn_gcd_11 (tu
->_mp_d
[0], tv
->_mp_d
[0]);
2786 g
->_mp_size
= *gp
!= 0; /* mpz_limbs_finish (g, 1); */
2789 mpz_sub (tu
, tu
, tv
);
2793 mpz_mul_2exp (g
, g
, gz
);
2797 mpz_gcdext (mpz_t g
, mpz_t s
, mpz_t t
, const mpz_t u
, const mpz_t v
)
2799 mpz_t tu
, tv
, s0
, s1
, t0
, t1
;
2800 mp_bitcnt_t uz
, vz
, gz
;
2803 if (u
->_mp_size
== 0)
2805 /* g = 0 u + sgn(v) v */
2806 signed long sign
= mpz_sgn (v
);
2811 mpz_set_si (t
, sign
);
2815 if (v
->_mp_size
== 0)
2817 /* g = sgn(u) u + 0 v */
2818 signed long sign
= mpz_sgn (u
);
2821 mpz_set_si (s
, sign
);
2835 uz
= mpz_make_odd (tu
);
2837 vz
= mpz_make_odd (tv
);
2838 gz
= GMP_MIN (uz
, vz
);
2843 /* Cofactors corresponding to odd gcd. gz handled later. */
2844 if (tu
->_mp_size
< tv
->_mp_size
)
2847 MPZ_SRCPTR_SWAP (u
, v
);
2848 MPZ_PTR_SWAP (s
, t
);
2849 MP_BITCNT_T_SWAP (uz
, vz
);
2857 * where u and v denote the inputs with common factors of two
2858 * eliminated, and det (s0, t0; s1, t1) = 2^p. Then
2860 * 2^p tu = s1 u - t1 v
2861 * 2^p tv = -s0 u + t0 v
2864 /* After initial division, tu = q tv + tu', we have
2866 * u = 2^uz (tu' + q tv)
2871 * t0 = 2^uz, t1 = 2^uz q
2875 mpz_tdiv_qr (t1
, tu
, tu
, tv
);
2876 mpz_mul_2exp (t1
, t1
, uz
);
2878 mpz_setbit (s1
, vz
);
2881 if (tu
->_mp_size
> 0)
2884 shift
= mpz_make_odd (tu
);
2885 mpz_setbit (t0
, uz
+ shift
);
2891 c
= mpz_cmp (tu
, tv
);
2899 * u = t0 tu + t1 (tv' + tu) = (t0 + t1) tu + t1 tv'
2900 * v = s0 tu + s1 (tv' + tu) = (s0 + s1) tu + s1 tv' */
2902 mpz_sub (tv
, tv
, tu
);
2903 mpz_add (t0
, t0
, t1
);
2904 mpz_add (s0
, s0
, s1
);
2906 shift
= mpz_make_odd (tv
);
2907 mpz_mul_2exp (t1
, t1
, shift
);
2908 mpz_mul_2exp (s1
, s1
, shift
);
2912 mpz_sub (tu
, tu
, tv
);
2913 mpz_add (t1
, t0
, t1
);
2914 mpz_add (s1
, s0
, s1
);
2916 shift
= mpz_make_odd (tu
);
2917 mpz_mul_2exp (t0
, t0
, shift
);
2918 mpz_mul_2exp (s0
, s0
, shift
);
2924 mpz_setbit (t0
, uz
);
2926 /* Now tv = odd part of gcd, and -s0 and t0 are corresponding
2929 mpz_mul_2exp (tv
, tv
, gz
);
2932 /* 2^p g = s0 u + t0 v. Eliminate one factor of two at a time. To
2933 adjust cofactors, we need u / g and v / g */
2935 mpz_divexact (s1
, v
, tv
);
2937 mpz_divexact (t1
, u
, tv
);
2942 /* s0 u + t0 v = (s0 - v/g) u - (t0 + u/g) v */
2943 if (mpz_odd_p (s0
) || mpz_odd_p (t0
))
2945 mpz_sub (s0
, s0
, s1
);
2946 mpz_add (t0
, t0
, t1
);
2948 assert (mpz_even_p (t0
) && mpz_even_p (s0
));
2949 mpz_tdiv_q_2exp (s0
, s0
, 1);
2950 mpz_tdiv_q_2exp (t0
, t0
, 1);
2953 /* Arrange so that |s| < |u| / 2g */
2954 mpz_add (s1
, s0
, s1
);
2955 if (mpz_cmpabs (s0
, s1
) > 0)
2958 mpz_sub (t0
, t0
, t1
);
2960 if (u
->_mp_size
< 0)
2962 if (v
->_mp_size
< 0)
2980 mpz_lcm (mpz_t r
, const mpz_t u
, const mpz_t v
)
2984 if (u
->_mp_size
== 0 || v
->_mp_size
== 0)
2993 mpz_divexact (g
, u
, g
);
3001 mpz_lcm_ui (mpz_t r
, const mpz_t u
, unsigned long v
)
3003 if (v
== 0 || u
->_mp_size
== 0)
3009 v
/= mpz_gcd_ui (NULL
, u
, v
);
3010 mpz_mul_ui (r
, u
, v
);
3016 mpz_invert (mpz_t r
, const mpz_t u
, const mpz_t m
)
3021 if (u
->_mp_size
== 0 || mpz_cmpabs_ui (m
, 1) <= 0)
3027 mpz_gcdext (g
, tr
, NULL
, u
, m
);
3028 invertible
= (mpz_cmp_ui (g
, 1) == 0);
3032 if (tr
->_mp_size
< 0)
3034 if (m
->_mp_size
>= 0)
3035 mpz_add (tr
, tr
, m
);
3037 mpz_sub (tr
, tr
, m
);
3048 /* Higher level operations (sqrt, pow and root) */
3051 mpz_pow_ui (mpz_t r
, const mpz_t b
, unsigned long e
)
3055 mpz_init_set_ui (tr
, 1);
3057 bit
= GMP_ULONG_HIGHBIT
;
3060 mpz_mul (tr
, tr
, tr
);
3062 mpz_mul (tr
, tr
, b
);
3072 mpz_ui_pow_ui (mpz_t r
, unsigned long blimb
, unsigned long e
)
3076 mpz_init_set_ui (b
, blimb
);
3077 mpz_pow_ui (r
, b
, e
);
3082 mpz_powm (mpz_t r
, const mpz_t b
, const mpz_t e
, const mpz_t m
)
3088 struct gmp_div_inverse minv
;
3092 en
= GMP_ABS (e
->_mp_size
);
3093 mn
= GMP_ABS (m
->_mp_size
);
3095 gmp_die ("mpz_powm: Zero modulo.");
3104 mpn_div_qr_invert (&minv
, mp
, mn
);
3109 /* To avoid shifts, we do all our reductions, except the final
3110 one, using a *normalized* m. */
3113 tp
= gmp_alloc_limbs (mn
);
3114 gmp_assert_nocarry (mpn_lshift (tp
, mp
, mn
, shift
));
3120 if (e
->_mp_size
< 0)
3122 if (!mpz_invert (base
, b
, m
))
3123 gmp_die ("mpz_powm: Negative exponent and non-invertible base.");
3130 bn
= base
->_mp_size
;
3133 mpn_div_qr_preinv (NULL
, base
->_mp_d
, base
->_mp_size
, mp
, mn
, &minv
);
3137 /* We have reduced the absolute value. Now take care of the
3138 sign. Note that we get zero represented non-canonically as
3140 if (b
->_mp_size
< 0)
3142 mp_ptr bp
= MPZ_REALLOC (base
, mn
);
3143 gmp_assert_nocarry (mpn_sub (bp
, mp
, mn
, bp
, bn
));
3146 base
->_mp_size
= mpn_normalized_size (base
->_mp_d
, bn
);
3148 mpz_init_set_ui (tr
, 1);
3152 mp_limb_t w
= e
->_mp_d
[en
];
3155 bit
= GMP_LIMB_HIGHBIT
;
3158 mpz_mul (tr
, tr
, tr
);
3160 mpz_mul (tr
, tr
, base
);
3161 if (tr
->_mp_size
> mn
)
3163 mpn_div_qr_preinv (NULL
, tr
->_mp_d
, tr
->_mp_size
, mp
, mn
, &minv
);
3164 tr
->_mp_size
= mpn_normalized_size (tr
->_mp_d
, mn
);
3171 /* Final reduction */
3172 if (tr
->_mp_size
>= mn
)
3175 mpn_div_qr_preinv (NULL
, tr
->_mp_d
, tr
->_mp_size
, mp
, mn
, &minv
);
3176 tr
->_mp_size
= mpn_normalized_size (tr
->_mp_d
, mn
);
3179 gmp_free_limbs (tp
, mn
);
3187 mpz_powm_ui (mpz_t r
, const mpz_t b
, unsigned long elimb
, const mpz_t m
)
3191 mpz_init_set_ui (e
, elimb
);
3192 mpz_powm (r
, b
, e
, m
);
3196 /* x=trunc(y^(1/z)), r=y-x^z */
3198 mpz_rootrem (mpz_t x
, mpz_t r
, const mpz_t y
, unsigned long z
)
3203 sgn
= y
->_mp_size
< 0;
3204 if ((~z
& sgn
) != 0)
3205 gmp_die ("mpz_rootrem: Negative argument, with even root.");
3207 gmp_die ("mpz_rootrem: Zeroth root.");
3209 if (mpz_cmpabs_ui (y
, 1) <= 0) {
3219 mpz_setbit (t
, mpz_sizeinbase (y
, 2) / z
+ 1);
3221 if (z
== 2) /* simplify sqrt loop: z-1 == 1 */
3223 mpz_swap (u
, t
); /* u = x */
3224 mpz_tdiv_q (t
, y
, u
); /* t = y/x */
3225 mpz_add (t
, t
, u
); /* t = y/x + x */
3226 mpz_tdiv_q_2exp (t
, t
, 1); /* x'= (y/x + x)/2 */
3227 } while (mpz_cmpabs (t
, u
) < 0); /* |x'| < |x| */
3236 mpz_swap (u
, t
); /* u = x */
3237 mpz_pow_ui (t
, u
, z
- 1); /* t = x^(z-1) */
3238 mpz_tdiv_q (t
, y
, t
); /* t = y/x^(z-1) */
3239 mpz_mul_ui (v
, u
, z
- 1); /* v = x*(z-1) */
3240 mpz_add (t
, t
, v
); /* t = y/x^(z-1) + x*(z-1) */
3241 mpz_tdiv_q_ui (t
, t
, z
); /* x'=(y/x^(z-1) + x*(z-1))/z */
3242 } while (mpz_cmpabs (t
, u
) < 0); /* |x'| < |x| */
3248 mpz_pow_ui (t
, u
, z
);
3258 mpz_root (mpz_t x
, const mpz_t y
, unsigned long z
)
3264 mpz_rootrem (x
, r
, y
, z
);
3265 res
= r
->_mp_size
== 0;
3271 /* Compute s = floor(sqrt(u)) and r = u - s^2. Allows r == NULL */
3273 mpz_sqrtrem (mpz_t s
, mpz_t r
, const mpz_t u
)
3275 mpz_rootrem (s
, r
, u
, 2);
3279 mpz_sqrt (mpz_t s
, const mpz_t u
)
3281 mpz_rootrem (s
, NULL
, u
, 2);
3285 mpz_perfect_square_p (const mpz_t u
)
3287 if (u
->_mp_size
<= 0)
3288 return (u
->_mp_size
== 0);
3290 return mpz_root (NULL
, u
, 2);
3294 mpn_perfect_square_p (mp_srcptr p
, mp_size_t n
)
3299 assert (p
[n
-1] != 0);
3300 return mpz_root (NULL
, mpz_roinit_normal_n (t
, p
, n
), 2);
3304 mpn_sqrtrem (mp_ptr sp
, mp_ptr rp
, mp_srcptr p
, mp_size_t n
)
3310 assert (p
[n
-1] != 0);
3314 mpz_rootrem (s
, r
, mpz_roinit_normal_n (u
, p
, n
), 2);
3316 assert (s
->_mp_size
== (n
+1)/2);
3317 mpn_copyd (sp
, s
->_mp_d
, s
->_mp_size
);
3321 mpn_copyd (rp
, r
->_mp_d
, res
);
3329 mpz_mfac_uiui (mpz_t x
, unsigned long n
, unsigned long m
)
3331 mpz_set_ui (x
, n
+ (n
== 0));
3332 if (m
+ 1 < 2) return;
3334 mpz_mul_ui (x
, x
, n
-= m
);
3338 mpz_2fac_ui (mpz_t x
, unsigned long n
)
3340 mpz_mfac_uiui (x
, n
, 2);
3344 mpz_fac_ui (mpz_t x
, unsigned long n
)
3346 mpz_mfac_uiui (x
, n
, 1);
3350 mpz_bin_uiui (mpz_t r
, unsigned long n
, unsigned long k
)
3354 mpz_set_ui (r
, k
<= n
);
3357 k
= (k
<= n
) ? n
- k
: 0;
3363 mpz_mul_ui (r
, r
, n
--);
3365 mpz_divexact (r
, r
, t
);
3370 /* Primality testing */
3372 /* Computes Kronecker (a/b) with odd b, a!=0 and GCD(a,b) = 1 */
3373 /* Adapted from JACOBI_BASE_METHOD==4 in mpn/generic/jacbase.c */
3375 gmp_jacobi_coprime (mp_limb_t a
, mp_limb_t b
)
3381 /* assert (mpn_gcd_11 (a, b) == 1); */
3383 /* Below, we represent a and b shifted right so that the least
3384 significant one bit is implicit. */
3393 /* (2/b) = -1 if b = 3 or 5 mod 8 */
3394 bit
^= c
& (b
^ (b
>> 1));
3398 return bit
& 1 ? -1 : 1;
3415 gmp_lucas_step_k_2k (mpz_t V
, mpz_t Qk
, const mpz_t n
)
3417 mpz_mod (Qk
, Qk
, n
);
3418 /* V_{2k} <- V_k ^ 2 - 2Q^k */
3420 mpz_submul_ui (V
, Qk
, 2);
3421 mpz_tdiv_r (V
, V
, n
);
3422 /* Q^{2k} = (Q^k)^2 */
3423 mpz_mul (Qk
, Qk
, Qk
);
3426 /* Computes V_k, Q^k (mod n) for the Lucas' sequence */
3427 /* with P=1, Q=Q; k = (n>>b0)|1. */
3428 /* Requires an odd n > 4; b0 > 0; -2*Q must not overflow a long */
3429 /* Returns (U_k == 0) and sets V=V_k and Qk=Q^k. */
3431 gmp_lucas_mod (mpz_t V
, mpz_t Qk
, long Q
,
3432 mp_bitcnt_t b0
, const mpz_t n
)
3439 assert (Q
<= - (LONG_MIN
/ 2));
3440 assert (Q
>= - (LONG_MAX
/ 2));
3441 assert (mpz_cmp_ui (n
, 4) > 0);
3442 assert (mpz_odd_p (n
));
3444 mpz_init_set_ui (U
, 1); /* U1 = 1 */
3445 mpz_set_ui (V
, 1); /* V1 = 1 */
3448 for (bs
= mpz_sizeinbase (n
, 2) - 1; --bs
>= b0
;)
3450 /* U_{2k} <- U_k * V_k */
3452 /* V_{2k} <- V_k ^ 2 - 2Q^k */
3453 /* Q^{2k} = (Q^k)^2 */
3454 gmp_lucas_step_k_2k (V
, Qk
, n
);
3456 /* A step k->k+1 is performed if the bit in $n$ is 1 */
3457 /* mpz_tstbit(n,bs) or the bit is 0 in $n$ but */
3458 /* should be 1 in $n+1$ (bs == b0) */
3459 if (b0
== bs
|| mpz_tstbit (n
, bs
))
3461 /* Q^{k+1} <- Q^k * Q */
3462 mpz_mul_si (Qk
, Qk
, Q
);
3463 /* U_{k+1} <- (U_k + V_k) / 2 */
3464 mpz_swap (U
, V
); /* Keep in V the old value of U_k */
3466 /* We have to compute U/2, so we need an even value, */
3467 /* equivalent (mod n) */
3470 mpz_tdiv_q_2exp (U
, U
, 1);
3471 /* V_{k+1} <-(D*U_k + V_k) / 2 =
3472 U_{k+1} + (D-1)/2*U_k = U_{k+1} - 2Q*U_k */
3473 mpz_mul_si (V
, V
, -2*Q
);
3475 mpz_tdiv_r (V
, V
, n
);
3477 mpz_tdiv_r (U
, U
, n
);
3480 res
= U
->_mp_size
== 0;
3485 /* Performs strong Lucas' test on x, with parameters suggested */
3486 /* for the BPSW test. Qk is only passed to recycle a variable. */
3487 /* Requires GCD (x,6) = 1.*/
3489 gmp_stronglucas (const mpz_t x
, mpz_t Qk
)
3493 mp_limb_t maxD
, D
; /* The absolute value is stored. */
3497 /* Test on the absolute value. */
3498 mpz_roinit_normal_n (n
, x
->_mp_d
, GMP_ABS (x
->_mp_size
));
3500 assert (mpz_odd_p (n
));
3501 /* assert (mpz_gcd_ui (NULL, n, 6) == 1); */
3502 if (mpz_root (Qk
, n
, 2))
3503 return 0; /* A square is composite. */
3505 /* Check Ds up to square root (in case, n is prime)
3506 or avoid overflows */
3507 maxD
= (Qk
->_mp_size
== 1) ? Qk
->_mp_d
[0] - 1 : GMP_LIMB_MAX
;
3510 /* Search a D such that (D/n) = -1 in the sequence 5,-7,9,-11,.. */
3511 /* For those Ds we have (D/n) = (n/|D|) */
3515 return 1 + (D
!= GMP_LIMB_MAX
); /* (1 + ! ~ D) */
3517 tl
= mpz_tdiv_ui (n
, D
);
3521 while (gmp_jacobi_coprime (tl
, D
) == 1);
3525 /* n-(D/n) = n+1 = d*2^{b0}, with d = (n>>b0) | 1 */
3526 b0
= mpz_scan0 (n
, 0);
3528 /* D= P^2 - 4Q; P = 1; Q = (1-D)/4 */
3529 Q
= (D
& 2) ? (long) (D
>> 2) + 1 : -(long) (D
>> 2);
3531 if (! gmp_lucas_mod (V
, Qk
, Q
, b0
, n
)) /* If Ud != 0 */
3532 while (V
->_mp_size
!= 0 && --b0
!= 0) /* while Vk != 0 */
3533 /* V <- V ^ 2 - 2Q^k */
3534 /* Q^{2k} = (Q^k)^2 */
3535 gmp_lucas_step_k_2k (V
, Qk
, n
);
3542 gmp_millerrabin (const mpz_t n
, const mpz_t nm1
, mpz_t y
,
3543 const mpz_t q
, mp_bitcnt_t k
)
3547 /* Caller must initialize y to the base. */
3548 mpz_powm (y
, y
, q
, n
);
3550 if (mpz_cmp_ui (y
, 1) == 0 || mpz_cmp (y
, nm1
) == 0)
3555 mpz_powm_ui (y
, y
, 2, n
);
3556 if (mpz_cmp (y
, nm1
) == 0)
3558 /* y == 1 means that the previous y was a non-trivial square root
3559 of 1 (mod n). y == 0 means that n is a power of the base.
3560 In either case, n is not prime. */
3561 if (mpz_cmp_ui (y
, 1) <= 0)
3567 /* This product is 0xc0cfd797, and fits in 32 bits. */
3568 #define GMP_PRIME_PRODUCT \
3569 (3UL*5UL*7UL*11UL*13UL*17UL*19UL*23UL*29UL)
3571 /* Bit (p+1)/2 is set, for each odd prime <= 61 */
3572 #define GMP_PRIME_MASK 0xc96996dcUL
3575 mpz_probab_prime_p (const mpz_t n
, int reps
)
3584 /* Note that we use the absolute value of n only, for compatibility
3585 with the real GMP. */
3587 return (mpz_cmpabs_ui (n
, 2) == 0) ? 2 : 0;
3589 /* Above test excludes n == 0 */
3590 assert (n
->_mp_size
!= 0);
3592 if (mpz_cmpabs_ui (n
, 64) < 0)
3593 return (GMP_PRIME_MASK
>> (n
->_mp_d
[0] >> 1)) & 2;
3595 if (mpz_gcd_ui (NULL
, n
, GMP_PRIME_PRODUCT
) != 1)
3598 /* All prime factors are >= 31. */
3599 if (mpz_cmpabs_ui (n
, 31*31) < 0)
3605 /* Find q and k, where q is odd and n = 1 + 2**k * q. */
3608 /* Count trailing zeros, equivalent to mpn_scan1, because we know that there is a 1 */
3609 k
= mpn_scan1 (nm1
->_mp_d
, 0);
3610 mpz_tdiv_q_2exp (q
, nm1
, k
);
3613 mpz_init_set_ui (y
, 2);
3614 is_prime
= gmp_millerrabin (n
, nm1
, y
, q
, k
) && gmp_stronglucas (n
, y
);
3615 reps
-= 24; /* skip the first 24 repetitions */
3617 /* Use Miller-Rabin, with a deterministic sequence of bases, a[j] =
3618 j^2 + j + 41 using Euler's polynomial. We potentially stop early,
3619 if a[j] >= n - 1. Since n >= 31*31, this can happen only if reps >
3620 30 (a[30] == 971 > 31*31 == 961). */
3622 for (j
= 0; is_prime
& (j
< reps
); j
++)
3624 mpz_set_ui (y
, (unsigned long) j
*j
+j
+41);
3625 if (mpz_cmp (y
, nm1
) >= 0)
3627 /* Don't try any further bases. This "early" break does not affect
3628 the result for any reasonable reps value (<=5000 was tested) */
3632 is_prime
= gmp_millerrabin (n
, nm1
, y
, q
, k
);
/* Logical operations and bit manipulation. */

/* Numbers are treated as if represented in two's complement (and
   infinitely sign extended). For negative values we get the two's
   complement from -x = ~x + 1, where ~ is bitwise complement.
   Negation transforms

     xxxx10...0

   into

     yyyy10...0

   where yyyy is the bitwise complement of xxxx. So least significant
   bits, up to and including the first one bit, are unchanged, and
   the more significant bits are all complemented.

   To change a bit from zero to one in a negative number, subtract the
   corresponding power of two from the absolute value. This can never
   underflow. To change a bit from one to zero, add the corresponding
   power of two, and this might overflow. E.g., if x = -001111, the
   two's complement is 110001. Clearing the least significant bit, we
   get two's complement 110000, and -010000. */
3667 mpz_tstbit (const mpz_t d
, mp_bitcnt_t bit_index
)
3669 mp_size_t limb_index
;
3678 limb_index
= bit_index
/ GMP_LIMB_BITS
;
3679 if (limb_index
>= dn
)
3682 shift
= bit_index
% GMP_LIMB_BITS
;
3683 w
= d
->_mp_d
[limb_index
];
3684 bit
= (w
>> shift
) & 1;
3688 /* d < 0. Check if any of the bits below is set: If so, our bit
3689 must be complemented. */
3690 if (shift
> 0 && (mp_limb_t
) (w
<< (GMP_LIMB_BITS
- shift
)) > 0)
3692 while (--limb_index
>= 0)
3693 if (d
->_mp_d
[limb_index
] > 0)
3700 mpz_abs_add_bit (mpz_t d
, mp_bitcnt_t bit_index
)
3702 mp_size_t dn
, limb_index
;
3706 dn
= GMP_ABS (d
->_mp_size
);
3708 limb_index
= bit_index
/ GMP_LIMB_BITS
;
3709 bit
= (mp_limb_t
) 1 << (bit_index
% GMP_LIMB_BITS
);
3711 if (limb_index
>= dn
)
3714 /* The bit should be set outside of the end of the number.
3715 We have to increase the size of the number. */
3716 dp
= MPZ_REALLOC (d
, limb_index
+ 1);
3718 dp
[limb_index
] = bit
;
3719 for (i
= dn
; i
< limb_index
; i
++)
3721 dn
= limb_index
+ 1;
3729 cy
= mpn_add_1 (dp
+ limb_index
, dp
+ limb_index
, dn
- limb_index
, bit
);
3732 dp
= MPZ_REALLOC (d
, dn
+ 1);
3737 d
->_mp_size
= (d
->_mp_size
< 0) ? - dn
: dn
;
3741 mpz_abs_sub_bit (mpz_t d
, mp_bitcnt_t bit_index
)
3743 mp_size_t dn
, limb_index
;
3747 dn
= GMP_ABS (d
->_mp_size
);
3750 limb_index
= bit_index
/ GMP_LIMB_BITS
;
3751 bit
= (mp_limb_t
) 1 << (bit_index
% GMP_LIMB_BITS
);
3753 assert (limb_index
< dn
);
3755 gmp_assert_nocarry (mpn_sub_1 (dp
+ limb_index
, dp
+ limb_index
,
3756 dn
- limb_index
, bit
));
3757 dn
= mpn_normalized_size (dp
, dn
);
3758 d
->_mp_size
= (d
->_mp_size
< 0) ? - dn
: dn
;
3762 mpz_setbit (mpz_t d
, mp_bitcnt_t bit_index
)
3764 if (!mpz_tstbit (d
, bit_index
))
3766 if (d
->_mp_size
>= 0)
3767 mpz_abs_add_bit (d
, bit_index
);
3769 mpz_abs_sub_bit (d
, bit_index
);
3774 mpz_clrbit (mpz_t d
, mp_bitcnt_t bit_index
)
3776 if (mpz_tstbit (d
, bit_index
))
3778 if (d
->_mp_size
>= 0)
3779 mpz_abs_sub_bit (d
, bit_index
);
3781 mpz_abs_add_bit (d
, bit_index
);
3786 mpz_combit (mpz_t d
, mp_bitcnt_t bit_index
)
3788 if (mpz_tstbit (d
, bit_index
) ^ (d
->_mp_size
< 0))
3789 mpz_abs_sub_bit (d
, bit_index
);
3791 mpz_abs_add_bit (d
, bit_index
);
3795 mpz_com (mpz_t r
, const mpz_t u
)
3797 mpz_add_ui (r
, u
, 1);
mpz_and (mpz_t r, const mpz_t u, const mpz_t v)
  mp_size_t un, vn, rn, i;
  mp_limb_t ux, vx, rx;
  mp_limb_t uc, vc, rc;
  mp_limb_t ul, vl, rl;

  un = GMP_ABS (u->_mp_size);
  vn = GMP_ABS (v->_mp_size);
      MPZ_SRCPTR_SWAP (u, v);
      MP_SIZE_T_SWAP (un, vn);

  uc = u->_mp_size < 0;
  vc = v->_mp_size < 0;

  /* If the smaller input is positive, higher limbs don't matter. */

  rp = MPZ_REALLOC (r, rn + (mp_size_t) rc);

      ul = (up[i] ^ ux) + uc;
      vl = (vp[i] ^ vx) + vc;
      rl = ( (ul & vl) ^ rx) + rc;

      ul = (up[i] ^ ux) + uc;
      rl = ( (ul & vx) ^ rx) + rc;

  rn = mpn_normalized_size (rp, rn);
  r->_mp_size = rx ? -rn : rn;
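/* Usage sketch (illustrative only): mpz_and follows the two's complement
   convention above, so negative operands behave like sign-extended bit
   strings:

     mpz_t a, b, r;
     mpz_init_set_si (a, -6);    // ...11111010
     mpz_init_set_si (b, -12);   // ...11110100
     mpz_init (r);
     mpz_and (r, a, b);          // r == -16, i.e. ...11110000
     mpz_set_ui (b, 12);
     mpz_and (r, a, b);          // r == 8, only bit 3 survives
*/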
mpz_ior (mpz_t r, const mpz_t u, const mpz_t v)
  mp_size_t un, vn, rn, i;
  mp_limb_t ux, vx, rx;
  mp_limb_t uc, vc, rc;
  mp_limb_t ul, vl, rl;

  un = GMP_ABS (u->_mp_size);
  vn = GMP_ABS (v->_mp_size);
      MPZ_SRCPTR_SWAP (u, v);
      MP_SIZE_T_SWAP (un, vn);

  uc = u->_mp_size < 0;
  vc = v->_mp_size < 0;

  /* If the smaller input is negative, by sign extension higher limbs
     don't matter. */

  rp = MPZ_REALLOC (r, rn + (mp_size_t) rc);

      ul = (up[i] ^ ux) + uc;
      vl = (vp[i] ^ vx) + vc;
      rl = ( (ul | vl) ^ rx) + rc;

      ul = (up[i] ^ ux) + uc;
      rl = ( (ul | vx) ^ rx) + rc;

  rn = mpn_normalized_size (rp, rn);
  r->_mp_size = rx ? -rn : rn;
mpz_xor (mpz_t r, const mpz_t u, const mpz_t v)
  mp_size_t un, vn, i;
  mp_limb_t ux, vx, rx;
  mp_limb_t uc, vc, rc;
  mp_limb_t ul, vl, rl;

  un = GMP_ABS (u->_mp_size);
  vn = GMP_ABS (v->_mp_size);
      MPZ_SRCPTR_SWAP (u, v);
      MP_SIZE_T_SWAP (un, vn);

  uc = u->_mp_size < 0;
  vc = v->_mp_size < 0;

  rp = MPZ_REALLOC (r, un + (mp_size_t) rc);

      ul = (up[i] ^ ux) + uc;
      vl = (vp[i] ^ vx) + vc;
      rl = (ul ^ vl ^ rx) + rc;

      ul = (up[i] ^ ux) + uc;
      rl = (ul ^ ux) + rc;

  un = mpn_normalized_size (rp, un);
  r->_mp_size = rx ? -un : un;
gmp_popcount_limb (mp_limb_t x)
  /* Do 16 bits at a time, to avoid limb-sized constants. */
  int LOCAL_SHIFT_BITS = 16;

      unsigned w = x - ((x >> 1) & 0x5555);
      w = ((w >> 2) & 0x3333) + (w & 0x3333);
      w = ((w >> 8) & 0x000f) + (w & 0x000f);

      if (GMP_LIMB_BITS > LOCAL_SHIFT_BITS)
	x >>= LOCAL_SHIFT_BITS;
mpn_popcount (mp_srcptr p, mp_size_t n)
  for (c = 0, i = 0; i < n; i++)
    c += gmp_popcount_limb (p[i]);
mpz_popcount (const mpz_t u)
    return ~(mp_bitcnt_t) 0;

  return mpn_popcount (u->_mp_d, un);
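/* Interface note with a small example (illustrative only): a negative
   operand has infinitely many one bits under the two's complement view, so
   mpz_popcount returns ~(mp_bitcnt_t) 0 for it, as the code above shows.

     mpz_t u;
     mpz_init_set_ui (u, 255);
     mp_bitcnt_t c = mpz_popcount (u);   // c == 8
     mpz_set_si (u, -1);
     c = mpz_popcount (u);               // c == ~(mp_bitcnt_t) 0
     mpz_clear (u);
*/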
mpz_hamdist (const mpz_t u, const mpz_t v)
  mp_size_t un, vn, i;
  mp_limb_t uc, vc, ul, vl, comp;

    return ~(mp_bitcnt_t) 0;

  comp = - (uc = vc = (un < 0));

    MPN_SRCPTR_SWAP (up, un, vp, vn);

  for (i = 0, c = 0; i < vn; i++)
      ul = (up[i] ^ comp) + uc;
      vl = (vp[i] ^ comp) + vc;
      c += gmp_popcount_limb (ul ^ vl);

      ul = (up[i] ^ comp) + uc;
      c += gmp_popcount_limb (ul ^ comp);
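/* Usage sketch (illustrative only): mpz_hamdist counts the bit positions
   where u and v differ.  The count is finite only when the signs agree;
   for opposite signs the sign-extended representations differ in
   infinitely many positions and ~(mp_bitcnt_t) 0 is returned.  For
   example, hamdist of 5 (101) and 6 (110) is 2.  */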
mpz_scan1 (const mpz_t u, mp_bitcnt_t starting_bit)
  mp_size_t us, un, i;

  i = starting_bit / GMP_LIMB_BITS;

  /* Past the end there's no 1 bits for u>=0, or an immediate 1 bit
     for u<0. Notice this test picks up any u==0 too. */
    return (us >= 0 ? ~(mp_bitcnt_t) 0 : starting_bit);

  if (starting_bit != 0)
	  ux = mpn_zero_p (up, i);
	  ux = - (mp_limb_t) (limb >= ux);

      /* Mask to 0 all bits before starting_bit, thus ignoring them. */
      limb &= GMP_LIMB_MAX << (starting_bit % GMP_LIMB_BITS);

  return mpn_common_scan (limb, i, up, un, ux);
mpz_scan0 (const mpz_t u, mp_bitcnt_t starting_bit)
  mp_size_t us, un, i;

  ux = - (mp_limb_t) (us >= 0);
  i = starting_bit / GMP_LIMB_BITS;

  /* When past end, there's an immediate 0 bit for u>=0, or no 0 bits for
     u<0. Notice this test picks up all cases of u==0 too. */
    return (ux ? starting_bit : ~(mp_bitcnt_t) 0);

    limb -= mpn_zero_p (up, i); /* limb = ~(~limb + zero_p) */

  /* Mask all bits before starting_bit, thus ignoring them. */
  limb &= GMP_LIMB_MAX << (starting_bit % GMP_LIMB_BITS);

  return mpn_common_scan (limb, i, up, un, ux);
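/* Usage sketch (illustrative only): mpz_scan1 and mpz_scan0 return the
   index of the first 1 bit (resp. 0 bit) at or above starting_bit, under
   the same two's complement view, or ~(mp_bitcnt_t) 0 when no such bit
   exists:

     mpz_t x;
     mpz_init_set_ui (x, 40);             // binary 101000
     mp_bitcnt_t i = mpz_scan1 (x, 0);    // i == 3
     i = mpz_scan0 (x, 3);                // i == 4
     i = mpz_scan1 (x, 6);                // i == ~(mp_bitcnt_t) 0
     mpz_clear (x);
*/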
/* MPZ base conversion. */

mpz_sizeinbase (const mpz_t u, int base)
  struct gmp_div_inverse bi;

  assert (base <= 62);

  un = GMP_ABS (u->_mp_size);

  bits = (un - 1) * GMP_LIMB_BITS + mpn_limb_size_in_base_2 (up[un-1]);
      return (bits + 1) / 2;
      return (bits + 2) / 3;
      return (bits + 3) / 4;
      return (bits + 4) / 5;
      /* FIXME: Do something more clever for the common case of base
	 10. */

  tp = gmp_alloc_limbs (un);
  mpn_copyi (tp, up, un);
  mpn_div_qr_1_invert (&bi, base);

      mpn_div_qr_1_preinv (tp, tp, tn, &bi);
      tn -= (tp[tn-1] == 0);

  gmp_free_limbs (tp, un);
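/* Usage sketch (illustrative only, assumes <stdlib.h> for malloc): the
   usual idiom for sizing a caller-supplied buffer for mpz_get_str is
   mpz_sizeinbase plus two bytes, one for a possible minus sign and one for
   the terminating NUL:

     size_t need = mpz_sizeinbase (x, 10) + 2;
     char *buf = malloc (need);
     if (buf)
       mpz_get_str (buf, 10, x);
*/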
mpz_get_str (char *sp, int base, const mpz_t u)
      digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz";
      digits = "0123456789abcdefghijklmnopqrstuvwxyz";
  else if (base >= -1)

  sn = 1 + mpz_sizeinbase (u, base);
    sp = (char *) gmp_alloc (osn);

  un = GMP_ABS (u->_mp_size);

  if (u->_mp_size < 0)

  bits = mpn_base_power_of_two_p (base);
      /* Not modified in this case. */
      sn = i + mpn_get_str_bits ((unsigned char *) sp + i, bits, u->_mp_d, un);

      struct mpn_base_info info;

      mpn_get_base_info (&info, base);
      tp = gmp_alloc_limbs (un);
      mpn_copyi (tp, u->_mp_d, un);

      sn = i + mpn_get_str_other ((unsigned char *) sp + i, base, &info, tp, un);
      gmp_free_limbs (tp, un);

    sp[i] = digits[(unsigned char) sp[i]];

  if (osn && osn != sn + 1)
    sp = (char*) gmp_realloc (sp, osn, sn + 1);
mpz_set_str (mpz_t r, const char *sp, int base)
  unsigned bits, value_of_a;
  mp_size_t rn, alloc;

  assert (base == 0 || (base >= 2 && base <= 62));

  while (isspace( (unsigned char) *sp))

  sign = (*sp == '-');

      if (sp[1] == 'x' || sp[1] == 'X')
      else if (sp[1] == 'b' || sp[1] == 'B')

  dp = (unsigned char *) gmp_alloc (sn);

  value_of_a = (base > 36) ? 36 : 10;
  for (dn = 0; *sp; sp++)
      if (isspace ((unsigned char) *sp))
      else if (*sp >= '0' && *sp <= '9')
      else if (*sp >= 'a' && *sp <= 'z')
	digit = *sp - 'a' + value_of_a;
      else if (*sp >= 'A' && *sp <= 'Z')
	digit = *sp - 'A' + 10;
	digit = base; /* fail */

      if (digit >= (unsigned) base)

  bits = mpn_base_power_of_two_p (base);

      alloc = (dn * bits + GMP_LIMB_BITS - 1) / GMP_LIMB_BITS;
      rp = MPZ_REALLOC (r, alloc);
      rn = mpn_set_str_bits (rp, dp, dn, bits);

      struct mpn_base_info info;
      mpn_get_base_info (&info, base);
      alloc = (dn + info.exp - 1) / info.exp;
      rp = MPZ_REALLOC (r, alloc);
      rn = mpn_set_str_other (rp, dp, dn, base, &info);
      /* Normalization, needed for all-zero input. */
	rn -= rp[rn-1] == 0;

  assert (rn <= alloc);

  r->_mp_size = sign ? - rn : rn;
mpz_init_set_str (mpz_t r, const char *sp, int base)
  return mpz_set_str (r, sp, base);
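/* Round-trip sketch (illustrative only): with base == 0, mpz_set_str
   recognizes the prefixes 0x/0X (hex), 0b/0B (binary) and a leading 0
   (octal), skips embedded white space, and returns 0 on success or -1 on a
   bad string; mpz_get_str produces the matching text:

     mpz_t r;
     if (mpz_init_set_str (r, "0xdeadbeef", 0) == 0)
       {
         char *s = mpz_get_str (NULL, 16, r);   // s is "deadbeef"
       }
     mpz_clear (r);
*/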
mpz_out_str (FILE *stream, int base, const mpz_t x)
  str = mpz_get_str (NULL, base, x);
  n = fwrite (str, 1, len, stream);
  gmp_free (str, len + 1);
gmp_detect_endian (void)
  static const int i = 2;
  const unsigned char *p = (const unsigned char *) &i;
/* Import and export. Does not support nails. */

mpz_import (mpz_t r, size_t count, int order, size_t size, int endian,
	    size_t nails, const void *src)
  const unsigned char *p;
  ptrdiff_t word_step;

  /* The current (partial) limb. */
  /* The number of bytes already copied to this limb (starting from
     the low end). */
  /* The index where the limb should be stored, when completed. */

    gmp_die ("mpz_import: Nails not supported.");

  assert (order == 1 || order == -1);
  assert (endian >= -1 && endian <= 1);

    endian = gmp_detect_endian ();

  p = (unsigned char *) src;

  word_step = (order != endian) ? 2 * size : 0;

  /* Process bytes from the least significant end, so point p at the
     least significant word. */
      p += size * (count - 1);
      word_step = - word_step;

  /* And at the least significant byte of that word. */

  rn = (size * count + sizeof(mp_limb_t) - 1) / sizeof(mp_limb_t);
  rp = MPZ_REALLOC (r, rn);

  for (limb = 0, bytes = 0, i = 0; count > 0; count--, p += word_step)
      for (j = 0; j < size; j++, p -= (ptrdiff_t) endian)
	  limb |= (mp_limb_t) *p << (bytes++ * CHAR_BIT);
	  if (bytes == sizeof(mp_limb_t))

  assert (i + (bytes > 0) == rn);

  i = mpn_normalized_size (rp, i);
mpz_export (void *r, size_t *countp, int order, size_t size, int endian,
	    size_t nails, const mpz_t u)
    gmp_die ("mpz_export: Nails not supported.");

  assert (order == 1 || order == -1);
  assert (endian >= -1 && endian <= 1);
  assert (size > 0 || u->_mp_size == 0);

      ptrdiff_t word_step;
      /* The current (partial) limb. */
      /* The number of bytes left to do in this limb. */
      /* The index where the limb was read. */

      /* Count bytes in top limb. */
      limb = u->_mp_d[un-1];

      k = (GMP_LIMB_BITS <= CHAR_BIT);
	  int LOCAL_CHAR_BIT = CHAR_BIT;
	  k++; limb >>= LOCAL_CHAR_BIT;
	} while (limb != 0);
      /* else limb = 0; */

      count = (k + (un-1) * sizeof (mp_limb_t) + size - 1) / size;

	r = gmp_alloc (count * size);

	endian = gmp_detect_endian ();

      p = (unsigned char *) r;

      word_step = (order != endian) ? 2 * size : 0;

      /* Process bytes from the least significant end, so point p at the
	 least significant word. */
	  p += size * (count - 1);
	  word_step = - word_step;

      /* And at the least significant byte of that word. */

      for (bytes = 0, i = 0, k = 0; k < count; k++, p += word_step)
	  for (j = 0; j < size; ++j, p -= (ptrdiff_t) endian)
	      if (sizeof (mp_limb_t) == 1)
		  int LOCAL_CHAR_BIT = CHAR_BIT;
		  limb = u->_mp_d[i++];
		  bytes = sizeof (mp_limb_t);
		  limb >>= LOCAL_CHAR_BIT;

      assert (k == count);
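/* Round-trip sketch (illustrative only): exporting the absolute value of a
   nonzero z to a freshly allocated byte buffer and importing it back.
   Here order = 1 (most significant word first), size = 1 (single bytes, so
   endian does not matter), endian = 0 (native) and nails = 0 as required
   by this implementation:

     size_t count;
     unsigned char *bytes = mpz_export (NULL, &count, 1, 1, 0, 0, z);
     mpz_t back;
     mpz_init (back);
     mpz_import (back, count, 1, 1, 0, 0, bytes);   // back == |z|
*/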