1 /* mini-gmp, a minimalistic implementation of a GNU GMP subset.
3 Contributed to the GNU project by Niels Möller
4 Additional functionalities and improvements by Marco Bodrato.
6 Copyright 1991-1997, 1999-2022 Free Software Foundation, Inc.
8 This file is part of the GNU MP Library.
10 The GNU MP Library is free software; you can redistribute it and/or modify
11 it under the terms of either:
13 * the GNU Lesser General Public License as published by the Free
14 Software Foundation; either version 3 of the License, or (at your
15 option) any later version.
19 * the GNU General Public License as published by the Free Software
20 Foundation; either version 2 of the License, or (at your option) any
23 or both in parallel, as here.
25 The GNU MP Library is distributed in the hope that it will be useful, but
26 WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
27 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
30 You should have received copies of the GNU General Public License and the
31 GNU Lesser General Public License along with the GNU MP Library. If not,
32 see https://www.gnu.org/licenses/. */
34 /* NOTE: All functions in this file which are not declared in
35 mini-gmp.h are internal, and are not intended to be compatible
36 with GMP or with future versions of mini-gmp. */
38 /* Much of the material copied from GMP files, including: gmp-impl.h,
39 longlong.h, mpn/generic/add_n.c, mpn/generic/addmul_1.c,
40 mpn/generic/lshift.c, mpn/generic/mul_1.c,
41 mpn/generic/mul_basecase.c, mpn/generic/rshift.c,
42 mpn/generic/sbpi1_div_qr.c, mpn/generic/sub_n.c,
43 mpn/generic/submul_1.c. */
54 #if !defined(MINI_GMP_DONT_USE_FLOAT_H)
60 #define GMP_LIMB_BITS (sizeof(mp_limb_t) * CHAR_BIT)
62 #define GMP_LIMB_MAX ((mp_limb_t) ~ (mp_limb_t) 0)
63 #define GMP_LIMB_HIGHBIT ((mp_limb_t) 1 << (GMP_LIMB_BITS - 1))
65 #define GMP_HLIMB_BIT ((mp_limb_t) 1 << (GMP_LIMB_BITS / 2))
66 #define GMP_LLIMB_MASK (GMP_HLIMB_BIT - 1)
68 #define GMP_ULONG_BITS (sizeof(unsigned long) * CHAR_BIT)
69 #define GMP_ULONG_HIGHBIT ((unsigned long) 1 << (GMP_ULONG_BITS - 1))
71 #define GMP_ABS(x) ((x) >= 0 ? (x) : -(x))
72 #define GMP_NEG_CAST(T,x) (-((T)((x) + 1) - 1))
74 #define GMP_MIN(a, b) ((a) < (b) ? (a) : (b))
75 #define GMP_MAX(a, b) ((a) > (b) ? (a) : (b))
77 #define GMP_CMP(a,b) (((a) > (b)) - ((a) < (b)))
79 #if defined(DBL_MANT_DIG) && FLT_RADIX == 2
80 #define GMP_DBL_MANT_BITS DBL_MANT_DIG
82 #define GMP_DBL_MANT_BITS (53)
85 /* Return non-zero if xp,xsize and yp,ysize overlap.
86 If xp+xsize<=yp there's no overlap, or if yp+ysize<=xp there's no
87 overlap. If both these are false, there's an overlap. */
88 #define GMP_MPN_OVERLAP_P(xp, xsize, yp, ysize) \
89 ((xp) + (xsize) > (yp) && (yp) + (ysize) > (xp))
91 #define gmp_assert_nocarry(x) do { \
92 mp_limb_t __cy = (x); \
97 #define gmp_clz(count, x) do { \
98 mp_limb_t __clz_x = (x); \
99 unsigned __clz_c = 0; \
100 int LOCAL_SHIFT_BITS = 8; \
101 if (GMP_LIMB_BITS > LOCAL_SHIFT_BITS) \
103 (__clz_x & ((mp_limb_t) 0xff << (GMP_LIMB_BITS - 8))) == 0; \
105 { __clz_x <<= LOCAL_SHIFT_BITS; } \
106 for (; (__clz_x & GMP_LIMB_HIGHBIT) == 0; __clz_c++) \
111 #define gmp_ctz(count, x) do { \
112 mp_limb_t __ctz_x = (x); \
113 unsigned __ctz_c = 0; \
114 gmp_clz (__ctz_c, __ctz_x & - __ctz_x); \
115 (count) = GMP_LIMB_BITS - 1 - __ctz_c; \
118 #define gmp_add_ssaaaa(sh, sl, ah, al, bh, bl) \
122 (sh) = (ah) + (bh) + (__x < (al)); \
126 #define gmp_sub_ddmmss(sh, sl, ah, al, bh, bl) \
130 (sh) = (ah) - (bh) - ((al) < (bl)); \
134 #define gmp_umul_ppmm(w1, w0, u, v) \
136 int LOCAL_GMP_LIMB_BITS = GMP_LIMB_BITS; \
137 if (sizeof(unsigned int) * CHAR_BIT >= 2 * GMP_LIMB_BITS) \
139 unsigned int __ww = (unsigned int) (u) * (v); \
140 w0 = (mp_limb_t) __ww; \
141 w1 = (mp_limb_t) (__ww >> LOCAL_GMP_LIMB_BITS); \
143 else if (GMP_ULONG_BITS >= 2 * GMP_LIMB_BITS) \
145 unsigned long int __ww = (unsigned long int) (u) * (v); \
146 w0 = (mp_limb_t) __ww; \
147 w1 = (mp_limb_t) (__ww >> LOCAL_GMP_LIMB_BITS); \
150 mp_limb_t __x0, __x1, __x2, __x3; \
151 unsigned __ul, __vl, __uh, __vh; \
152 mp_limb_t __u = (u), __v = (v); \
153 assert (sizeof (unsigned) * 2 >= sizeof (mp_limb_t)); \
155 __ul = __u & GMP_LLIMB_MASK; \
156 __uh = __u >> (GMP_LIMB_BITS / 2); \
157 __vl = __v & GMP_LLIMB_MASK; \
158 __vh = __v >> (GMP_LIMB_BITS / 2); \
160 __x0 = (mp_limb_t) __ul * __vl; \
161 __x1 = (mp_limb_t) __ul * __vh; \
162 __x2 = (mp_limb_t) __uh * __vl; \
163 __x3 = (mp_limb_t) __uh * __vh; \
165 __x1 += __x0 >> (GMP_LIMB_BITS / 2);/* this can't give carry */ \
166 __x1 += __x2; /* but this indeed can */ \
167 if (__x1 < __x2) /* did we get it? */ \
168 __x3 += GMP_HLIMB_BIT; /* yes, add it in the proper pos. */ \
170 (w1) = __x3 + (__x1 >> (GMP_LIMB_BITS / 2)); \
171 (w0) = (__x1 << (GMP_LIMB_BITS / 2)) + (__x0 & GMP_LLIMB_MASK); \
175 /* If mp_limb_t is of size smaller than int, plain u*v implies
176 automatic promotion to *signed* int, and then multiply may overflow
177 and cause undefined behavior. Explicitly cast to unsigned int for
179 #define gmp_umullo_limb(u, v) \
180 ((sizeof(mp_limb_t) >= sizeof(int)) ? (u)*(v) : (unsigned int)(u) * (v))
182 #define gmp_udiv_qrnnd_preinv(q, r, nh, nl, d, di) \
184 mp_limb_t _qh, _ql, _r, _mask; \
185 gmp_umul_ppmm (_qh, _ql, (nh), (di)); \
186 gmp_add_ssaaaa (_qh, _ql, _qh, _ql, (nh) + 1, (nl)); \
187 _r = (nl) - gmp_umullo_limb (_qh, (d)); \
188 _mask = -(mp_limb_t) (_r > _ql); /* both > and >= are OK */ \
201 #define gmp_udiv_qr_3by2(q, r1, r0, n2, n1, n0, d1, d0, dinv) \
203 mp_limb_t _q0, _t1, _t0, _mask; \
204 gmp_umul_ppmm ((q), _q0, (n2), (dinv)); \
205 gmp_add_ssaaaa ((q), _q0, (q), _q0, (n2), (n1)); \
207 /* Compute the two most significant limbs of n - q'd */ \
208 (r1) = (n1) - gmp_umullo_limb ((d1), (q)); \
209 gmp_sub_ddmmss ((r1), (r0), (r1), (n0), (d1), (d0)); \
210 gmp_umul_ppmm (_t1, _t0, (d0), (q)); \
211 gmp_sub_ddmmss ((r1), (r0), (r1), (r0), _t1, _t0); \
214 /* Conditionally adjust q and the remainders */ \
215 _mask = - (mp_limb_t) ((r1) >= _q0); \
217 gmp_add_ssaaaa ((r1), (r0), (r1), (r0), _mask & (d1), _mask & (d0)); \
220 if ((r1) > (d1) || (r0) >= (d0)) \
223 gmp_sub_ddmmss ((r1), (r0), (r1), (r0), (d1), (d0)); \
229 #define MP_LIMB_T_SWAP(x, y) \
231 mp_limb_t __mp_limb_t_swap__tmp = (x); \
233 (y) = __mp_limb_t_swap__tmp; \
235 #define MP_SIZE_T_SWAP(x, y) \
237 mp_size_t __mp_size_t_swap__tmp = (x); \
239 (y) = __mp_size_t_swap__tmp; \
241 #define MP_BITCNT_T_SWAP(x,y) \
243 mp_bitcnt_t __mp_bitcnt_t_swap__tmp = (x); \
245 (y) = __mp_bitcnt_t_swap__tmp; \
247 #define MP_PTR_SWAP(x, y) \
249 mp_ptr __mp_ptr_swap__tmp = (x); \
251 (y) = __mp_ptr_swap__tmp; \
253 #define MP_SRCPTR_SWAP(x, y) \
255 mp_srcptr __mp_srcptr_swap__tmp = (x); \
257 (y) = __mp_srcptr_swap__tmp; \
260 #define MPN_PTR_SWAP(xp,xs, yp,ys) \
262 MP_PTR_SWAP (xp, yp); \
263 MP_SIZE_T_SWAP (xs, ys); \
265 #define MPN_SRCPTR_SWAP(xp,xs, yp,ys) \
267 MP_SRCPTR_SWAP (xp, yp); \
268 MP_SIZE_T_SWAP (xs, ys); \
271 #define MPZ_PTR_SWAP(x, y) \
273 mpz_ptr __mpz_ptr_swap__tmp = (x); \
275 (y) = __mpz_ptr_swap__tmp; \
277 #define MPZ_SRCPTR_SWAP(x, y) \
279 mpz_srcptr __mpz_srcptr_swap__tmp = (x); \
281 (y) = __mpz_srcptr_swap__tmp; \
284 const int mp_bits_per_limb
= GMP_LIMB_BITS
;
287 /* Memory allocation and other helper functions. */
/* Report a fatal error and terminate.  Callers (e.g. the allocation
   wrappers below) rely on this function never returning, so it must
   abort after printing the message.  */
static void
gmp_die (const char *msg)
{
  fprintf (stderr, "%s\n", msg);
  abort ();
}
/* Default allocation hook: malloc SIZE bytes, dying on exhaustion.
   Returns a non-NULL pointer (gmp_die does not return).  */
static void *
gmp_default_alloc (size_t size)
{
  void *p;

  assert (size > 0);

  p = malloc (size);
  if (!p)
    gmp_die("gmp_default_alloc: Virtual memory exhausted.");

  return p;
}
/* Default reallocation hook.  The old size is part of the hook
   interface but unused by the libc-based implementation.  */
static void *
gmp_default_realloc (void *old, size_t unused_old_size, size_t new_size)
{
  void *p;

  p = realloc (old, new_size);

  if (!p)
    gmp_die("gmp_default_realloc: Virtual memory exhausted.");

  return p;
}
/* Default free hook.  The size argument is part of the hook interface
   but unused by the libc-based implementation.  */
static void
gmp_default_free (void *p, size_t unused_size)
{
  free (p);
}
328 static void * (*gmp_allocate_func
) (size_t) = gmp_default_alloc
;
329 static void * (*gmp_reallocate_func
) (void *, size_t, size_t) = gmp_default_realloc
;
330 static void (*gmp_free_func
) (void *, size_t) = gmp_default_free
;
333 mp_get_memory_functions (void *(**alloc_func
) (size_t),
334 void *(**realloc_func
) (void *, size_t, size_t),
335 void (**free_func
) (void *, size_t))
338 *alloc_func
= gmp_allocate_func
;
341 *realloc_func
= gmp_reallocate_func
;
344 *free_func
= gmp_free_func
;
348 mp_set_memory_functions (void *(*alloc_func
) (size_t),
349 void *(*realloc_func
) (void *, size_t, size_t),
350 void (*free_func
) (void *, size_t))
353 alloc_func
= gmp_default_alloc
;
355 realloc_func
= gmp_default_realloc
;
357 free_func
= gmp_default_free
;
359 gmp_allocate_func
= alloc_func
;
360 gmp_reallocate_func
= realloc_func
;
361 gmp_free_func
= free_func
;
364 #define gmp_alloc(size) ((*gmp_allocate_func)((size)))
365 #define gmp_free(p, size) ((*gmp_free_func) ((p), (size)))
366 #define gmp_realloc(ptr, old_size, size) ((*gmp_reallocate_func)(ptr, old_size, size))
369 gmp_alloc_limbs (mp_size_t size
)
371 return (mp_ptr
) gmp_alloc (size
* sizeof (mp_limb_t
));
375 gmp_realloc_limbs (mp_ptr old
, mp_size_t old_size
, mp_size_t size
)
378 return (mp_ptr
) gmp_realloc (old
, old_size
* sizeof (mp_limb_t
), size
* sizeof (mp_limb_t
));
382 gmp_free_limbs (mp_ptr old
, mp_size_t size
)
384 gmp_free (old
, size
* sizeof (mp_limb_t
));
391 mpn_copyi (mp_ptr d
, mp_srcptr s
, mp_size_t n
)
394 for (i
= 0; i
< n
; i
++)
399 mpn_copyd (mp_ptr d
, mp_srcptr s
, mp_size_t n
)
406 mpn_cmp (mp_srcptr ap
, mp_srcptr bp
, mp_size_t n
)
411 return ap
[n
] > bp
[n
] ? 1 : -1;
417 mpn_cmp4 (mp_srcptr ap
, mp_size_t an
, mp_srcptr bp
, mp_size_t bn
)
420 return an
< bn
? -1 : 1;
422 return mpn_cmp (ap
, bp
, an
);
426 mpn_normalized_size (mp_srcptr xp
, mp_size_t n
)
428 while (n
> 0 && xp
[n
-1] == 0)
434 mpn_zero_p(mp_srcptr rp
, mp_size_t n
)
436 return mpn_normalized_size (rp
, n
) == 0;
440 mpn_zero (mp_ptr rp
, mp_size_t n
)
447 mpn_add_1 (mp_ptr rp
, mp_srcptr ap
, mp_size_t n
, mp_limb_t b
)
455 mp_limb_t r
= ap
[i
] + b
;
466 mpn_add_n (mp_ptr rp
, mp_srcptr ap
, mp_srcptr bp
, mp_size_t n
)
471 for (i
= 0, cy
= 0; i
< n
; i
++)
474 a
= ap
[i
]; b
= bp
[i
];
485 mpn_add (mp_ptr rp
, mp_srcptr ap
, mp_size_t an
, mp_srcptr bp
, mp_size_t bn
)
491 cy
= mpn_add_n (rp
, ap
, bp
, bn
);
493 cy
= mpn_add_1 (rp
+ bn
, ap
+ bn
, an
- bn
, cy
);
498 mpn_sub_1 (mp_ptr rp
, mp_srcptr ap
, mp_size_t n
, mp_limb_t b
)
509 mp_limb_t cy
= a
< b
;
519 mpn_sub_n (mp_ptr rp
, mp_srcptr ap
, mp_srcptr bp
, mp_size_t n
)
524 for (i
= 0, cy
= 0; i
< n
; i
++)
527 a
= ap
[i
]; b
= bp
[i
];
537 mpn_sub (mp_ptr rp
, mp_srcptr ap
, mp_size_t an
, mp_srcptr bp
, mp_size_t bn
)
543 cy
= mpn_sub_n (rp
, ap
, bp
, bn
);
545 cy
= mpn_sub_1 (rp
+ bn
, ap
+ bn
, an
- bn
, cy
);
550 mpn_mul_1 (mp_ptr rp
, mp_srcptr up
, mp_size_t n
, mp_limb_t vl
)
552 mp_limb_t ul
, cl
, hpl
, lpl
;
560 gmp_umul_ppmm (hpl
, lpl
, ul
, vl
);
563 cl
= (lpl
< cl
) + hpl
;
573 mpn_addmul_1 (mp_ptr rp
, mp_srcptr up
, mp_size_t n
, mp_limb_t vl
)
575 mp_limb_t ul
, cl
, hpl
, lpl
, rl
;
583 gmp_umul_ppmm (hpl
, lpl
, ul
, vl
);
586 cl
= (lpl
< cl
) + hpl
;
599 mpn_submul_1 (mp_ptr rp
, mp_srcptr up
, mp_size_t n
, mp_limb_t vl
)
601 mp_limb_t ul
, cl
, hpl
, lpl
, rl
;
609 gmp_umul_ppmm (hpl
, lpl
, ul
, vl
);
612 cl
= (lpl
< cl
) + hpl
;
625 mpn_mul (mp_ptr rp
, mp_srcptr up
, mp_size_t un
, mp_srcptr vp
, mp_size_t vn
)
629 assert (!GMP_MPN_OVERLAP_P(rp
, un
+ vn
, up
, un
));
630 assert (!GMP_MPN_OVERLAP_P(rp
, un
+ vn
, vp
, vn
));
632 /* We first multiply by the low order limb. This result can be
633 stored, not added, to rp. We also avoid a loop for zeroing this
636 rp
[un
] = mpn_mul_1 (rp
, up
, un
, vp
[0]);
638 /* Now accumulate the product of up[] and the next higher limb from
644 rp
[un
] = mpn_addmul_1 (rp
, up
, un
, vp
[0]);
650 mpn_mul_n (mp_ptr rp
, mp_srcptr ap
, mp_srcptr bp
, mp_size_t n
)
652 mpn_mul (rp
, ap
, n
, bp
, n
);
656 mpn_sqr (mp_ptr rp
, mp_srcptr ap
, mp_size_t n
)
658 mpn_mul (rp
, ap
, n
, ap
, n
);
662 mpn_lshift (mp_ptr rp
, mp_srcptr up
, mp_size_t n
, unsigned int cnt
)
664 mp_limb_t high_limb
, low_limb
;
670 assert (cnt
< GMP_LIMB_BITS
);
675 tnc
= GMP_LIMB_BITS
- cnt
;
677 retval
= low_limb
>> tnc
;
678 high_limb
= (low_limb
<< cnt
);
683 *--rp
= high_limb
| (low_limb
>> tnc
);
684 high_limb
= (low_limb
<< cnt
);
692 mpn_rshift (mp_ptr rp
, mp_srcptr up
, mp_size_t n
, unsigned int cnt
)
694 mp_limb_t high_limb
, low_limb
;
700 assert (cnt
< GMP_LIMB_BITS
);
702 tnc
= GMP_LIMB_BITS
- cnt
;
704 retval
= (high_limb
<< tnc
);
705 low_limb
= high_limb
>> cnt
;
710 *rp
++ = low_limb
| (high_limb
<< tnc
);
711 low_limb
= high_limb
>> cnt
;
719 mpn_common_scan (mp_limb_t limb
, mp_size_t i
, mp_srcptr up
, mp_size_t un
,
724 assert (ux
== 0 || ux
== GMP_LIMB_MAX
);
725 assert (0 <= i
&& i
<= un
);
731 return (ux
== 0 ? ~(mp_bitcnt_t
) 0 : un
* GMP_LIMB_BITS
);
735 return (mp_bitcnt_t
) i
* GMP_LIMB_BITS
+ cnt
;
739 mpn_scan1 (mp_srcptr ptr
, mp_bitcnt_t bit
)
742 i
= bit
/ GMP_LIMB_BITS
;
744 return mpn_common_scan ( ptr
[i
] & (GMP_LIMB_MAX
<< (bit
% GMP_LIMB_BITS
)),
749 mpn_scan0 (mp_srcptr ptr
, mp_bitcnt_t bit
)
752 i
= bit
/ GMP_LIMB_BITS
;
754 return mpn_common_scan (~ptr
[i
] & (GMP_LIMB_MAX
<< (bit
% GMP_LIMB_BITS
)),
755 i
, ptr
, i
, GMP_LIMB_MAX
);
759 mpn_com (mp_ptr rp
, mp_srcptr up
, mp_size_t n
)
766 mpn_neg (mp_ptr rp
, mp_srcptr up
, mp_size_t n
)
776 mpn_com (++rp
, ++up
, --n
);
781 /* MPN division interface. */
783 /* The 3/2 inverse is defined as
785 m = floor( (B^3-1) / (B u1 + u0)) - B
788 mpn_invert_3by2 (mp_limb_t u1
, mp_limb_t u0
)
796 assert (sizeof (unsigned) * 2 >= sizeof (mp_limb_t
));
797 /* For notation, let b denote the half-limb base, so that B = b^2.
798 Split u1 = b uh + ul. */
799 ul
= u1
& GMP_LLIMB_MASK
;
800 uh
= u1
>> (GMP_LIMB_BITS
/ 2);
802 /* Approximation of the high half of quotient. Differs from the 2/1
803 inverse of the half limb uh, since we have already subtracted
805 qh
= (u1
^ GMP_LIMB_MAX
) / uh
;
807 /* Adjust to get a half-limb 3/2 inverse, i.e., we want
809 qh' = floor( (b^3 - 1) / u) - b = floor ((b^3 - b u - 1) / u
810 = floor( (b (~u) + b-1) / u),
814 r = b (~u) + b-1 - qh (b uh + ul)
815 = b (~u - qh uh) + b-1 - qh ul
817 Subtraction of qh ul may underflow, which implies adjustments.
818 But by normalization, 2 u >= B > qh ul, so we need to adjust by
822 r
= ((~u1
- (mp_limb_t
) qh
* uh
) << (GMP_LIMB_BITS
/ 2)) | GMP_LLIMB_MASK
;
824 p
= (mp_limb_t
) qh
* ul
;
825 /* Adjustment steps taken from udiv_qrnnd_c */
830 if (r
>= u1
) /* i.e. we didn't get carry when adding to r */
839 /* Low half of the quotient is
841 ql = floor ( (b r + b-1) / u1).
843 This is a 3/2 division (on half-limbs), for which qh is a
846 p
= (r
>> (GMP_LIMB_BITS
/ 2)) * qh
+ r
;
847 /* Unlike full-limb 3/2, we can add 1 without overflow. For this to
848 work, it is essential that ql is a full mp_limb_t. */
849 ql
= (p
>> (GMP_LIMB_BITS
/ 2)) + 1;
851 /* By the 3/2 trick, we don't need the high half limb. */
852 r
= (r
<< (GMP_LIMB_BITS
/ 2)) + GMP_LLIMB_MASK
- ql
* u1
;
854 if (r
>= (GMP_LIMB_MAX
& (p
<< (GMP_LIMB_BITS
/ 2))))
859 m
= ((mp_limb_t
) qh
<< (GMP_LIMB_BITS
/ 2)) + ql
;
867 /* Now m is the 2/1 inverse of u1. If u0 > 0, adjust it to become a
884 gmp_umul_ppmm (th
, tl
, u0
, m
);
889 m
-= ((r
> u1
) | ((r
== u1
) & (tl
> u0
)));
896 struct gmp_div_inverse
898 /* Normalization shift count. */
900 /* Normalized divisor (d0 unused for mpn_div_qr_1) */
902 /* Inverse, for 2/1 or 3/2. */
907 mpn_div_qr_1_invert (struct gmp_div_inverse
*inv
, mp_limb_t d
)
914 inv
->d1
= d
<< shift
;
915 inv
->di
= mpn_invert_limb (inv
->d1
);
919 mpn_div_qr_2_invert (struct gmp_div_inverse
*inv
,
920 mp_limb_t d1
, mp_limb_t d0
)
929 d1
= (d1
<< shift
) | (d0
>> (GMP_LIMB_BITS
- shift
));
934 inv
->di
= mpn_invert_3by2 (d1
, d0
);
938 mpn_div_qr_invert (struct gmp_div_inverse
*inv
,
939 mp_srcptr dp
, mp_size_t dn
)
944 mpn_div_qr_1_invert (inv
, dp
[0]);
946 mpn_div_qr_2_invert (inv
, dp
[1], dp
[0]);
959 d1
= (d1
<< shift
) | (d0
>> (GMP_LIMB_BITS
- shift
));
960 d0
= (d0
<< shift
) | (dp
[dn
-3] >> (GMP_LIMB_BITS
- shift
));
964 inv
->di
= mpn_invert_3by2 (d1
, d0
);
968 /* Not matching current public gmp interface, rather corresponding to
969 the sbpi1_div_* functions. */
971 mpn_div_qr_1_preinv (mp_ptr qp
, mp_srcptr np
, mp_size_t nn
,
972 const struct gmp_div_inverse
*inv
)
981 /* Shift, reusing qp area if possible. In-place shift if qp == np. */
986 tp
= gmp_alloc_limbs (tn
);
988 r
= mpn_lshift (tp
, np
, nn
, inv
->shift
);
1000 gmp_udiv_qrnnd_preinv (q
, r
, r
, np
[nn
], d
, di
);
1005 gmp_free_limbs (tp
, tn
);
1007 return r
>> inv
->shift
;
1011 mpn_div_qr_2_preinv (mp_ptr qp
, mp_ptr np
, mp_size_t nn
,
1012 const struct gmp_div_inverse
*inv
)
1016 mp_limb_t d1
, d0
, di
, r1
, r0
;
1025 r1
= mpn_lshift (np
, np
, nn
, shift
);
1036 gmp_udiv_qr_3by2 (q
, r1
, r0
, r1
, r0
, n0
, d1
, d0
, di
);
1045 assert ((r0
& (GMP_LIMB_MAX
>> (GMP_LIMB_BITS
- shift
))) == 0);
1046 r0
= (r0
>> shift
) | (r1
<< (GMP_LIMB_BITS
- shift
));
1055 mpn_div_qr_pi1 (mp_ptr qp
,
1056 mp_ptr np
, mp_size_t nn
, mp_limb_t n1
,
1057 mp_srcptr dp
, mp_size_t dn
,
1072 assert ((d1
& GMP_LIMB_HIGHBIT
) != 0);
1073 /* Iteration variable is the index of the q limb.
1075 * We divide <n1, np[dn-1+i], np[dn-2+i], np[dn-3+i],..., np[i]>
1076 * by <d1, d0, dp[dn-3], ..., dp[0] >
1082 mp_limb_t n0
= np
[dn
-1+i
];
1084 if (n1
== d1
&& n0
== d0
)
1087 mpn_submul_1 (np
+i
, dp
, dn
, q
);
1088 n1
= np
[dn
-1+i
]; /* update n1, last loop's value will now be invalid */
1092 gmp_udiv_qr_3by2 (q
, n1
, n0
, n1
, n0
, np
[dn
-2+i
], d1
, d0
, dinv
);
1094 cy
= mpn_submul_1 (np
+ i
, dp
, dn
-2, q
);
1104 n1
+= d1
+ mpn_add_n (np
+ i
, np
+ i
, dp
, dn
- 1);
1118 mpn_div_qr_preinv (mp_ptr qp
, mp_ptr np
, mp_size_t nn
,
1119 mp_srcptr dp
, mp_size_t dn
,
1120 const struct gmp_div_inverse
*inv
)
1126 np
[0] = mpn_div_qr_1_preinv (qp
, np
, nn
, inv
);
1128 mpn_div_qr_2_preinv (qp
, np
, nn
, inv
);
1134 assert (inv
->d1
== dp
[dn
-1]);
1135 assert (inv
->d0
== dp
[dn
-2]);
1136 assert ((inv
->d1
& GMP_LIMB_HIGHBIT
) != 0);
1140 nh
= mpn_lshift (np
, np
, nn
, shift
);
1144 mpn_div_qr_pi1 (qp
, np
, nn
, nh
, dp
, dn
, inv
->di
);
1147 gmp_assert_nocarry (mpn_rshift (np
, np
, dn
, shift
));
1152 mpn_div_qr (mp_ptr qp
, mp_ptr np
, mp_size_t nn
, mp_srcptr dp
, mp_size_t dn
)
1154 struct gmp_div_inverse inv
;
1160 mpn_div_qr_invert (&inv
, dp
, dn
);
1161 if (dn
> 2 && inv
.shift
> 0)
1163 tp
= gmp_alloc_limbs (dn
);
1164 gmp_assert_nocarry (mpn_lshift (tp
, dp
, dn
, inv
.shift
));
1167 mpn_div_qr_preinv (qp
, np
, nn
, dp
, dn
, &inv
);
1169 gmp_free_limbs (tp
, dn
);
1173 /* MPN base conversion. */
1175 mpn_base_power_of_two_p (unsigned b
)
1191 struct mpn_base_info
1193 /* bb is the largest power of the base which fits in one limb, and
1194 exp is the corresponding exponent. */
1200 mpn_get_base_info (struct mpn_base_info
*info
, mp_limb_t b
)
1206 m
= GMP_LIMB_MAX
/ b
;
1207 for (exp
= 1, p
= b
; p
<= m
; exp
++)
1215 mpn_limb_size_in_base_2 (mp_limb_t u
)
1221 return GMP_LIMB_BITS
- shift
;
1225 mpn_get_str_bits (unsigned char *sp
, unsigned bits
, mp_srcptr up
, mp_size_t un
)
1232 sn
= ((un
- 1) * GMP_LIMB_BITS
+ mpn_limb_size_in_base_2 (up
[un
-1])
1235 mask
= (1U << bits
) - 1;
1237 for (i
= 0, j
= sn
, shift
= 0; j
-- > 0;)
1239 unsigned char digit
= up
[i
] >> shift
;
1243 if (shift
>= GMP_LIMB_BITS
&& ++i
< un
)
1245 shift
-= GMP_LIMB_BITS
;
1246 digit
|= up
[i
] << (bits
- shift
);
1248 sp
[j
] = digit
& mask
;
1253 /* We generate digits from the least significant end, and reverse at
1256 mpn_limb_get_str (unsigned char *sp
, mp_limb_t w
,
1257 const struct gmp_div_inverse
*binv
)
1260 for (i
= 0; w
> 0; i
++)
1264 h
= w
>> (GMP_LIMB_BITS
- binv
->shift
);
1265 l
= w
<< binv
->shift
;
1267 gmp_udiv_qrnnd_preinv (w
, r
, h
, l
, binv
->d1
, binv
->di
);
1268 assert ((r
& (GMP_LIMB_MAX
>> (GMP_LIMB_BITS
- binv
->shift
))) == 0);
1277 mpn_get_str_other (unsigned char *sp
,
1278 int base
, const struct mpn_base_info
*info
,
1279 mp_ptr up
, mp_size_t un
)
1281 struct gmp_div_inverse binv
;
1285 mpn_div_qr_1_invert (&binv
, base
);
1291 struct gmp_div_inverse bbinv
;
1292 mpn_div_qr_1_invert (&bbinv
, info
->bb
);
1298 w
= mpn_div_qr_1_preinv (up
, up
, un
, &bbinv
);
1299 un
-= (up
[un
-1] == 0);
1300 done
= mpn_limb_get_str (sp
+ sn
, w
, &binv
);
1302 for (sn
+= done
; done
< info
->exp
; done
++)
1307 sn
+= mpn_limb_get_str (sp
+ sn
, up
[0], &binv
);
1310 for (i
= 0; 2*i
+ 1 < sn
; i
++)
1312 unsigned char t
= sp
[i
];
1313 sp
[i
] = sp
[sn
- i
- 1];
1321 mpn_get_str (unsigned char *sp
, int base
, mp_ptr up
, mp_size_t un
)
1326 assert (up
[un
-1] > 0);
1328 bits
= mpn_base_power_of_two_p (base
);
1330 return mpn_get_str_bits (sp
, bits
, up
, un
);
1333 struct mpn_base_info info
;
1335 mpn_get_base_info (&info
, base
);
1336 return mpn_get_str_other (sp
, base
, &info
, up
, un
);
1341 mpn_set_str_bits (mp_ptr rp
, const unsigned char *sp
, size_t sn
,
1348 for (limb
= 0, rn
= 0, shift
= 0; sn
-- > 0; )
1350 limb
|= (mp_limb_t
) sp
[sn
] << shift
;
1352 if (shift
>= GMP_LIMB_BITS
)
1354 shift
-= GMP_LIMB_BITS
;
1356 /* Next line is correct also if shift == 0,
1357 bits == 8, and mp_limb_t == unsigned char. */
1358 limb
= (unsigned int) sp
[sn
] >> (bits
- shift
);
1364 rn
= mpn_normalized_size (rp
, rn
);
1368 /* Result is usually normalized, except for all-zero input, in which
1369 case a single zero limb is written at *RP, and 1 is returned. */
1371 mpn_set_str_other (mp_ptr rp
, const unsigned char *sp
, size_t sn
,
1372 mp_limb_t b
, const struct mpn_base_info
*info
)
1381 k
= 1 + (sn
- 1) % info
->exp
;
1386 w
= w
* b
+ sp
[j
++];
1390 for (rn
= 1; j
< sn
;)
1395 for (k
= 1; k
< info
->exp
; k
++)
1396 w
= w
* b
+ sp
[j
++];
1398 cy
= mpn_mul_1 (rp
, rp
, rn
, info
->bb
);
1399 cy
+= mpn_add_1 (rp
, rp
, rn
, w
);
1409 mpn_set_str (mp_ptr rp
, const unsigned char *sp
, size_t sn
, int base
)
1416 bits
= mpn_base_power_of_two_p (base
);
1418 return mpn_set_str_bits (rp
, sp
, sn
, bits
);
1421 struct mpn_base_info info
;
1423 mpn_get_base_info (&info
, base
);
1424 return mpn_set_str_other (rp
, sp
, sn
, base
, &info
);
1433 static const mp_limb_t dummy_limb
= GMP_LIMB_MAX
& 0xc1a0;
1437 r
->_mp_d
= (mp_ptr
) &dummy_limb
;
1440 /* The utility of this function is a bit limited, since many functions
1441 assigns the result variable using mpz_swap. */
1443 mpz_init2 (mpz_t r
, mp_bitcnt_t bits
)
1447 bits
-= (bits
!= 0); /* Round down, except if 0 */
1448 rn
= 1 + bits
/ GMP_LIMB_BITS
;
1452 r
->_mp_d
= gmp_alloc_limbs (rn
);
1459 gmp_free_limbs (r
->_mp_d
, r
->_mp_alloc
);
1463 mpz_realloc (mpz_t r
, mp_size_t size
)
1465 size
= GMP_MAX (size
, 1);
1468 r
->_mp_d
= gmp_realloc_limbs (r
->_mp_d
, r
->_mp_alloc
, size
);
1470 r
->_mp_d
= gmp_alloc_limbs (size
);
1471 r
->_mp_alloc
= size
;
1473 if (GMP_ABS (r
->_mp_size
) > size
)
1479 /* Realloc for an mpz_t WHAT if it has less than NEEDED limbs. */
1480 #define MPZ_REALLOC(z,n) ((n) > (z)->_mp_alloc \
1481 ? mpz_realloc(z,n) \
1484 /* MPZ assignment and basic conversions. */
1486 mpz_set_si (mpz_t r
, signed long int x
)
1491 if (GMP_LIMB_BITS
< GMP_ULONG_BITS
)
1493 mpz_set_ui (r
, GMP_NEG_CAST (unsigned long int, x
));
1499 MPZ_REALLOC (r
, 1)[0] = GMP_NEG_CAST (unsigned long int, x
);
1504 mpz_set_ui (mpz_t r
, unsigned long int x
)
1509 MPZ_REALLOC (r
, 1)[0] = x
;
1510 if (GMP_LIMB_BITS
< GMP_ULONG_BITS
)
1512 int LOCAL_GMP_LIMB_BITS
= GMP_LIMB_BITS
;
1513 while (x
>>= LOCAL_GMP_LIMB_BITS
)
1516 MPZ_REALLOC (r
, r
->_mp_size
)[r
->_mp_size
- 1] = x
;
1525 mpz_set (mpz_t r
, const mpz_t x
)
1527 /* Allow the NOP r == x */
1533 n
= GMP_ABS (x
->_mp_size
);
1534 rp
= MPZ_REALLOC (r
, n
);
1536 mpn_copyi (rp
, x
->_mp_d
, n
);
1537 r
->_mp_size
= x
->_mp_size
;
1542 mpz_init_set_si (mpz_t r
, signed long int x
)
1549 mpz_init_set_ui (mpz_t r
, unsigned long int x
)
1556 mpz_init_set (mpz_t r
, const mpz_t x
)
1563 mpz_fits_slong_p (const mpz_t u
)
1565 return mpz_cmp_si (u
, LONG_MAX
) <= 0 && mpz_cmp_si (u
, LONG_MIN
) >= 0;
1569 mpn_absfits_ulong_p (mp_srcptr up
, mp_size_t un
)
1571 int ulongsize
= GMP_ULONG_BITS
/ GMP_LIMB_BITS
;
1572 mp_limb_t ulongrem
= 0;
1574 if (GMP_ULONG_BITS
% GMP_LIMB_BITS
!= 0)
1575 ulongrem
= (mp_limb_t
) (ULONG_MAX
>> GMP_LIMB_BITS
* ulongsize
) + 1;
1577 return un
<= ulongsize
|| (up
[ulongsize
] < ulongrem
&& un
== ulongsize
+ 1);
1581 mpz_fits_ulong_p (const mpz_t u
)
1583 mp_size_t us
= u
->_mp_size
;
1585 return us
>= 0 && mpn_absfits_ulong_p (u
->_mp_d
, us
);
1589 mpz_fits_sint_p (const mpz_t u
)
1591 return mpz_cmp_si (u
, INT_MAX
) <= 0 && mpz_cmp_si (u
, INT_MIN
) >= 0;
1595 mpz_fits_uint_p (const mpz_t u
)
1597 return u
->_mp_size
>= 0 && mpz_cmpabs_ui (u
, UINT_MAX
) <= 0;
1601 mpz_fits_sshort_p (const mpz_t u
)
1603 return mpz_cmp_si (u
, SHRT_MAX
) <= 0 && mpz_cmp_si (u
, SHRT_MIN
) >= 0;
1607 mpz_fits_ushort_p (const mpz_t u
)
1609 return u
->_mp_size
>= 0 && mpz_cmpabs_ui (u
, USHRT_MAX
) <= 0;
1613 mpz_get_si (const mpz_t u
)
1615 unsigned long r
= mpz_get_ui (u
);
1616 unsigned long c
= -LONG_MAX
- LONG_MIN
;
1618 if (u
->_mp_size
< 0)
1619 /* This expression is necessary to properly handle -LONG_MIN */
1620 return -(long) c
- (long) ((r
- c
) & LONG_MAX
);
1622 return (long) (r
& LONG_MAX
);
1626 mpz_get_ui (const mpz_t u
)
1628 if (GMP_LIMB_BITS
< GMP_ULONG_BITS
)
1630 int LOCAL_GMP_LIMB_BITS
= GMP_LIMB_BITS
;
1631 unsigned long r
= 0;
1632 mp_size_t n
= GMP_ABS (u
->_mp_size
);
1633 n
= GMP_MIN (n
, 1 + (mp_size_t
) (GMP_ULONG_BITS
- 1) / GMP_LIMB_BITS
);
1635 r
= (r
<< LOCAL_GMP_LIMB_BITS
) + u
->_mp_d
[n
];
1639 return u
->_mp_size
== 0 ? 0 : u
->_mp_d
[0];
1643 mpz_size (const mpz_t u
)
1645 return GMP_ABS (u
->_mp_size
);
1649 mpz_getlimbn (const mpz_t u
, mp_size_t n
)
1651 if (n
>= 0 && n
< GMP_ABS (u
->_mp_size
))
1658 mpz_realloc2 (mpz_t x
, mp_bitcnt_t n
)
1660 mpz_realloc (x
, 1 + (n
- (n
!= 0)) / GMP_LIMB_BITS
);
1664 mpz_limbs_read (mpz_srcptr x
)
1670 mpz_limbs_modify (mpz_t x
, mp_size_t n
)
1673 return MPZ_REALLOC (x
, n
);
1677 mpz_limbs_write (mpz_t x
, mp_size_t n
)
1679 return mpz_limbs_modify (x
, n
);
1683 mpz_limbs_finish (mpz_t x
, mp_size_t xs
)
1686 xn
= mpn_normalized_size (x
->_mp_d
, GMP_ABS (xs
));
1687 x
->_mp_size
= xs
< 0 ? -xn
: xn
;
1691 mpz_roinit_normal_n (mpz_t x
, mp_srcptr xp
, mp_size_t xs
)
1694 x
->_mp_d
= (mp_ptr
) xp
;
1700 mpz_roinit_n (mpz_t x
, mp_srcptr xp
, mp_size_t xs
)
1702 mpz_roinit_normal_n (x
, xp
, xs
);
1703 mpz_limbs_finish (x
, xs
);
1708 /* Conversions and comparison to double. */
1710 mpz_set_d (mpz_t r
, double x
)
1719 /* x != x is true when x is a NaN, and x == x * 0.5 is true when x is
1720 zero or infinity. */
1721 if (x
!= x
|| x
== x
* 0.5)
1736 B
= 4.0 * (double) (GMP_LIMB_HIGHBIT
>> 1);
1738 for (rn
= 1; x
>= B
; rn
++)
1741 rp
= MPZ_REALLOC (r
, rn
);
1757 r
->_mp_size
= sign
? - rn
: rn
;
1761 mpz_init_set_d (mpz_t r
, double x
)
1768 mpz_get_d (const mpz_t u
)
1774 double B
= 4.0 * (double) (GMP_LIMB_HIGHBIT
>> 1);
1776 un
= GMP_ABS (u
->_mp_size
);
1783 m
= m
+ GMP_DBL_MANT_BITS
- GMP_LIMB_BITS
;
1785 l
&= GMP_LIMB_MAX
<< -m
;
1787 for (x
= l
; --un
>= 0;)
1794 l
&= GMP_LIMB_MAX
<< -m
;
1799 if (u
->_mp_size
< 0)
1806 mpz_cmpabs_d (const mpz_t x
, double d
)
1819 B
= 4.0 * (double) (GMP_LIMB_HIGHBIT
>> 1);
1822 /* Scale d so it can be compared with the top limb. */
1823 for (i
= 1; i
< xn
; i
++)
1829 /* Compare floor(d) to top limb, subtract and cancel when equal. */
1830 for (i
= xn
; i
-- > 0;)
1847 mpz_cmp_d (const mpz_t x
, double d
)
1849 if (x
->_mp_size
< 0)
1854 return -mpz_cmpabs_d (x
, d
);
1861 return mpz_cmpabs_d (x
, d
);
1866 /* MPZ comparisons and the like. */
1868 mpz_sgn (const mpz_t u
)
1870 return GMP_CMP (u
->_mp_size
, 0);
1874 mpz_cmp_si (const mpz_t u
, long v
)
1876 mp_size_t usize
= u
->_mp_size
;
1879 return mpz_cmp_ui (u
, v
);
1880 else if (usize
>= 0)
1883 return - mpz_cmpabs_ui (u
, GMP_NEG_CAST (unsigned long int, v
));
1887 mpz_cmp_ui (const mpz_t u
, unsigned long v
)
1889 mp_size_t usize
= u
->_mp_size
;
1894 return mpz_cmpabs_ui (u
, v
);
1898 mpz_cmp (const mpz_t a
, const mpz_t b
)
1900 mp_size_t asize
= a
->_mp_size
;
1901 mp_size_t bsize
= b
->_mp_size
;
1904 return (asize
< bsize
) ? -1 : 1;
1905 else if (asize
>= 0)
1906 return mpn_cmp (a
->_mp_d
, b
->_mp_d
, asize
);
1908 return mpn_cmp (b
->_mp_d
, a
->_mp_d
, -asize
);
1912 mpz_cmpabs_ui (const mpz_t u
, unsigned long v
)
1914 mp_size_t un
= GMP_ABS (u
->_mp_size
);
1916 if (! mpn_absfits_ulong_p (u
->_mp_d
, un
))
1920 unsigned long uu
= mpz_get_ui (u
);
1921 return GMP_CMP(uu
, v
);
1926 mpz_cmpabs (const mpz_t u
, const mpz_t v
)
1928 return mpn_cmp4 (u
->_mp_d
, GMP_ABS (u
->_mp_size
),
1929 v
->_mp_d
, GMP_ABS (v
->_mp_size
));
1933 mpz_abs (mpz_t r
, const mpz_t u
)
1936 r
->_mp_size
= GMP_ABS (r
->_mp_size
);
1940 mpz_neg (mpz_t r
, const mpz_t u
)
1943 r
->_mp_size
= -r
->_mp_size
;
1947 mpz_swap (mpz_t u
, mpz_t v
)
1949 MP_SIZE_T_SWAP (u
->_mp_alloc
, v
->_mp_alloc
);
1950 MPN_PTR_SWAP (u
->_mp_d
, u
->_mp_size
, v
->_mp_d
, v
->_mp_size
);
1954 /* MPZ addition and subtraction */
1958 mpz_add_ui (mpz_t r
, const mpz_t a
, unsigned long b
)
1961 mpz_init_set_ui (bb
, b
);
1967 mpz_sub_ui (mpz_t r
, const mpz_t a
, unsigned long b
)
1969 mpz_ui_sub (r
, b
, a
);
1974 mpz_ui_sub (mpz_t r
, unsigned long a
, const mpz_t b
)
1977 mpz_add_ui (r
, r
, a
);
1981 mpz_abs_add (mpz_t r
, const mpz_t a
, const mpz_t b
)
1983 mp_size_t an
= GMP_ABS (a
->_mp_size
);
1984 mp_size_t bn
= GMP_ABS (b
->_mp_size
);
1990 MPZ_SRCPTR_SWAP (a
, b
);
1991 MP_SIZE_T_SWAP (an
, bn
);
1994 rp
= MPZ_REALLOC (r
, an
+ 1);
1995 cy
= mpn_add (rp
, a
->_mp_d
, an
, b
->_mp_d
, bn
);
2003 mpz_abs_sub (mpz_t r
, const mpz_t a
, const mpz_t b
)
2005 mp_size_t an
= GMP_ABS (a
->_mp_size
);
2006 mp_size_t bn
= GMP_ABS (b
->_mp_size
);
2010 cmp
= mpn_cmp4 (a
->_mp_d
, an
, b
->_mp_d
, bn
);
2013 rp
= MPZ_REALLOC (r
, an
);
2014 gmp_assert_nocarry (mpn_sub (rp
, a
->_mp_d
, an
, b
->_mp_d
, bn
));
2015 return mpn_normalized_size (rp
, an
);
2019 rp
= MPZ_REALLOC (r
, bn
);
2020 gmp_assert_nocarry (mpn_sub (rp
, b
->_mp_d
, bn
, a
->_mp_d
, an
));
2021 return -mpn_normalized_size (rp
, bn
);
2028 mpz_add (mpz_t r
, const mpz_t a
, const mpz_t b
)
2032 if ( (a
->_mp_size
^ b
->_mp_size
) >= 0)
2033 rn
= mpz_abs_add (r
, a
, b
);
2035 rn
= mpz_abs_sub (r
, a
, b
);
2037 r
->_mp_size
= a
->_mp_size
>= 0 ? rn
: - rn
;
2041 mpz_sub (mpz_t r
, const mpz_t a
, const mpz_t b
)
2045 if ( (a
->_mp_size
^ b
->_mp_size
) >= 0)
2046 rn
= mpz_abs_sub (r
, a
, b
);
2048 rn
= mpz_abs_add (r
, a
, b
);
2050 r
->_mp_size
= a
->_mp_size
>= 0 ? rn
: - rn
;
2054 /* MPZ multiplication */
2056 mpz_mul_si (mpz_t r
, const mpz_t u
, long int v
)
2060 mpz_mul_ui (r
, u
, GMP_NEG_CAST (unsigned long int, v
));
2064 mpz_mul_ui (r
, u
, v
);
2068 mpz_mul_ui (mpz_t r
, const mpz_t u
, unsigned long int v
)
2071 mpz_init_set_ui (vv
, v
);
2078 mpz_mul (mpz_t r
, const mpz_t u
, const mpz_t v
)
2081 mp_size_t un
, vn
, rn
;
2088 if (un
== 0 || vn
== 0)
2094 sign
= (un
^ vn
) < 0;
2099 mpz_init2 (t
, (un
+ vn
) * GMP_LIMB_BITS
);
2103 mpn_mul (tp
, u
->_mp_d
, un
, v
->_mp_d
, vn
);
2105 mpn_mul (tp
, v
->_mp_d
, vn
, u
->_mp_d
, un
);
2108 rn
-= tp
[rn
-1] == 0;
2110 t
->_mp_size
= sign
? - rn
: rn
;
2116 mpz_mul_2exp (mpz_t r
, const mpz_t u
, mp_bitcnt_t bits
)
2123 un
= GMP_ABS (u
->_mp_size
);
2130 limbs
= bits
/ GMP_LIMB_BITS
;
2131 shift
= bits
% GMP_LIMB_BITS
;
2133 rn
= un
+ limbs
+ (shift
> 0);
2134 rp
= MPZ_REALLOC (r
, rn
);
2137 mp_limb_t cy
= mpn_lshift (rp
+ limbs
, u
->_mp_d
, un
, shift
);
2142 mpn_copyd (rp
+ limbs
, u
->_mp_d
, un
);
2144 mpn_zero (rp
, limbs
);
2146 r
->_mp_size
= (u
->_mp_size
< 0) ? - rn
: rn
;
2150 mpz_addmul_ui (mpz_t r
, const mpz_t u
, unsigned long int v
)
2153 mpz_init_set_ui (t
, v
);
2160 mpz_submul_ui (mpz_t r
, const mpz_t u
, unsigned long int v
)
2163 mpz_init_set_ui (t
, v
);
2170 mpz_addmul (mpz_t r
, const mpz_t u
, const mpz_t v
)
2180 mpz_submul (mpz_t r
, const mpz_t u
, const mpz_t v
)
2191 enum mpz_div_round_mode
{ GMP_DIV_FLOOR
, GMP_DIV_CEIL
, GMP_DIV_TRUNC
};
2193 /* Allows q or r to be zero. Returns 1 iff remainder is non-zero. */
2195 mpz_div_qr (mpz_t q
, mpz_t r
,
2196 const mpz_t n
, const mpz_t d
, enum mpz_div_round_mode mode
)
2198 mp_size_t ns
, ds
, nn
, dn
, qs
;
2203 gmp_die("mpz_div_qr: Divide by zero.");
2221 if (mode
== GMP_DIV_CEIL
&& qs
>= 0)
2223 /* q = 1, r = n - d */
2229 else if (mode
== GMP_DIV_FLOOR
&& qs
< 0)
2231 /* q = -1, r = n + d */
2253 mpz_init_set (tr
, n
);
2260 mpz_init2 (tq
, qn
* GMP_LIMB_BITS
);
2266 mpn_div_qr (qp
, np
, nn
, d
->_mp_d
, dn
);
2270 qn
-= (qp
[qn
-1] == 0);
2272 tq
->_mp_size
= qs
< 0 ? -qn
: qn
;
2274 rn
= mpn_normalized_size (np
, dn
);
2275 tr
->_mp_size
= ns
< 0 ? - rn
: rn
;
2277 if (mode
== GMP_DIV_FLOOR
&& qs
< 0 && rn
!= 0)
2280 mpz_sub_ui (tq
, tq
, 1);
2282 mpz_add (tr
, tr
, d
);
2284 else if (mode
== GMP_DIV_CEIL
&& qs
>= 0 && rn
!= 0)
2287 mpz_add_ui (tq
, tq
, 1);
2289 mpz_sub (tr
, tr
, d
);
2307 mpz_cdiv_qr (mpz_t q
, mpz_t r
, const mpz_t n
, const mpz_t d
)
2309 mpz_div_qr (q
, r
, n
, d
, GMP_DIV_CEIL
);
2313 mpz_fdiv_qr (mpz_t q
, mpz_t r
, const mpz_t n
, const mpz_t d
)
2315 mpz_div_qr (q
, r
, n
, d
, GMP_DIV_FLOOR
);
2319 mpz_tdiv_qr (mpz_t q
, mpz_t r
, const mpz_t n
, const mpz_t d
)
2321 mpz_div_qr (q
, r
, n
, d
, GMP_DIV_TRUNC
);
2325 mpz_cdiv_q (mpz_t q
, const mpz_t n
, const mpz_t d
)
2327 mpz_div_qr (q
, NULL
, n
, d
, GMP_DIV_CEIL
);
2331 mpz_fdiv_q (mpz_t q
, const mpz_t n
, const mpz_t d
)
2333 mpz_div_qr (q
, NULL
, n
, d
, GMP_DIV_FLOOR
);
2337 mpz_tdiv_q (mpz_t q
, const mpz_t n
, const mpz_t d
)
2339 mpz_div_qr (q
, NULL
, n
, d
, GMP_DIV_TRUNC
);
2343 mpz_cdiv_r (mpz_t r
, const mpz_t n
, const mpz_t d
)
2345 mpz_div_qr (NULL
, r
, n
, d
, GMP_DIV_CEIL
);
2349 mpz_fdiv_r (mpz_t r
, const mpz_t n
, const mpz_t d
)
2351 mpz_div_qr (NULL
, r
, n
, d
, GMP_DIV_FLOOR
);
2355 mpz_tdiv_r (mpz_t r
, const mpz_t n
, const mpz_t d
)
2357 mpz_div_qr (NULL
, r
, n
, d
, GMP_DIV_TRUNC
);
2361 mpz_mod (mpz_t r
, const mpz_t n
, const mpz_t d
)
2363 mpz_div_qr (NULL
, r
, n
, d
, d
->_mp_size
>= 0 ? GMP_DIV_FLOOR
: GMP_DIV_CEIL
);
2367 mpz_div_q_2exp (mpz_t q
, const mpz_t u
, mp_bitcnt_t bit_index
,
2368 enum mpz_div_round_mode mode
)
2381 limb_cnt
= bit_index
/ GMP_LIMB_BITS
;
2382 qn
= GMP_ABS (un
) - limb_cnt
;
2383 bit_index
%= GMP_LIMB_BITS
;
2385 if (mode
== ((un
> 0) ? GMP_DIV_CEIL
: GMP_DIV_FLOOR
)) /* un != 0 here. */
2386 /* Note: Below, the final indexing at limb_cnt is valid because at
2387 that point we have qn > 0. */
2389 || !mpn_zero_p (u
->_mp_d
, limb_cnt
)
2390 || (u
->_mp_d
[limb_cnt
]
2391 & (((mp_limb_t
) 1 << bit_index
) - 1)));
2399 qp
= MPZ_REALLOC (q
, qn
);
2403 mpn_rshift (qp
, u
->_mp_d
+ limb_cnt
, qn
, bit_index
);
2404 qn
-= qp
[qn
- 1] == 0;
2408 mpn_copyi (qp
, u
->_mp_d
+ limb_cnt
, qn
);
2415 mpz_add_ui (q
, q
, 1);
2421 mpz_div_r_2exp (mpz_t r
, const mpz_t u
, mp_bitcnt_t bit_index
,
2422 enum mpz_div_round_mode mode
)
2424 mp_size_t us
, un
, rn
;
2429 if (us
== 0 || bit_index
== 0)
2434 rn
= (bit_index
+ GMP_LIMB_BITS
- 1) / GMP_LIMB_BITS
;
2437 rp
= MPZ_REALLOC (r
, rn
);
2440 mask
= GMP_LIMB_MAX
>> (rn
* GMP_LIMB_BITS
- bit_index
);
2444 /* Quotient (with truncation) is zero, and remainder is
2446 if (mode
== ((us
> 0) ? GMP_DIV_CEIL
: GMP_DIV_FLOOR
)) /* us != 0 here. */
2448 /* Have to negate and sign extend. */
2451 gmp_assert_nocarry (! mpn_neg (rp
, u
->_mp_d
, un
));
2452 for (i
= un
; i
< rn
- 1; i
++)
2453 rp
[i
] = GMP_LIMB_MAX
;
2462 mpn_copyi (rp
, u
->_mp_d
, un
);
2470 mpn_copyi (rp
, u
->_mp_d
, rn
- 1);
2472 rp
[rn
-1] = u
->_mp_d
[rn
-1] & mask
;
2474 if (mode
== ((us
> 0) ? GMP_DIV_CEIL
: GMP_DIV_FLOOR
)) /* us != 0 here. */
2476 /* If r != 0, compute 2^{bit_count} - r. */
2477 mpn_neg (rp
, rp
, rn
);
2481 /* us is not used for anything else, so we can modify it
2482 here to indicate flipped sign. */
2486 rn
= mpn_normalized_size (rp
, rn
);
2487 r
->_mp_size
= us
< 0 ? -rn
: rn
;
2491 mpz_cdiv_q_2exp (mpz_t r
, const mpz_t u
, mp_bitcnt_t cnt
)
2493 mpz_div_q_2exp (r
, u
, cnt
, GMP_DIV_CEIL
);
2497 mpz_fdiv_q_2exp (mpz_t r
, const mpz_t u
, mp_bitcnt_t cnt
)
2499 mpz_div_q_2exp (r
, u
, cnt
, GMP_DIV_FLOOR
);
2503 mpz_tdiv_q_2exp (mpz_t r
, const mpz_t u
, mp_bitcnt_t cnt
)
2505 mpz_div_q_2exp (r
, u
, cnt
, GMP_DIV_TRUNC
);
2509 mpz_cdiv_r_2exp (mpz_t r
, const mpz_t u
, mp_bitcnt_t cnt
)
2511 mpz_div_r_2exp (r
, u
, cnt
, GMP_DIV_CEIL
);
2515 mpz_fdiv_r_2exp (mpz_t r
, const mpz_t u
, mp_bitcnt_t cnt
)
2517 mpz_div_r_2exp (r
, u
, cnt
, GMP_DIV_FLOOR
);
2521 mpz_tdiv_r_2exp (mpz_t r
, const mpz_t u
, mp_bitcnt_t cnt
)
2523 mpz_div_r_2exp (r
, u
, cnt
, GMP_DIV_TRUNC
);
2527 mpz_divexact (mpz_t q
, const mpz_t n
, const mpz_t d
)
2529 gmp_assert_nocarry (mpz_div_qr (q
, NULL
, n
, d
, GMP_DIV_TRUNC
));
2533 mpz_divisible_p (const mpz_t n
, const mpz_t d
)
2535 return mpz_div_qr (NULL
, NULL
, n
, d
, GMP_DIV_TRUNC
) == 0;
2539 mpz_congruent_p (const mpz_t a
, const mpz_t b
, const mpz_t m
)
2544 /* a == b (mod 0) iff a == b */
2545 if (mpz_sgn (m
) == 0)
2546 return (mpz_cmp (a
, b
) == 0);
2550 res
= mpz_divisible_p (t
, m
);
2556 static unsigned long
2557 mpz_div_qr_ui (mpz_t q
, mpz_t r
,
2558 const mpz_t n
, unsigned long d
, enum mpz_div_round_mode mode
)
2564 mpz_init_set_ui (dd
, d
);
2565 mpz_div_qr (q
, rr
, n
, dd
, mode
);
2567 ret
= mpz_get_ui (rr
);
2577 mpz_cdiv_qr_ui (mpz_t q
, mpz_t r
, const mpz_t n
, unsigned long d
)
2579 return mpz_div_qr_ui (q
, r
, n
, d
, GMP_DIV_CEIL
);
2583 mpz_fdiv_qr_ui (mpz_t q
, mpz_t r
, const mpz_t n
, unsigned long d
)
2585 return mpz_div_qr_ui (q
, r
, n
, d
, GMP_DIV_FLOOR
);
2589 mpz_tdiv_qr_ui (mpz_t q
, mpz_t r
, const mpz_t n
, unsigned long d
)
2591 return mpz_div_qr_ui (q
, r
, n
, d
, GMP_DIV_TRUNC
);
2595 mpz_cdiv_q_ui (mpz_t q
, const mpz_t n
, unsigned long d
)
2597 return mpz_div_qr_ui (q
, NULL
, n
, d
, GMP_DIV_CEIL
);
2601 mpz_fdiv_q_ui (mpz_t q
, const mpz_t n
, unsigned long d
)
2603 return mpz_div_qr_ui (q
, NULL
, n
, d
, GMP_DIV_FLOOR
);
2607 mpz_tdiv_q_ui (mpz_t q
, const mpz_t n
, unsigned long d
)
2609 return mpz_div_qr_ui (q
, NULL
, n
, d
, GMP_DIV_TRUNC
);
2613 mpz_cdiv_r_ui (mpz_t r
, const mpz_t n
, unsigned long d
)
2615 return mpz_div_qr_ui (NULL
, r
, n
, d
, GMP_DIV_CEIL
);
2618 mpz_fdiv_r_ui (mpz_t r
, const mpz_t n
, unsigned long d
)
2620 return mpz_div_qr_ui (NULL
, r
, n
, d
, GMP_DIV_FLOOR
);
2623 mpz_tdiv_r_ui (mpz_t r
, const mpz_t n
, unsigned long d
)
2625 return mpz_div_qr_ui (NULL
, r
, n
, d
, GMP_DIV_TRUNC
);
2629 mpz_cdiv_ui (const mpz_t n
, unsigned long d
)
2631 return mpz_div_qr_ui (NULL
, NULL
, n
, d
, GMP_DIV_CEIL
);
2635 mpz_fdiv_ui (const mpz_t n
, unsigned long d
)
2637 return mpz_div_qr_ui (NULL
, NULL
, n
, d
, GMP_DIV_FLOOR
);
2641 mpz_tdiv_ui (const mpz_t n
, unsigned long d
)
2643 return mpz_div_qr_ui (NULL
, NULL
, n
, d
, GMP_DIV_TRUNC
);
2647 mpz_mod_ui (mpz_t r
, const mpz_t n
, unsigned long d
)
2649 return mpz_div_qr_ui (NULL
, r
, n
, d
, GMP_DIV_FLOOR
);
2653 mpz_divexact_ui (mpz_t q
, const mpz_t n
, unsigned long d
)
2655 gmp_assert_nocarry (mpz_div_qr_ui (q
, NULL
, n
, d
, GMP_DIV_TRUNC
));
2659 mpz_divisible_ui_p (const mpz_t n
, unsigned long d
)
2661 return mpz_div_qr_ui (NULL
, NULL
, n
, d
, GMP_DIV_TRUNC
) == 0;
2667 mpn_gcd_11 (mp_limb_t u
, mp_limb_t v
)
2671 assert ( (u
| v
) > 0);
2678 gmp_ctz (shift
, u
| v
);
2684 MP_LIMB_T_SWAP (u
, v
);
2686 while ( (v
& 1) == 0)
2696 while ( (u
& 1) == 0);
2703 while ( (v
& 1) == 0);
2710 mpz_gcd_ui (mpz_t g
, const mpz_t u
, unsigned long v
)
2713 mpz_init_set_ui(t
, v
);
2727 mpz_make_odd (mpz_t r
)
2731 assert (r
->_mp_size
> 0);
2732 /* Count trailing zeros, equivalent to mpn_scan1, because we know that there is a 1 */
2733 shift
= mpn_scan1 (r
->_mp_d
, 0);
2734 mpz_tdiv_q_2exp (r
, r
, shift
);
2740 mpz_gcd (mpz_t g
, const mpz_t u
, const mpz_t v
)
2743 mp_bitcnt_t uz
, vz
, gz
;
2745 if (u
->_mp_size
== 0)
2750 if (v
->_mp_size
== 0)
2760 uz
= mpz_make_odd (tu
);
2762 vz
= mpz_make_odd (tv
);
2763 gz
= GMP_MIN (uz
, vz
);
2765 if (tu
->_mp_size
< tv
->_mp_size
)
2768 mpz_tdiv_r (tu
, tu
, tv
);
2769 if (tu
->_mp_size
== 0)
2779 c
= mpz_cmp (tu
, tv
);
2788 if (tv
->_mp_size
== 1)
2792 mpz_tdiv_r (tu
, tu
, tv
);
2793 gp
= MPZ_REALLOC (g
, 1); /* gp = mpz_limbs_modify (g, 1); */
2794 *gp
= mpn_gcd_11 (tu
->_mp_d
[0], tv
->_mp_d
[0]);
2796 g
->_mp_size
= *gp
!= 0; /* mpz_limbs_finish (g, 1); */
2799 mpz_sub (tu
, tu
, tv
);
2803 mpz_mul_2exp (g
, g
, gz
);
2807 mpz_gcdext (mpz_t g
, mpz_t s
, mpz_t t
, const mpz_t u
, const mpz_t v
)
2809 mpz_t tu
, tv
, s0
, s1
, t0
, t1
;
2810 mp_bitcnt_t uz
, vz
, gz
;
2813 if (u
->_mp_size
== 0)
2815 /* g = 0 u + sgn(v) v */
2816 signed long sign
= mpz_sgn (v
);
2821 mpz_set_si (t
, sign
);
2825 if (v
->_mp_size
== 0)
2827 /* g = sgn(u) u + 0 v */
2828 signed long sign
= mpz_sgn (u
);
2831 mpz_set_si (s
, sign
);
2845 uz
= mpz_make_odd (tu
);
2847 vz
= mpz_make_odd (tv
);
2848 gz
= GMP_MIN (uz
, vz
);
2853 /* Cofactors corresponding to odd gcd. gz handled later. */
2854 if (tu
->_mp_size
< tv
->_mp_size
)
2857 MPZ_SRCPTR_SWAP (u
, v
);
2858 MPZ_PTR_SWAP (s
, t
);
2859 MP_BITCNT_T_SWAP (uz
, vz
);
2867 * where u and v denote the inputs with common factors of two
2868 * eliminated, and det (s0, t0; s1, t1) = 2^p. Then
2870 * 2^p tu = s1 u - t1 v
2871 * 2^p tv = -s0 u + t0 v
2874 /* After initial division, tu = q tv + tu', we have
2876 * u = 2^uz (tu' + q tv)
2881 * t0 = 2^uz, t1 = 2^uz q
2885 mpz_tdiv_qr (t1
, tu
, tu
, tv
);
2886 mpz_mul_2exp (t1
, t1
, uz
);
2888 mpz_setbit (s1
, vz
);
2891 if (tu
->_mp_size
> 0)
2894 shift
= mpz_make_odd (tu
);
2895 mpz_setbit (t0
, uz
+ shift
);
2901 c
= mpz_cmp (tu
, tv
);
2909 * u = t0 tu + t1 (tv' + tu) = (t0 + t1) tu + t1 tv'
2910 * v = s0 tu + s1 (tv' + tu) = (s0 + s1) tu + s1 tv' */
2912 mpz_sub (tv
, tv
, tu
);
2913 mpz_add (t0
, t0
, t1
);
2914 mpz_add (s0
, s0
, s1
);
2916 shift
= mpz_make_odd (tv
);
2917 mpz_mul_2exp (t1
, t1
, shift
);
2918 mpz_mul_2exp (s1
, s1
, shift
);
2922 mpz_sub (tu
, tu
, tv
);
2923 mpz_add (t1
, t0
, t1
);
2924 mpz_add (s1
, s0
, s1
);
2926 shift
= mpz_make_odd (tu
);
2927 mpz_mul_2exp (t0
, t0
, shift
);
2928 mpz_mul_2exp (s0
, s0
, shift
);
2934 mpz_setbit (t0
, uz
);
2936 /* Now tv = odd part of gcd, and -s0 and t0 are corresponding
2939 mpz_mul_2exp (tv
, tv
, gz
);
2942 /* 2^p g = s0 u + t0 v. Eliminate one factor of two at a time. To
2943 adjust cofactors, we need u / g and v / g */
2945 mpz_divexact (s1
, v
, tv
);
2947 mpz_divexact (t1
, u
, tv
);
2952 /* s0 u + t0 v = (s0 - v/g) u - (t0 + u/g) v */
2953 if (mpz_odd_p (s0
) || mpz_odd_p (t0
))
2955 mpz_sub (s0
, s0
, s1
);
2956 mpz_add (t0
, t0
, t1
);
2958 assert (mpz_even_p (t0
) && mpz_even_p (s0
));
2959 mpz_tdiv_q_2exp (s0
, s0
, 1);
2960 mpz_tdiv_q_2exp (t0
, t0
, 1);
2963 /* Arrange so that |s| < |u| / 2g */
2964 mpz_add (s1
, s0
, s1
);
2965 if (mpz_cmpabs (s0
, s1
) > 0)
2968 mpz_sub (t0
, t0
, t1
);
2970 if (u
->_mp_size
< 0)
2972 if (v
->_mp_size
< 0)
2990 mpz_lcm (mpz_t r
, const mpz_t u
, const mpz_t v
)
2994 if (u
->_mp_size
== 0 || v
->_mp_size
== 0)
3003 mpz_divexact (g
, u
, g
);
3011 mpz_lcm_ui (mpz_t r
, const mpz_t u
, unsigned long v
)
3013 if (v
== 0 || u
->_mp_size
== 0)
3019 v
/= mpz_gcd_ui (NULL
, u
, v
);
3020 mpz_mul_ui (r
, u
, v
);
3026 mpz_invert (mpz_t r
, const mpz_t u
, const mpz_t m
)
3031 if (u
->_mp_size
== 0 || mpz_cmpabs_ui (m
, 1) <= 0)
3037 mpz_gcdext (g
, tr
, NULL
, u
, m
);
3038 invertible
= (mpz_cmp_ui (g
, 1) == 0);
3042 if (tr
->_mp_size
< 0)
3044 if (m
->_mp_size
>= 0)
3045 mpz_add (tr
, tr
, m
);
3047 mpz_sub (tr
, tr
, m
);
3058 /* Higher level operations (sqrt, pow and root) */
3061 mpz_pow_ui (mpz_t r
, const mpz_t b
, unsigned long e
)
3065 mpz_init_set_ui (tr
, 1);
3067 bit
= GMP_ULONG_HIGHBIT
;
3070 mpz_mul (tr
, tr
, tr
);
3072 mpz_mul (tr
, tr
, b
);
3082 mpz_ui_pow_ui (mpz_t r
, unsigned long blimb
, unsigned long e
)
3086 mpz_init_set_ui (b
, blimb
);
3087 mpz_pow_ui (r
, b
, e
);
3092 mpz_powm (mpz_t r
, const mpz_t b
, const mpz_t e
, const mpz_t m
)
3098 struct gmp_div_inverse minv
;
3102 en
= GMP_ABS (e
->_mp_size
);
3103 mn
= GMP_ABS (m
->_mp_size
);
3105 gmp_die ("mpz_powm: Zero modulo.");
3109 mpz_set_ui (r
, mpz_cmpabs_ui (m
, 1));
3114 mpn_div_qr_invert (&minv
, mp
, mn
);
3119 /* To avoid shifts, we do all our reductions, except the final
3120 one, using a *normalized* m. */
3123 tp
= gmp_alloc_limbs (mn
);
3124 gmp_assert_nocarry (mpn_lshift (tp
, mp
, mn
, shift
));
3130 if (e
->_mp_size
< 0)
3132 if (!mpz_invert (base
, b
, m
))
3133 gmp_die ("mpz_powm: Negative exponent and non-invertible base.");
3140 bn
= base
->_mp_size
;
3143 mpn_div_qr_preinv (NULL
, base
->_mp_d
, base
->_mp_size
, mp
, mn
, &minv
);
3147 /* We have reduced the absolute value. Now take care of the
3148 sign. Note that we get zero represented non-canonically as
3150 if (b
->_mp_size
< 0)
3152 mp_ptr bp
= MPZ_REALLOC (base
, mn
);
3153 gmp_assert_nocarry (mpn_sub (bp
, mp
, mn
, bp
, bn
));
3156 base
->_mp_size
= mpn_normalized_size (base
->_mp_d
, bn
);
3158 mpz_init_set_ui (tr
, 1);
3162 mp_limb_t w
= e
->_mp_d
[en
];
3165 bit
= GMP_LIMB_HIGHBIT
;
3168 mpz_mul (tr
, tr
, tr
);
3170 mpz_mul (tr
, tr
, base
);
3171 if (tr
->_mp_size
> mn
)
3173 mpn_div_qr_preinv (NULL
, tr
->_mp_d
, tr
->_mp_size
, mp
, mn
, &minv
);
3174 tr
->_mp_size
= mpn_normalized_size (tr
->_mp_d
, mn
);
3181 /* Final reduction */
3182 if (tr
->_mp_size
>= mn
)
3185 mpn_div_qr_preinv (NULL
, tr
->_mp_d
, tr
->_mp_size
, mp
, mn
, &minv
);
3186 tr
->_mp_size
= mpn_normalized_size (tr
->_mp_d
, mn
);
3189 gmp_free_limbs (tp
, mn
);
3197 mpz_powm_ui (mpz_t r
, const mpz_t b
, unsigned long elimb
, const mpz_t m
)
3201 mpz_init_set_ui (e
, elimb
);
3202 mpz_powm (r
, b
, e
, m
);
3206 /* x=trunc(y^(1/z)), r=y-x^z */
3208 mpz_rootrem (mpz_t x
, mpz_t r
, const mpz_t y
, unsigned long z
)
3214 sgn
= y
->_mp_size
< 0;
3215 if ((~z
& sgn
) != 0)
3216 gmp_die ("mpz_rootrem: Negative argument, with even root.");
3218 gmp_die ("mpz_rootrem: Zeroth root.");
3220 if (mpz_cmpabs_ui (y
, 1) <= 0) {
3230 bc
= (mpz_sizeinbase (y
, 2) - 1) / z
+ 1;
3233 if (z
== 2) /* simplify sqrt loop: z-1 == 1 */
3235 mpz_swap (u
, t
); /* u = x */
3236 mpz_tdiv_q (t
, y
, u
); /* t = y/x */
3237 mpz_add (t
, t
, u
); /* t = y/x + x */
3238 mpz_tdiv_q_2exp (t
, t
, 1); /* x'= (y/x + x)/2 */
3239 } while (mpz_cmpabs (t
, u
) < 0); /* |x'| < |x| */
3248 mpz_swap (u
, t
); /* u = x */
3249 mpz_pow_ui (t
, u
, z
- 1); /* t = x^(z-1) */
3250 mpz_tdiv_q (t
, y
, t
); /* t = y/x^(z-1) */
3251 mpz_mul_ui (v
, u
, z
- 1); /* v = x*(z-1) */
3252 mpz_add (t
, t
, v
); /* t = y/x^(z-1) + x*(z-1) */
3253 mpz_tdiv_q_ui (t
, t
, z
); /* x'=(y/x^(z-1) + x*(z-1))/z */
3254 } while (mpz_cmpabs (t
, u
) < 0); /* |x'| < |x| */
3260 mpz_pow_ui (t
, u
, z
);
3270 mpz_root (mpz_t x
, const mpz_t y
, unsigned long z
)
3276 mpz_rootrem (x
, r
, y
, z
);
3277 res
= r
->_mp_size
== 0;
3283 /* Compute s = floor(sqrt(u)) and r = u - s^2. Allows r == NULL */
3285 mpz_sqrtrem (mpz_t s
, mpz_t r
, const mpz_t u
)
3287 mpz_rootrem (s
, r
, u
, 2);
3291 mpz_sqrt (mpz_t s
, const mpz_t u
)
3293 mpz_rootrem (s
, NULL
, u
, 2);
3297 mpz_perfect_square_p (const mpz_t u
)
3299 if (u
->_mp_size
<= 0)
3300 return (u
->_mp_size
== 0);
3302 return mpz_root (NULL
, u
, 2);
3306 mpn_perfect_square_p (mp_srcptr p
, mp_size_t n
)
3311 assert (p
[n
-1] != 0);
3312 return mpz_root (NULL
, mpz_roinit_normal_n (t
, p
, n
), 2);
3316 mpn_sqrtrem (mp_ptr sp
, mp_ptr rp
, mp_srcptr p
, mp_size_t n
)
3322 assert (p
[n
-1] != 0);
3326 mpz_rootrem (s
, r
, mpz_roinit_normal_n (u
, p
, n
), 2);
3328 assert (s
->_mp_size
== (n
+1)/2);
3329 mpn_copyd (sp
, s
->_mp_d
, s
->_mp_size
);
3333 mpn_copyd (rp
, r
->_mp_d
, res
);
3341 mpz_mfac_uiui (mpz_t x
, unsigned long n
, unsigned long m
)
3343 mpz_set_ui (x
, n
+ (n
== 0));
3344 if (m
+ 1 < 2) return;
3346 mpz_mul_ui (x
, x
, n
-= m
);
3350 mpz_2fac_ui (mpz_t x
, unsigned long n
)
3352 mpz_mfac_uiui (x
, n
, 2);
3356 mpz_fac_ui (mpz_t x
, unsigned long n
)
3358 mpz_mfac_uiui (x
, n
, 1);
3362 mpz_bin_uiui (mpz_t r
, unsigned long n
, unsigned long k
)
3366 mpz_set_ui (r
, k
<= n
);
3369 k
= (k
<= n
) ? n
- k
: 0;
3375 mpz_mul_ui (r
, r
, n
--);
3377 mpz_divexact (r
, r
, t
);
3382 /* Primality testing */
3384 /* Computes Kronecker (a/b) with odd b, a!=0 and GCD(a,b) = 1 */
3385 /* Adapted from JACOBI_BASE_METHOD==4 in mpn/generic/jacbase.c */
3387 gmp_jacobi_coprime (mp_limb_t a
, mp_limb_t b
)
3393 /* assert (mpn_gcd_11 (a, b) == 1); */
3395 /* Below, we represent a and b shifted right so that the least
3396 significant one bit is implicit. */
3405 /* (2/b) = -1 if b = 3 or 5 mod 8 */
3406 bit
^= c
& (b
^ (b
>> 1));
3410 return bit
& 1 ? -1 : 1;
3427 gmp_lucas_step_k_2k (mpz_t V
, mpz_t Qk
, const mpz_t n
)
3429 mpz_mod (Qk
, Qk
, n
);
3430 /* V_{2k} <- V_k ^ 2 - 2Q^k */
3432 mpz_submul_ui (V
, Qk
, 2);
3433 mpz_tdiv_r (V
, V
, n
);
3434 /* Q^{2k} = (Q^k)^2 */
3435 mpz_mul (Qk
, Qk
, Qk
);
3438 /* Computes V_k, Q^k (mod n) for the Lucas' sequence */
3439 /* with P=1, Q=Q; k = (n>>b0)|1. */
3440 /* Requires an odd n > 4; b0 > 0; -2*Q must not overflow a long */
3441 /* Returns (U_k == 0) and sets V=V_k and Qk=Q^k. */
3443 gmp_lucas_mod (mpz_t V
, mpz_t Qk
, long Q
,
3444 mp_bitcnt_t b0
, const mpz_t n
)
3451 assert (Q
<= - (LONG_MIN
/ 2));
3452 assert (Q
>= - (LONG_MAX
/ 2));
3453 assert (mpz_cmp_ui (n
, 4) > 0);
3454 assert (mpz_odd_p (n
));
3456 mpz_init_set_ui (U
, 1); /* U1 = 1 */
3457 mpz_set_ui (V
, 1); /* V1 = 1 */
3460 for (bs
= mpz_sizeinbase (n
, 2) - 1; --bs
>= b0
;)
3462 /* U_{2k} <- U_k * V_k */
3464 /* V_{2k} <- V_k ^ 2 - 2Q^k */
3465 /* Q^{2k} = (Q^k)^2 */
3466 gmp_lucas_step_k_2k (V
, Qk
, n
);
3468 /* A step k->k+1 is performed if the bit in $n$ is 1 */
3469 /* mpz_tstbit(n,bs) or the bit is 0 in $n$ but */
3470 /* should be 1 in $n+1$ (bs == b0) */
3471 if (b0
== bs
|| mpz_tstbit (n
, bs
))
3473 /* Q^{k+1} <- Q^k * Q */
3474 mpz_mul_si (Qk
, Qk
, Q
);
3475 /* U_{k+1} <- (U_k + V_k) / 2 */
3476 mpz_swap (U
, V
); /* Keep in V the old value of U_k */
3478 /* We have to compute U/2, so we need an even value, */
3479 /* equivalent (mod n) */
3482 mpz_tdiv_q_2exp (U
, U
, 1);
3483 /* V_{k+1} <-(D*U_k + V_k) / 2 =
3484 U_{k+1} + (D-1)/2*U_k = U_{k+1} - 2Q*U_k */
3485 mpz_mul_si (V
, V
, -2*Q
);
3487 mpz_tdiv_r (V
, V
, n
);
3489 mpz_tdiv_r (U
, U
, n
);
3492 res
= U
->_mp_size
== 0;
3497 /* Performs strong Lucas' test on x, with parameters suggested */
3498 /* for the BPSW test. Qk is only passed to recycle a variable. */
3499 /* Requires GCD (x,6) = 1.*/
3501 gmp_stronglucas (const mpz_t x
, mpz_t Qk
)
3505 mp_limb_t maxD
, D
; /* The absolute value is stored. */
3509 /* Test on the absolute value. */
3510 mpz_roinit_normal_n (n
, x
->_mp_d
, GMP_ABS (x
->_mp_size
));
3512 assert (mpz_odd_p (n
));
3513 /* assert (mpz_gcd_ui (NULL, n, 6) == 1); */
3514 if (mpz_root (Qk
, n
, 2))
3515 return 0; /* A square is composite. */
3517 /* Check Ds up to square root (in case, n is prime)
3518 or avoid overflows */
3519 maxD
= (Qk
->_mp_size
== 1) ? Qk
->_mp_d
[0] - 1 : GMP_LIMB_MAX
;
3522 /* Search a D such that (D/n) = -1 in the sequence 5,-7,9,-11,.. */
3523 /* For those Ds we have (D/n) = (n/|D|) */
3527 return 1 + (D
!= GMP_LIMB_MAX
); /* (1 + ! ~ D) */
3529 tl
= mpz_tdiv_ui (n
, D
);
3533 while (gmp_jacobi_coprime (tl
, D
) == 1);
3537 /* n-(D/n) = n+1 = d*2^{b0}, with d = (n>>b0) | 1 */
3538 b0
= mpn_common_scan (~ n
->_mp_d
[0], 0, n
->_mp_d
, n
->_mp_size
, GMP_LIMB_MAX
);
3539 /* b0 = mpz_scan0 (n, 0); */
3541 /* D= P^2 - 4Q; P = 1; Q = (1-D)/4 */
3542 Q
= (D
& 2) ? (long) (D
>> 2) + 1 : -(long) (D
>> 2);
3544 if (! gmp_lucas_mod (V
, Qk
, Q
, b0
, n
)) /* If Ud != 0 */
3545 while (V
->_mp_size
!= 0 && --b0
!= 0) /* while Vk != 0 */
3546 /* V <- V ^ 2 - 2Q^k */
3547 /* Q^{2k} = (Q^k)^2 */
3548 gmp_lucas_step_k_2k (V
, Qk
, n
);
3555 gmp_millerrabin (const mpz_t n
, const mpz_t nm1
, mpz_t y
,
3556 const mpz_t q
, mp_bitcnt_t k
)
3560 /* Caller must initialize y to the base. */
3561 mpz_powm (y
, y
, q
, n
);
3563 if (mpz_cmp_ui (y
, 1) == 0 || mpz_cmp (y
, nm1
) == 0)
3568 mpz_powm_ui (y
, y
, 2, n
);
3569 if (mpz_cmp (y
, nm1
) == 0)
3575 /* This product is 0xc0cfd797, and fits in 32 bits. */
3576 #define GMP_PRIME_PRODUCT \
3577 (3UL*5UL*7UL*11UL*13UL*17UL*19UL*23UL*29UL)
3579 /* Bit (p+1)/2 is set, for each odd prime <= 61 */
3580 #define GMP_PRIME_MASK 0xc96996dcUL
3583 mpz_probab_prime_p (const mpz_t n
, int reps
)
3592 /* Note that we use the absolute value of n only, for compatibility
3593 with the real GMP. */
3595 return (mpz_cmpabs_ui (n
, 2) == 0) ? 2 : 0;
3597 /* Above test excludes n == 0 */
3598 assert (n
->_mp_size
!= 0);
3600 if (mpz_cmpabs_ui (n
, 64) < 0)
3601 return (GMP_PRIME_MASK
>> (n
->_mp_d
[0] >> 1)) & 2;
3603 if (mpz_gcd_ui (NULL
, n
, GMP_PRIME_PRODUCT
) != 1)
3606 /* All prime factors are >= 31. */
3607 if (mpz_cmpabs_ui (n
, 31*31) < 0)
3613 /* Find q and k, where q is odd and n = 1 + 2**k * q. */
3616 /* Count trailing zeros, equivalent to mpn_scan1, because we know that there is a 1 */
3617 k
= mpn_scan1 (nm1
->_mp_d
, 0);
3618 mpz_tdiv_q_2exp (q
, nm1
, k
);
3621 mpz_init_set_ui (y
, 2);
3622 is_prime
= gmp_millerrabin (n
, nm1
, y
, q
, k
) && gmp_stronglucas (n
, y
);
3623 reps
-= 24; /* skip the first 24 repetitions */
3625 /* Use Miller-Rabin, with a deterministic sequence of bases, a[j] =
3626 j^2 + j + 41 using Euler's polynomial. We potentially stop early,
3627 if a[j] >= n - 1. Since n >= 31*31, this can happen only if reps >
3628 30 (a[30] == 971 > 31*31 == 961). */
3630 for (j
= 0; is_prime
& (j
< reps
); j
++)
3632 mpz_set_ui (y
, (unsigned long) j
*j
+j
+41);
3633 if (mpz_cmp (y
, nm1
) >= 0)
3635 /* Don't try any further bases. This "early" break does not affect
3636 the result for any reasonable reps value (<=5000 was tested) */
3640 is_prime
= gmp_millerrabin (n
, nm1
, y
, q
, k
);
3650 /* Logical operations and bit manipulation. */
3652 /* Numbers are treated as if represented in two's complement (and
3653 infinitely sign extended). For a negative values we get the two's
3654 complement from -x = ~x + 1, where ~ is bitwise complement.
3663 where yyyy is the bitwise complement of xxxx. So least significant
3664 bits, up to and including the first one bit, are unchanged, and
3665 the more significant bits are all complemented.
3667 To change a bit from zero to one in a negative number, subtract the
3668 corresponding power of two from the absolute value. This can never
3669 underflow. To change a bit from one to zero, add the corresponding
3670 power of two, and this might overflow. E.g., if x = -001111, the
3671 two's complement is 110001. Clearing the least significant bit, we
3672 get two's complement 110000, and -010000. */
3675 mpz_tstbit (const mpz_t d
, mp_bitcnt_t bit_index
)
3677 mp_size_t limb_index
;
3686 limb_index
= bit_index
/ GMP_LIMB_BITS
;
3687 if (limb_index
>= dn
)
3690 shift
= bit_index
% GMP_LIMB_BITS
;
3691 w
= d
->_mp_d
[limb_index
];
3692 bit
= (w
>> shift
) & 1;
3696 /* d < 0. Check if any of the bits below is set: If so, our bit
3697 must be complemented. */
3698 if (shift
> 0 && (mp_limb_t
) (w
<< (GMP_LIMB_BITS
- shift
)) > 0)
3700 while (--limb_index
>= 0)
3701 if (d
->_mp_d
[limb_index
] > 0)
3708 mpz_abs_add_bit (mpz_t d
, mp_bitcnt_t bit_index
)
3710 mp_size_t dn
, limb_index
;
3714 dn
= GMP_ABS (d
->_mp_size
);
3716 limb_index
= bit_index
/ GMP_LIMB_BITS
;
3717 bit
= (mp_limb_t
) 1 << (bit_index
% GMP_LIMB_BITS
);
3719 if (limb_index
>= dn
)
3722 /* The bit should be set outside of the end of the number.
3723 We have to increase the size of the number. */
3724 dp
= MPZ_REALLOC (d
, limb_index
+ 1);
3726 dp
[limb_index
] = bit
;
3727 for (i
= dn
; i
< limb_index
; i
++)
3729 dn
= limb_index
+ 1;
3737 cy
= mpn_add_1 (dp
+ limb_index
, dp
+ limb_index
, dn
- limb_index
, bit
);
3740 dp
= MPZ_REALLOC (d
, dn
+ 1);
3745 d
->_mp_size
= (d
->_mp_size
< 0) ? - dn
: dn
;
3749 mpz_abs_sub_bit (mpz_t d
, mp_bitcnt_t bit_index
)
3751 mp_size_t dn
, limb_index
;
3755 dn
= GMP_ABS (d
->_mp_size
);
3758 limb_index
= bit_index
/ GMP_LIMB_BITS
;
3759 bit
= (mp_limb_t
) 1 << (bit_index
% GMP_LIMB_BITS
);
3761 assert (limb_index
< dn
);
3763 gmp_assert_nocarry (mpn_sub_1 (dp
+ limb_index
, dp
+ limb_index
,
3764 dn
- limb_index
, bit
));
3765 dn
= mpn_normalized_size (dp
, dn
);
3766 d
->_mp_size
= (d
->_mp_size
< 0) ? - dn
: dn
;
3770 mpz_setbit (mpz_t d
, mp_bitcnt_t bit_index
)
3772 if (!mpz_tstbit (d
, bit_index
))
3774 if (d
->_mp_size
>= 0)
3775 mpz_abs_add_bit (d
, bit_index
);
3777 mpz_abs_sub_bit (d
, bit_index
);
3782 mpz_clrbit (mpz_t d
, mp_bitcnt_t bit_index
)
3784 if (mpz_tstbit (d
, bit_index
))
3786 if (d
->_mp_size
>= 0)
3787 mpz_abs_sub_bit (d
, bit_index
);
3789 mpz_abs_add_bit (d
, bit_index
);
3794 mpz_combit (mpz_t d
, mp_bitcnt_t bit_index
)
3796 if (mpz_tstbit (d
, bit_index
) ^ (d
->_mp_size
< 0))
3797 mpz_abs_sub_bit (d
, bit_index
);
3799 mpz_abs_add_bit (d
, bit_index
);
3803 mpz_com (mpz_t r
, const mpz_t u
)
3805 mpz_add_ui (r
, u
, 1);
3810 mpz_and (mpz_t r
, const mpz_t u
, const mpz_t v
)
3812 mp_size_t un
, vn
, rn
, i
;
3815 mp_limb_t ux
, vx
, rx
;
3816 mp_limb_t uc
, vc
, rc
;
3817 mp_limb_t ul
, vl
, rl
;
3819 un
= GMP_ABS (u
->_mp_size
);
3820 vn
= GMP_ABS (v
->_mp_size
);
3823 MPZ_SRCPTR_SWAP (u
, v
);
3824 MP_SIZE_T_SWAP (un
, vn
);
3832 uc
= u
->_mp_size
< 0;
3833 vc
= v
->_mp_size
< 0;
3840 /* If the smaller input is positive, higher limbs don't matter. */
3843 rp
= MPZ_REALLOC (r
, rn
+ (mp_size_t
) rc
);
3851 ul
= (up
[i
] ^ ux
) + uc
;
3854 vl
= (vp
[i
] ^ vx
) + vc
;
3857 rl
= ( (ul
& vl
) ^ rx
) + rc
;
3866 ul
= (up
[i
] ^ ux
) + uc
;
3869 rl
= ( (ul
& vx
) ^ rx
) + rc
;
3876 rn
= mpn_normalized_size (rp
, rn
);
3878 r
->_mp_size
= rx
? -rn
: rn
;
3882 mpz_ior (mpz_t r
, const mpz_t u
, const mpz_t v
)
3884 mp_size_t un
, vn
, rn
, i
;
3887 mp_limb_t ux
, vx
, rx
;
3888 mp_limb_t uc
, vc
, rc
;
3889 mp_limb_t ul
, vl
, rl
;
3891 un
= GMP_ABS (u
->_mp_size
);
3892 vn
= GMP_ABS (v
->_mp_size
);
3895 MPZ_SRCPTR_SWAP (u
, v
);
3896 MP_SIZE_T_SWAP (un
, vn
);
3904 uc
= u
->_mp_size
< 0;
3905 vc
= v
->_mp_size
< 0;
3912 /* If the smaller input is negative, by sign extension higher limbs
3916 rp
= MPZ_REALLOC (r
, rn
+ (mp_size_t
) rc
);
3924 ul
= (up
[i
] ^ ux
) + uc
;
3927 vl
= (vp
[i
] ^ vx
) + vc
;
3930 rl
= ( (ul
| vl
) ^ rx
) + rc
;
3939 ul
= (up
[i
] ^ ux
) + uc
;
3942 rl
= ( (ul
| vx
) ^ rx
) + rc
;
3949 rn
= mpn_normalized_size (rp
, rn
);
3951 r
->_mp_size
= rx
? -rn
: rn
;
3955 mpz_xor (mpz_t r
, const mpz_t u
, const mpz_t v
)
3957 mp_size_t un
, vn
, i
;
3960 mp_limb_t ux
, vx
, rx
;
3961 mp_limb_t uc
, vc
, rc
;
3962 mp_limb_t ul
, vl
, rl
;
3964 un
= GMP_ABS (u
->_mp_size
);
3965 vn
= GMP_ABS (v
->_mp_size
);
3968 MPZ_SRCPTR_SWAP (u
, v
);
3969 MP_SIZE_T_SWAP (un
, vn
);
3977 uc
= u
->_mp_size
< 0;
3978 vc
= v
->_mp_size
< 0;
3985 rp
= MPZ_REALLOC (r
, un
+ (mp_size_t
) rc
);
3993 ul
= (up
[i
] ^ ux
) + uc
;
3996 vl
= (vp
[i
] ^ vx
) + vc
;
3999 rl
= (ul
^ vl
^ rx
) + rc
;
4008 ul
= (up
[i
] ^ ux
) + uc
;
4011 rl
= (ul
^ ux
) + rc
;
4018 un
= mpn_normalized_size (rp
, un
);
4020 r
->_mp_size
= rx
? -un
: un
;
4024 gmp_popcount_limb (mp_limb_t x
)
4028 /* Do 16 bits at a time, to avoid limb-sized constants. */
4029 int LOCAL_SHIFT_BITS
= 16;
4032 unsigned w
= x
- ((x
>> 1) & 0x5555);
4033 w
= ((w
>> 2) & 0x3333) + (w
& 0x3333);
4035 w
= ((w
>> 8) & 0x000f) + (w
& 0x000f);
4037 if (GMP_LIMB_BITS
> LOCAL_SHIFT_BITS
)
4038 x
>>= LOCAL_SHIFT_BITS
;
4046 mpn_popcount (mp_srcptr p
, mp_size_t n
)
4051 for (c
= 0, i
= 0; i
< n
; i
++)
4052 c
+= gmp_popcount_limb (p
[i
]);
4058 mpz_popcount (const mpz_t u
)
4065 return ~(mp_bitcnt_t
) 0;
4067 return mpn_popcount (u
->_mp_d
, un
);
4071 mpz_hamdist (const mpz_t u
, const mpz_t v
)
4073 mp_size_t un
, vn
, i
;
4074 mp_limb_t uc
, vc
, ul
, vl
, comp
;
4082 return ~(mp_bitcnt_t
) 0;
4084 comp
= - (uc
= vc
= (un
< 0));
4096 MPN_SRCPTR_SWAP (up
, un
, vp
, vn
);
4098 for (i
= 0, c
= 0; i
< vn
; i
++)
4100 ul
= (up
[i
] ^ comp
) + uc
;
4103 vl
= (vp
[i
] ^ comp
) + vc
;
4106 c
+= gmp_popcount_limb (ul
^ vl
);
4112 ul
= (up
[i
] ^ comp
) + uc
;
4115 c
+= gmp_popcount_limb (ul
^ comp
);
4122 mpz_scan1 (const mpz_t u
, mp_bitcnt_t starting_bit
)
4125 mp_size_t us
, un
, i
;
4130 i
= starting_bit
/ GMP_LIMB_BITS
;
4132 /* Past the end there's no 1 bits for u>=0, or an immediate 1 bit
4133 for u<0. Notice this test picks up any u==0 too. */
4135 return (us
>= 0 ? ~(mp_bitcnt_t
) 0 : starting_bit
);
4141 if (starting_bit
!= 0)
4145 ux
= mpn_zero_p (up
, i
);
4147 ux
= - (mp_limb_t
) (limb
>= ux
);
4150 /* Mask to 0 all bits before starting_bit, thus ignoring them. */
4151 limb
&= GMP_LIMB_MAX
<< (starting_bit
% GMP_LIMB_BITS
);
4154 return mpn_common_scan (limb
, i
, up
, un
, ux
);
4158 mpz_scan0 (const mpz_t u
, mp_bitcnt_t starting_bit
)
4161 mp_size_t us
, un
, i
;
4165 ux
= - (mp_limb_t
) (us
>= 0);
4167 i
= starting_bit
/ GMP_LIMB_BITS
;
4169 /* When past end, there's an immediate 0 bit for u>=0, or no 0 bits for
4170 u<0. Notice this test picks up all cases of u==0 too. */
4172 return (ux
? starting_bit
: ~(mp_bitcnt_t
) 0);
4178 limb
-= mpn_zero_p (up
, i
); /* limb = ~(~limb + zero_p) */
4180 /* Mask all bits before starting_bit, thus ignoring them. */
4181 limb
&= GMP_LIMB_MAX
<< (starting_bit
% GMP_LIMB_BITS
);
4183 return mpn_common_scan (limb
, i
, up
, un
, ux
);
4187 /* MPZ base conversion. */
4190 mpz_sizeinbase (const mpz_t u
, int base
)
4196 struct gmp_div_inverse bi
;
4200 assert (base
<= 62);
4202 un
= GMP_ABS (u
->_mp_size
);
4208 bits
= (un
- 1) * GMP_LIMB_BITS
+ mpn_limb_size_in_base_2 (up
[un
-1]);
4214 return (bits
+ 1) / 2;
4216 return (bits
+ 2) / 3;
4218 return (bits
+ 3) / 4;
4220 return (bits
+ 4) / 5;
4221 /* FIXME: Do something more clever for the common case of base
4225 tp
= gmp_alloc_limbs (un
);
4226 mpn_copyi (tp
, up
, un
);
4227 mpn_div_qr_1_invert (&bi
, base
);
4234 mpn_div_qr_1_preinv (tp
, tp
, tn
, &bi
);
4235 tn
-= (tp
[tn
-1] == 0);
4239 gmp_free_limbs (tp
, un
);
4244 mpz_get_str (char *sp
, int base
, const mpz_t u
)
4251 digits
= "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz";
4255 digits
= "0123456789abcdefghijklmnopqrstuvwxyz";
4259 else if (base
>= -1)
4268 sn
= 1 + mpz_sizeinbase (u
, base
);
4272 sp
= (char *) gmp_alloc (osn
);
4276 un
= GMP_ABS (u
->_mp_size
);
4287 if (u
->_mp_size
< 0)
4290 bits
= mpn_base_power_of_two_p (base
);
4293 /* Not modified in this case. */
4294 sn
= i
+ mpn_get_str_bits ((unsigned char *) sp
+ i
, bits
, u
->_mp_d
, un
);
4297 struct mpn_base_info info
;
4300 mpn_get_base_info (&info
, base
);
4301 tp
= gmp_alloc_limbs (un
);
4302 mpn_copyi (tp
, u
->_mp_d
, un
);
4304 sn
= i
+ mpn_get_str_other ((unsigned char *) sp
+ i
, base
, &info
, tp
, un
);
4305 gmp_free_limbs (tp
, un
);
4309 sp
[i
] = digits
[(unsigned char) sp
[i
]];
4313 if (osn
&& osn
!= sn
+ 1)
4314 sp
= (char*) gmp_realloc (sp
, osn
, sn
+ 1);
/* mpz_set_str: parse the string sp in the given base (0 = detect from
   an 0x/0X or 0b/0B prefix) into r.  Stores a signed size at the end;
   the failure path (freeing dp, returning nonzero) lies in lines the
   extraction dropped.
   NOTE(review): garbled extraction -- braces, declarations and several
   statements are missing; the leading numbers on some lines are
   residue of the original line numbering. */
4319 mpz_set_str (mpz_t r
, const char *sp
, int base
)
4321 unsigned bits
, value_of_a
;
4322 mp_size_t rn
, alloc
;
/* Only base 0 (auto-detect) and explicit bases 2..62 are supported. */
4328 assert (base
== 0 || (base
>= 2 && base
<= 62));
/* Skip leading white space; the unsigned char cast keeps isspace()
   well-defined for high-bit characters. */
4330 while (isspace( (unsigned char) *sp
))
/* Record an optional leading minus sign before digit parsing. */
4333 sign
= (*sp
== '-');
/* base == 0: sniff the prefix -- 0x/0X selects hexadecimal ... */
4340 if (sp
[1] == 'x' || sp
[1] == 'X')
/* ... 0b/0B selects binary (the plain-leading-zero octal branch was
   presumably here; dropped by the extraction -- confirm upstream). */
4345 else if (sp
[1] == 'b' || sp
[1] == 'B')
/* Scratch array of raw digit values, one byte per input character. */
4363 dp
= (unsigned char *) gmp_alloc (sn
);
/* For base <= 36 letters are case-insensitive ('a' and 'A' are 10);
   above 36 case matters and 'a' means 36. */
4365 value_of_a
= (base
> 36) ? 36 : 10;
4366 for (dn
= 0; *sp
; sp
++)
/* Embedded white space is skipped, not treated as a digit. */
4370 if (isspace ((unsigned char) *sp
))
4372 else if (*sp
>= '0' && *sp
<= '9')
4374 else if (*sp
>= 'a' && *sp
<= 'z')
4375 digit
= *sp
- 'a' + value_of_a
;
4376 else if (*sp
>= 'A' && *sp
<= 'Z')
4377 digit
= *sp
- 'A' + 10;
/* Any other character: force an out-of-range value so the range
   check below rejects the whole input. */
4379 digit
= base
; /* fail */
4381 if (digit
>= (unsigned) base
)
/* bits != 0 iff base is a power of two: each digit then contributes
   exactly `bits` bits, so the limb count is computable up front. */
4397 bits
= mpn_base_power_of_two_p (base
);
4401 alloc
= (dn
* bits
+ GMP_LIMB_BITS
- 1) / GMP_LIMB_BITS
;
4402 rp
= MPZ_REALLOC (r
, alloc
);
4403 rn
= mpn_set_str_bits (rp
, dp
, dn
, bits
);
/* General base: info.exp digits fit per limb, bounding the limb
   count needed for dn digits. */
4407 struct mpn_base_info info
;
4408 mpn_get_base_info (&info
, base
);
4409 alloc
= (dn
+ info
.exp
- 1) / info
.exp
;
4410 rp
= MPZ_REALLOC (r
, alloc
);
4411 rn
= mpn_set_str_other (rp
, dp
, dn
, base
, &info
);
4412 /* Normalization, needed for all-zero input. */
4414 rn
-= rp
[rn
-1] == 0;
4416 assert (rn
<= alloc
);
/* Store the signed limb count; sign was captured before parsing. */
4419 r
->_mp_size
= sign
? - rn
: rn
;
4425 mpz_init_set_str (mpz_t r
, const char *sp
, int base
)
4428 return mpz_set_str (r
, sp
, base
);
4432 mpz_out_str (FILE *stream
, int base
, const mpz_t x
)
4437 str
= mpz_get_str (NULL
, base
, x
);
4441 n
= fwrite (str
, 1, len
, stream
);
4442 gmp_free (str
, len
+ 1);
/* gmp_detect_endian: runtime endianness probe.  Returns 1 on a
   big-endian host and -1 on a little-endian host, matching the
   endian argument convention of mpz_import/mpz_export (which assert
   endian >= -1 && endian <= 1 and substitute this result when the
   caller passes endian == 0).
   NOTE(review): the extraction dropped the return statement; restored
   as 1 - *p -- the first byte of the int 2 is 2 on a little-endian
   host (giving -1) and 0 on a big-endian host (giving 1). */
static int
gmp_detect_endian (void)
{
  static const int i = 2;
  const unsigned char *p = (const unsigned char *) &i;
  return 1 - *p;
}
4455 /* Import and export. Does not support nails. */
/* mpz_import: read count words of `size` bytes each from src into r.
   order (+1 most-significant word first / -1 least first) gives the
   word order; endian (+1 / -1 / 0 = native) the byte order within a
   word; nails must be 0.
   NOTE(review): garbled extraction -- braces, declarations and several
   statements (including the final r->_mp_size store) are missing; the
   leading numbers on some lines are original-line-number residue. */
4457 mpz_import (mpz_t r
, size_t count
, int order
, size_t size
, int endian
,
4458 size_t nails
, const void *src
)
4460 const unsigned char *p
;
/* Signed byte distance from one source word to the next. */
4461 ptrdiff_t word_step
;
4465 /* The current (partial) limb. */
4467 /* The number of bytes already copied to this limb (starting from
4470 /* The index where the limb should be stored, when completed. */
4474 gmp_die ("mpz_import: Nails not supported.");
4476 assert (order
== 1 || order
== -1);
4477 assert (endian
>= -1 && endian
<= 1);
/* endian == 0 means host byte order: resolve it to +1/-1 now. */
4480 endian
= gmp_detect_endian ();
4482 p
= (unsigned char *) src
;
/* The inner byte loop moves p by -endian per byte, i.e. by one word
   when order and endian agree; when they disagree, a 2*size per-word
   correction is needed (its sign is flipped below for order == 1). */
4484 word_step
= (order
!= endian
) ? 2 * size
: 0;
4486 /* Process bytes from the least significant end, so point p at the
4487 least significant word. */
4490 p
+= size
* (count
- 1);
4491 word_step
= - word_step
;
4494 /* And at least significant byte of that word. */
/* Limbs needed to hold size*count bytes, rounded up. */
4498 rn
= (size
* count
+ sizeof(mp_limb_t
) - 1) / sizeof(mp_limb_t
);
4499 rp
= MPZ_REALLOC (r
, rn
);
/* Assemble limbs byte by byte, least significant byte first. */
4501 for (limb
= 0, bytes
= 0, i
= 0; count
> 0; count
--, p
+= word_step
)
4504 for (j
= 0; j
< size
; j
++, p
-= (ptrdiff_t) endian
)
4506 limb
|= (mp_limb_t
) *p
<< (bytes
++ * CHAR_BIT
);
/* Limb full: presumably stores rp[i++] = limb and resets the
   accumulator (branch body dropped by the extraction). */
4507 if (bytes
== sizeof(mp_limb_t
))
4515 assert (i
+ (bytes
> 0) == rn
);
/* Strip high zero limbs before the (dropped) _mp_size store. */
4519 i
= mpn_normalized_size (rp
, i
);
4525 mpz_export (void *r
, size_t *countp
, int order
, size_t size
, int endian
,
4526 size_t nails
, const mpz_t u
)
4532 gmp_die ("mpz_export: Nails not supported.");
4534 assert (order
== 1 || order
== -1);
4535 assert (endian
>= -1 && endian
<= 1);
4536 assert (size
> 0 || u
->_mp_size
== 0);
4544 ptrdiff_t word_step
;
4545 /* The current (partial) limb. */
4547 /* The number of bytes left to do in this limb. */
4549 /* The index where the limb was read. */
4554 /* Count bytes in top limb. */
4555 limb
= u
->_mp_d
[un
-1];
4558 k
= (GMP_LIMB_BITS
<= CHAR_BIT
);
4562 int LOCAL_CHAR_BIT
= CHAR_BIT
;
4563 k
++; limb
>>= LOCAL_CHAR_BIT
;
4564 } while (limb
!= 0);
4566 /* else limb = 0; */
4568 count
= (k
+ (un
-1) * sizeof (mp_limb_t
) + size
- 1) / size
;
4571 r
= gmp_alloc (count
* size
);
4574 endian
= gmp_detect_endian ();
4576 p
= (unsigned char *) r
;
4578 word_step
= (order
!= endian
) ? 2 * size
: 0;
4580 /* Process bytes from the least significant end, so point p at the
4581 least significant word. */
4584 p
+= size
* (count
- 1);
4585 word_step
= - word_step
;
4588 /* And at least significant byte of that word. */
4592 for (bytes
= 0, i
= 0, k
= 0; k
< count
; k
++, p
+= word_step
)
4595 for (j
= 0; j
< size
; ++j
, p
-= (ptrdiff_t) endian
)
4597 if (sizeof (mp_limb_t
) == 1)
4606 int LOCAL_CHAR_BIT
= CHAR_BIT
;
4610 limb
= u
->_mp_d
[i
++];
4611 bytes
= sizeof (mp_limb_t
);
4614 limb
>>= LOCAL_CHAR_BIT
;
4620 assert (k
== count
);