1 /* mini-gmp, a minimalistic implementation of a GNU GMP subset.
3 Contributed to the GNU project by Niels Möller
4 Additional functionalities and improvements by Marco Bodrato.
6 Copyright 1991-1997, 1999-2022 Free Software Foundation, Inc.
8 This file is part of the GNU MP Library.
10 The GNU MP Library is free software; you can redistribute it and/or modify
11 it under the terms of either:
13 * the GNU Lesser General Public License as published by the Free
14 Software Foundation; either version 3 of the License, or (at your
15 option) any later version.
19 * the GNU General Public License as published by the Free Software
20 Foundation; either version 2 of the License, or (at your option) any
23 or both in parallel, as here.
25 The GNU MP Library is distributed in the hope that it will be useful, but
26 WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
27 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
30 You should have received copies of the GNU General Public License and the
31 GNU Lesser General Public License along with the GNU MP Library. If not,
32 see https://www.gnu.org/licenses/. */
34 /* NOTE: All functions in this file which are not declared in
35 mini-gmp.h are internal, and are not intended to be compatible
36 with GMP or with future versions of mini-gmp. */
38 /* Much of the material copied from GMP files, including: gmp-impl.h,
39 longlong.h, mpn/generic/add_n.c, mpn/generic/addmul_1.c,
40 mpn/generic/lshift.c, mpn/generic/mul_1.c,
41 mpn/generic/mul_basecase.c, mpn/generic/rshift.c,
42 mpn/generic/sbpi1_div_qr.c, mpn/generic/sub_n.c,
43 mpn/generic/submul_1.c. */
54 #if !defined(MINI_GMP_DONT_USE_FLOAT_H)
/* Limb geometry and derived constants. */
#define GMP_LIMB_BITS (sizeof(mp_limb_t) * CHAR_BIT)

#define GMP_LIMB_MAX ((mp_limb_t) ~ (mp_limb_t) 0)
#define GMP_LIMB_HIGHBIT ((mp_limb_t) 1 << (GMP_LIMB_BITS - 1))

/* Half-limb constants: the bit just above the low half of a limb, and
   a mask selecting the low half. */
#define GMP_HLIMB_BIT ((mp_limb_t) 1 << (GMP_LIMB_BITS / 2))
#define GMP_LLIMB_MASK (GMP_HLIMB_BIT - 1)

#define GMP_ULONG_BITS (sizeof(unsigned long) * CHAR_BIT)
#define GMP_ULONG_HIGHBIT ((unsigned long) 1 << (GMP_ULONG_BITS - 1))

/* Absolute value.  Note -(x) on the most negative value of a signed
   type would overflow; callers use GMP_NEG_CAST for that case. */
#define GMP_ABS(x) ((x) >= 0 ? (x) : -(x))
/* Negate a signed x < 0 into the unsigned type T, written so that the
   most negative value is handled without signed overflow. */
#define GMP_NEG_CAST(T,x) (-((T)((x) + 1) - 1))

#define GMP_MIN(a, b) ((a) < (b) ? (a) : (b))
#define GMP_MAX(a, b) ((a) > (b) ? (a) : (b))

/* Three-way compare: 1, 0 or -1 as a >, == or < b. */
#define GMP_CMP(a,b) (((a) > (b)) - ((a) < (b)))
79 #if defined(DBL_MANT_DIG) && FLT_RADIX == 2
80 #define GMP_DBL_MANT_BITS DBL_MANT_DIG
82 #define GMP_DBL_MANT_BITS (53)
/* Return non-zero if the limb regions {xp,xsize} and {yp,ysize} overlap.
   If xp+xsize<=yp there's no overlap, or if yp+ysize<=xp there's no
   overlap.  If both these are false, there's an overlap. */
#define GMP_MPN_OVERLAP_P(xp, xsize, yp, ysize)				\
  ((xp) + (xsize) > (yp) && (yp) + (ysize) > (xp))
91 #define gmp_assert_nocarry(x) do { \
92 mp_limb_t __cy = (x); \
97 #define gmp_clz(count, x) do { \
98 mp_limb_t __clz_x = (x); \
99 unsigned __clz_c = 0; \
100 int LOCAL_SHIFT_BITS = 8; \
101 if (GMP_LIMB_BITS > LOCAL_SHIFT_BITS) \
103 (__clz_x & ((mp_limb_t) 0xff << (GMP_LIMB_BITS - 8))) == 0; \
105 { __clz_x <<= LOCAL_SHIFT_BITS; } \
106 for (; (__clz_x & GMP_LIMB_HIGHBIT) == 0; __clz_c++) \
111 #define gmp_ctz(count, x) do { \
112 mp_limb_t __ctz_x = (x); \
113 unsigned __ctz_c = 0; \
114 gmp_clz (__ctz_c, __ctz_x & - __ctz_x); \
115 (count) = GMP_LIMB_BITS - 1 - __ctz_c; \
118 #define gmp_add_ssaaaa(sh, sl, ah, al, bh, bl) \
122 (sh) = (ah) + (bh) + (__x < (al)); \
126 #define gmp_sub_ddmmss(sh, sl, ah, al, bh, bl) \
130 (sh) = (ah) - (bh) - ((al) < (bl)); \
134 #define gmp_umul_ppmm(w1, w0, u, v) \
136 int LOCAL_GMP_LIMB_BITS = GMP_LIMB_BITS; \
137 if (sizeof(unsigned int) * CHAR_BIT >= 2 * GMP_LIMB_BITS) \
139 unsigned int __ww = (unsigned int) (u) * (v); \
140 w0 = (mp_limb_t) __ww; \
141 w1 = (mp_limb_t) (__ww >> LOCAL_GMP_LIMB_BITS); \
143 else if (GMP_ULONG_BITS >= 2 * GMP_LIMB_BITS) \
145 unsigned long int __ww = (unsigned long int) (u) * (v); \
146 w0 = (mp_limb_t) __ww; \
147 w1 = (mp_limb_t) (__ww >> LOCAL_GMP_LIMB_BITS); \
150 mp_limb_t __x0, __x1, __x2, __x3; \
151 unsigned __ul, __vl, __uh, __vh; \
152 mp_limb_t __u = (u), __v = (v); \
153 assert (sizeof (unsigned) * 2 >= sizeof (mp_limb_t)); \
155 __ul = __u & GMP_LLIMB_MASK; \
156 __uh = __u >> (GMP_LIMB_BITS / 2); \
157 __vl = __v & GMP_LLIMB_MASK; \
158 __vh = __v >> (GMP_LIMB_BITS / 2); \
160 __x0 = (mp_limb_t) __ul * __vl; \
161 __x1 = (mp_limb_t) __ul * __vh; \
162 __x2 = (mp_limb_t) __uh * __vl; \
163 __x3 = (mp_limb_t) __uh * __vh; \
165 __x1 += __x0 >> (GMP_LIMB_BITS / 2);/* this can't give carry */ \
166 __x1 += __x2; /* but this indeed can */ \
167 if (__x1 < __x2) /* did we get it? */ \
168 __x3 += GMP_HLIMB_BIT; /* yes, add it in the proper pos. */ \
170 (w1) = __x3 + (__x1 >> (GMP_LIMB_BITS / 2)); \
171 (w0) = (__x1 << (GMP_LIMB_BITS / 2)) + (__x0 & GMP_LLIMB_MASK); \
/* If mp_limb_t is of size smaller than int, plain u*v implies
   automatic promotion to *signed* int, and then multiply may overflow
   and cause undefined behavior.  Explicitly cast to unsigned int in
   that case, so the product wraps in unsigned arithmetic instead. */
#define gmp_umullo_limb(u, v) \
  ((sizeof(mp_limb_t) >= sizeof(int)) ? (u)*(v) : (unsigned int)(u) * (v))
182 #define gmp_udiv_qrnnd_preinv(q, r, nh, nl, d, di) \
184 mp_limb_t _qh, _ql, _r, _mask; \
185 gmp_umul_ppmm (_qh, _ql, (nh), (di)); \
186 gmp_add_ssaaaa (_qh, _ql, _qh, _ql, (nh) + 1, (nl)); \
187 _r = (nl) - gmp_umullo_limb (_qh, (d)); \
188 _mask = -(mp_limb_t) (_r > _ql); /* both > and >= are OK */ \
201 #define gmp_udiv_qr_3by2(q, r1, r0, n2, n1, n0, d1, d0, dinv) \
203 mp_limb_t _q0, _t1, _t0, _mask; \
204 gmp_umul_ppmm ((q), _q0, (n2), (dinv)); \
205 gmp_add_ssaaaa ((q), _q0, (q), _q0, (n2), (n1)); \
207 /* Compute the two most significant limbs of n - q'd */ \
208 (r1) = (n1) - gmp_umullo_limb ((d1), (q)); \
209 gmp_sub_ddmmss ((r1), (r0), (r1), (n0), (d1), (d0)); \
210 gmp_umul_ppmm (_t1, _t0, (d0), (q)); \
211 gmp_sub_ddmmss ((r1), (r0), (r1), (r0), _t1, _t0); \
214 /* Conditionally adjust q and the remainders */ \
215 _mask = - (mp_limb_t) ((r1) >= _q0); \
217 gmp_add_ssaaaa ((r1), (r0), (r1), (r0), _mask & (d1), _mask & (d0)); \
220 if ((r1) > (d1) || (r0) >= (d0)) \
223 gmp_sub_ddmmss ((r1), (r0), (r1), (r0), (d1), (d0)); \
229 #define MP_LIMB_T_SWAP(x, y) \
231 mp_limb_t __mp_limb_t_swap__tmp = (x); \
233 (y) = __mp_limb_t_swap__tmp; \
235 #define MP_SIZE_T_SWAP(x, y) \
237 mp_size_t __mp_size_t_swap__tmp = (x); \
239 (y) = __mp_size_t_swap__tmp; \
241 #define MP_BITCNT_T_SWAP(x,y) \
243 mp_bitcnt_t __mp_bitcnt_t_swap__tmp = (x); \
245 (y) = __mp_bitcnt_t_swap__tmp; \
247 #define MP_PTR_SWAP(x, y) \
249 mp_ptr __mp_ptr_swap__tmp = (x); \
251 (y) = __mp_ptr_swap__tmp; \
253 #define MP_SRCPTR_SWAP(x, y) \
255 mp_srcptr __mp_srcptr_swap__tmp = (x); \
257 (y) = __mp_srcptr_swap__tmp; \
260 #define MPN_PTR_SWAP(xp,xs, yp,ys) \
262 MP_PTR_SWAP (xp, yp); \
263 MP_SIZE_T_SWAP (xs, ys); \
265 #define MPN_SRCPTR_SWAP(xp,xs, yp,ys) \
267 MP_SRCPTR_SWAP (xp, yp); \
268 MP_SIZE_T_SWAP (xs, ys); \
271 #define MPZ_PTR_SWAP(x, y) \
273 mpz_ptr __mpz_ptr_swap__tmp = (x); \
275 (y) = __mpz_ptr_swap__tmp; \
277 #define MPZ_SRCPTR_SWAP(x, y) \
279 mpz_srcptr __mpz_srcptr_swap__tmp = (x); \
281 (y) = __mpz_srcptr_swap__tmp; \
284 const int mp_bits_per_limb
= GMP_LIMB_BITS
;
287 /* Memory allocation and other helper functions. */
289 gmp_die (const char *msg
)
291 fprintf (stderr
, "%s\n", msg
);
296 gmp_default_alloc (size_t size
)
304 gmp_die("gmp_default_alloc: Virtual memory exhausted.");
310 gmp_default_realloc (void *old
, size_t unused_old_size
, size_t new_size
)
314 p
= realloc (old
, new_size
);
317 gmp_die("gmp_default_realloc: Virtual memory exhausted.");
323 gmp_default_free (void *p
, size_t unused_size
)
328 static void * (*gmp_allocate_func
) (size_t) = gmp_default_alloc
;
329 static void * (*gmp_reallocate_func
) (void *, size_t, size_t) = gmp_default_realloc
;
330 static void (*gmp_free_func
) (void *, size_t) = gmp_default_free
;
333 mp_get_memory_functions (void *(**alloc_func
) (size_t),
334 void *(**realloc_func
) (void *, size_t, size_t),
335 void (**free_func
) (void *, size_t))
338 *alloc_func
= gmp_allocate_func
;
341 *realloc_func
= gmp_reallocate_func
;
344 *free_func
= gmp_free_func
;
348 mp_set_memory_functions (void *(*alloc_func
) (size_t),
349 void *(*realloc_func
) (void *, size_t, size_t),
350 void (*free_func
) (void *, size_t))
353 alloc_func
= gmp_default_alloc
;
355 realloc_func
= gmp_default_realloc
;
357 free_func
= gmp_default_free
;
359 gmp_allocate_func
= alloc_func
;
360 gmp_reallocate_func
= realloc_func
;
361 gmp_free_func
= free_func
;
/* Allocation wrappers, dispatching through the user-settable hooks
   installed via mp_set_memory_functions. */
#define gmp_alloc(size) ((*gmp_allocate_func)((size)))
#define gmp_free(p, size) ((*gmp_free_func) ((p), (size)))
#define gmp_realloc(ptr, old_size, size) ((*gmp_reallocate_func)(ptr, old_size, size))
369 gmp_alloc_limbs (mp_size_t size
)
371 return (mp_ptr
) gmp_alloc (size
* sizeof (mp_limb_t
));
375 gmp_realloc_limbs (mp_ptr old
, mp_size_t old_size
, mp_size_t size
)
378 return (mp_ptr
) gmp_realloc (old
, old_size
* sizeof (mp_limb_t
), size
* sizeof (mp_limb_t
));
382 gmp_free_limbs (mp_ptr old
, mp_size_t size
)
384 gmp_free (old
, size
* sizeof (mp_limb_t
));
391 mpn_copyi (mp_ptr d
, mp_srcptr s
, mp_size_t n
)
394 for (i
= 0; i
< n
; i
++)
399 mpn_copyd (mp_ptr d
, mp_srcptr s
, mp_size_t n
)
406 mpn_cmp (mp_srcptr ap
, mp_srcptr bp
, mp_size_t n
)
411 return ap
[n
] > bp
[n
] ? 1 : -1;
417 mpn_cmp4 (mp_srcptr ap
, mp_size_t an
, mp_srcptr bp
, mp_size_t bn
)
420 return an
< bn
? -1 : 1;
422 return mpn_cmp (ap
, bp
, an
);
426 mpn_normalized_size (mp_srcptr xp
, mp_size_t n
)
428 while (n
> 0 && xp
[n
-1] == 0)
434 mpn_zero_p(mp_srcptr rp
, mp_size_t n
)
436 return mpn_normalized_size (rp
, n
) == 0;
440 mpn_zero (mp_ptr rp
, mp_size_t n
)
447 mpn_add_1 (mp_ptr rp
, mp_srcptr ap
, mp_size_t n
, mp_limb_t b
)
455 mp_limb_t r
= ap
[i
] + b
;
466 mpn_add_n (mp_ptr rp
, mp_srcptr ap
, mp_srcptr bp
, mp_size_t n
)
471 for (i
= 0, cy
= 0; i
< n
; i
++)
474 a
= ap
[i
]; b
= bp
[i
];
485 mpn_add (mp_ptr rp
, mp_srcptr ap
, mp_size_t an
, mp_srcptr bp
, mp_size_t bn
)
491 cy
= mpn_add_n (rp
, ap
, bp
, bn
);
493 cy
= mpn_add_1 (rp
+ bn
, ap
+ bn
, an
- bn
, cy
);
498 mpn_sub_1 (mp_ptr rp
, mp_srcptr ap
, mp_size_t n
, mp_limb_t b
)
509 mp_limb_t cy
= a
< b
;
519 mpn_sub_n (mp_ptr rp
, mp_srcptr ap
, mp_srcptr bp
, mp_size_t n
)
524 for (i
= 0, cy
= 0; i
< n
; i
++)
527 a
= ap
[i
]; b
= bp
[i
];
537 mpn_sub (mp_ptr rp
, mp_srcptr ap
, mp_size_t an
, mp_srcptr bp
, mp_size_t bn
)
543 cy
= mpn_sub_n (rp
, ap
, bp
, bn
);
545 cy
= mpn_sub_1 (rp
+ bn
, ap
+ bn
, an
- bn
, cy
);
550 mpn_mul_1 (mp_ptr rp
, mp_srcptr up
, mp_size_t n
, mp_limb_t vl
)
552 mp_limb_t ul
, cl
, hpl
, lpl
;
560 gmp_umul_ppmm (hpl
, lpl
, ul
, vl
);
563 cl
= (lpl
< cl
) + hpl
;
573 mpn_addmul_1 (mp_ptr rp
, mp_srcptr up
, mp_size_t n
, mp_limb_t vl
)
575 mp_limb_t ul
, cl
, hpl
, lpl
, rl
;
583 gmp_umul_ppmm (hpl
, lpl
, ul
, vl
);
586 cl
= (lpl
< cl
) + hpl
;
599 mpn_submul_1 (mp_ptr rp
, mp_srcptr up
, mp_size_t n
, mp_limb_t vl
)
601 mp_limb_t ul
, cl
, hpl
, lpl
, rl
;
609 gmp_umul_ppmm (hpl
, lpl
, ul
, vl
);
612 cl
= (lpl
< cl
) + hpl
;
625 mpn_mul (mp_ptr rp
, mp_srcptr up
, mp_size_t un
, mp_srcptr vp
, mp_size_t vn
)
629 assert (!GMP_MPN_OVERLAP_P(rp
, un
+ vn
, up
, un
));
630 assert (!GMP_MPN_OVERLAP_P(rp
, un
+ vn
, vp
, vn
));
632 /* We first multiply by the low order limb. This result can be
633 stored, not added, to rp. We also avoid a loop for zeroing this
636 rp
[un
] = mpn_mul_1 (rp
, up
, un
, vp
[0]);
638 /* Now accumulate the product of up[] and the next higher limb from
644 rp
[un
] = mpn_addmul_1 (rp
, up
, un
, vp
[0]);
650 mpn_mul_n (mp_ptr rp
, mp_srcptr ap
, mp_srcptr bp
, mp_size_t n
)
652 mpn_mul (rp
, ap
, n
, bp
, n
);
656 mpn_sqr (mp_ptr rp
, mp_srcptr ap
, mp_size_t n
)
658 mpn_mul (rp
, ap
, n
, ap
, n
);
662 mpn_lshift (mp_ptr rp
, mp_srcptr up
, mp_size_t n
, unsigned int cnt
)
664 mp_limb_t high_limb
, low_limb
;
670 assert (cnt
< GMP_LIMB_BITS
);
675 tnc
= GMP_LIMB_BITS
- cnt
;
677 retval
= low_limb
>> tnc
;
678 high_limb
= (low_limb
<< cnt
);
683 *--rp
= high_limb
| (low_limb
>> tnc
);
684 high_limb
= (low_limb
<< cnt
);
692 mpn_rshift (mp_ptr rp
, mp_srcptr up
, mp_size_t n
, unsigned int cnt
)
694 mp_limb_t high_limb
, low_limb
;
700 assert (cnt
< GMP_LIMB_BITS
);
702 tnc
= GMP_LIMB_BITS
- cnt
;
704 retval
= (high_limb
<< tnc
);
705 low_limb
= high_limb
>> cnt
;
710 *rp
++ = low_limb
| (high_limb
<< tnc
);
711 low_limb
= high_limb
>> cnt
;
719 mpn_common_scan (mp_limb_t limb
, mp_size_t i
, mp_srcptr up
, mp_size_t un
,
724 assert (ux
== 0 || ux
== GMP_LIMB_MAX
);
725 assert (0 <= i
&& i
<= un
);
731 return (ux
== 0 ? ~(mp_bitcnt_t
) 0 : un
* GMP_LIMB_BITS
);
735 return (mp_bitcnt_t
) i
* GMP_LIMB_BITS
+ cnt
;
739 mpn_scan1 (mp_srcptr ptr
, mp_bitcnt_t bit
)
742 i
= bit
/ GMP_LIMB_BITS
;
744 return mpn_common_scan ( ptr
[i
] & (GMP_LIMB_MAX
<< (bit
% GMP_LIMB_BITS
)),
749 mpn_scan0 (mp_srcptr ptr
, mp_bitcnt_t bit
)
752 i
= bit
/ GMP_LIMB_BITS
;
754 return mpn_common_scan (~ptr
[i
] & (GMP_LIMB_MAX
<< (bit
% GMP_LIMB_BITS
)),
755 i
, ptr
, i
, GMP_LIMB_MAX
);
759 mpn_com (mp_ptr rp
, mp_srcptr up
, mp_size_t n
)
766 mpn_neg (mp_ptr rp
, mp_srcptr up
, mp_size_t n
)
776 mpn_com (++rp
, ++up
, --n
);
781 /* MPN division interface. */
783 /* The 3/2 inverse is defined as
785 m = floor( (B^3-1) / (B u1 + u0)) - B
788 mpn_invert_3by2 (mp_limb_t u1
, mp_limb_t u0
)
796 assert (sizeof (unsigned) * 2 >= sizeof (mp_limb_t
));
797 /* For notation, let b denote the half-limb base, so that B = b^2.
798 Split u1 = b uh + ul. */
799 ul
= u1
& GMP_LLIMB_MASK
;
800 uh
= u1
>> (GMP_LIMB_BITS
/ 2);
802 /* Approximation of the high half of quotient. Differs from the 2/1
803 inverse of the half limb uh, since we have already subtracted
805 qh
= (u1
^ GMP_LIMB_MAX
) / uh
;
807 /* Adjust to get a half-limb 3/2 inverse, i.e., we want
809 qh' = floor( (b^3 - 1) / u) - b = floor ((b^3 - b u - 1) / u
810 = floor( (b (~u) + b-1) / u),
814 r = b (~u) + b-1 - qh (b uh + ul)
815 = b (~u - qh uh) + b-1 - qh ul
817 Subtraction of qh ul may underflow, which implies adjustments.
818 But by normalization, 2 u >= B > qh ul, so we need to adjust by
822 r
= ((~u1
- (mp_limb_t
) qh
* uh
) << (GMP_LIMB_BITS
/ 2)) | GMP_LLIMB_MASK
;
824 p
= (mp_limb_t
) qh
* ul
;
825 /* Adjustment steps taken from udiv_qrnnd_c */
830 if (r
>= u1
) /* i.e. we didn't get carry when adding to r */
839 /* Low half of the quotient is
841 ql = floor ( (b r + b-1) / u1).
843 This is a 3/2 division (on half-limbs), for which qh is a
846 p
= (r
>> (GMP_LIMB_BITS
/ 2)) * qh
+ r
;
847 /* Unlike full-limb 3/2, we can add 1 without overflow. For this to
848 work, it is essential that ql is a full mp_limb_t. */
849 ql
= (p
>> (GMP_LIMB_BITS
/ 2)) + 1;
851 /* By the 3/2 trick, we don't need the high half limb. */
852 r
= (r
<< (GMP_LIMB_BITS
/ 2)) + GMP_LLIMB_MASK
- ql
* u1
;
854 if (r
>= (GMP_LIMB_MAX
& (p
<< (GMP_LIMB_BITS
/ 2))))
859 m
= ((mp_limb_t
) qh
<< (GMP_LIMB_BITS
/ 2)) + ql
;
867 /* Now m is the 2/1 inverse of u1. If u0 > 0, adjust it to become a
884 gmp_umul_ppmm (th
, tl
, u0
, m
);
889 m
-= ((r
> u1
) | ((r
== u1
) & (tl
> u0
)));
896 struct gmp_div_inverse
898 /* Normalization shift count. */
900 /* Normalized divisor (d0 unused for mpn_div_qr_1) */
902 /* Inverse, for 2/1 or 3/2. */
907 mpn_div_qr_1_invert (struct gmp_div_inverse
*inv
, mp_limb_t d
)
914 inv
->d1
= d
<< shift
;
915 inv
->di
= mpn_invert_limb (inv
->d1
);
919 mpn_div_qr_2_invert (struct gmp_div_inverse
*inv
,
920 mp_limb_t d1
, mp_limb_t d0
)
929 d1
= (d1
<< shift
) | (d0
>> (GMP_LIMB_BITS
- shift
));
934 inv
->di
= mpn_invert_3by2 (d1
, d0
);
938 mpn_div_qr_invert (struct gmp_div_inverse
*inv
,
939 mp_srcptr dp
, mp_size_t dn
)
944 mpn_div_qr_1_invert (inv
, dp
[0]);
946 mpn_div_qr_2_invert (inv
, dp
[1], dp
[0]);
959 d1
= (d1
<< shift
) | (d0
>> (GMP_LIMB_BITS
- shift
));
960 d0
= (d0
<< shift
) | (dp
[dn
-3] >> (GMP_LIMB_BITS
- shift
));
964 inv
->di
= mpn_invert_3by2 (d1
, d0
);
968 /* Not matching current public gmp interface, rather corresponding to
969 the sbpi1_div_* functions. */
971 mpn_div_qr_1_preinv (mp_ptr qp
, mp_srcptr np
, mp_size_t nn
,
972 const struct gmp_div_inverse
*inv
)
981 /* Shift, reusing qp area if possible. In-place shift if qp == np. */
986 tp
= gmp_alloc_limbs (tn
);
988 r
= mpn_lshift (tp
, np
, nn
, inv
->shift
);
1000 gmp_udiv_qrnnd_preinv (q
, r
, r
, np
[nn
], d
, di
);
1005 gmp_free_limbs (tp
, tn
);
1007 return r
>> inv
->shift
;
1011 mpn_div_qr_2_preinv (mp_ptr qp
, mp_ptr np
, mp_size_t nn
,
1012 const struct gmp_div_inverse
*inv
)
1016 mp_limb_t d1
, d0
, di
, r1
, r0
;
1025 r1
= mpn_lshift (np
, np
, nn
, shift
);
1036 gmp_udiv_qr_3by2 (q
, r1
, r0
, r1
, r0
, n0
, d1
, d0
, di
);
1045 assert ((r0
& (GMP_LIMB_MAX
>> (GMP_LIMB_BITS
- shift
))) == 0);
1046 r0
= (r0
>> shift
) | (r1
<< (GMP_LIMB_BITS
- shift
));
1055 mpn_div_qr_pi1 (mp_ptr qp
,
1056 mp_ptr np
, mp_size_t nn
, mp_limb_t n1
,
1057 mp_srcptr dp
, mp_size_t dn
,
1072 assert ((d1
& GMP_LIMB_HIGHBIT
) != 0);
1073 /* Iteration variable is the index of the q limb.
1075 * We divide <n1, np[dn-1+i], np[dn-2+i], np[dn-3+i],..., np[i]>
1076 * by <d1, d0, dp[dn-3], ..., dp[0] >
1082 mp_limb_t n0
= np
[dn
-1+i
];
1084 if (n1
== d1
&& n0
== d0
)
1087 mpn_submul_1 (np
+i
, dp
, dn
, q
);
1088 n1
= np
[dn
-1+i
]; /* update n1, last loop's value will now be invalid */
1092 gmp_udiv_qr_3by2 (q
, n1
, n0
, n1
, n0
, np
[dn
-2+i
], d1
, d0
, dinv
);
1094 cy
= mpn_submul_1 (np
+ i
, dp
, dn
-2, q
);
1104 n1
+= d1
+ mpn_add_n (np
+ i
, np
+ i
, dp
, dn
- 1);
1118 mpn_div_qr_preinv (mp_ptr qp
, mp_ptr np
, mp_size_t nn
,
1119 mp_srcptr dp
, mp_size_t dn
,
1120 const struct gmp_div_inverse
*inv
)
1126 np
[0] = mpn_div_qr_1_preinv (qp
, np
, nn
, inv
);
1128 mpn_div_qr_2_preinv (qp
, np
, nn
, inv
);
1134 assert (inv
->d1
== dp
[dn
-1]);
1135 assert (inv
->d0
== dp
[dn
-2]);
1136 assert ((inv
->d1
& GMP_LIMB_HIGHBIT
) != 0);
1140 nh
= mpn_lshift (np
, np
, nn
, shift
);
1144 mpn_div_qr_pi1 (qp
, np
, nn
, nh
, dp
, dn
, inv
->di
);
1147 gmp_assert_nocarry (mpn_rshift (np
, np
, dn
, shift
));
1152 mpn_div_qr (mp_ptr qp
, mp_ptr np
, mp_size_t nn
, mp_srcptr dp
, mp_size_t dn
)
1154 struct gmp_div_inverse inv
;
1160 mpn_div_qr_invert (&inv
, dp
, dn
);
1161 if (dn
> 2 && inv
.shift
> 0)
1163 tp
= gmp_alloc_limbs (dn
);
1164 gmp_assert_nocarry (mpn_lshift (tp
, dp
, dn
, inv
.shift
));
1167 mpn_div_qr_preinv (qp
, np
, nn
, dp
, dn
, &inv
);
1169 gmp_free_limbs (tp
, dn
);
1173 /* MPN base conversion. */
1175 mpn_base_power_of_two_p (unsigned b
)
1191 struct mpn_base_info
1193 /* bb is the largest power of the base which fits in one limb, and
1194 exp is the corresponding exponent. */
1200 mpn_get_base_info (struct mpn_base_info
*info
, mp_limb_t b
)
1206 m
= GMP_LIMB_MAX
/ b
;
1207 for (exp
= 1, p
= b
; p
<= m
; exp
++)
1215 mpn_limb_size_in_base_2 (mp_limb_t u
)
1221 return GMP_LIMB_BITS
- shift
;
1225 mpn_get_str_bits (unsigned char *sp
, unsigned bits
, mp_srcptr up
, mp_size_t un
)
1232 sn
= ((un
- 1) * GMP_LIMB_BITS
+ mpn_limb_size_in_base_2 (up
[un
-1])
1235 mask
= (1U << bits
) - 1;
1237 for (i
= 0, j
= sn
, shift
= 0; j
-- > 0;)
1239 unsigned char digit
= up
[i
] >> shift
;
1243 if (shift
>= GMP_LIMB_BITS
&& ++i
< un
)
1245 shift
-= GMP_LIMB_BITS
;
1246 digit
|= up
[i
] << (bits
- shift
);
1248 sp
[j
] = digit
& mask
;
1253 /* We generate digits from the least significant end, and reverse at
1256 mpn_limb_get_str (unsigned char *sp
, mp_limb_t w
,
1257 const struct gmp_div_inverse
*binv
)
1260 for (i
= 0; w
> 0; i
++)
1264 h
= w
>> (GMP_LIMB_BITS
- binv
->shift
);
1265 l
= w
<< binv
->shift
;
1267 gmp_udiv_qrnnd_preinv (w
, r
, h
, l
, binv
->d1
, binv
->di
);
1268 assert ((r
& (GMP_LIMB_MAX
>> (GMP_LIMB_BITS
- binv
->shift
))) == 0);
1277 mpn_get_str_other (unsigned char *sp
,
1278 int base
, const struct mpn_base_info
*info
,
1279 mp_ptr up
, mp_size_t un
)
1281 struct gmp_div_inverse binv
;
1285 mpn_div_qr_1_invert (&binv
, base
);
1291 struct gmp_div_inverse bbinv
;
1292 mpn_div_qr_1_invert (&bbinv
, info
->bb
);
1298 w
= mpn_div_qr_1_preinv (up
, up
, un
, &bbinv
);
1299 un
-= (up
[un
-1] == 0);
1300 done
= mpn_limb_get_str (sp
+ sn
, w
, &binv
);
1302 for (sn
+= done
; done
< info
->exp
; done
++)
1307 sn
+= mpn_limb_get_str (sp
+ sn
, up
[0], &binv
);
1310 for (i
= 0; 2*i
+ 1 < sn
; i
++)
1312 unsigned char t
= sp
[i
];
1313 sp
[i
] = sp
[sn
- i
- 1];
1321 mpn_get_str (unsigned char *sp
, int base
, mp_ptr up
, mp_size_t un
)
1326 assert (up
[un
-1] > 0);
1328 bits
= mpn_base_power_of_two_p (base
);
1330 return mpn_get_str_bits (sp
, bits
, up
, un
);
1333 struct mpn_base_info info
;
1335 mpn_get_base_info (&info
, base
);
1336 return mpn_get_str_other (sp
, base
, &info
, up
, un
);
1341 mpn_set_str_bits (mp_ptr rp
, const unsigned char *sp
, size_t sn
,
1348 for (limb
= 0, rn
= 0, shift
= 0; sn
-- > 0; )
1350 limb
|= (mp_limb_t
) sp
[sn
] << shift
;
1352 if (shift
>= GMP_LIMB_BITS
)
1354 shift
-= GMP_LIMB_BITS
;
1356 /* Next line is correct also if shift == 0,
1357 bits == 8, and mp_limb_t == unsigned char. */
1358 limb
= (unsigned int) sp
[sn
] >> (bits
- shift
);
1364 rn
= mpn_normalized_size (rp
, rn
);
1368 /* Result is usually normalized, except for all-zero input, in which
1369 case a single zero limb is written at *RP, and 1 is returned. */
1371 mpn_set_str_other (mp_ptr rp
, const unsigned char *sp
, size_t sn
,
1372 mp_limb_t b
, const struct mpn_base_info
*info
)
1381 k
= 1 + (sn
- 1) % info
->exp
;
1386 w
= w
* b
+ sp
[j
++];
1390 for (rn
= 1; j
< sn
;)
1395 for (k
= 1; k
< info
->exp
; k
++)
1396 w
= w
* b
+ sp
[j
++];
1398 cy
= mpn_mul_1 (rp
, rp
, rn
, info
->bb
);
1399 cy
+= mpn_add_1 (rp
, rp
, rn
, w
);
1409 mpn_set_str (mp_ptr rp
, const unsigned char *sp
, size_t sn
, int base
)
1416 bits
= mpn_base_power_of_two_p (base
);
1418 return mpn_set_str_bits (rp
, sp
, sn
, bits
);
1421 struct mpn_base_info info
;
1423 mpn_get_base_info (&info
, base
);
1424 return mpn_set_str_other (rp
, sp
, sn
, base
, &info
);
1433 static const mp_limb_t dummy_limb
= GMP_LIMB_MAX
& 0xc1a0;
1437 r
->_mp_d
= (mp_ptr
) &dummy_limb
;
1440 /* The utility of this function is a bit limited, since many functions
1441 assigns the result variable using mpz_swap. */
1443 mpz_init2 (mpz_t r
, mp_bitcnt_t bits
)
1447 bits
-= (bits
!= 0); /* Round down, except if 0 */
1448 rn
= 1 + bits
/ GMP_LIMB_BITS
;
1452 r
->_mp_d
= gmp_alloc_limbs (rn
);
1459 gmp_free_limbs (r
->_mp_d
, r
->_mp_alloc
);
1463 mpz_realloc (mpz_t r
, mp_size_t size
)
1465 size
= GMP_MAX (size
, 1);
1468 r
->_mp_d
= gmp_realloc_limbs (r
->_mp_d
, r
->_mp_alloc
, size
);
1470 r
->_mp_d
= gmp_alloc_limbs (size
);
1471 r
->_mp_alloc
= size
;
1473 if (GMP_ABS (r
->_mp_size
) > size
)
1479 /* Realloc for an mpz_t WHAT if it has less than NEEDED limbs. */
1480 #define MPZ_REALLOC(z,n) ((n) > (z)->_mp_alloc \
1481 ? mpz_realloc(z,n) \
1484 /* MPZ assignment and basic conversions. */
1486 mpz_set_si (mpz_t r
, signed long int x
)
1491 if (GMP_LIMB_BITS
< GMP_ULONG_BITS
)
1493 mpz_set_ui (r
, GMP_NEG_CAST (unsigned long int, x
));
1499 MPZ_REALLOC (r
, 1)[0] = GMP_NEG_CAST (unsigned long int, x
);
1504 mpz_set_ui (mpz_t r
, unsigned long int x
)
1509 MPZ_REALLOC (r
, 1)[0] = x
;
1510 if (GMP_LIMB_BITS
< GMP_ULONG_BITS
)
1512 int LOCAL_GMP_LIMB_BITS
= GMP_LIMB_BITS
;
1513 while (x
>>= LOCAL_GMP_LIMB_BITS
)
1516 MPZ_REALLOC (r
, r
->_mp_size
)[r
->_mp_size
- 1] = x
;
1525 mpz_set (mpz_t r
, const mpz_t x
)
1527 /* Allow the NOP r == x */
1533 n
= GMP_ABS (x
->_mp_size
);
1534 rp
= MPZ_REALLOC (r
, n
);
1536 mpn_copyi (rp
, x
->_mp_d
, n
);
1537 r
->_mp_size
= x
->_mp_size
;
1542 mpz_init_set_si (mpz_t r
, signed long int x
)
1549 mpz_init_set_ui (mpz_t r
, unsigned long int x
)
1556 mpz_init_set (mpz_t r
, const mpz_t x
)
1563 mpz_fits_slong_p (const mpz_t u
)
1565 return mpz_cmp_si (u
, LONG_MAX
) <= 0 && mpz_cmp_si (u
, LONG_MIN
) >= 0;
1569 mpn_absfits_ulong_p (mp_srcptr up
, mp_size_t un
)
1571 int ulongsize
= GMP_ULONG_BITS
/ GMP_LIMB_BITS
;
1572 mp_limb_t ulongrem
= 0;
1574 if (GMP_ULONG_BITS
% GMP_LIMB_BITS
!= 0)
1575 ulongrem
= (mp_limb_t
) (ULONG_MAX
>> GMP_LIMB_BITS
* ulongsize
) + 1;
1577 return un
<= ulongsize
|| (up
[ulongsize
] < ulongrem
&& un
== ulongsize
+ 1);
1581 mpz_fits_ulong_p (const mpz_t u
)
1583 mp_size_t us
= u
->_mp_size
;
1585 return us
>= 0 && mpn_absfits_ulong_p (u
->_mp_d
, us
);
1589 mpz_fits_sint_p (const mpz_t u
)
1591 return mpz_cmp_si (u
, INT_MAX
) <= 0 && mpz_cmp_si (u
, INT_MIN
) >= 0;
1595 mpz_fits_uint_p (const mpz_t u
)
1597 return u
->_mp_size
>= 0 && mpz_cmpabs_ui (u
, UINT_MAX
) <= 0;
1601 mpz_fits_sshort_p (const mpz_t u
)
1603 return mpz_cmp_si (u
, SHRT_MAX
) <= 0 && mpz_cmp_si (u
, SHRT_MIN
) >= 0;
1607 mpz_fits_ushort_p (const mpz_t u
)
1609 return u
->_mp_size
>= 0 && mpz_cmpabs_ui (u
, USHRT_MAX
) <= 0;
1613 mpz_get_si (const mpz_t u
)
1615 unsigned long r
= mpz_get_ui (u
);
1616 unsigned long c
= -LONG_MAX
- LONG_MIN
;
1618 if (u
->_mp_size
< 0)
1619 /* This expression is necessary to properly handle -LONG_MIN */
1620 return -(long) c
- (long) ((r
- c
) & LONG_MAX
);
1622 return (long) (r
& LONG_MAX
);
1626 mpz_get_ui (const mpz_t u
)
1628 if (GMP_LIMB_BITS
< GMP_ULONG_BITS
)
1630 int LOCAL_GMP_LIMB_BITS
= GMP_LIMB_BITS
;
1631 unsigned long r
= 0;
1632 mp_size_t n
= GMP_ABS (u
->_mp_size
);
1633 n
= GMP_MIN (n
, 1 + (mp_size_t
) (GMP_ULONG_BITS
- 1) / GMP_LIMB_BITS
);
1635 r
= (r
<< LOCAL_GMP_LIMB_BITS
) + u
->_mp_d
[n
];
1639 return u
->_mp_size
== 0 ? 0 : u
->_mp_d
[0];
1643 mpz_size (const mpz_t u
)
1645 return GMP_ABS (u
->_mp_size
);
1649 mpz_getlimbn (const mpz_t u
, mp_size_t n
)
1651 if (n
>= 0 && n
< GMP_ABS (u
->_mp_size
))
1658 mpz_realloc2 (mpz_t x
, mp_bitcnt_t n
)
1660 mpz_realloc (x
, 1 + (n
- (n
!= 0)) / GMP_LIMB_BITS
);
1664 mpz_limbs_read (mpz_srcptr x
)
1670 mpz_limbs_modify (mpz_t x
, mp_size_t n
)
1673 return MPZ_REALLOC (x
, n
);
1677 mpz_limbs_write (mpz_t x
, mp_size_t n
)
1679 return mpz_limbs_modify (x
, n
);
1683 mpz_limbs_finish (mpz_t x
, mp_size_t xs
)
1686 xn
= mpn_normalized_size (x
->_mp_d
, GMP_ABS (xs
));
1687 x
->_mp_size
= xs
< 0 ? -xn
: xn
;
1691 mpz_roinit_normal_n (mpz_t x
, mp_srcptr xp
, mp_size_t xs
)
1694 x
->_mp_d
= (mp_ptr
) xp
;
1700 mpz_roinit_n (mpz_t x
, mp_srcptr xp
, mp_size_t xs
)
1702 mpz_roinit_normal_n (x
, xp
, xs
);
1703 mpz_limbs_finish (x
, xs
);
1708 /* Conversions and comparison to double. */
1710 mpz_set_d (mpz_t r
, double x
)
1719 /* x != x is true when x is a NaN, and x == x * 0.5 is true when x is
1720 zero or infinity. */
1721 if (x
!= x
|| x
== x
* 0.5)
1736 B
= 4.0 * (double) (GMP_LIMB_HIGHBIT
>> 1);
1738 for (rn
= 1; x
>= B
; rn
++)
1741 rp
= MPZ_REALLOC (r
, rn
);
1757 r
->_mp_size
= sign
? - rn
: rn
;
1761 mpz_init_set_d (mpz_t r
, double x
)
1768 mpz_get_d (const mpz_t u
)
1774 double B
= 4.0 * (double) (GMP_LIMB_HIGHBIT
>> 1);
1776 un
= GMP_ABS (u
->_mp_size
);
1783 m
= m
+ GMP_DBL_MANT_BITS
- GMP_LIMB_BITS
;
1785 l
&= GMP_LIMB_MAX
<< -m
;
1787 for (x
= l
; --un
>= 0;)
1794 l
&= GMP_LIMB_MAX
<< -m
;
1799 if (u
->_mp_size
< 0)
1806 mpz_cmpabs_d (const mpz_t x
, double d
)
1819 B
= 4.0 * (double) (GMP_LIMB_HIGHBIT
>> 1);
1822 /* Scale d so it can be compared with the top limb. */
1823 for (i
= 1; i
< xn
; i
++)
1829 /* Compare floor(d) to top limb, subtract and cancel when equal. */
1830 for (i
= xn
; i
-- > 0;)
1847 mpz_cmp_d (const mpz_t x
, double d
)
1849 if (x
->_mp_size
< 0)
1854 return -mpz_cmpabs_d (x
, d
);
1861 return mpz_cmpabs_d (x
, d
);
1866 /* MPZ comparisons and the like. */
1868 mpz_sgn (const mpz_t u
)
1870 return GMP_CMP (u
->_mp_size
, 0);
1874 mpz_cmp_si (const mpz_t u
, long v
)
1876 mp_size_t usize
= u
->_mp_size
;
1879 return mpz_cmp_ui (u
, v
);
1880 else if (usize
>= 0)
1883 return - mpz_cmpabs_ui (u
, GMP_NEG_CAST (unsigned long int, v
));
1887 mpz_cmp_ui (const mpz_t u
, unsigned long v
)
1889 mp_size_t usize
= u
->_mp_size
;
1894 return mpz_cmpabs_ui (u
, v
);
1898 mpz_cmp (const mpz_t a
, const mpz_t b
)
1900 mp_size_t asize
= a
->_mp_size
;
1901 mp_size_t bsize
= b
->_mp_size
;
1904 return (asize
< bsize
) ? -1 : 1;
1905 else if (asize
>= 0)
1906 return mpn_cmp (a
->_mp_d
, b
->_mp_d
, asize
);
1908 return mpn_cmp (b
->_mp_d
, a
->_mp_d
, -asize
);
1912 mpz_cmpabs_ui (const mpz_t u
, unsigned long v
)
1914 mp_size_t un
= GMP_ABS (u
->_mp_size
);
1916 if (! mpn_absfits_ulong_p (u
->_mp_d
, un
))
1920 unsigned long uu
= mpz_get_ui (u
);
1921 return GMP_CMP(uu
, v
);
1926 mpz_cmpabs (const mpz_t u
, const mpz_t v
)
1928 return mpn_cmp4 (u
->_mp_d
, GMP_ABS (u
->_mp_size
),
1929 v
->_mp_d
, GMP_ABS (v
->_mp_size
));
1933 mpz_abs (mpz_t r
, const mpz_t u
)
1936 r
->_mp_size
= GMP_ABS (r
->_mp_size
);
1940 mpz_neg (mpz_t r
, const mpz_t u
)
1943 r
->_mp_size
= -r
->_mp_size
;
1947 mpz_swap (mpz_t u
, mpz_t v
)
1949 MP_SIZE_T_SWAP (u
->_mp_alloc
, v
->_mp_alloc
);
1950 MPN_PTR_SWAP (u
->_mp_d
, u
->_mp_size
, v
->_mp_d
, v
->_mp_size
);
1954 /* MPZ addition and subtraction */
1958 mpz_add_ui (mpz_t r
, const mpz_t a
, unsigned long b
)
1961 mpz_init_set_ui (bb
, b
);
1967 mpz_sub_ui (mpz_t r
, const mpz_t a
, unsigned long b
)
1969 mpz_ui_sub (r
, b
, a
);
1974 mpz_ui_sub (mpz_t r
, unsigned long a
, const mpz_t b
)
1977 mpz_add_ui (r
, r
, a
);
1981 mpz_abs_add (mpz_t r
, const mpz_t a
, const mpz_t b
)
1983 mp_size_t an
= GMP_ABS (a
->_mp_size
);
1984 mp_size_t bn
= GMP_ABS (b
->_mp_size
);
1990 MPZ_SRCPTR_SWAP (a
, b
);
1991 MP_SIZE_T_SWAP (an
, bn
);
1994 rp
= MPZ_REALLOC (r
, an
+ 1);
1995 cy
= mpn_add (rp
, a
->_mp_d
, an
, b
->_mp_d
, bn
);
2003 mpz_abs_sub (mpz_t r
, const mpz_t a
, const mpz_t b
)
2005 mp_size_t an
= GMP_ABS (a
->_mp_size
);
2006 mp_size_t bn
= GMP_ABS (b
->_mp_size
);
2010 cmp
= mpn_cmp4 (a
->_mp_d
, an
, b
->_mp_d
, bn
);
2013 rp
= MPZ_REALLOC (r
, an
);
2014 gmp_assert_nocarry (mpn_sub (rp
, a
->_mp_d
, an
, b
->_mp_d
, bn
));
2015 return mpn_normalized_size (rp
, an
);
2019 rp
= MPZ_REALLOC (r
, bn
);
2020 gmp_assert_nocarry (mpn_sub (rp
, b
->_mp_d
, bn
, a
->_mp_d
, an
));
2021 return -mpn_normalized_size (rp
, bn
);
2028 mpz_add (mpz_t r
, const mpz_t a
, const mpz_t b
)
2032 if ( (a
->_mp_size
^ b
->_mp_size
) >= 0)
2033 rn
= mpz_abs_add (r
, a
, b
);
2035 rn
= mpz_abs_sub (r
, a
, b
);
2037 r
->_mp_size
= a
->_mp_size
>= 0 ? rn
: - rn
;
2041 mpz_sub (mpz_t r
, const mpz_t a
, const mpz_t b
)
2045 if ( (a
->_mp_size
^ b
->_mp_size
) >= 0)
2046 rn
= mpz_abs_sub (r
, a
, b
);
2048 rn
= mpz_abs_add (r
, a
, b
);
2050 r
->_mp_size
= a
->_mp_size
>= 0 ? rn
: - rn
;
2054 /* MPZ multiplication */
2056 mpz_mul_si (mpz_t r
, const mpz_t u
, long int v
)
2060 mpz_mul_ui (r
, u
, GMP_NEG_CAST (unsigned long int, v
));
2064 mpz_mul_ui (r
, u
, v
);
2068 mpz_mul_ui (mpz_t r
, const mpz_t u
, unsigned long int v
)
2071 mpz_init_set_ui (vv
, v
);
2078 mpz_mul (mpz_t r
, const mpz_t u
, const mpz_t v
)
2081 mp_size_t un
, vn
, rn
;
2088 if (un
== 0 || vn
== 0)
2094 sign
= (un
^ vn
) < 0;
2099 mpz_init2 (t
, (un
+ vn
) * GMP_LIMB_BITS
);
2103 mpn_mul (tp
, u
->_mp_d
, un
, v
->_mp_d
, vn
);
2105 mpn_mul (tp
, v
->_mp_d
, vn
, u
->_mp_d
, un
);
2108 rn
-= tp
[rn
-1] == 0;
2110 t
->_mp_size
= sign
? - rn
: rn
;
2116 mpz_mul_2exp (mpz_t r
, const mpz_t u
, mp_bitcnt_t bits
)
2123 un
= GMP_ABS (u
->_mp_size
);
2130 limbs
= bits
/ GMP_LIMB_BITS
;
2131 shift
= bits
% GMP_LIMB_BITS
;
2133 rn
= un
+ limbs
+ (shift
> 0);
2134 rp
= MPZ_REALLOC (r
, rn
);
2137 mp_limb_t cy
= mpn_lshift (rp
+ limbs
, u
->_mp_d
, un
, shift
);
2142 mpn_copyd (rp
+ limbs
, u
->_mp_d
, un
);
2144 mpn_zero (rp
, limbs
);
2146 r
->_mp_size
= (u
->_mp_size
< 0) ? - rn
: rn
;
2150 mpz_addmul_ui (mpz_t r
, const mpz_t u
, unsigned long int v
)
2153 mpz_init_set_ui (t
, v
);
2160 mpz_submul_ui (mpz_t r
, const mpz_t u
, unsigned long int v
)
2163 mpz_init_set_ui (t
, v
);
2170 mpz_addmul (mpz_t r
, const mpz_t u
, const mpz_t v
)
2180 mpz_submul (mpz_t r
, const mpz_t u
, const mpz_t v
)
2191 enum mpz_div_round_mode
{ GMP_DIV_FLOOR
, GMP_DIV_CEIL
, GMP_DIV_TRUNC
};
2193 /* Allows q or r to be zero. Returns 1 iff remainder is non-zero. */
2195 mpz_div_qr (mpz_t q
, mpz_t r
,
2196 const mpz_t n
, const mpz_t d
, enum mpz_div_round_mode mode
)
2198 mp_size_t ns
, ds
, nn
, dn
, qs
;
2203 gmp_die("mpz_div_qr: Divide by zero.");
2221 if (mode
== GMP_DIV_CEIL
&& qs
>= 0)
2223 /* q = 1, r = n - d */
2229 else if (mode
== GMP_DIV_FLOOR
&& qs
< 0)
2231 /* q = -1, r = n + d */
2253 mpz_init_set (tr
, n
);
2260 mpz_init2 (tq
, qn
* GMP_LIMB_BITS
);
2266 mpn_div_qr (qp
, np
, nn
, d
->_mp_d
, dn
);
2270 qn
-= (qp
[qn
-1] == 0);
2272 tq
->_mp_size
= qs
< 0 ? -qn
: qn
;
2274 rn
= mpn_normalized_size (np
, dn
);
2275 tr
->_mp_size
= ns
< 0 ? - rn
: rn
;
2277 if (mode
== GMP_DIV_FLOOR
&& qs
< 0 && rn
!= 0)
2280 mpz_sub_ui (tq
, tq
, 1);
2282 mpz_add (tr
, tr
, d
);
2284 else if (mode
== GMP_DIV_CEIL
&& qs
>= 0 && rn
!= 0)
2287 mpz_add_ui (tq
, tq
, 1);
2289 mpz_sub (tr
, tr
, d
);
2307 mpz_cdiv_qr (mpz_t q
, mpz_t r
, const mpz_t n
, const mpz_t d
)
2309 mpz_div_qr (q
, r
, n
, d
, GMP_DIV_CEIL
);
2313 mpz_fdiv_qr (mpz_t q
, mpz_t r
, const mpz_t n
, const mpz_t d
)
2315 mpz_div_qr (q
, r
, n
, d
, GMP_DIV_FLOOR
);
2319 mpz_tdiv_qr (mpz_t q
, mpz_t r
, const mpz_t n
, const mpz_t d
)
2321 mpz_div_qr (q
, r
, n
, d
, GMP_DIV_TRUNC
);
2325 mpz_cdiv_q (mpz_t q
, const mpz_t n
, const mpz_t d
)
2327 mpz_div_qr (q
, NULL
, n
, d
, GMP_DIV_CEIL
);
2331 mpz_fdiv_q (mpz_t q
, const mpz_t n
, const mpz_t d
)
2333 mpz_div_qr (q
, NULL
, n
, d
, GMP_DIV_FLOOR
);
2337 mpz_tdiv_q (mpz_t q
, const mpz_t n
, const mpz_t d
)
2339 mpz_div_qr (q
, NULL
, n
, d
, GMP_DIV_TRUNC
);
2343 mpz_cdiv_r (mpz_t r
, const mpz_t n
, const mpz_t d
)
2345 mpz_div_qr (NULL
, r
, n
, d
, GMP_DIV_CEIL
);
2349 mpz_fdiv_r (mpz_t r
, const mpz_t n
, const mpz_t d
)
2351 mpz_div_qr (NULL
, r
, n
, d
, GMP_DIV_FLOOR
);
2355 mpz_tdiv_r (mpz_t r
, const mpz_t n
, const mpz_t d
)
2357 mpz_div_qr (NULL
, r
, n
, d
, GMP_DIV_TRUNC
);
2361 mpz_mod (mpz_t r
, const mpz_t n
, const mpz_t d
)
2363 mpz_div_qr (NULL
, r
, n
, d
, d
->_mp_size
>= 0 ? GMP_DIV_FLOOR
: GMP_DIV_CEIL
);
2367 mpz_div_q_2exp (mpz_t q
, const mpz_t u
, mp_bitcnt_t bit_index
,
2368 enum mpz_div_round_mode mode
)
2381 limb_cnt
= bit_index
/ GMP_LIMB_BITS
;
2382 qn
= GMP_ABS (un
) - limb_cnt
;
2383 bit_index
%= GMP_LIMB_BITS
;
2385 if (mode
== ((un
> 0) ? GMP_DIV_CEIL
: GMP_DIV_FLOOR
)) /* un != 0 here. */
2386 /* Note: Below, the final indexing at limb_cnt is valid because at
2387 that point we have qn > 0. */
2389 || !mpn_zero_p (u
->_mp_d
, limb_cnt
)
2390 || (u
->_mp_d
[limb_cnt
]
2391 & (((mp_limb_t
) 1 << bit_index
) - 1)));
2399 qp
= MPZ_REALLOC (q
, qn
);
2403 mpn_rshift (qp
, u
->_mp_d
+ limb_cnt
, qn
, bit_index
);
2404 qn
-= qp
[qn
- 1] == 0;
2408 mpn_copyi (qp
, u
->_mp_d
+ limb_cnt
, qn
);
2415 mpz_add_ui (q
, q
, 1);
2421 mpz_div_r_2exp (mpz_t r
, const mpz_t u
, mp_bitcnt_t bit_index
,
2422 enum mpz_div_round_mode mode
)
2424 mp_size_t us
, un
, rn
;
2429 if (us
== 0 || bit_index
== 0)
2434 rn
= (bit_index
+ GMP_LIMB_BITS
- 1) / GMP_LIMB_BITS
;
2437 rp
= MPZ_REALLOC (r
, rn
);
2440 mask
= GMP_LIMB_MAX
>> (rn
* GMP_LIMB_BITS
- bit_index
);
2444 /* Quotient (with truncation) is zero, and remainder is
2446 if (mode
== ((us
> 0) ? GMP_DIV_CEIL
: GMP_DIV_FLOOR
)) /* us != 0 here. */
2448 /* Have to negate and sign extend. */
2451 gmp_assert_nocarry (! mpn_neg (rp
, u
->_mp_d
, un
));
2452 for (i
= un
; i
< rn
- 1; i
++)
2453 rp
[i
] = GMP_LIMB_MAX
;
2462 mpn_copyi (rp
, u
->_mp_d
, un
);
2470 mpn_copyi (rp
, u
->_mp_d
, rn
- 1);
2472 rp
[rn
-1] = u
->_mp_d
[rn
-1] & mask
;
2474 if (mode
== ((us
> 0) ? GMP_DIV_CEIL
: GMP_DIV_FLOOR
)) /* us != 0 here. */
2476 /* If r != 0, compute 2^{bit_count} - r. */
2477 mpn_neg (rp
, rp
, rn
);
2481 /* us is not used for anything else, so we can modify it
2482 here to indicate flipped sign. */
2486 rn
= mpn_normalized_size (rp
, rn
);
2487 r
->_mp_size
= us
< 0 ? -rn
: rn
;
2491 mpz_cdiv_q_2exp (mpz_t r
, const mpz_t u
, mp_bitcnt_t cnt
)
2493 mpz_div_q_2exp (r
, u
, cnt
, GMP_DIV_CEIL
);
2497 mpz_fdiv_q_2exp (mpz_t r
, const mpz_t u
, mp_bitcnt_t cnt
)
2499 mpz_div_q_2exp (r
, u
, cnt
, GMP_DIV_FLOOR
);
2503 mpz_tdiv_q_2exp (mpz_t r
, const mpz_t u
, mp_bitcnt_t cnt
)
2505 mpz_div_q_2exp (r
, u
, cnt
, GMP_DIV_TRUNC
);
2509 mpz_cdiv_r_2exp (mpz_t r
, const mpz_t u
, mp_bitcnt_t cnt
)
2511 mpz_div_r_2exp (r
, u
, cnt
, GMP_DIV_CEIL
);
2515 mpz_fdiv_r_2exp (mpz_t r
, const mpz_t u
, mp_bitcnt_t cnt
)
2517 mpz_div_r_2exp (r
, u
, cnt
, GMP_DIV_FLOOR
);
2521 mpz_tdiv_r_2exp (mpz_t r
, const mpz_t u
, mp_bitcnt_t cnt
)
2523 mpz_div_r_2exp (r
, u
, cnt
, GMP_DIV_TRUNC
);
2527 mpz_divexact (mpz_t q
, const mpz_t n
, const mpz_t d
)
2529 gmp_assert_nocarry (mpz_div_qr (q
, NULL
, n
, d
, GMP_DIV_TRUNC
));
2533 mpz_divisible_p (const mpz_t n
, const mpz_t d
)
2535 return mpz_div_qr (NULL
, NULL
, n
, d
, GMP_DIV_TRUNC
) == 0;
2539 mpz_congruent_p (const mpz_t a
, const mpz_t b
, const mpz_t m
)
2544 /* a == b (mod 0) iff a == b */
2545 if (mpz_sgn (m
) == 0)
2546 return (mpz_cmp (a
, b
) == 0);
2550 res
= mpz_divisible_p (t
, m
);
2556 static unsigned long
2557 mpz_div_qr_ui (mpz_t q
, mpz_t r
,
2558 const mpz_t n
, unsigned long d
, enum mpz_div_round_mode mode
)
2564 mpz_init_set_ui (dd
, d
);
2565 mpz_div_qr (q
, rr
, n
, dd
, mode
);
2567 ret
= mpz_get_ui (rr
);
2577 mpz_cdiv_qr_ui (mpz_t q
, mpz_t r
, const mpz_t n
, unsigned long d
)
2579 return mpz_div_qr_ui (q
, r
, n
, d
, GMP_DIV_CEIL
);
2583 mpz_fdiv_qr_ui (mpz_t q
, mpz_t r
, const mpz_t n
, unsigned long d
)
2585 return mpz_div_qr_ui (q
, r
, n
, d
, GMP_DIV_FLOOR
);
2589 mpz_tdiv_qr_ui (mpz_t q
, mpz_t r
, const mpz_t n
, unsigned long d
)
2591 return mpz_div_qr_ui (q
, r
, n
, d
, GMP_DIV_TRUNC
);
2595 mpz_cdiv_q_ui (mpz_t q
, const mpz_t n
, unsigned long d
)
2597 return mpz_div_qr_ui (q
, NULL
, n
, d
, GMP_DIV_CEIL
);
2601 mpz_fdiv_q_ui (mpz_t q
, const mpz_t n
, unsigned long d
)
2603 return mpz_div_qr_ui (q
, NULL
, n
, d
, GMP_DIV_FLOOR
);
2607 mpz_tdiv_q_ui (mpz_t q
, const mpz_t n
, unsigned long d
)
2609 return mpz_div_qr_ui (q
, NULL
, n
, d
, GMP_DIV_TRUNC
);
2613 mpz_cdiv_r_ui (mpz_t r
, const mpz_t n
, unsigned long d
)
2615 return mpz_div_qr_ui (NULL
, r
, n
, d
, GMP_DIV_CEIL
);
2618 mpz_fdiv_r_ui (mpz_t r
, const mpz_t n
, unsigned long d
)
2620 return mpz_div_qr_ui (NULL
, r
, n
, d
, GMP_DIV_FLOOR
);
2623 mpz_tdiv_r_ui (mpz_t r
, const mpz_t n
, unsigned long d
)
2625 return mpz_div_qr_ui (NULL
, r
, n
, d
, GMP_DIV_TRUNC
);
2629 mpz_cdiv_ui (const mpz_t n
, unsigned long d
)
2631 return mpz_div_qr_ui (NULL
, NULL
, n
, d
, GMP_DIV_CEIL
);
2635 mpz_fdiv_ui (const mpz_t n
, unsigned long d
)
2637 return mpz_div_qr_ui (NULL
, NULL
, n
, d
, GMP_DIV_FLOOR
);
2641 mpz_tdiv_ui (const mpz_t n
, unsigned long d
)
2643 return mpz_div_qr_ui (NULL
, NULL
, n
, d
, GMP_DIV_TRUNC
);
2647 mpz_mod_ui (mpz_t r
, const mpz_t n
, unsigned long d
)
2649 return mpz_div_qr_ui (NULL
, r
, n
, d
, GMP_DIV_FLOOR
);
2653 mpz_divexact_ui (mpz_t q
, const mpz_t n
, unsigned long d
)
2655 gmp_assert_nocarry (mpz_div_qr_ui (q
, NULL
, n
, d
, GMP_DIV_TRUNC
));
2659 mpz_divisible_ui_p (const mpz_t n
, unsigned long d
)
2661 return mpz_div_qr_ui (NULL
, NULL
, n
, d
, GMP_DIV_TRUNC
) == 0;
2667 mpn_gcd_11 (mp_limb_t u
, mp_limb_t v
)
2671 assert ( (u
| v
) > 0);
2678 gmp_ctz (shift
, u
| v
);
2684 MP_LIMB_T_SWAP (u
, v
);
2686 while ( (v
& 1) == 0)
2696 while ( (u
& 1) == 0);
2703 while ( (v
& 1) == 0);
2710 mpz_gcd_ui (mpz_t g
, const mpz_t u
, unsigned long v
)
2713 mpz_init_set_ui(t
, v
);
2727 mpz_make_odd (mpz_t r
)
2731 assert (r
->_mp_size
> 0);
2732 /* Count trailing zeros, equivalent to mpn_scan1, because we know that there is a 1 */
2733 shift
= mpn_scan1 (r
->_mp_d
, 0);
2734 mpz_tdiv_q_2exp (r
, r
, shift
);
2740 mpz_gcd (mpz_t g
, const mpz_t u
, const mpz_t v
)
2743 mp_bitcnt_t uz
, vz
, gz
;
2745 if (u
->_mp_size
== 0)
2750 if (v
->_mp_size
== 0)
2760 uz
= mpz_make_odd (tu
);
2762 vz
= mpz_make_odd (tv
);
2763 gz
= GMP_MIN (uz
, vz
);
2765 if (tu
->_mp_size
< tv
->_mp_size
)
2768 mpz_tdiv_r (tu
, tu
, tv
);
2769 if (tu
->_mp_size
== 0)
2779 c
= mpz_cmp (tu
, tv
);
2788 if (tv
->_mp_size
== 1)
2792 mpz_tdiv_r (tu
, tu
, tv
);
2793 gp
= MPZ_REALLOC (g
, 1); /* gp = mpz_limbs_modify (g, 1); */
2794 *gp
= mpn_gcd_11 (tu
->_mp_d
[0], tv
->_mp_d
[0]);
2796 g
->_mp_size
= *gp
!= 0; /* mpz_limbs_finish (g, 1); */
2799 mpz_sub (tu
, tu
, tv
);
2803 mpz_mul_2exp (g
, g
, gz
);
2807 mpz_gcdext (mpz_t g
, mpz_t s
, mpz_t t
, const mpz_t u
, const mpz_t v
)
2809 mpz_t tu
, tv
, s0
, s1
, t0
, t1
;
2810 mp_bitcnt_t uz
, vz
, gz
;
2814 if (u
->_mp_size
== 0)
2816 /* g = 0 u + sgn(v) v */
2817 signed long sign
= mpz_sgn (v
);
2822 mpz_set_si (t
, sign
);
2826 if (v
->_mp_size
== 0)
2828 /* g = sgn(u) u + 0 v */
2829 signed long sign
= mpz_sgn (u
);
2832 mpz_set_si (s
, sign
);
2846 uz
= mpz_make_odd (tu
);
2848 vz
= mpz_make_odd (tv
);
2849 gz
= GMP_MIN (uz
, vz
);
2854 /* Cofactors corresponding to odd gcd. gz handled later. */
2855 if (tu
->_mp_size
< tv
->_mp_size
)
2858 MPZ_SRCPTR_SWAP (u
, v
);
2859 MPZ_PTR_SWAP (s
, t
);
2860 MP_BITCNT_T_SWAP (uz
, vz
);
2868 * where u and v denote the inputs with common factors of two
2869 * eliminated, and det (s0, t0; s1, t1) = 2^p. Then
2871 * 2^p tu = s1 u - t1 v
2872 * 2^p tv = -s0 u + t0 v
2875 /* After initial division, tu = q tv + tu', we have
2877 * u = 2^uz (tu' + q tv)
2882 * t0 = 2^uz, t1 = 2^uz q
2886 mpz_tdiv_qr (t1
, tu
, tu
, tv
);
2887 mpz_mul_2exp (t1
, t1
, uz
);
2889 mpz_setbit (s1
, vz
);
2892 if (tu
->_mp_size
> 0)
2895 shift
= mpz_make_odd (tu
);
2896 mpz_setbit (t0
, uz
+ shift
);
2902 c
= mpz_cmp (tu
, tv
);
2910 * u = t0 tu + t1 (tv' + tu) = (t0 + t1) tu + t1 tv'
2911 * v = s0 tu + s1 (tv' + tu) = (s0 + s1) tu + s1 tv' */
2913 mpz_sub (tv
, tv
, tu
);
2914 mpz_add (t0
, t0
, t1
);
2915 mpz_add (s0
, s0
, s1
);
2917 shift
= mpz_make_odd (tv
);
2918 mpz_mul_2exp (t1
, t1
, shift
);
2919 mpz_mul_2exp (s1
, s1
, shift
);
2923 mpz_sub (tu
, tu
, tv
);
2924 mpz_add (t1
, t0
, t1
);
2925 mpz_add (s1
, s0
, s1
);
2927 shift
= mpz_make_odd (tu
);
2928 mpz_mul_2exp (t0
, t0
, shift
);
2929 mpz_mul_2exp (s0
, s0
, shift
);
2935 mpz_setbit (t0
, uz
);
2937 /* Now tv = odd part of gcd, and -s0 and t0 are corresponding
2940 mpz_mul_2exp (tv
, tv
, gz
);
2943 /* 2^p g = s0 u + t0 v. Eliminate one factor of two at a time. To
2944 adjust cofactors, we need u / g and v / g */
2946 mpz_divexact (s1
, v
, tv
);
2948 mpz_divexact (t1
, u
, tv
);
2953 /* s0 u + t0 v = (s0 - v/g) u - (t0 + u/g) v */
2954 if (mpz_odd_p (s0
) || mpz_odd_p (t0
))
2956 mpz_sub (s0
, s0
, s1
);
2957 mpz_add (t0
, t0
, t1
);
2959 assert (mpz_even_p (t0
) && mpz_even_p (s0
));
2960 mpz_tdiv_q_2exp (s0
, s0
, 1);
2961 mpz_tdiv_q_2exp (t0
, t0
, 1);
2964 /* Choose small cofactors (they should generally satify
2966 |s| < |u| / 2g and |t| < |v| / 2g,
2968 with some documented exceptions). Always choose the smallest s,
2969 if there are two choices for s with same absolute value, choose
2970 the one with smallest corresponding t (this asymmetric condition
2971 is needed to prefer s = 0, |t| = 1 when g = |a| = |b|). */
2972 mpz_add (s1
, s0
, s1
);
2973 mpz_sub (t1
, t0
, t1
);
2974 cmp
= mpz_cmpabs (s0
, s1
);
2975 if (cmp
> 0 || (cmp
== 0 && mpz_cmpabs (t0
, t1
) > 0))
2980 if (u
->_mp_size
< 0)
2982 if (v
->_mp_size
< 0)
3000 mpz_lcm (mpz_t r
, const mpz_t u
, const mpz_t v
)
3004 if (u
->_mp_size
== 0 || v
->_mp_size
== 0)
3013 mpz_divexact (g
, u
, g
);
3021 mpz_lcm_ui (mpz_t r
, const mpz_t u
, unsigned long v
)
3023 if (v
== 0 || u
->_mp_size
== 0)
3029 v
/= mpz_gcd_ui (NULL
, u
, v
);
3030 mpz_mul_ui (r
, u
, v
);
3036 mpz_invert (mpz_t r
, const mpz_t u
, const mpz_t m
)
3041 if (u
->_mp_size
== 0 || mpz_cmpabs_ui (m
, 1) <= 0)
3047 mpz_gcdext (g
, tr
, NULL
, u
, m
);
3048 invertible
= (mpz_cmp_ui (g
, 1) == 0);
3052 if (tr
->_mp_size
< 0)
3054 if (m
->_mp_size
>= 0)
3055 mpz_add (tr
, tr
, m
);
3057 mpz_sub (tr
, tr
, m
);
3068 /* Higher level operations (sqrt, pow and root) */
3071 mpz_pow_ui (mpz_t r
, const mpz_t b
, unsigned long e
)
3075 mpz_init_set_ui (tr
, 1);
3077 bit
= GMP_ULONG_HIGHBIT
;
3080 mpz_mul (tr
, tr
, tr
);
3082 mpz_mul (tr
, tr
, b
);
3092 mpz_ui_pow_ui (mpz_t r
, unsigned long blimb
, unsigned long e
)
3096 mpz_init_set_ui (b
, blimb
);
3097 mpz_pow_ui (r
, b
, e
);
3102 mpz_powm (mpz_t r
, const mpz_t b
, const mpz_t e
, const mpz_t m
)
3108 struct gmp_div_inverse minv
;
3112 en
= GMP_ABS (e
->_mp_size
);
3113 mn
= GMP_ABS (m
->_mp_size
);
3115 gmp_die ("mpz_powm: Zero modulo.");
3119 mpz_set_ui (r
, mpz_cmpabs_ui (m
, 1));
3124 mpn_div_qr_invert (&minv
, mp
, mn
);
3129 /* To avoid shifts, we do all our reductions, except the final
3130 one, using a *normalized* m. */
3133 tp
= gmp_alloc_limbs (mn
);
3134 gmp_assert_nocarry (mpn_lshift (tp
, mp
, mn
, shift
));
3140 if (e
->_mp_size
< 0)
3142 if (!mpz_invert (base
, b
, m
))
3143 gmp_die ("mpz_powm: Negative exponent and non-invertible base.");
3150 bn
= base
->_mp_size
;
3153 mpn_div_qr_preinv (NULL
, base
->_mp_d
, base
->_mp_size
, mp
, mn
, &minv
);
3157 /* We have reduced the absolute value. Now take care of the
3158 sign. Note that we get zero represented non-canonically as
3160 if (b
->_mp_size
< 0)
3162 mp_ptr bp
= MPZ_REALLOC (base
, mn
);
3163 gmp_assert_nocarry (mpn_sub (bp
, mp
, mn
, bp
, bn
));
3166 base
->_mp_size
= mpn_normalized_size (base
->_mp_d
, bn
);
3168 mpz_init_set_ui (tr
, 1);
3172 mp_limb_t w
= e
->_mp_d
[en
];
3175 bit
= GMP_LIMB_HIGHBIT
;
3178 mpz_mul (tr
, tr
, tr
);
3180 mpz_mul (tr
, tr
, base
);
3181 if (tr
->_mp_size
> mn
)
3183 mpn_div_qr_preinv (NULL
, tr
->_mp_d
, tr
->_mp_size
, mp
, mn
, &minv
);
3184 tr
->_mp_size
= mpn_normalized_size (tr
->_mp_d
, mn
);
3191 /* Final reduction */
3192 if (tr
->_mp_size
>= mn
)
3195 mpn_div_qr_preinv (NULL
, tr
->_mp_d
, tr
->_mp_size
, mp
, mn
, &minv
);
3196 tr
->_mp_size
= mpn_normalized_size (tr
->_mp_d
, mn
);
3199 gmp_free_limbs (tp
, mn
);
3207 mpz_powm_ui (mpz_t r
, const mpz_t b
, unsigned long elimb
, const mpz_t m
)
3211 mpz_init_set_ui (e
, elimb
);
3212 mpz_powm (r
, b
, e
, m
);
3216 /* x=trunc(y^(1/z)), r=y-x^z */
3218 mpz_rootrem (mpz_t x
, mpz_t r
, const mpz_t y
, unsigned long z
)
3224 sgn
= y
->_mp_size
< 0;
3225 if ((~z
& sgn
) != 0)
3226 gmp_die ("mpz_rootrem: Negative argument, with even root.");
3228 gmp_die ("mpz_rootrem: Zeroth root.");
3230 if (mpz_cmpabs_ui (y
, 1) <= 0) {
3240 bc
= (mpz_sizeinbase (y
, 2) - 1) / z
+ 1;
3243 if (z
== 2) /* simplify sqrt loop: z-1 == 1 */
3245 mpz_swap (u
, t
); /* u = x */
3246 mpz_tdiv_q (t
, y
, u
); /* t = y/x */
3247 mpz_add (t
, t
, u
); /* t = y/x + x */
3248 mpz_tdiv_q_2exp (t
, t
, 1); /* x'= (y/x + x)/2 */
3249 } while (mpz_cmpabs (t
, u
) < 0); /* |x'| < |x| */
3258 mpz_swap (u
, t
); /* u = x */
3259 mpz_pow_ui (t
, u
, z
- 1); /* t = x^(z-1) */
3260 mpz_tdiv_q (t
, y
, t
); /* t = y/x^(z-1) */
3261 mpz_mul_ui (v
, u
, z
- 1); /* v = x*(z-1) */
3262 mpz_add (t
, t
, v
); /* t = y/x^(z-1) + x*(z-1) */
3263 mpz_tdiv_q_ui (t
, t
, z
); /* x'=(y/x^(z-1) + x*(z-1))/z */
3264 } while (mpz_cmpabs (t
, u
) < 0); /* |x'| < |x| */
3270 mpz_pow_ui (t
, u
, z
);
3280 mpz_root (mpz_t x
, const mpz_t y
, unsigned long z
)
3286 mpz_rootrem (x
, r
, y
, z
);
3287 res
= r
->_mp_size
== 0;
3293 /* Compute s = floor(sqrt(u)) and r = u - s^2. Allows r == NULL */
3295 mpz_sqrtrem (mpz_t s
, mpz_t r
, const mpz_t u
)
3297 mpz_rootrem (s
, r
, u
, 2);
3301 mpz_sqrt (mpz_t s
, const mpz_t u
)
3303 mpz_rootrem (s
, NULL
, u
, 2);
3307 mpz_perfect_square_p (const mpz_t u
)
3309 if (u
->_mp_size
<= 0)
3310 return (u
->_mp_size
== 0);
3312 return mpz_root (NULL
, u
, 2);
3316 mpn_perfect_square_p (mp_srcptr p
, mp_size_t n
)
3321 assert (p
[n
-1] != 0);
3322 return mpz_root (NULL
, mpz_roinit_normal_n (t
, p
, n
), 2);
3326 mpn_sqrtrem (mp_ptr sp
, mp_ptr rp
, mp_srcptr p
, mp_size_t n
)
3332 assert (p
[n
-1] != 0);
3336 mpz_rootrem (s
, r
, mpz_roinit_normal_n (u
, p
, n
), 2);
3338 assert (s
->_mp_size
== (n
+1)/2);
3339 mpn_copyd (sp
, s
->_mp_d
, s
->_mp_size
);
3343 mpn_copyd (rp
, r
->_mp_d
, res
);
3351 mpz_mfac_uiui (mpz_t x
, unsigned long n
, unsigned long m
)
3353 mpz_set_ui (x
, n
+ (n
== 0));
3354 if (m
+ 1 < 2) return;
3356 mpz_mul_ui (x
, x
, n
-= m
);
3360 mpz_2fac_ui (mpz_t x
, unsigned long n
)
3362 mpz_mfac_uiui (x
, n
, 2);
3366 mpz_fac_ui (mpz_t x
, unsigned long n
)
3368 mpz_mfac_uiui (x
, n
, 1);
3372 mpz_bin_uiui (mpz_t r
, unsigned long n
, unsigned long k
)
3376 mpz_set_ui (r
, k
<= n
);
3379 k
= (k
<= n
) ? n
- k
: 0;
3385 mpz_mul_ui (r
, r
, n
--);
3387 mpz_divexact (r
, r
, t
);
3392 /* Primality testing */
3394 /* Computes Kronecker (a/b) with odd b, a!=0 and GCD(a,b) = 1 */
3395 /* Adapted from JACOBI_BASE_METHOD==4 in mpn/generic/jacbase.c */
3397 gmp_jacobi_coprime (mp_limb_t a
, mp_limb_t b
)
3403 /* assert (mpn_gcd_11 (a, b) == 1); */
3405 /* Below, we represent a and b shifted right so that the least
3406 significant one bit is implicit. */
3415 /* (2/b) = -1 if b = 3 or 5 mod 8 */
3416 bit
^= c
& (b
^ (b
>> 1));
3420 return bit
& 1 ? -1 : 1;
3437 gmp_lucas_step_k_2k (mpz_t V
, mpz_t Qk
, const mpz_t n
)
3439 mpz_mod (Qk
, Qk
, n
);
3440 /* V_{2k} <- V_k ^ 2 - 2Q^k */
3442 mpz_submul_ui (V
, Qk
, 2);
3443 mpz_tdiv_r (V
, V
, n
);
3444 /* Q^{2k} = (Q^k)^2 */
3445 mpz_mul (Qk
, Qk
, Qk
);
3448 /* Computes V_k, Q^k (mod n) for the Lucas' sequence */
3449 /* with P=1, Q=Q; k = (n>>b0)|1. */
3450 /* Requires an odd n > 4; b0 > 0; -2*Q must not overflow a long */
3451 /* Returns (U_k == 0) and sets V=V_k and Qk=Q^k. */
3453 gmp_lucas_mod (mpz_t V
, mpz_t Qk
, long Q
,
3454 mp_bitcnt_t b0
, const mpz_t n
)
3461 assert (Q
<= - (LONG_MIN
/ 2));
3462 assert (Q
>= - (LONG_MAX
/ 2));
3463 assert (mpz_cmp_ui (n
, 4) > 0);
3464 assert (mpz_odd_p (n
));
3466 mpz_init_set_ui (U
, 1); /* U1 = 1 */
3467 mpz_set_ui (V
, 1); /* V1 = 1 */
3470 for (bs
= mpz_sizeinbase (n
, 2) - 1; --bs
>= b0
;)
3472 /* U_{2k} <- U_k * V_k */
3474 /* V_{2k} <- V_k ^ 2 - 2Q^k */
3475 /* Q^{2k} = (Q^k)^2 */
3476 gmp_lucas_step_k_2k (V
, Qk
, n
);
3478 /* A step k->k+1 is performed if the bit in $n$ is 1 */
3479 /* mpz_tstbit(n,bs) or the bit is 0 in $n$ but */
3480 /* should be 1 in $n+1$ (bs == b0) */
3481 if (b0
== bs
|| mpz_tstbit (n
, bs
))
3483 /* Q^{k+1} <- Q^k * Q */
3484 mpz_mul_si (Qk
, Qk
, Q
);
3485 /* U_{k+1} <- (U_k + V_k) / 2 */
3486 mpz_swap (U
, V
); /* Keep in V the old value of U_k */
3488 /* We have to compute U/2, so we need an even value, */
3489 /* equivalent (mod n) */
3492 mpz_tdiv_q_2exp (U
, U
, 1);
3493 /* V_{k+1} <-(D*U_k + V_k) / 2 =
3494 U_{k+1} + (D-1)/2*U_k = U_{k+1} - 2Q*U_k */
3495 mpz_mul_si (V
, V
, -2*Q
);
3497 mpz_tdiv_r (V
, V
, n
);
3499 mpz_tdiv_r (U
, U
, n
);
3502 res
= U
->_mp_size
== 0;
3507 /* Performs strong Lucas' test on x, with parameters suggested */
3508 /* for the BPSW test. Qk is only passed to recycle a variable. */
3509 /* Requires GCD (x,6) = 1.*/
3511 gmp_stronglucas (const mpz_t x
, mpz_t Qk
)
3515 mp_limb_t maxD
, D
; /* The absolute value is stored. */
3519 /* Test on the absolute value. */
3520 mpz_roinit_normal_n (n
, x
->_mp_d
, GMP_ABS (x
->_mp_size
));
3522 assert (mpz_odd_p (n
));
3523 /* assert (mpz_gcd_ui (NULL, n, 6) == 1); */
3524 if (mpz_root (Qk
, n
, 2))
3525 return 0; /* A square is composite. */
3527 /* Check Ds up to square root (in case, n is prime)
3528 or avoid overflows */
3529 maxD
= (Qk
->_mp_size
== 1) ? Qk
->_mp_d
[0] - 1 : GMP_LIMB_MAX
;
3532 /* Search a D such that (D/n) = -1 in the sequence 5,-7,9,-11,.. */
3533 /* For those Ds we have (D/n) = (n/|D|) */
3537 return 1 + (D
!= GMP_LIMB_MAX
); /* (1 + ! ~ D) */
3539 tl
= mpz_tdiv_ui (n
, D
);
3543 while (gmp_jacobi_coprime (tl
, D
) == 1);
3547 /* n-(D/n) = n+1 = d*2^{b0}, with d = (n>>b0) | 1 */
3548 b0
= mpn_common_scan (~ n
->_mp_d
[0], 0, n
->_mp_d
, n
->_mp_size
, GMP_LIMB_MAX
);
3549 /* b0 = mpz_scan0 (n, 0); */
3551 /* D= P^2 - 4Q; P = 1; Q = (1-D)/4 */
3552 Q
= (D
& 2) ? (long) (D
>> 2) + 1 : -(long) (D
>> 2);
3554 if (! gmp_lucas_mod (V
, Qk
, Q
, b0
, n
)) /* If Ud != 0 */
3555 while (V
->_mp_size
!= 0 && --b0
!= 0) /* while Vk != 0 */
3556 /* V <- V ^ 2 - 2Q^k */
3557 /* Q^{2k} = (Q^k)^2 */
3558 gmp_lucas_step_k_2k (V
, Qk
, n
);
3565 gmp_millerrabin (const mpz_t n
, const mpz_t nm1
, mpz_t y
,
3566 const mpz_t q
, mp_bitcnt_t k
)
3570 /* Caller must initialize y to the base. */
3571 mpz_powm (y
, y
, q
, n
);
3573 if (mpz_cmp_ui (y
, 1) == 0 || mpz_cmp (y
, nm1
) == 0)
3578 mpz_powm_ui (y
, y
, 2, n
);
3579 if (mpz_cmp (y
, nm1
) == 0)
3585 /* This product is 0xc0cfd797, and fits in 32 bits. */
3586 #define GMP_PRIME_PRODUCT \
3587 (3UL*5UL*7UL*11UL*13UL*17UL*19UL*23UL*29UL)
3589 /* Bit (p+1)/2 is set, for each odd prime <= 61 */
3590 #define GMP_PRIME_MASK 0xc96996dcUL
3593 mpz_probab_prime_p (const mpz_t n
, int reps
)
3602 /* Note that we use the absolute value of n only, for compatibility
3603 with the real GMP. */
3605 return (mpz_cmpabs_ui (n
, 2) == 0) ? 2 : 0;
3607 /* Above test excludes n == 0 */
3608 assert (n
->_mp_size
!= 0);
3610 if (mpz_cmpabs_ui (n
, 64) < 0)
3611 return (GMP_PRIME_MASK
>> (n
->_mp_d
[0] >> 1)) & 2;
3613 if (mpz_gcd_ui (NULL
, n
, GMP_PRIME_PRODUCT
) != 1)
3616 /* All prime factors are >= 31. */
3617 if (mpz_cmpabs_ui (n
, 31*31) < 0)
3623 /* Find q and k, where q is odd and n = 1 + 2**k * q. */
3626 /* Count trailing zeros, equivalent to mpn_scan1, because we know that there is a 1 */
3627 k
= mpn_scan1 (nm1
->_mp_d
, 0);
3628 mpz_tdiv_q_2exp (q
, nm1
, k
);
3631 mpz_init_set_ui (y
, 2);
3632 is_prime
= gmp_millerrabin (n
, nm1
, y
, q
, k
) && gmp_stronglucas (n
, y
);
3633 reps
-= 24; /* skip the first 24 repetitions */
3635 /* Use Miller-Rabin, with a deterministic sequence of bases, a[j] =
3636 j^2 + j + 41 using Euler's polynomial. We potentially stop early,
3637 if a[j] >= n - 1. Since n >= 31*31, this can happen only if reps >
3638 30 (a[30] == 971 > 31*31 == 961). */
3640 for (j
= 0; is_prime
& (j
< reps
); j
++)
3642 mpz_set_ui (y
, (unsigned long) j
*j
+j
+41);
3643 if (mpz_cmp (y
, nm1
) >= 0)
3645 /* Don't try any further bases. This "early" break does not affect
3646 the result for any reasonable reps value (<=5000 was tested) */
3650 is_prime
= gmp_millerrabin (n
, nm1
, y
, q
, k
);
3660 /* Logical operations and bit manipulation. */
3662 /* Numbers are treated as if represented in two's complement (and
3663 infinitely sign extended). For a negative values we get the two's
3664 complement from -x = ~x + 1, where ~ is bitwise complement.
3673 where yyyy is the bitwise complement of xxxx. So least significant
3674 bits, up to and including the first one bit, are unchanged, and
3675 the more significant bits are all complemented.
3677 To change a bit from zero to one in a negative number, subtract the
3678 corresponding power of two from the absolute value. This can never
3679 underflow. To change a bit from one to zero, add the corresponding
3680 power of two, and this might overflow. E.g., if x = -001111, the
3681 two's complement is 110001. Clearing the least significant bit, we
3682 get two's complement 110000, and -010000. */
3685 mpz_tstbit (const mpz_t d
, mp_bitcnt_t bit_index
)
3687 mp_size_t limb_index
;
3696 limb_index
= bit_index
/ GMP_LIMB_BITS
;
3697 if (limb_index
>= dn
)
3700 shift
= bit_index
% GMP_LIMB_BITS
;
3701 w
= d
->_mp_d
[limb_index
];
3702 bit
= (w
>> shift
) & 1;
3706 /* d < 0. Check if any of the bits below is set: If so, our bit
3707 must be complemented. */
3708 if (shift
> 0 && (mp_limb_t
) (w
<< (GMP_LIMB_BITS
- shift
)) > 0)
3710 while (--limb_index
>= 0)
3711 if (d
->_mp_d
[limb_index
] > 0)
3718 mpz_abs_add_bit (mpz_t d
, mp_bitcnt_t bit_index
)
3720 mp_size_t dn
, limb_index
;
3724 dn
= GMP_ABS (d
->_mp_size
);
3726 limb_index
= bit_index
/ GMP_LIMB_BITS
;
3727 bit
= (mp_limb_t
) 1 << (bit_index
% GMP_LIMB_BITS
);
3729 if (limb_index
>= dn
)
3732 /* The bit should be set outside of the end of the number.
3733 We have to increase the size of the number. */
3734 dp
= MPZ_REALLOC (d
, limb_index
+ 1);
3736 dp
[limb_index
] = bit
;
3737 for (i
= dn
; i
< limb_index
; i
++)
3739 dn
= limb_index
+ 1;
3747 cy
= mpn_add_1 (dp
+ limb_index
, dp
+ limb_index
, dn
- limb_index
, bit
);
3750 dp
= MPZ_REALLOC (d
, dn
+ 1);
3755 d
->_mp_size
= (d
->_mp_size
< 0) ? - dn
: dn
;
3759 mpz_abs_sub_bit (mpz_t d
, mp_bitcnt_t bit_index
)
3761 mp_size_t dn
, limb_index
;
3765 dn
= GMP_ABS (d
->_mp_size
);
3768 limb_index
= bit_index
/ GMP_LIMB_BITS
;
3769 bit
= (mp_limb_t
) 1 << (bit_index
% GMP_LIMB_BITS
);
3771 assert (limb_index
< dn
);
3773 gmp_assert_nocarry (mpn_sub_1 (dp
+ limb_index
, dp
+ limb_index
,
3774 dn
- limb_index
, bit
));
3775 dn
= mpn_normalized_size (dp
, dn
);
3776 d
->_mp_size
= (d
->_mp_size
< 0) ? - dn
: dn
;
3780 mpz_setbit (mpz_t d
, mp_bitcnt_t bit_index
)
3782 if (!mpz_tstbit (d
, bit_index
))
3784 if (d
->_mp_size
>= 0)
3785 mpz_abs_add_bit (d
, bit_index
);
3787 mpz_abs_sub_bit (d
, bit_index
);
3792 mpz_clrbit (mpz_t d
, mp_bitcnt_t bit_index
)
3794 if (mpz_tstbit (d
, bit_index
))
3796 if (d
->_mp_size
>= 0)
3797 mpz_abs_sub_bit (d
, bit_index
);
3799 mpz_abs_add_bit (d
, bit_index
);
3804 mpz_combit (mpz_t d
, mp_bitcnt_t bit_index
)
3806 if (mpz_tstbit (d
, bit_index
) ^ (d
->_mp_size
< 0))
3807 mpz_abs_sub_bit (d
, bit_index
);
3809 mpz_abs_add_bit (d
, bit_index
);
3813 mpz_com (mpz_t r
, const mpz_t u
)
3815 mpz_add_ui (r
, u
, 1);
3820 mpz_and (mpz_t r
, const mpz_t u
, const mpz_t v
)
3822 mp_size_t un
, vn
, rn
, i
;
3825 mp_limb_t ux
, vx
, rx
;
3826 mp_limb_t uc
, vc
, rc
;
3827 mp_limb_t ul
, vl
, rl
;
3829 un
= GMP_ABS (u
->_mp_size
);
3830 vn
= GMP_ABS (v
->_mp_size
);
3833 MPZ_SRCPTR_SWAP (u
, v
);
3834 MP_SIZE_T_SWAP (un
, vn
);
3842 uc
= u
->_mp_size
< 0;
3843 vc
= v
->_mp_size
< 0;
3850 /* If the smaller input is positive, higher limbs don't matter. */
3853 rp
= MPZ_REALLOC (r
, rn
+ (mp_size_t
) rc
);
3861 ul
= (up
[i
] ^ ux
) + uc
;
3864 vl
= (vp
[i
] ^ vx
) + vc
;
3867 rl
= ( (ul
& vl
) ^ rx
) + rc
;
3876 ul
= (up
[i
] ^ ux
) + uc
;
3879 rl
= ( (ul
& vx
) ^ rx
) + rc
;
3886 rn
= mpn_normalized_size (rp
, rn
);
3888 r
->_mp_size
= rx
? -rn
: rn
;
3892 mpz_ior (mpz_t r
, const mpz_t u
, const mpz_t v
)
3894 mp_size_t un
, vn
, rn
, i
;
3897 mp_limb_t ux
, vx
, rx
;
3898 mp_limb_t uc
, vc
, rc
;
3899 mp_limb_t ul
, vl
, rl
;
3901 un
= GMP_ABS (u
->_mp_size
);
3902 vn
= GMP_ABS (v
->_mp_size
);
3905 MPZ_SRCPTR_SWAP (u
, v
);
3906 MP_SIZE_T_SWAP (un
, vn
);
3914 uc
= u
->_mp_size
< 0;
3915 vc
= v
->_mp_size
< 0;
3922 /* If the smaller input is negative, by sign extension higher limbs
3926 rp
= MPZ_REALLOC (r
, rn
+ (mp_size_t
) rc
);
3934 ul
= (up
[i
] ^ ux
) + uc
;
3937 vl
= (vp
[i
] ^ vx
) + vc
;
3940 rl
= ( (ul
| vl
) ^ rx
) + rc
;
3949 ul
= (up
[i
] ^ ux
) + uc
;
3952 rl
= ( (ul
| vx
) ^ rx
) + rc
;
3959 rn
= mpn_normalized_size (rp
, rn
);
3961 r
->_mp_size
= rx
? -rn
: rn
;
3965 mpz_xor (mpz_t r
, const mpz_t u
, const mpz_t v
)
3967 mp_size_t un
, vn
, i
;
3970 mp_limb_t ux
, vx
, rx
;
3971 mp_limb_t uc
, vc
, rc
;
3972 mp_limb_t ul
, vl
, rl
;
3974 un
= GMP_ABS (u
->_mp_size
);
3975 vn
= GMP_ABS (v
->_mp_size
);
3978 MPZ_SRCPTR_SWAP (u
, v
);
3979 MP_SIZE_T_SWAP (un
, vn
);
3987 uc
= u
->_mp_size
< 0;
3988 vc
= v
->_mp_size
< 0;
3995 rp
= MPZ_REALLOC (r
, un
+ (mp_size_t
) rc
);
4003 ul
= (up
[i
] ^ ux
) + uc
;
4006 vl
= (vp
[i
] ^ vx
) + vc
;
4009 rl
= (ul
^ vl
^ rx
) + rc
;
4018 ul
= (up
[i
] ^ ux
) + uc
;
4021 rl
= (ul
^ ux
) + rc
;
4028 un
= mpn_normalized_size (rp
, un
);
4030 r
->_mp_size
= rx
? -un
: un
;
4034 gmp_popcount_limb (mp_limb_t x
)
4038 /* Do 16 bits at a time, to avoid limb-sized constants. */
4039 int LOCAL_SHIFT_BITS
= 16;
4042 unsigned w
= x
- ((x
>> 1) & 0x5555);
4043 w
= ((w
>> 2) & 0x3333) + (w
& 0x3333);
4045 w
= ((w
>> 8) & 0x000f) + (w
& 0x000f);
4047 if (GMP_LIMB_BITS
> LOCAL_SHIFT_BITS
)
4048 x
>>= LOCAL_SHIFT_BITS
;
4056 mpn_popcount (mp_srcptr p
, mp_size_t n
)
4061 for (c
= 0, i
= 0; i
< n
; i
++)
4062 c
+= gmp_popcount_limb (p
[i
]);
4068 mpz_popcount (const mpz_t u
)
4075 return ~(mp_bitcnt_t
) 0;
4077 return mpn_popcount (u
->_mp_d
, un
);
4081 mpz_hamdist (const mpz_t u
, const mpz_t v
)
4083 mp_size_t un
, vn
, i
;
4084 mp_limb_t uc
, vc
, ul
, vl
, comp
;
4092 return ~(mp_bitcnt_t
) 0;
4094 comp
= - (uc
= vc
= (un
< 0));
4106 MPN_SRCPTR_SWAP (up
, un
, vp
, vn
);
4108 for (i
= 0, c
= 0; i
< vn
; i
++)
4110 ul
= (up
[i
] ^ comp
) + uc
;
4113 vl
= (vp
[i
] ^ comp
) + vc
;
4116 c
+= gmp_popcount_limb (ul
^ vl
);
4122 ul
= (up
[i
] ^ comp
) + uc
;
4125 c
+= gmp_popcount_limb (ul
^ comp
);
4132 mpz_scan1 (const mpz_t u
, mp_bitcnt_t starting_bit
)
4135 mp_size_t us
, un
, i
;
4140 i
= starting_bit
/ GMP_LIMB_BITS
;
4142 /* Past the end there's no 1 bits for u>=0, or an immediate 1 bit
4143 for u<0. Notice this test picks up any u==0 too. */
4145 return (us
>= 0 ? ~(mp_bitcnt_t
) 0 : starting_bit
);
4151 if (starting_bit
!= 0)
4155 ux
= mpn_zero_p (up
, i
);
4157 ux
= - (mp_limb_t
) (limb
>= ux
);
4160 /* Mask to 0 all bits before starting_bit, thus ignoring them. */
4161 limb
&= GMP_LIMB_MAX
<< (starting_bit
% GMP_LIMB_BITS
);
4164 return mpn_common_scan (limb
, i
, up
, un
, ux
);
4168 mpz_scan0 (const mpz_t u
, mp_bitcnt_t starting_bit
)
4171 mp_size_t us
, un
, i
;
4175 ux
= - (mp_limb_t
) (us
>= 0);
4177 i
= starting_bit
/ GMP_LIMB_BITS
;
4179 /* When past end, there's an immediate 0 bit for u>=0, or no 0 bits for
4180 u<0. Notice this test picks up all cases of u==0 too. */
4182 return (ux
? starting_bit
: ~(mp_bitcnt_t
) 0);
4188 limb
-= mpn_zero_p (up
, i
); /* limb = ~(~limb + zero_p) */
4190 /* Mask all bits before starting_bit, thus ignoring them. */
4191 limb
&= GMP_LIMB_MAX
<< (starting_bit
% GMP_LIMB_BITS
);
4193 return mpn_common_scan (limb
, i
, up
, un
, ux
);
4197 /* MPZ base conversion. */
4200 mpz_sizeinbase (const mpz_t u
, int base
)
4206 struct gmp_div_inverse bi
;
4210 assert (base
<= 62);
4212 un
= GMP_ABS (u
->_mp_size
);
4218 bits
= (un
- 1) * GMP_LIMB_BITS
+ mpn_limb_size_in_base_2 (up
[un
-1]);
4224 return (bits
+ 1) / 2;
4226 return (bits
+ 2) / 3;
4228 return (bits
+ 3) / 4;
4230 return (bits
+ 4) / 5;
4231 /* FIXME: Do something more clever for the common case of base
4235 tp
= gmp_alloc_limbs (un
);
4236 mpn_copyi (tp
, up
, un
);
4237 mpn_div_qr_1_invert (&bi
, base
);
4244 mpn_div_qr_1_preinv (tp
, tp
, tn
, &bi
);
4245 tn
-= (tp
[tn
-1] == 0);
4249 gmp_free_limbs (tp
, un
);
4254 mpz_get_str (char *sp
, int base
, const mpz_t u
)
4261 digits
= "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz";
4265 digits
= "0123456789abcdefghijklmnopqrstuvwxyz";
4269 else if (base
>= -1)
4278 sn
= 1 + mpz_sizeinbase (u
, base
);
4282 sp
= (char *) gmp_alloc (osn
);
4286 un
= GMP_ABS (u
->_mp_size
);
4297 if (u
->_mp_size
< 0)
4300 bits
= mpn_base_power_of_two_p (base
);
4303 /* Not modified in this case. */
4304 sn
= i
+ mpn_get_str_bits ((unsigned char *) sp
+ i
, bits
, u
->_mp_d
, un
);
4307 struct mpn_base_info info
;
4310 mpn_get_base_info (&info
, base
);
4311 tp
= gmp_alloc_limbs (un
);
4312 mpn_copyi (tp
, u
->_mp_d
, un
);
4314 sn
= i
+ mpn_get_str_other ((unsigned char *) sp
+ i
, base
, &info
, tp
, un
);
4315 gmp_free_limbs (tp
, un
);
4319 sp
[i
] = digits
[(unsigned char) sp
[i
]];
4323 if (osn
&& osn
!= sn
+ 1)
4324 sp
= (char*) gmp_realloc (sp
, osn
, sn
+ 1);
/* Parse the string SP as a number in BASE and store it in R.  BASE
   is 0 or 2..62; 0 means auto-detect from a 0x/0X (hex), 0b/0B
   (binary) or leading-0 (octal) prefix, defaulting to decimal.
   Leading white space is skipped and white space between digits is
   ignored; a single leading '-' negates.  For BASE > 36, 'a'..'z'
   denote 36..61; otherwise case is insignificant.  Returns 0 on
   success, or -1 (with R set to zero) on invalid input.  */
int
mpz_set_str (mpz_t r, const char *sp, int base)
{
  unsigned bits, value_of_a;
  mp_size_t rn, alloc;
  mp_ptr rp;
  size_t dn, sn;
  int sign;
  unsigned char *dp;

  assert (base == 0 || (base >= 2 && base <= 62));

  while (isspace( (unsigned char) *sp))
    sp++;

  sign = (*sp == '-');
  sp += sign;

  /* BASE == 0: infer the radix from the usual C-style prefixes.  */
  if (base == 0)
    {
      if (sp[0] == '0')
	{
	  if (sp[1] == 'x' || sp[1] == 'X')
	    {
	      base = 16;
	      sp += 2;
	    }
	  else if (sp[1] == 'b' || sp[1] == 'B')
	    {
	      base = 2;
	      sp += 2;
	    }
	  else
	    base = 8;
	}
      else
	base = 10;
    }

  if (!*sp)
    {
      r->_mp_size = 0;
      return -1;		/* empty digit string is an error */
    }
  sn = strlen(sp);
  dp = (unsigned char *) gmp_alloc (sn);

  /* For BASE <= 36 letters are case-insensitive ('a' == 'A' == 10);
     for larger bases lowercase letters continue from 36.  */
  value_of_a = (base > 36) ? 36 : 10;
  for (dn = 0; *sp; sp++)
    {
      unsigned digit;

      if (isspace ((unsigned char) *sp))
	continue;
      else if (*sp >= '0' && *sp <= '9')
	digit = *sp - '0';
      else if (*sp >= 'a' && *sp <= 'z')
	digit = *sp - 'a' + value_of_a;
      else if (*sp >= 'A' && *sp <= 'Z')
	digit = *sp - 'A' + 10;
      else
	digit = base; /* fail */

      if (digit >= (unsigned) base)
	{
	  gmp_free (dp, sn);
	  r->_mp_size = 0;
	  return -1;
	}

      dp[dn++] = digit;
    }

  if (!dn)
    {
      gmp_free (dp, sn);
      r->_mp_size = 0;
      return -1;		/* only white space after the sign */
    }

  /* Assemble limbs from the digit array; power-of-two bases can pack
     digits with shifts, others need multiply-and-add.  */
  bits = mpn_base_power_of_two_p (base);

  if (bits > 0)
    {
      alloc = (dn * bits + GMP_LIMB_BITS - 1) / GMP_LIMB_BITS;
      rp = MPZ_REALLOC (r, alloc);
      rn = mpn_set_str_bits (rp, dp, dn, bits);
    }
  else
    {
      struct mpn_base_info info;
      mpn_get_base_info (&info, base);
      alloc = (dn + info.exp - 1) / info.exp;
      rp = MPZ_REALLOC (r, alloc);
      rn = mpn_set_str_other (rp, dp, dn, base, &info);
      /* Normalization, needed for all-zero input. */
      assert (rn > 0);
      rn -= rp[rn-1] == 0;
    }
  assert (rn <= alloc);
  gmp_free (dp, sn);

  r->_mp_size = sign ? - rn : rn;

  return 0;
}
/* Initialize R and set it from the string SP in BASE, as for
   mpz_set_str.  Returns 0 on success, -1 on parse failure; R is
   initialized in either case and must be cleared by the caller.  */
int
mpz_init_set_str (mpz_t r, const char *sp, int base)
{
  mpz_init (r);
  return mpz_set_str (r, sp, base);
}
4442 mpz_out_str (FILE *stream
, int base
, const mpz_t x
)
4447 str
= mpz_get_str (NULL
, base
, x
);
4451 n
= fwrite (str
, 1, len
, stream
);
4452 gmp_free (str
, len
+ 1);
/* Detect the host byte order, using GMP's endian convention:
   returns -1 for little endian, 1 for big endian.  Looks at the
   lowest-addressed byte of the integer 2: it holds 2 on a
   little-endian machine and 0 on a big-endian one.  */
static int
gmp_detect_endian (void)
{
  static const int probe = 2;
  const unsigned char *first_byte = (const unsigned char *) &probe;

  return 1 - (int) first_byte[0];
}
4465 /* Import and export. Does not support nails. */
/* Set R from COUNT words of SIZE bytes each read from SRC.  ORDER is
   1 for most significant word first, -1 for least significant first.
   ENDIAN gives the byte order within each word: 1 big endian, -1
   little endian, 0 the host's native order.  NAILS must be 0 (nails
   are not supported; gmp_die otherwise).  The result is always
   non-negative.  */
void
mpz_import (mpz_t r, size_t count, int order, size_t size, int endian,
	    size_t nails, const void *src)
{
  const unsigned char *p;
  ptrdiff_t word_step;
  mp_ptr rp;
  mp_size_t rn;

  /* The current (partial) limb. */
  mp_limb_t limb;
  /* The number of bytes already copied to this limb (starting from
     the low end). */
  size_t bytes;
  /* The index where the limb should be stored, when completed. */
  mp_size_t i;

  if (nails != 0)
    gmp_die ("mpz_import: Nails not supported.");

  assert (order == 1 || order == -1);
  assert (endian >= -1 && endian <= 1);

  if (endian == 0)
    endian = gmp_detect_endian ();

  p = (unsigned char *) src;

  /* The inner loop steps P by -ENDIAN per byte; when ORDER and
     ENDIAN agree it ends on the neighbouring word and no extra step
     is needed, otherwise 2*SIZE re-crosses the word just read.  */
  word_step = (order != endian) ? 2 * size : 0;

  /* Process bytes from the least significant end, so point p at the
     least significant word. */
  if (order == 1)
    {
      p += size * (count - 1);
      word_step = - word_step;
    }

  /* And at least significant byte of that word. */
  if (endian == 1)
    p += (size - 1);

  rn = (size * count + sizeof(mp_limb_t) - 1) / sizeof(mp_limb_t);
  rp = MPZ_REALLOC (r, rn);

  for (limb = 0, bytes = 0, i = 0; count > 0; count--, p += word_step)
    {
      size_t j;
      for (j = 0; j < size; j++, p -= (ptrdiff_t) endian)
	{
	  /* Accumulate bytes into the current limb, low bytes first.  */
	  limb |= (mp_limb_t) *p << (bytes++ * CHAR_BIT);
	  if (bytes == sizeof(mp_limb_t))
	    {
	      rp[i++] = limb;
	      bytes = 0;
	      limb = 0;
	    }
	}
    }
  assert (i + (bytes > 0) == rn);
  if (bytes > 0)
    rp[i++] = limb;		/* flush the final partial limb */

  /* Strip high zero limbs; import never produces a negative value.  */
  i = mpn_normalized_size (rp, i);
  r->_mp_size = i;
}
4535 mpz_export (void *r
, size_t *countp
, int order
, size_t size
, int endian
,
4536 size_t nails
, const mpz_t u
)
4542 gmp_die ("mpz_export: Nails not supported.");
4544 assert (order
== 1 || order
== -1);
4545 assert (endian
>= -1 && endian
<= 1);
4546 assert (size
> 0 || u
->_mp_size
== 0);
4554 ptrdiff_t word_step
;
4555 /* The current (partial) limb. */
4557 /* The number of bytes left to do in this limb. */
4559 /* The index where the limb was read. */
4564 /* Count bytes in top limb. */
4565 limb
= u
->_mp_d
[un
-1];
4568 k
= (GMP_LIMB_BITS
<= CHAR_BIT
);
4572 int LOCAL_CHAR_BIT
= CHAR_BIT
;
4573 k
++; limb
>>= LOCAL_CHAR_BIT
;
4574 } while (limb
!= 0);
4576 /* else limb = 0; */
4578 count
= (k
+ (un
-1) * sizeof (mp_limb_t
) + size
- 1) / size
;
4581 r
= gmp_alloc (count
* size
);
4584 endian
= gmp_detect_endian ();
4586 p
= (unsigned char *) r
;
4588 word_step
= (order
!= endian
) ? 2 * size
: 0;
4590 /* Process bytes from the least significant end, so point p at the
4591 least significant word. */
4594 p
+= size
* (count
- 1);
4595 word_step
= - word_step
;
4598 /* And at least significant byte of that word. */
4602 for (bytes
= 0, i
= 0, k
= 0; k
< count
; k
++, p
+= word_step
)
4605 for (j
= 0; j
< size
; ++j
, p
-= (ptrdiff_t) endian
)
4607 if (sizeof (mp_limb_t
) == 1)
4616 int LOCAL_CHAR_BIT
= CHAR_BIT
;
4620 limb
= u
->_mp_d
[i
++];
4621 bytes
= sizeof (mp_limb_t
);
4624 limb
>>= LOCAL_CHAR_BIT
;
4630 assert (k
== count
);