1 /* mini-gmp, a minimalistic implementation of a GNU GMP subset.
3 Contributed to the GNU project by Niels Möller
5 Copyright 1991-1997, 1999-2020 Free Software Foundation, Inc.
7 This file is part of the GNU MP Library.
9 The GNU MP Library is free software; you can redistribute it and/or modify
10 it under the terms of either:
12 * the GNU Lesser General Public License as published by the Free
13 Software Foundation; either version 3 of the License, or (at your
14 option) any later version.
18 * the GNU General Public License as published by the Free Software
19 Foundation; either version 2 of the License, or (at your option) any
22 or both in parallel, as here.
24 The GNU MP Library is distributed in the hope that it will be useful, but
25 WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
26 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
29 You should have received copies of the GNU General Public License and the
30 GNU Lesser General Public License along with the GNU MP Library. If not,
31 see https://www.gnu.org/licenses/. */
33 /* NOTE: All functions in this file which are not declared in
34 mini-gmp.h are internal, and are not intended to be compatible
35 neither with GMP nor with future versions of mini-gmp. */
37 /* Much of the material copied from GMP files, including: gmp-impl.h,
38 longlong.h, mpn/generic/add_n.c, mpn/generic/addmul_1.c,
39 mpn/generic/lshift.c, mpn/generic/mul_1.c,
40 mpn/generic/mul_basecase.c, mpn/generic/rshift.c,
41 mpn/generic/sbpi1_div_qr.c, mpn/generic/sub_n.c,
42 mpn/generic/submul_1.c. */
53 #if !defined(MINI_GMP_DONT_USE_FLOAT_H)
/* Number of bits in one mp_limb_t, computed from its byte size.  */
59 #define GMP_LIMB_BITS (sizeof(mp_limb_t) * CHAR_BIT)
/* All-ones limb (the maximum limb value).  */
61 #define GMP_LIMB_MAX ((mp_limb_t) ~ (mp_limb_t) 0)
/* Limb with only the most significant bit set.  */
62 #define GMP_LIMB_HIGHBIT ((mp_limb_t) 1 << (GMP_LIMB_BITS - 1))
/* Half-limb base b = 2^(GMP_LIMB_BITS/2), and the mask selecting the
   low half of a limb; used by the half-limb multiply/divide helpers.  */
64 #define GMP_HLIMB_BIT ((mp_limb_t) 1 << (GMP_LIMB_BITS / 2))
65 #define GMP_LLIMB_MASK (GMP_HLIMB_BIT - 1)
/* Corresponding width and high bit for unsigned long, used when
   converting between limbs and unsigned long values.  */
67 #define GMP_ULONG_BITS (sizeof(unsigned long) * CHAR_BIT)
68 #define GMP_ULONG_HIGHBIT ((unsigned long) 1 << (GMP_ULONG_BITS - 1))
/* Absolute value.  NOTE: the argument is evaluated more than once.  */
70 #define GMP_ABS(x) ((x) >= 0 ? (x) : -(x))
/* Negate a negative signed value via unsigned type T without signed
   overflow, even for the most negative representable value.  */
71 #define GMP_NEG_CAST(T,x) (-((T)((x) + 1) - 1))
/* Min/max.  NOTE: arguments are evaluated more than once.  */
73 #define GMP_MIN(a, b) ((a) < (b) ? (a) : (b))
74 #define GMP_MAX(a, b) ((a) > (b) ? (a) : (b))
/* Three-way compare: -1, 0 or 1 as a < b, a == b, a > b.  */
76 #define GMP_CMP(a,b) (((a) > (b)) - ((a) < (b)))
78 #if defined(DBL_MANT_DIG) && FLT_RADIX == 2
79 #define GMP_DBL_MANT_BITS DBL_MANT_DIG
81 #define GMP_DBL_MANT_BITS (53)
84 /* Return non-zero if xp,xsize and yp,ysize overlap.
85 If xp+xsize<=yp there's no overlap, or if yp+ysize<=xp there's no
86 overlap. If both these are false, there's an overlap. */
/* NOTE: arguments are evaluated more than once; pass plain
   pointer/size expressions only.  */
87 #define GMP_MPN_OVERLAP_P(xp, xsize, yp, ysize) \
88 ((xp) + (xsize) > (yp) && (yp) + (ysize) > (xp))
90 #define gmp_assert_nocarry(x) do { \
91 mp_limb_t __cy = (x); \
95 #define gmp_clz(count, x) do { \
96 mp_limb_t __clz_x = (x); \
97 unsigned __clz_c = 0; \
98 int LOCAL_SHIFT_BITS = 8; \
99 if (GMP_LIMB_BITS > LOCAL_SHIFT_BITS) \
101 (__clz_x & ((mp_limb_t) 0xff << (GMP_LIMB_BITS - 8))) == 0; \
103 { __clz_x <<= LOCAL_SHIFT_BITS; } \
104 for (; (__clz_x & GMP_LIMB_HIGHBIT) == 0; __clz_c++) \
109 #define gmp_ctz(count, x) do { \
110 mp_limb_t __ctz_x = (x); \
111 unsigned __ctz_c = 0; \
112 gmp_clz (__ctz_c, __ctz_x & - __ctz_x); \
113 (count) = GMP_LIMB_BITS - 1 - __ctz_c; \
116 #define gmp_add_ssaaaa(sh, sl, ah, al, bh, bl) \
120 (sh) = (ah) + (bh) + (__x < (al)); \
124 #define gmp_sub_ddmmss(sh, sl, ah, al, bh, bl) \
128 (sh) = (ah) - (bh) - ((al) < (bl)); \
132 #define gmp_umul_ppmm(w1, w0, u, v) \
134 int LOCAL_GMP_LIMB_BITS = GMP_LIMB_BITS; \
135 if (sizeof(unsigned int) * CHAR_BIT >= 2 * GMP_LIMB_BITS) \
137 unsigned int __ww = (unsigned int) (u) * (v); \
138 w0 = (mp_limb_t) __ww; \
139 w1 = (mp_limb_t) (__ww >> LOCAL_GMP_LIMB_BITS); \
141 else if (GMP_ULONG_BITS >= 2 * GMP_LIMB_BITS) \
143 unsigned long int __ww = (unsigned long int) (u) * (v); \
144 w0 = (mp_limb_t) __ww; \
145 w1 = (mp_limb_t) (__ww >> LOCAL_GMP_LIMB_BITS); \
148 mp_limb_t __x0, __x1, __x2, __x3; \
149 unsigned __ul, __vl, __uh, __vh; \
150 mp_limb_t __u = (u), __v = (v); \
152 __ul = __u & GMP_LLIMB_MASK; \
153 __uh = __u >> (GMP_LIMB_BITS / 2); \
154 __vl = __v & GMP_LLIMB_MASK; \
155 __vh = __v >> (GMP_LIMB_BITS / 2); \
157 __x0 = (mp_limb_t) __ul * __vl; \
158 __x1 = (mp_limb_t) __ul * __vh; \
159 __x2 = (mp_limb_t) __uh * __vl; \
160 __x3 = (mp_limb_t) __uh * __vh; \
162 __x1 += __x0 >> (GMP_LIMB_BITS / 2);/* this can't give carry */ \
163 __x1 += __x2; /* but this indeed can */ \
164 if (__x1 < __x2) /* did we get it? */ \
165 __x3 += GMP_HLIMB_BIT; /* yes, add it in the proper pos. */ \
167 (w1) = __x3 + (__x1 >> (GMP_LIMB_BITS / 2)); \
168 (w0) = (__x1 << (GMP_LIMB_BITS / 2)) + (__x0 & GMP_LLIMB_MASK); \
172 #define gmp_udiv_qrnnd_preinv(q, r, nh, nl, d, di) \
174 mp_limb_t _qh, _ql, _r, _mask; \
175 gmp_umul_ppmm (_qh, _ql, (nh), (di)); \
176 gmp_add_ssaaaa (_qh, _ql, _qh, _ql, (nh) + 1, (nl)); \
177 _r = (nl) - _qh * (d); \
178 _mask = -(mp_limb_t) (_r > _ql); /* both > and >= are OK */ \
191 #define gmp_udiv_qr_3by2(q, r1, r0, n2, n1, n0, d1, d0, dinv) \
193 mp_limb_t _q0, _t1, _t0, _mask; \
194 gmp_umul_ppmm ((q), _q0, (n2), (dinv)); \
195 gmp_add_ssaaaa ((q), _q0, (q), _q0, (n2), (n1)); \
197 /* Compute the two most significant limbs of n - q'd */ \
198 (r1) = (n1) - (d1) * (q); \
199 gmp_sub_ddmmss ((r1), (r0), (r1), (n0), (d1), (d0)); \
200 gmp_umul_ppmm (_t1, _t0, (d0), (q)); \
201 gmp_sub_ddmmss ((r1), (r0), (r1), (r0), _t1, _t0); \
204 /* Conditionally adjust q and the remainders */ \
205 _mask = - (mp_limb_t) ((r1) >= _q0); \
207 gmp_add_ssaaaa ((r1), (r0), (r1), (r0), _mask & (d1), _mask & (d0)); \
210 if ((r1) > (d1) || (r0) >= (d0)) \
213 gmp_sub_ddmmss ((r1), (r0), (r1), (r0), (d1), (d0)); \
219 #define MP_LIMB_T_SWAP(x, y) \
221 mp_limb_t __mp_limb_t_swap__tmp = (x); \
223 (y) = __mp_limb_t_swap__tmp; \
225 #define MP_SIZE_T_SWAP(x, y) \
227 mp_size_t __mp_size_t_swap__tmp = (x); \
229 (y) = __mp_size_t_swap__tmp; \
231 #define MP_BITCNT_T_SWAP(x,y) \
233 mp_bitcnt_t __mp_bitcnt_t_swap__tmp = (x); \
235 (y) = __mp_bitcnt_t_swap__tmp; \
237 #define MP_PTR_SWAP(x, y) \
239 mp_ptr __mp_ptr_swap__tmp = (x); \
241 (y) = __mp_ptr_swap__tmp; \
243 #define MP_SRCPTR_SWAP(x, y) \
245 mp_srcptr __mp_srcptr_swap__tmp = (x); \
247 (y) = __mp_srcptr_swap__tmp; \
250 #define MPN_PTR_SWAP(xp,xs, yp,ys) \
252 MP_PTR_SWAP (xp, yp); \
253 MP_SIZE_T_SWAP (xs, ys); \
255 #define MPN_SRCPTR_SWAP(xp,xs, yp,ys) \
257 MP_SRCPTR_SWAP (xp, yp); \
258 MP_SIZE_T_SWAP (xs, ys); \
261 #define MPZ_PTR_SWAP(x, y) \
263 mpz_ptr __mpz_ptr_swap__tmp = (x); \
265 (y) = __mpz_ptr_swap__tmp; \
267 #define MPZ_SRCPTR_SWAP(x, y) \
269 mpz_srcptr __mpz_srcptr_swap__tmp = (x); \
271 (y) = __mpz_srcptr_swap__tmp; \
/* Public read-only constant: bits per limb, exposed to library users.  */
274 const int mp_bits_per_limb
= GMP_LIMB_BITS
;
277 /* Memory allocation and other helper functions. */
279 gmp_die (const char *msg
)
281 fprintf (stderr
, "%s\n", msg
);
286 gmp_default_alloc (size_t size
)
294 gmp_die("gmp_default_alloc: Virtual memory exhausted.");
300 gmp_default_realloc (void *old
, size_t unused_old_size
, size_t new_size
)
304 p
= realloc (old
, new_size
);
307 gmp_die("gmp_default_realloc: Virtual memory exhausted.");
313 gmp_default_free (void *p
, size_t unused_size
)
318 static void * (*gmp_allocate_func
) (size_t) = gmp_default_alloc
;
319 static void * (*gmp_reallocate_func
) (void *, size_t, size_t) = gmp_default_realloc
;
320 static void (*gmp_free_func
) (void *, size_t) = gmp_default_free
;
323 mp_get_memory_functions (void *(**alloc_func
) (size_t),
324 void *(**realloc_func
) (void *, size_t, size_t),
325 void (**free_func
) (void *, size_t))
328 *alloc_func
= gmp_allocate_func
;
331 *realloc_func
= gmp_reallocate_func
;
334 *free_func
= gmp_free_func
;
338 mp_set_memory_functions (void *(*alloc_func
) (size_t),
339 void *(*realloc_func
) (void *, size_t, size_t),
340 void (*free_func
) (void *, size_t))
343 alloc_func
= gmp_default_alloc
;
345 realloc_func
= gmp_default_realloc
;
347 free_func
= gmp_default_free
;
349 gmp_allocate_func
= alloc_func
;
350 gmp_reallocate_func
= realloc_func
;
351 gmp_free_func
= free_func
;
/* Allocation wrappers that dispatch through the user-replaceable
   hooks installed by mp_set_memory_functions.  All sizes are in
   bytes (see gmp_alloc_limbs, which multiplies by sizeof(mp_limb_t)).  */
354 #define gmp_alloc(size) ((*gmp_allocate_func)((size)))
355 #define gmp_free(p, size) ((*gmp_free_func) ((p), (size)))
356 #define gmp_realloc(ptr, old_size, size) ((*gmp_reallocate_func)(ptr, old_size, size))
359 gmp_alloc_limbs (mp_size_t size
)
361 return (mp_ptr
) gmp_alloc (size
* sizeof (mp_limb_t
));
365 gmp_realloc_limbs (mp_ptr old
, mp_size_t old_size
, mp_size_t size
)
368 return (mp_ptr
) gmp_realloc (old
, old_size
* sizeof (mp_limb_t
), size
* sizeof (mp_limb_t
));
372 gmp_free_limbs (mp_ptr old
, mp_size_t size
)
374 gmp_free (old
, size
* sizeof (mp_limb_t
));
381 mpn_copyi (mp_ptr d
, mp_srcptr s
, mp_size_t n
)
384 for (i
= 0; i
< n
; i
++)
389 mpn_copyd (mp_ptr d
, mp_srcptr s
, mp_size_t n
)
396 mpn_cmp (mp_srcptr ap
, mp_srcptr bp
, mp_size_t n
)
401 return ap
[n
] > bp
[n
] ? 1 : -1;
407 mpn_cmp4 (mp_srcptr ap
, mp_size_t an
, mp_srcptr bp
, mp_size_t bn
)
410 return an
< bn
? -1 : 1;
412 return mpn_cmp (ap
, bp
, an
);
416 mpn_normalized_size (mp_srcptr xp
, mp_size_t n
)
418 while (n
> 0 && xp
[n
-1] == 0)
424 mpn_zero_p(mp_srcptr rp
, mp_size_t n
)
426 return mpn_normalized_size (rp
, n
) == 0;
430 mpn_zero (mp_ptr rp
, mp_size_t n
)
437 mpn_add_1 (mp_ptr rp
, mp_srcptr ap
, mp_size_t n
, mp_limb_t b
)
445 mp_limb_t r
= ap
[i
] + b
;
456 mpn_add_n (mp_ptr rp
, mp_srcptr ap
, mp_srcptr bp
, mp_size_t n
)
461 for (i
= 0, cy
= 0; i
< n
; i
++)
464 a
= ap
[i
]; b
= bp
[i
];
475 mpn_add (mp_ptr rp
, mp_srcptr ap
, mp_size_t an
, mp_srcptr bp
, mp_size_t bn
)
481 cy
= mpn_add_n (rp
, ap
, bp
, bn
);
483 cy
= mpn_add_1 (rp
+ bn
, ap
+ bn
, an
- bn
, cy
);
488 mpn_sub_1 (mp_ptr rp
, mp_srcptr ap
, mp_size_t n
, mp_limb_t b
)
499 mp_limb_t cy
= a
< b
;
509 mpn_sub_n (mp_ptr rp
, mp_srcptr ap
, mp_srcptr bp
, mp_size_t n
)
514 for (i
= 0, cy
= 0; i
< n
; i
++)
517 a
= ap
[i
]; b
= bp
[i
];
527 mpn_sub (mp_ptr rp
, mp_srcptr ap
, mp_size_t an
, mp_srcptr bp
, mp_size_t bn
)
533 cy
= mpn_sub_n (rp
, ap
, bp
, bn
);
535 cy
= mpn_sub_1 (rp
+ bn
, ap
+ bn
, an
- bn
, cy
);
540 mpn_mul_1 (mp_ptr rp
, mp_srcptr up
, mp_size_t n
, mp_limb_t vl
)
542 mp_limb_t ul
, cl
, hpl
, lpl
;
550 gmp_umul_ppmm (hpl
, lpl
, ul
, vl
);
553 cl
= (lpl
< cl
) + hpl
;
563 mpn_addmul_1 (mp_ptr rp
, mp_srcptr up
, mp_size_t n
, mp_limb_t vl
)
565 mp_limb_t ul
, cl
, hpl
, lpl
, rl
;
573 gmp_umul_ppmm (hpl
, lpl
, ul
, vl
);
576 cl
= (lpl
< cl
) + hpl
;
589 mpn_submul_1 (mp_ptr rp
, mp_srcptr up
, mp_size_t n
, mp_limb_t vl
)
591 mp_limb_t ul
, cl
, hpl
, lpl
, rl
;
599 gmp_umul_ppmm (hpl
, lpl
, ul
, vl
);
602 cl
= (lpl
< cl
) + hpl
;
615 mpn_mul (mp_ptr rp
, mp_srcptr up
, mp_size_t un
, mp_srcptr vp
, mp_size_t vn
)
619 assert (!GMP_MPN_OVERLAP_P(rp
, un
+ vn
, up
, un
));
620 assert (!GMP_MPN_OVERLAP_P(rp
, un
+ vn
, vp
, vn
));
622 /* We first multiply by the low order limb. This result can be
623 stored, not added, to rp. We also avoid a loop for zeroing this
626 rp
[un
] = mpn_mul_1 (rp
, up
, un
, vp
[0]);
628 /* Now accumulate the product of up[] and the next higher limb from
634 rp
[un
] = mpn_addmul_1 (rp
, up
, un
, vp
[0]);
640 mpn_mul_n (mp_ptr rp
, mp_srcptr ap
, mp_srcptr bp
, mp_size_t n
)
642 mpn_mul (rp
, ap
, n
, bp
, n
);
646 mpn_sqr (mp_ptr rp
, mp_srcptr ap
, mp_size_t n
)
648 mpn_mul (rp
, ap
, n
, ap
, n
);
652 mpn_lshift (mp_ptr rp
, mp_srcptr up
, mp_size_t n
, unsigned int cnt
)
654 mp_limb_t high_limb
, low_limb
;
660 assert (cnt
< GMP_LIMB_BITS
);
665 tnc
= GMP_LIMB_BITS
- cnt
;
667 retval
= low_limb
>> tnc
;
668 high_limb
= (low_limb
<< cnt
);
673 *--rp
= high_limb
| (low_limb
>> tnc
);
674 high_limb
= (low_limb
<< cnt
);
682 mpn_rshift (mp_ptr rp
, mp_srcptr up
, mp_size_t n
, unsigned int cnt
)
684 mp_limb_t high_limb
, low_limb
;
690 assert (cnt
< GMP_LIMB_BITS
);
692 tnc
= GMP_LIMB_BITS
- cnt
;
694 retval
= (high_limb
<< tnc
);
695 low_limb
= high_limb
>> cnt
;
700 *rp
++ = low_limb
| (high_limb
<< tnc
);
701 low_limb
= high_limb
>> cnt
;
709 mpn_common_scan (mp_limb_t limb
, mp_size_t i
, mp_srcptr up
, mp_size_t un
,
714 assert (ux
== 0 || ux
== GMP_LIMB_MAX
);
715 assert (0 <= i
&& i
<= un
);
721 return (ux
== 0 ? ~(mp_bitcnt_t
) 0 : un
* GMP_LIMB_BITS
);
725 return (mp_bitcnt_t
) i
* GMP_LIMB_BITS
+ cnt
;
729 mpn_scan1 (mp_srcptr ptr
, mp_bitcnt_t bit
)
732 i
= bit
/ GMP_LIMB_BITS
;
734 return mpn_common_scan ( ptr
[i
] & (GMP_LIMB_MAX
<< (bit
% GMP_LIMB_BITS
)),
739 mpn_scan0 (mp_srcptr ptr
, mp_bitcnt_t bit
)
742 i
= bit
/ GMP_LIMB_BITS
;
744 return mpn_common_scan (~ptr
[i
] & (GMP_LIMB_MAX
<< (bit
% GMP_LIMB_BITS
)),
745 i
, ptr
, i
, GMP_LIMB_MAX
);
749 mpn_com (mp_ptr rp
, mp_srcptr up
, mp_size_t n
)
756 mpn_neg (mp_ptr rp
, mp_srcptr up
, mp_size_t n
)
766 mpn_com (++rp
, ++up
, --n
);
771 /* MPN division interface. */
773 /* The 3/2 inverse is defined as
775 m = floor( (B^3-1) / (B u1 + u0)) - B
778 mpn_invert_3by2 (mp_limb_t u1
, mp_limb_t u0
)
786 /* For notation, let b denote the half-limb base, so that B = b^2.
787 Split u1 = b uh + ul. */
788 ul
= u1
& GMP_LLIMB_MASK
;
789 uh
= u1
>> (GMP_LIMB_BITS
/ 2);
791 /* Approximation of the high half of quotient. Differs from the 2/1
792 inverse of the half limb uh, since we have already subtracted
794 qh
= (u1
^ GMP_LIMB_MAX
) / uh
;
796 /* Adjust to get a half-limb 3/2 inverse, i.e., we want
798 qh' = floor( (b^3 - 1) / u) - b = floor ((b^3 - b u - 1) / u
799 = floor( (b (~u) + b-1) / u),
803 r = b (~u) + b-1 - qh (b uh + ul)
804 = b (~u - qh uh) + b-1 - qh ul
806 Subtraction of qh ul may underflow, which implies adjustments.
807 But by normalization, 2 u >= B > qh ul, so we need to adjust by
811 r
= ((~u1
- (mp_limb_t
) qh
* uh
) << (GMP_LIMB_BITS
/ 2)) | GMP_LLIMB_MASK
;
813 p
= (mp_limb_t
) qh
* ul
;
814 /* Adjustment steps taken from udiv_qrnnd_c */
819 if (r
>= u1
) /* i.e. we didn't get carry when adding to r */
828 /* Low half of the quotient is
830 ql = floor ( (b r + b-1) / u1).
832 This is a 3/2 division (on half-limbs), for which qh is a
835 p
= (r
>> (GMP_LIMB_BITS
/ 2)) * qh
+ r
;
836 /* Unlike full-limb 3/2, we can add 1 without overflow. For this to
837 work, it is essential that ql is a full mp_limb_t. */
838 ql
= (p
>> (GMP_LIMB_BITS
/ 2)) + 1;
840 /* By the 3/2 trick, we don't need the high half limb. */
841 r
= (r
<< (GMP_LIMB_BITS
/ 2)) + GMP_LLIMB_MASK
- ql
* u1
;
843 if (r
>= (GMP_LIMB_MAX
& (p
<< (GMP_LIMB_BITS
/ 2))))
848 m
= ((mp_limb_t
) qh
<< (GMP_LIMB_BITS
/ 2)) + ql
;
856 /* Now m is the 2/1 inverse of u1. If u0 > 0, adjust it to become a
873 gmp_umul_ppmm (th
, tl
, u0
, m
);
878 m
-= ((r
> u1
) | ((r
== u1
) & (tl
> u0
)));
885 struct gmp_div_inverse
887 /* Normalization shift count. */
889 /* Normalized divisor (d0 unused for mpn_div_qr_1) */
891 /* Inverse, for 2/1 or 3/2. */
896 mpn_div_qr_1_invert (struct gmp_div_inverse
*inv
, mp_limb_t d
)
903 inv
->d1
= d
<< shift
;
904 inv
->di
= mpn_invert_limb (inv
->d1
);
908 mpn_div_qr_2_invert (struct gmp_div_inverse
*inv
,
909 mp_limb_t d1
, mp_limb_t d0
)
918 d1
= (d1
<< shift
) | (d0
>> (GMP_LIMB_BITS
- shift
));
923 inv
->di
= mpn_invert_3by2 (d1
, d0
);
927 mpn_div_qr_invert (struct gmp_div_inverse
*inv
,
928 mp_srcptr dp
, mp_size_t dn
)
933 mpn_div_qr_1_invert (inv
, dp
[0]);
935 mpn_div_qr_2_invert (inv
, dp
[1], dp
[0]);
948 d1
= (d1
<< shift
) | (d0
>> (GMP_LIMB_BITS
- shift
));
949 d0
= (d0
<< shift
) | (dp
[dn
-3] >> (GMP_LIMB_BITS
- shift
));
953 inv
->di
= mpn_invert_3by2 (d1
, d0
);
957 /* Not matching current public gmp interface, rather corresponding to
958 the sbpi1_div_* functions. */
960 mpn_div_qr_1_preinv (mp_ptr qp
, mp_srcptr np
, mp_size_t nn
,
961 const struct gmp_div_inverse
*inv
)
970 /* Shift, reusing qp area if possible. In-place shift if qp == np. */
975 tp
= gmp_alloc_limbs (tn
);
977 r
= mpn_lshift (tp
, np
, nn
, inv
->shift
);
989 gmp_udiv_qrnnd_preinv (q
, r
, r
, np
[nn
], d
, di
);
994 gmp_free_limbs (tp
, tn
);
996 return r
>> inv
->shift
;
1000 mpn_div_qr_2_preinv (mp_ptr qp
, mp_ptr np
, mp_size_t nn
,
1001 const struct gmp_div_inverse
*inv
)
1005 mp_limb_t d1
, d0
, di
, r1
, r0
;
1014 r1
= mpn_lshift (np
, np
, nn
, shift
);
1025 gmp_udiv_qr_3by2 (q
, r1
, r0
, r1
, r0
, n0
, d1
, d0
, di
);
1034 assert ((r0
& (GMP_LIMB_MAX
>> (GMP_LIMB_BITS
- shift
))) == 0);
1035 r0
= (r0
>> shift
) | (r1
<< (GMP_LIMB_BITS
- shift
));
1044 mpn_div_qr_pi1 (mp_ptr qp
,
1045 mp_ptr np
, mp_size_t nn
, mp_limb_t n1
,
1046 mp_srcptr dp
, mp_size_t dn
,
1061 assert ((d1
& GMP_LIMB_HIGHBIT
) != 0);
1062 /* Iteration variable is the index of the q limb.
1064 * We divide <n1, np[dn-1+i], np[dn-2+i], np[dn-3+i],..., np[i]>
1065 * by <d1, d0, dp[dn-3], ..., dp[0] >
1071 mp_limb_t n0
= np
[dn
-1+i
];
1073 if (n1
== d1
&& n0
== d0
)
1076 mpn_submul_1 (np
+i
, dp
, dn
, q
);
1077 n1
= np
[dn
-1+i
]; /* update n1, last loop's value will now be invalid */
1081 gmp_udiv_qr_3by2 (q
, n1
, n0
, n1
, n0
, np
[dn
-2+i
], d1
, d0
, dinv
);
1083 cy
= mpn_submul_1 (np
+ i
, dp
, dn
-2, q
);
1093 n1
+= d1
+ mpn_add_n (np
+ i
, np
+ i
, dp
, dn
- 1);
1107 mpn_div_qr_preinv (mp_ptr qp
, mp_ptr np
, mp_size_t nn
,
1108 mp_srcptr dp
, mp_size_t dn
,
1109 const struct gmp_div_inverse
*inv
)
1115 np
[0] = mpn_div_qr_1_preinv (qp
, np
, nn
, inv
);
1117 mpn_div_qr_2_preinv (qp
, np
, nn
, inv
);
1123 assert (inv
->d1
== dp
[dn
-1]);
1124 assert (inv
->d0
== dp
[dn
-2]);
1125 assert ((inv
->d1
& GMP_LIMB_HIGHBIT
) != 0);
1129 nh
= mpn_lshift (np
, np
, nn
, shift
);
1133 mpn_div_qr_pi1 (qp
, np
, nn
, nh
, dp
, dn
, inv
->di
);
1136 gmp_assert_nocarry (mpn_rshift (np
, np
, dn
, shift
));
1141 mpn_div_qr (mp_ptr qp
, mp_ptr np
, mp_size_t nn
, mp_srcptr dp
, mp_size_t dn
)
1143 struct gmp_div_inverse inv
;
1149 mpn_div_qr_invert (&inv
, dp
, dn
);
1150 if (dn
> 2 && inv
.shift
> 0)
1152 tp
= gmp_alloc_limbs (dn
);
1153 gmp_assert_nocarry (mpn_lshift (tp
, dp
, dn
, inv
.shift
));
1156 mpn_div_qr_preinv (qp
, np
, nn
, dp
, dn
, &inv
);
1158 gmp_free_limbs (tp
, dn
);
1162 /* MPN base conversion. */
1164 mpn_base_power_of_two_p (unsigned b
)
1180 struct mpn_base_info
1182 /* bb is the largest power of the base which fits in one limb, and
1183 exp is the corresponding exponent. */
1189 mpn_get_base_info (struct mpn_base_info
*info
, mp_limb_t b
)
1195 m
= GMP_LIMB_MAX
/ b
;
1196 for (exp
= 1, p
= b
; p
<= m
; exp
++)
1204 mpn_limb_size_in_base_2 (mp_limb_t u
)
1210 return GMP_LIMB_BITS
- shift
;
1214 mpn_get_str_bits (unsigned char *sp
, unsigned bits
, mp_srcptr up
, mp_size_t un
)
1221 sn
= ((un
- 1) * GMP_LIMB_BITS
+ mpn_limb_size_in_base_2 (up
[un
-1])
1224 mask
= (1U << bits
) - 1;
1226 for (i
= 0, j
= sn
, shift
= 0; j
-- > 0;)
1228 unsigned char digit
= up
[i
] >> shift
;
1232 if (shift
>= GMP_LIMB_BITS
&& ++i
< un
)
1234 shift
-= GMP_LIMB_BITS
;
1235 digit
|= up
[i
] << (bits
- shift
);
1237 sp
[j
] = digit
& mask
;
1242 /* We generate digits from the least significant end, and reverse at
1245 mpn_limb_get_str (unsigned char *sp
, mp_limb_t w
,
1246 const struct gmp_div_inverse
*binv
)
1249 for (i
= 0; w
> 0; i
++)
1253 h
= w
>> (GMP_LIMB_BITS
- binv
->shift
);
1254 l
= w
<< binv
->shift
;
1256 gmp_udiv_qrnnd_preinv (w
, r
, h
, l
, binv
->d1
, binv
->di
);
1257 assert ((r
& (GMP_LIMB_MAX
>> (GMP_LIMB_BITS
- binv
->shift
))) == 0);
1266 mpn_get_str_other (unsigned char *sp
,
1267 int base
, const struct mpn_base_info
*info
,
1268 mp_ptr up
, mp_size_t un
)
1270 struct gmp_div_inverse binv
;
1274 mpn_div_qr_1_invert (&binv
, base
);
1280 struct gmp_div_inverse bbinv
;
1281 mpn_div_qr_1_invert (&bbinv
, info
->bb
);
1287 w
= mpn_div_qr_1_preinv (up
, up
, un
, &bbinv
);
1288 un
-= (up
[un
-1] == 0);
1289 done
= mpn_limb_get_str (sp
+ sn
, w
, &binv
);
1291 for (sn
+= done
; done
< info
->exp
; done
++)
1296 sn
+= mpn_limb_get_str (sp
+ sn
, up
[0], &binv
);
1299 for (i
= 0; 2*i
+ 1 < sn
; i
++)
1301 unsigned char t
= sp
[i
];
1302 sp
[i
] = sp
[sn
- i
- 1];
1310 mpn_get_str (unsigned char *sp
, int base
, mp_ptr up
, mp_size_t un
)
1315 assert (up
[un
-1] > 0);
1317 bits
= mpn_base_power_of_two_p (base
);
1319 return mpn_get_str_bits (sp
, bits
, up
, un
);
1322 struct mpn_base_info info
;
1324 mpn_get_base_info (&info
, base
);
1325 return mpn_get_str_other (sp
, base
, &info
, up
, un
);
1330 mpn_set_str_bits (mp_ptr rp
, const unsigned char *sp
, size_t sn
,
1337 for (j
= sn
, rn
= 0, shift
= 0; j
-- > 0; )
1346 rp
[rn
-1] |= (mp_limb_t
) sp
[j
] << shift
;
1348 if (shift
>= GMP_LIMB_BITS
)
1350 shift
-= GMP_LIMB_BITS
;
1352 rp
[rn
++] = (mp_limb_t
) sp
[j
] >> (bits
- shift
);
1356 rn
= mpn_normalized_size (rp
, rn
);
1360 /* Result is usually normalized, except for all-zero input, in which
1361 case a single zero limb is written at *RP, and 1 is returned. */
1363 mpn_set_str_other (mp_ptr rp
, const unsigned char *sp
, size_t sn
,
1364 mp_limb_t b
, const struct mpn_base_info
*info
)
1373 k
= 1 + (sn
- 1) % info
->exp
;
1378 w
= w
* b
+ sp
[j
++];
1382 for (rn
= 1; j
< sn
;)
1387 for (k
= 1; k
< info
->exp
; k
++)
1388 w
= w
* b
+ sp
[j
++];
1390 cy
= mpn_mul_1 (rp
, rp
, rn
, info
->bb
);
1391 cy
+= mpn_add_1 (rp
, rp
, rn
, w
);
1401 mpn_set_str (mp_ptr rp
, const unsigned char *sp
, size_t sn
, int base
)
1408 bits
= mpn_base_power_of_two_p (base
);
1410 return mpn_set_str_bits (rp
, sp
, sn
, bits
);
1413 struct mpn_base_info info
;
1415 mpn_get_base_info (&info
, base
);
1416 return mpn_set_str_other (rp
, sp
, sn
, base
, &info
);
/* Shared read-only limb that freshly initialized mpz_t values point
   at (see the _mp_d assignment below), so _mp_d is never NULL.
   NOTE(review): the 0xc1a0 pattern appears to be an arbitrary
   recognizable sentinel — confirm against upstream mini-gmp.  */
1425 static const mp_limb_t dummy_limb
= GMP_LIMB_MAX
& 0xc1a0;
1429 r
->_mp_d
= (mp_ptr
) &dummy_limb
;
1432 /* The utility of this function is a bit limited, since many functions
1433 assigns the result variable using mpz_swap. */
1435 mpz_init2 (mpz_t r
, mp_bitcnt_t bits
)
1439 bits
-= (bits
!= 0); /* Round down, except if 0 */
1440 rn
= 1 + bits
/ GMP_LIMB_BITS
;
1444 r
->_mp_d
= gmp_alloc_limbs (rn
);
1451 gmp_free_limbs (r
->_mp_d
, r
->_mp_alloc
);
1455 mpz_realloc (mpz_t r
, mp_size_t size
)
1457 size
= GMP_MAX (size
, 1);
1460 r
->_mp_d
= gmp_realloc_limbs (r
->_mp_d
, r
->_mp_alloc
, size
);
1462 r
->_mp_d
= gmp_alloc_limbs (size
);
1463 r
->_mp_alloc
= size
;
1465 if (GMP_ABS (r
->_mp_size
) > size
)
1471 /* Realloc for an mpz_t WHAT if it has less than NEEDED limbs. */
1472 #define MPZ_REALLOC(z,n) ((n) > (z)->_mp_alloc \
1473 ? mpz_realloc(z,n) \
1476 /* MPZ assignment and basic conversions. */
1478 mpz_set_si (mpz_t r
, signed long int x
)
1483 if (GMP_LIMB_BITS
< GMP_ULONG_BITS
)
1485 mpz_set_ui (r
, GMP_NEG_CAST (unsigned long int, x
));
1491 MPZ_REALLOC (r
, 1)[0] = GMP_NEG_CAST (unsigned long int, x
);
1496 mpz_set_ui (mpz_t r
, unsigned long int x
)
1501 MPZ_REALLOC (r
, 1)[0] = x
;
1502 if (GMP_LIMB_BITS
< GMP_ULONG_BITS
)
1504 int LOCAL_GMP_LIMB_BITS
= GMP_LIMB_BITS
;
1505 while (x
>>= LOCAL_GMP_LIMB_BITS
)
1508 MPZ_REALLOC (r
, r
->_mp_size
)[r
->_mp_size
- 1] = x
;
1517 mpz_set (mpz_t r
, const mpz_t x
)
1519 /* Allow the NOP r == x */
1525 n
= GMP_ABS (x
->_mp_size
);
1526 rp
= MPZ_REALLOC (r
, n
);
1528 mpn_copyi (rp
, x
->_mp_d
, n
);
1529 r
->_mp_size
= x
->_mp_size
;
1534 mpz_init_set_si (mpz_t r
, signed long int x
)
1541 mpz_init_set_ui (mpz_t r
, unsigned long int x
)
1548 mpz_init_set (mpz_t r
, const mpz_t x
)
1555 mpz_fits_slong_p (const mpz_t u
)
1557 return mpz_cmp_si (u
, LONG_MAX
) <= 0 && mpz_cmp_si (u
, LONG_MIN
) >= 0;
1561 mpn_absfits_ulong_p (mp_srcptr up
, mp_size_t un
)
1563 int ulongsize
= GMP_ULONG_BITS
/ GMP_LIMB_BITS
;
1564 mp_limb_t ulongrem
= 0;
1566 if (GMP_ULONG_BITS
% GMP_LIMB_BITS
!= 0)
1567 ulongrem
= (mp_limb_t
) (ULONG_MAX
>> GMP_LIMB_BITS
* ulongsize
) + 1;
1569 return un
<= ulongsize
|| (up
[ulongsize
] < ulongrem
&& un
== ulongsize
+ 1);
1573 mpz_fits_ulong_p (const mpz_t u
)
1575 mp_size_t us
= u
->_mp_size
;
1577 return us
>= 0 && mpn_absfits_ulong_p (u
->_mp_d
, us
);
1581 mpz_fits_sint_p (const mpz_t u
)
1583 return mpz_cmp_si (u
, INT_MAX
) <= 0 && mpz_cmp_si (u
, INT_MIN
) >= 0;
1587 mpz_fits_uint_p (const mpz_t u
)
1589 return u
->_mp_size
>= 0 && mpz_cmpabs_ui (u
, UINT_MAX
) <= 0;
1593 mpz_fits_sshort_p (const mpz_t u
)
1595 return mpz_cmp_si (u
, SHRT_MAX
) <= 0 && mpz_cmp_si (u
, SHRT_MIN
) >= 0;
1599 mpz_fits_ushort_p (const mpz_t u
)
1601 return u
->_mp_size
>= 0 && mpz_cmpabs_ui (u
, USHRT_MAX
) <= 0;
1605 mpz_get_si (const mpz_t u
)
1607 unsigned long r
= mpz_get_ui (u
);
1608 unsigned long c
= -LONG_MAX
- LONG_MIN
;
1610 if (u
->_mp_size
< 0)
1611 /* This expression is necessary to properly handle -LONG_MIN */
1612 return -(long) c
- (long) ((r
- c
) & LONG_MAX
);
1614 return (long) (r
& LONG_MAX
);
1618 mpz_get_ui (const mpz_t u
)
1620 if (GMP_LIMB_BITS
< GMP_ULONG_BITS
)
1622 int LOCAL_GMP_LIMB_BITS
= GMP_LIMB_BITS
;
1623 unsigned long r
= 0;
1624 mp_size_t n
= GMP_ABS (u
->_mp_size
);
1625 n
= GMP_MIN (n
, 1 + (mp_size_t
) (GMP_ULONG_BITS
- 1) / GMP_LIMB_BITS
);
1627 r
= (r
<< LOCAL_GMP_LIMB_BITS
) + u
->_mp_d
[n
];
1631 return u
->_mp_size
== 0 ? 0 : u
->_mp_d
[0];
1635 mpz_size (const mpz_t u
)
1637 return GMP_ABS (u
->_mp_size
);
1641 mpz_getlimbn (const mpz_t u
, mp_size_t n
)
1643 if (n
>= 0 && n
< GMP_ABS (u
->_mp_size
))
1650 mpz_realloc2 (mpz_t x
, mp_bitcnt_t n
)
1652 mpz_realloc (x
, 1 + (n
- (n
!= 0)) / GMP_LIMB_BITS
);
1656 mpz_limbs_read (mpz_srcptr x
)
1662 mpz_limbs_modify (mpz_t x
, mp_size_t n
)
1665 return MPZ_REALLOC (x
, n
);
1669 mpz_limbs_write (mpz_t x
, mp_size_t n
)
1671 return mpz_limbs_modify (x
, n
);
1675 mpz_limbs_finish (mpz_t x
, mp_size_t xs
)
1678 xn
= mpn_normalized_size (x
->_mp_d
, GMP_ABS (xs
));
1679 x
->_mp_size
= xs
< 0 ? -xn
: xn
;
1683 mpz_roinit_normal_n (mpz_t x
, mp_srcptr xp
, mp_size_t xs
)
1686 x
->_mp_d
= (mp_ptr
) xp
;
1692 mpz_roinit_n (mpz_t x
, mp_srcptr xp
, mp_size_t xs
)
1694 mpz_roinit_normal_n (x
, xp
, xs
);
1695 mpz_limbs_finish (x
, xs
);
1700 /* Conversions and comparison to double. */
1702 mpz_set_d (mpz_t r
, double x
)
1711 /* x != x is true when x is a NaN, and x == x * 0.5 is true when x is
1712 zero or infinity. */
1713 if (x
!= x
|| x
== x
* 0.5)
1728 B
= 4.0 * (double) (GMP_LIMB_HIGHBIT
>> 1);
1730 for (rn
= 1; x
>= B
; rn
++)
1733 rp
= MPZ_REALLOC (r
, rn
);
1749 r
->_mp_size
= sign
? - rn
: rn
;
1753 mpz_init_set_d (mpz_t r
, double x
)
1760 mpz_get_d (const mpz_t u
)
1766 double B
= 4.0 * (double) (GMP_LIMB_HIGHBIT
>> 1);
1768 un
= GMP_ABS (u
->_mp_size
);
1775 m
= m
+ GMP_DBL_MANT_BITS
- GMP_LIMB_BITS
;
1777 l
&= GMP_LIMB_MAX
<< -m
;
1779 for (x
= l
; --un
>= 0;)
1786 l
&= GMP_LIMB_MAX
<< -m
;
1791 if (u
->_mp_size
< 0)
1798 mpz_cmpabs_d (const mpz_t x
, double d
)
1811 B
= 4.0 * (double) (GMP_LIMB_HIGHBIT
>> 1);
1814 /* Scale d so it can be compared with the top limb. */
1815 for (i
= 1; i
< xn
; i
++)
1821 /* Compare floor(d) to top limb, subtract and cancel when equal. */
1822 for (i
= xn
; i
-- > 0;)
1839 mpz_cmp_d (const mpz_t x
, double d
)
1841 if (x
->_mp_size
< 0)
1846 return -mpz_cmpabs_d (x
, d
);
1853 return mpz_cmpabs_d (x
, d
);
1858 /* MPZ comparisons and the like. */
1860 mpz_sgn (const mpz_t u
)
1862 return GMP_CMP (u
->_mp_size
, 0);
1866 mpz_cmp_si (const mpz_t u
, long v
)
1868 mp_size_t usize
= u
->_mp_size
;
1871 return mpz_cmp_ui (u
, v
);
1872 else if (usize
>= 0)
1875 return - mpz_cmpabs_ui (u
, GMP_NEG_CAST (unsigned long int, v
));
1879 mpz_cmp_ui (const mpz_t u
, unsigned long v
)
1881 mp_size_t usize
= u
->_mp_size
;
1886 return mpz_cmpabs_ui (u
, v
);
1890 mpz_cmp (const mpz_t a
, const mpz_t b
)
1892 mp_size_t asize
= a
->_mp_size
;
1893 mp_size_t bsize
= b
->_mp_size
;
1896 return (asize
< bsize
) ? -1 : 1;
1897 else if (asize
>= 0)
1898 return mpn_cmp (a
->_mp_d
, b
->_mp_d
, asize
);
1900 return mpn_cmp (b
->_mp_d
, a
->_mp_d
, -asize
);
1904 mpz_cmpabs_ui (const mpz_t u
, unsigned long v
)
1906 mp_size_t un
= GMP_ABS (u
->_mp_size
);
1908 if (! mpn_absfits_ulong_p (u
->_mp_d
, un
))
1912 unsigned long uu
= mpz_get_ui (u
);
1913 return GMP_CMP(uu
, v
);
1918 mpz_cmpabs (const mpz_t u
, const mpz_t v
)
1920 return mpn_cmp4 (u
->_mp_d
, GMP_ABS (u
->_mp_size
),
1921 v
->_mp_d
, GMP_ABS (v
->_mp_size
));
1925 mpz_abs (mpz_t r
, const mpz_t u
)
1928 r
->_mp_size
= GMP_ABS (r
->_mp_size
);
1932 mpz_neg (mpz_t r
, const mpz_t u
)
1935 r
->_mp_size
= -r
->_mp_size
;
1939 mpz_swap (mpz_t u
, mpz_t v
)
1941 MP_SIZE_T_SWAP (u
->_mp_size
, v
->_mp_size
);
1942 MP_SIZE_T_SWAP (u
->_mp_alloc
, v
->_mp_alloc
);
1943 MP_PTR_SWAP (u
->_mp_d
, v
->_mp_d
);
1947 /* MPZ addition and subtraction */
1951 mpz_add_ui (mpz_t r
, const mpz_t a
, unsigned long b
)
1954 mpz_init_set_ui (bb
, b
);
1960 mpz_sub_ui (mpz_t r
, const mpz_t a
, unsigned long b
)
1962 mpz_ui_sub (r
, b
, a
);
1967 mpz_ui_sub (mpz_t r
, unsigned long a
, const mpz_t b
)
1970 mpz_add_ui (r
, r
, a
);
1974 mpz_abs_add (mpz_t r
, const mpz_t a
, const mpz_t b
)
1976 mp_size_t an
= GMP_ABS (a
->_mp_size
);
1977 mp_size_t bn
= GMP_ABS (b
->_mp_size
);
1983 MPZ_SRCPTR_SWAP (a
, b
);
1984 MP_SIZE_T_SWAP (an
, bn
);
1987 rp
= MPZ_REALLOC (r
, an
+ 1);
1988 cy
= mpn_add (rp
, a
->_mp_d
, an
, b
->_mp_d
, bn
);
1996 mpz_abs_sub (mpz_t r
, const mpz_t a
, const mpz_t b
)
1998 mp_size_t an
= GMP_ABS (a
->_mp_size
);
1999 mp_size_t bn
= GMP_ABS (b
->_mp_size
);
2003 cmp
= mpn_cmp4 (a
->_mp_d
, an
, b
->_mp_d
, bn
);
2006 rp
= MPZ_REALLOC (r
, an
);
2007 gmp_assert_nocarry (mpn_sub (rp
, a
->_mp_d
, an
, b
->_mp_d
, bn
));
2008 return mpn_normalized_size (rp
, an
);
2012 rp
= MPZ_REALLOC (r
, bn
);
2013 gmp_assert_nocarry (mpn_sub (rp
, b
->_mp_d
, bn
, a
->_mp_d
, an
));
2014 return -mpn_normalized_size (rp
, bn
);
2021 mpz_add (mpz_t r
, const mpz_t a
, const mpz_t b
)
2025 if ( (a
->_mp_size
^ b
->_mp_size
) >= 0)
2026 rn
= mpz_abs_add (r
, a
, b
);
2028 rn
= mpz_abs_sub (r
, a
, b
);
2030 r
->_mp_size
= a
->_mp_size
>= 0 ? rn
: - rn
;
2034 mpz_sub (mpz_t r
, const mpz_t a
, const mpz_t b
)
2038 if ( (a
->_mp_size
^ b
->_mp_size
) >= 0)
2039 rn
= mpz_abs_sub (r
, a
, b
);
2041 rn
= mpz_abs_add (r
, a
, b
);
2043 r
->_mp_size
= a
->_mp_size
>= 0 ? rn
: - rn
;
2047 /* MPZ multiplication */
2049 mpz_mul_si (mpz_t r
, const mpz_t u
, long int v
)
2053 mpz_mul_ui (r
, u
, GMP_NEG_CAST (unsigned long int, v
));
2057 mpz_mul_ui (r
, u
, v
);
2061 mpz_mul_ui (mpz_t r
, const mpz_t u
, unsigned long int v
)
2064 mpz_init_set_ui (vv
, v
);
2071 mpz_mul (mpz_t r
, const mpz_t u
, const mpz_t v
)
2074 mp_size_t un
, vn
, rn
;
2081 if (un
== 0 || vn
== 0)
2087 sign
= (un
^ vn
) < 0;
2092 mpz_init2 (t
, (un
+ vn
) * GMP_LIMB_BITS
);
2096 mpn_mul (tp
, u
->_mp_d
, un
, v
->_mp_d
, vn
);
2098 mpn_mul (tp
, v
->_mp_d
, vn
, u
->_mp_d
, un
);
2101 rn
-= tp
[rn
-1] == 0;
2103 t
->_mp_size
= sign
? - rn
: rn
;
2109 mpz_mul_2exp (mpz_t r
, const mpz_t u
, mp_bitcnt_t bits
)
2116 un
= GMP_ABS (u
->_mp_size
);
2123 limbs
= bits
/ GMP_LIMB_BITS
;
2124 shift
= bits
% GMP_LIMB_BITS
;
2126 rn
= un
+ limbs
+ (shift
> 0);
2127 rp
= MPZ_REALLOC (r
, rn
);
2130 mp_limb_t cy
= mpn_lshift (rp
+ limbs
, u
->_mp_d
, un
, shift
);
2135 mpn_copyd (rp
+ limbs
, u
->_mp_d
, un
);
2137 mpn_zero (rp
, limbs
);
2139 r
->_mp_size
= (u
->_mp_size
< 0) ? - rn
: rn
;
2143 mpz_addmul_ui (mpz_t r
, const mpz_t u
, unsigned long int v
)
2146 mpz_init_set_ui (t
, v
);
2153 mpz_submul_ui (mpz_t r
, const mpz_t u
, unsigned long int v
)
2156 mpz_init_set_ui (t
, v
);
2163 mpz_addmul (mpz_t r
, const mpz_t u
, const mpz_t v
)
2173 mpz_submul (mpz_t r
, const mpz_t u
, const mpz_t v
)
/* Rounding mode for the shared division helper mpz_div_qr:
   floor (round toward -infinity), ceiling (round toward +infinity),
   or truncation (round toward zero).  */
2184 enum mpz_div_round_mode
{ GMP_DIV_FLOOR
, GMP_DIV_CEIL
, GMP_DIV_TRUNC
};
2186 /* Allows q or r to be zero. Returns 1 iff remainder is non-zero. */
2188 mpz_div_qr (mpz_t q
, mpz_t r
,
2189 const mpz_t n
, const mpz_t d
, enum mpz_div_round_mode mode
)
2191 mp_size_t ns
, ds
, nn
, dn
, qs
;
2196 gmp_die("mpz_div_qr: Divide by zero.");
2214 if (mode
== GMP_DIV_CEIL
&& qs
>= 0)
2216 /* q = 1, r = n - d */
2222 else if (mode
== GMP_DIV_FLOOR
&& qs
< 0)
2224 /* q = -1, r = n + d */
2246 mpz_init_set (tr
, n
);
2253 mpz_init2 (tq
, qn
* GMP_LIMB_BITS
);
2259 mpn_div_qr (qp
, np
, nn
, d
->_mp_d
, dn
);
2263 qn
-= (qp
[qn
-1] == 0);
2265 tq
->_mp_size
= qs
< 0 ? -qn
: qn
;
2267 rn
= mpn_normalized_size (np
, dn
);
2268 tr
->_mp_size
= ns
< 0 ? - rn
: rn
;
2270 if (mode
== GMP_DIV_FLOOR
&& qs
< 0 && rn
!= 0)
2273 mpz_sub_ui (tq
, tq
, 1);
2275 mpz_add (tr
, tr
, d
);
2277 else if (mode
== GMP_DIV_CEIL
&& qs
>= 0 && rn
!= 0)
2280 mpz_add_ui (tq
, tq
, 1);
2282 mpz_sub (tr
, tr
, d
);
2300 mpz_cdiv_qr (mpz_t q
, mpz_t r
, const mpz_t n
, const mpz_t d
)
2302 mpz_div_qr (q
, r
, n
, d
, GMP_DIV_CEIL
);
2306 mpz_fdiv_qr (mpz_t q
, mpz_t r
, const mpz_t n
, const mpz_t d
)
2308 mpz_div_qr (q
, r
, n
, d
, GMP_DIV_FLOOR
);
2312 mpz_tdiv_qr (mpz_t q
, mpz_t r
, const mpz_t n
, const mpz_t d
)
2314 mpz_div_qr (q
, r
, n
, d
, GMP_DIV_TRUNC
);
2318 mpz_cdiv_q (mpz_t q
, const mpz_t n
, const mpz_t d
)
2320 mpz_div_qr (q
, NULL
, n
, d
, GMP_DIV_CEIL
);
2324 mpz_fdiv_q (mpz_t q
, const mpz_t n
, const mpz_t d
)
2326 mpz_div_qr (q
, NULL
, n
, d
, GMP_DIV_FLOOR
);
2330 mpz_tdiv_q (mpz_t q
, const mpz_t n
, const mpz_t d
)
2332 mpz_div_qr (q
, NULL
, n
, d
, GMP_DIV_TRUNC
);
2336 mpz_cdiv_r (mpz_t r
, const mpz_t n
, const mpz_t d
)
2338 mpz_div_qr (NULL
, r
, n
, d
, GMP_DIV_CEIL
);
2342 mpz_fdiv_r (mpz_t r
, const mpz_t n
, const mpz_t d
)
2344 mpz_div_qr (NULL
, r
, n
, d
, GMP_DIV_FLOOR
);
2348 mpz_tdiv_r (mpz_t r
, const mpz_t n
, const mpz_t d
)
2350 mpz_div_qr (NULL
, r
, n
, d
, GMP_DIV_TRUNC
);
2354 mpz_mod (mpz_t r
, const mpz_t n
, const mpz_t d
)
2356 mpz_div_qr (NULL
, r
, n
, d
, d
->_mp_size
>= 0 ? GMP_DIV_FLOOR
: GMP_DIV_CEIL
);
2360 mpz_div_q_2exp (mpz_t q
, const mpz_t u
, mp_bitcnt_t bit_index
,
2361 enum mpz_div_round_mode mode
)
2374 limb_cnt
= bit_index
/ GMP_LIMB_BITS
;
2375 qn
= GMP_ABS (un
) - limb_cnt
;
2376 bit_index
%= GMP_LIMB_BITS
;
2378 if (mode
== ((un
> 0) ? GMP_DIV_CEIL
: GMP_DIV_FLOOR
)) /* un != 0 here. */
2379 /* Note: Below, the final indexing at limb_cnt is valid because at
2380 that point we have qn > 0. */
2382 || !mpn_zero_p (u
->_mp_d
, limb_cnt
)
2383 || (u
->_mp_d
[limb_cnt
]
2384 & (((mp_limb_t
) 1 << bit_index
) - 1)));
2392 qp
= MPZ_REALLOC (q
, qn
);
2396 mpn_rshift (qp
, u
->_mp_d
+ limb_cnt
, qn
, bit_index
);
2397 qn
-= qp
[qn
- 1] == 0;
2401 mpn_copyi (qp
, u
->_mp_d
+ limb_cnt
, qn
);
2408 mpz_add_ui (q
, q
, 1);
2414 mpz_div_r_2exp (mpz_t r
, const mpz_t u
, mp_bitcnt_t bit_index
,
2415 enum mpz_div_round_mode mode
)
2417 mp_size_t us
, un
, rn
;
2422 if (us
== 0 || bit_index
== 0)
2427 rn
= (bit_index
+ GMP_LIMB_BITS
- 1) / GMP_LIMB_BITS
;
2430 rp
= MPZ_REALLOC (r
, rn
);
2433 mask
= GMP_LIMB_MAX
>> (rn
* GMP_LIMB_BITS
- bit_index
);
2437 /* Quotient (with truncation) is zero, and remainder is
2439 if (mode
== ((us
> 0) ? GMP_DIV_CEIL
: GMP_DIV_FLOOR
)) /* us != 0 here. */
2441 /* Have to negate and sign extend. */
2444 gmp_assert_nocarry (! mpn_neg (rp
, u
->_mp_d
, un
));
2445 for (i
= un
; i
< rn
- 1; i
++)
2446 rp
[i
] = GMP_LIMB_MAX
;
2455 mpn_copyi (rp
, u
->_mp_d
, un
);
2463 mpn_copyi (rp
, u
->_mp_d
, rn
- 1);
2465 rp
[rn
-1] = u
->_mp_d
[rn
-1] & mask
;
2467 if (mode
== ((us
> 0) ? GMP_DIV_CEIL
: GMP_DIV_FLOOR
)) /* us != 0 here. */
2469 /* If r != 0, compute 2^{bit_count} - r. */
2470 mpn_neg (rp
, rp
, rn
);
2474 /* us is not used for anything else, so we can modify it
2475 here to indicate flipped sign. */
2479 rn
= mpn_normalized_size (rp
, rn
);
2480 r
->_mp_size
= us
< 0 ? -rn
: rn
;
2484 mpz_cdiv_q_2exp (mpz_t r
, const mpz_t u
, mp_bitcnt_t cnt
)
2486 mpz_div_q_2exp (r
, u
, cnt
, GMP_DIV_CEIL
);
2490 mpz_fdiv_q_2exp (mpz_t r
, const mpz_t u
, mp_bitcnt_t cnt
)
2492 mpz_div_q_2exp (r
, u
, cnt
, GMP_DIV_FLOOR
);
2496 mpz_tdiv_q_2exp (mpz_t r
, const mpz_t u
, mp_bitcnt_t cnt
)
2498 mpz_div_q_2exp (r
, u
, cnt
, GMP_DIV_TRUNC
);
2502 mpz_cdiv_r_2exp (mpz_t r
, const mpz_t u
, mp_bitcnt_t cnt
)
2504 mpz_div_r_2exp (r
, u
, cnt
, GMP_DIV_CEIL
);
2508 mpz_fdiv_r_2exp (mpz_t r
, const mpz_t u
, mp_bitcnt_t cnt
)
2510 mpz_div_r_2exp (r
, u
, cnt
, GMP_DIV_FLOOR
);
2514 mpz_tdiv_r_2exp (mpz_t r
, const mpz_t u
, mp_bitcnt_t cnt
)
2516 mpz_div_r_2exp (r
, u
, cnt
, GMP_DIV_TRUNC
);
2520 mpz_divexact (mpz_t q
, const mpz_t n
, const mpz_t d
)
2522 gmp_assert_nocarry (mpz_div_qr (q
, NULL
, n
, d
, GMP_DIV_TRUNC
));
2526 mpz_divisible_p (const mpz_t n
, const mpz_t d
)
2528 return mpz_div_qr (NULL
, NULL
, n
, d
, GMP_DIV_TRUNC
) == 0;
2532 mpz_congruent_p (const mpz_t a
, const mpz_t b
, const mpz_t m
)
2537 /* a == b (mod 0) iff a == b */
2538 if (mpz_sgn (m
) == 0)
2539 return (mpz_cmp (a
, b
) == 0);
2543 res
= mpz_divisible_p (t
, m
);
2549 static unsigned long
2550 mpz_div_qr_ui (mpz_t q
, mpz_t r
,
2551 const mpz_t n
, unsigned long d
, enum mpz_div_round_mode mode
)
2557 mpz_init_set_ui (dd
, d
);
2558 mpz_div_qr (q
, rr
, n
, dd
, mode
);
2560 ret
= mpz_get_ui (rr
);
2570 mpz_cdiv_qr_ui (mpz_t q
, mpz_t r
, const mpz_t n
, unsigned long d
)
2572 return mpz_div_qr_ui (q
, r
, n
, d
, GMP_DIV_CEIL
);
2576 mpz_fdiv_qr_ui (mpz_t q
, mpz_t r
, const mpz_t n
, unsigned long d
)
2578 return mpz_div_qr_ui (q
, r
, n
, d
, GMP_DIV_FLOOR
);
2582 mpz_tdiv_qr_ui (mpz_t q
, mpz_t r
, const mpz_t n
, unsigned long d
)
2584 return mpz_div_qr_ui (q
, r
, n
, d
, GMP_DIV_TRUNC
);
2588 mpz_cdiv_q_ui (mpz_t q
, const mpz_t n
, unsigned long d
)
2590 return mpz_div_qr_ui (q
, NULL
, n
, d
, GMP_DIV_CEIL
);
2594 mpz_fdiv_q_ui (mpz_t q
, const mpz_t n
, unsigned long d
)
2596 return mpz_div_qr_ui (q
, NULL
, n
, d
, GMP_DIV_FLOOR
);
2600 mpz_tdiv_q_ui (mpz_t q
, const mpz_t n
, unsigned long d
)
2602 return mpz_div_qr_ui (q
, NULL
, n
, d
, GMP_DIV_TRUNC
);
2606 mpz_cdiv_r_ui (mpz_t r
, const mpz_t n
, unsigned long d
)
2608 return mpz_div_qr_ui (NULL
, r
, n
, d
, GMP_DIV_CEIL
);
2611 mpz_fdiv_r_ui (mpz_t r
, const mpz_t n
, unsigned long d
)
2613 return mpz_div_qr_ui (NULL
, r
, n
, d
, GMP_DIV_FLOOR
);
2616 mpz_tdiv_r_ui (mpz_t r
, const mpz_t n
, unsigned long d
)
2618 return mpz_div_qr_ui (NULL
, r
, n
, d
, GMP_DIV_TRUNC
);
2622 mpz_cdiv_ui (const mpz_t n
, unsigned long d
)
2624 return mpz_div_qr_ui (NULL
, NULL
, n
, d
, GMP_DIV_CEIL
);
2628 mpz_fdiv_ui (const mpz_t n
, unsigned long d
)
2630 return mpz_div_qr_ui (NULL
, NULL
, n
, d
, GMP_DIV_FLOOR
);
2634 mpz_tdiv_ui (const mpz_t n
, unsigned long d
)
2636 return mpz_div_qr_ui (NULL
, NULL
, n
, d
, GMP_DIV_TRUNC
);
2640 mpz_mod_ui (mpz_t r
, const mpz_t n
, unsigned long d
)
2642 return mpz_div_qr_ui (NULL
, r
, n
, d
, GMP_DIV_FLOOR
);
2646 mpz_divexact_ui (mpz_t q
, const mpz_t n
, unsigned long d
)
2648 gmp_assert_nocarry (mpz_div_qr_ui (q
, NULL
, n
, d
, GMP_DIV_TRUNC
));
2652 mpz_divisible_ui_p (const mpz_t n
, unsigned long d
)
2654 return mpz_div_qr_ui (NULL
, NULL
, n
, d
, GMP_DIV_TRUNC
) == 0;
2660 mpn_gcd_11 (mp_limb_t u
, mp_limb_t v
)
2664 assert ( (u
| v
) > 0);
2671 gmp_ctz (shift
, u
| v
);
2677 MP_LIMB_T_SWAP (u
, v
);
2679 while ( (v
& 1) == 0)
2689 while ( (u
& 1) == 0);
2696 while ( (v
& 1) == 0);
2703 mpz_gcd_ui (mpz_t g
, const mpz_t u
, unsigned long v
)
2706 mpz_init_set_ui(t
, v
);
2720 mpz_make_odd (mpz_t r
)
2724 assert (r
->_mp_size
> 0);
2725 /* Count trailing zeros, equivalent to mpn_scan1, because we know that there is a 1 */
2726 shift
= mpn_common_scan (r
->_mp_d
[0], 0, r
->_mp_d
, 0, 0);
2727 mpz_tdiv_q_2exp (r
, r
, shift
);
2733 mpz_gcd (mpz_t g
, const mpz_t u
, const mpz_t v
)
2736 mp_bitcnt_t uz
, vz
, gz
;
2738 if (u
->_mp_size
== 0)
2743 if (v
->_mp_size
== 0)
2753 uz
= mpz_make_odd (tu
);
2755 vz
= mpz_make_odd (tv
);
2756 gz
= GMP_MIN (uz
, vz
);
2758 if (tu
->_mp_size
< tv
->_mp_size
)
2761 mpz_tdiv_r (tu
, tu
, tv
);
2762 if (tu
->_mp_size
== 0)
2772 c
= mpz_cmp (tu
, tv
);
2781 if (tv
->_mp_size
== 1)
2783 mp_limb_t vl
= tv
->_mp_d
[0];
2784 mp_limb_t ul
= mpz_tdiv_ui (tu
, vl
);
2785 mpz_set_ui (g
, mpn_gcd_11 (ul
, vl
));
2788 mpz_sub (tu
, tu
, tv
);
2792 mpz_mul_2exp (g
, g
, gz
);
2796 mpz_gcdext (mpz_t g
, mpz_t s
, mpz_t t
, const mpz_t u
, const mpz_t v
)
2798 mpz_t tu
, tv
, s0
, s1
, t0
, t1
;
2799 mp_bitcnt_t uz
, vz
, gz
;
2802 if (u
->_mp_size
== 0)
2804 /* g = 0 u + sgn(v) v */
2805 signed long sign
= mpz_sgn (v
);
2810 mpz_set_si (t
, sign
);
2814 if (v
->_mp_size
== 0)
2816 /* g = sgn(u) u + 0 v */
2817 signed long sign
= mpz_sgn (u
);
2820 mpz_set_si (s
, sign
);
2834 uz
= mpz_make_odd (tu
);
2836 vz
= mpz_make_odd (tv
);
2837 gz
= GMP_MIN (uz
, vz
);
2842 /* Cofactors corresponding to odd gcd. gz handled later. */
2843 if (tu
->_mp_size
< tv
->_mp_size
)
2846 MPZ_SRCPTR_SWAP (u
, v
);
2847 MPZ_PTR_SWAP (s
, t
);
2848 MP_BITCNT_T_SWAP (uz
, vz
);
2856 * where u and v denote the inputs with common factors of two
2857 * eliminated, and det (s0, t0; s1, t1) = 2^p. Then
2859 * 2^p tu = s1 u - t1 v
2860 * 2^p tv = -s0 u + t0 v
2863 /* After initial division, tu = q tv + tu', we have
2865 * u = 2^uz (tu' + q tv)
2870 * t0 = 2^uz, t1 = 2^uz q
2874 mpz_setbit (t0
, uz
);
2875 mpz_tdiv_qr (t1
, tu
, tu
, tv
);
2876 mpz_mul_2exp (t1
, t1
, uz
);
2878 mpz_setbit (s1
, vz
);
2881 if (tu
->_mp_size
> 0)
2884 shift
= mpz_make_odd (tu
);
2885 mpz_mul_2exp (t0
, t0
, shift
);
2886 mpz_mul_2exp (s0
, s0
, shift
);
2892 c
= mpz_cmp (tu
, tv
);
2900 * u = t0 tu + t1 (tv' + tu) = (t0 + t1) tu + t1 tv'
2901 * v = s0 tu + s1 (tv' + tu) = (s0 + s1) tu + s1 tv' */
2903 mpz_sub (tv
, tv
, tu
);
2904 mpz_add (t0
, t0
, t1
);
2905 mpz_add (s0
, s0
, s1
);
2907 shift
= mpz_make_odd (tv
);
2908 mpz_mul_2exp (t1
, t1
, shift
);
2909 mpz_mul_2exp (s1
, s1
, shift
);
2913 mpz_sub (tu
, tu
, tv
);
2914 mpz_add (t1
, t0
, t1
);
2915 mpz_add (s1
, s0
, s1
);
2917 shift
= mpz_make_odd (tu
);
2918 mpz_mul_2exp (t0
, t0
, shift
);
2919 mpz_mul_2exp (s0
, s0
, shift
);
2925 /* Now tv = odd part of gcd, and -s0 and t0 are corresponding
2928 mpz_mul_2exp (tv
, tv
, gz
);
2931 /* 2^p g = s0 u + t0 v. Eliminate one factor of two at a time. To
2932 adjust cofactors, we need u / g and v / g */
2934 mpz_divexact (s1
, v
, tv
);
2936 mpz_divexact (t1
, u
, tv
);
2941 /* s0 u + t0 v = (s0 - v/g) u - (t0 + u/g) v */
2942 if (mpz_odd_p (s0
) || mpz_odd_p (t0
))
2944 mpz_sub (s0
, s0
, s1
);
2945 mpz_add (t0
, t0
, t1
);
2947 assert (mpz_even_p (t0
) && mpz_even_p (s0
));
2948 mpz_tdiv_q_2exp (s0
, s0
, 1);
2949 mpz_tdiv_q_2exp (t0
, t0
, 1);
2952 /* Arrange so that |s| < |u| / 2g */
2953 mpz_add (s1
, s0
, s1
);
2954 if (mpz_cmpabs (s0
, s1
) > 0)
2957 mpz_sub (t0
, t0
, t1
);
2959 if (u
->_mp_size
< 0)
2961 if (v
->_mp_size
< 0)
2979 mpz_lcm (mpz_t r
, const mpz_t u
, const mpz_t v
)
2983 if (u
->_mp_size
== 0 || v
->_mp_size
== 0)
2992 mpz_divexact (g
, u
, g
);
3000 mpz_lcm_ui (mpz_t r
, const mpz_t u
, unsigned long v
)
3002 if (v
== 0 || u
->_mp_size
== 0)
3008 v
/= mpz_gcd_ui (NULL
, u
, v
);
3009 mpz_mul_ui (r
, u
, v
);
3015 mpz_invert (mpz_t r
, const mpz_t u
, const mpz_t m
)
3020 if (u
->_mp_size
== 0 || mpz_cmpabs_ui (m
, 1) <= 0)
3026 mpz_gcdext (g
, tr
, NULL
, u
, m
);
3027 invertible
= (mpz_cmp_ui (g
, 1) == 0);
3031 if (tr
->_mp_size
< 0)
3033 if (m
->_mp_size
>= 0)
3034 mpz_add (tr
, tr
, m
);
3036 mpz_sub (tr
, tr
, m
);
3047 /* Higher level operations (sqrt, pow and root) */
3050 mpz_pow_ui (mpz_t r
, const mpz_t b
, unsigned long e
)
3054 mpz_init_set_ui (tr
, 1);
3056 bit
= GMP_ULONG_HIGHBIT
;
3059 mpz_mul (tr
, tr
, tr
);
3061 mpz_mul (tr
, tr
, b
);
3071 mpz_ui_pow_ui (mpz_t r
, unsigned long blimb
, unsigned long e
)
3075 mpz_init_set_ui (b
, blimb
);
3076 mpz_pow_ui (r
, b
, e
);
3081 mpz_powm (mpz_t r
, const mpz_t b
, const mpz_t e
, const mpz_t m
)
3087 struct gmp_div_inverse minv
;
3091 en
= GMP_ABS (e
->_mp_size
);
3092 mn
= GMP_ABS (m
->_mp_size
);
3094 gmp_die ("mpz_powm: Zero modulo.");
3103 mpn_div_qr_invert (&minv
, mp
, mn
);
3108 /* To avoid shifts, we do all our reductions, except the final
3109 one, using a *normalized* m. */
3112 tp
= gmp_alloc_limbs (mn
);
3113 gmp_assert_nocarry (mpn_lshift (tp
, mp
, mn
, shift
));
3119 if (e
->_mp_size
< 0)
3121 if (!mpz_invert (base
, b
, m
))
3122 gmp_die ("mpz_powm: Negative exponent and non-invertible base.");
3129 bn
= base
->_mp_size
;
3132 mpn_div_qr_preinv (NULL
, base
->_mp_d
, base
->_mp_size
, mp
, mn
, &minv
);
3136 /* We have reduced the absolute value. Now take care of the
3137 sign. Note that we get zero represented non-canonically as
3139 if (b
->_mp_size
< 0)
3141 mp_ptr bp
= MPZ_REALLOC (base
, mn
);
3142 gmp_assert_nocarry (mpn_sub (bp
, mp
, mn
, bp
, bn
));
3145 base
->_mp_size
= mpn_normalized_size (base
->_mp_d
, bn
);
3147 mpz_init_set_ui (tr
, 1);
3151 mp_limb_t w
= e
->_mp_d
[en
];
3154 bit
= GMP_LIMB_HIGHBIT
;
3157 mpz_mul (tr
, tr
, tr
);
3159 mpz_mul (tr
, tr
, base
);
3160 if (tr
->_mp_size
> mn
)
3162 mpn_div_qr_preinv (NULL
, tr
->_mp_d
, tr
->_mp_size
, mp
, mn
, &minv
);
3163 tr
->_mp_size
= mpn_normalized_size (tr
->_mp_d
, mn
);
3170 /* Final reduction */
3171 if (tr
->_mp_size
>= mn
)
3174 mpn_div_qr_preinv (NULL
, tr
->_mp_d
, tr
->_mp_size
, mp
, mn
, &minv
);
3175 tr
->_mp_size
= mpn_normalized_size (tr
->_mp_d
, mn
);
3178 gmp_free_limbs (tp
, mn
);
3186 mpz_powm_ui (mpz_t r
, const mpz_t b
, unsigned long elimb
, const mpz_t m
)
3190 mpz_init_set_ui (e
, elimb
);
3191 mpz_powm (r
, b
, e
, m
);
3195 /* x=trunc(y^(1/z)), r=y-x^z */
3197 mpz_rootrem (mpz_t x
, mpz_t r
, const mpz_t y
, unsigned long z
)
3202 sgn
= y
->_mp_size
< 0;
3203 if ((~z
& sgn
) != 0)
3204 gmp_die ("mpz_rootrem: Negative argument, with even root.");
3206 gmp_die ("mpz_rootrem: Zeroth root.");
3208 if (mpz_cmpabs_ui (y
, 1) <= 0) {
3218 mpz_setbit (t
, mpz_sizeinbase (y
, 2) / z
+ 1);
3220 if (z
== 2) /* simplify sqrt loop: z-1 == 1 */
3222 mpz_swap (u
, t
); /* u = x */
3223 mpz_tdiv_q (t
, y
, u
); /* t = y/x */
3224 mpz_add (t
, t
, u
); /* t = y/x + x */
3225 mpz_tdiv_q_2exp (t
, t
, 1); /* x'= (y/x + x)/2 */
3226 } while (mpz_cmpabs (t
, u
) < 0); /* |x'| < |x| */
3235 mpz_swap (u
, t
); /* u = x */
3236 mpz_pow_ui (t
, u
, z
- 1); /* t = x^(z-1) */
3237 mpz_tdiv_q (t
, y
, t
); /* t = y/x^(z-1) */
3238 mpz_mul_ui (v
, u
, z
- 1); /* v = x*(z-1) */
3239 mpz_add (t
, t
, v
); /* t = y/x^(z-1) + x*(z-1) */
3240 mpz_tdiv_q_ui (t
, t
, z
); /* x'=(y/x^(z-1) + x*(z-1))/z */
3241 } while (mpz_cmpabs (t
, u
) < 0); /* |x'| < |x| */
3247 mpz_pow_ui (t
, u
, z
);
3257 mpz_root (mpz_t x
, const mpz_t y
, unsigned long z
)
3263 mpz_rootrem (x
, r
, y
, z
);
3264 res
= r
->_mp_size
== 0;
3270 /* Compute s = floor(sqrt(u)) and r = u - s^2. Allows r == NULL */
3272 mpz_sqrtrem (mpz_t s
, mpz_t r
, const mpz_t u
)
3274 mpz_rootrem (s
, r
, u
, 2);
3278 mpz_sqrt (mpz_t s
, const mpz_t u
)
3280 mpz_rootrem (s
, NULL
, u
, 2);
3284 mpz_perfect_square_p (const mpz_t u
)
3286 if (u
->_mp_size
<= 0)
3287 return (u
->_mp_size
== 0);
3289 return mpz_root (NULL
, u
, 2);
3293 mpn_perfect_square_p (mp_srcptr p
, mp_size_t n
)
3298 assert (p
[n
-1] != 0);
3299 return mpz_root (NULL
, mpz_roinit_normal_n (t
, p
, n
), 2);
3303 mpn_sqrtrem (mp_ptr sp
, mp_ptr rp
, mp_srcptr p
, mp_size_t n
)
3309 assert (p
[n
-1] != 0);
3313 mpz_rootrem (s
, r
, mpz_roinit_normal_n (u
, p
, n
), 2);
3315 assert (s
->_mp_size
== (n
+1)/2);
3316 mpn_copyd (sp
, s
->_mp_d
, s
->_mp_size
);
3320 mpn_copyd (rp
, r
->_mp_d
, res
);
3328 mpz_mfac_uiui (mpz_t x
, unsigned long n
, unsigned long m
)
3330 mpz_set_ui (x
, n
+ (n
== 0));
3331 if (m
+ 1 < 2) return;
3333 mpz_mul_ui (x
, x
, n
-= m
);
3337 mpz_2fac_ui (mpz_t x
, unsigned long n
)
3339 mpz_mfac_uiui (x
, n
, 2);
3343 mpz_fac_ui (mpz_t x
, unsigned long n
)
3345 mpz_mfac_uiui (x
, n
, 1);
3349 mpz_bin_uiui (mpz_t r
, unsigned long n
, unsigned long k
)
3353 mpz_set_ui (r
, k
<= n
);
3356 k
= (k
<= n
) ? n
- k
: 0;
3362 mpz_mul_ui (r
, r
, n
--);
3364 mpz_divexact (r
, r
, t
);
3369 /* Primality testing */
3371 /* Computes Kronecker (a/b) with odd b, a!=0 and GCD(a,b) = 1 */
3372 /* Adapted from JACOBI_BASE_METHOD==4 in mpn/generic/jacbase.c */
3374 gmp_jacobi_coprime (mp_limb_t a
, mp_limb_t b
)
3380 /* assert (mpn_gcd_11 (a, b) == 1); */
3382 /* Below, we represent a and b shifted right so that the least
3383 significant one bit is implicit. */
3392 /* (2/b) = -1 if b = 3 or 5 mod 8 */
3393 bit
^= c
& (b
^ (b
>> 1));
3397 return bit
& 1 ? -1 : 1;
3414 gmp_lucas_step_k_2k (mpz_t V
, mpz_t Qk
, const mpz_t n
)
3416 mpz_mod (Qk
, Qk
, n
);
3417 /* V_{2k} <- V_k ^ 2 - 2Q^k */
3419 mpz_submul_ui (V
, Qk
, 2);
3420 mpz_tdiv_r (V
, V
, n
);
3421 /* Q^{2k} = (Q^k)^2 */
3422 mpz_mul (Qk
, Qk
, Qk
);
3425 /* Computes V_k, Q^k (mod n) for the Lucas' sequence */
3426 /* with P=1, Q=Q; k = (n>>b0)|1. */
3427 /* Requires an odd n > 4; b0 > 0; -2*Q must not overflow a long */
3428 /* Returns (U_k == 0) and sets V=V_k and Qk=Q^k. */
3430 gmp_lucas_mod (mpz_t V
, mpz_t Qk
, long Q
,
3431 mp_bitcnt_t b0
, const mpz_t n
)
3438 assert (Q
<= - (LONG_MIN
/ 2));
3439 assert (Q
>= - (LONG_MAX
/ 2));
3440 assert (mpz_cmp_ui (n
, 4) > 0);
3441 assert (mpz_odd_p (n
));
3443 mpz_init_set_ui (U
, 1); /* U1 = 1 */
3444 mpz_set_ui (V
, 1); /* V1 = 1 */
3447 for (bs
= mpz_sizeinbase (n
, 2) - 1; --bs
>= b0
;)
3449 /* U_{2k} <- U_k * V_k */
3451 /* V_{2k} <- V_k ^ 2 - 2Q^k */
3452 /* Q^{2k} = (Q^k)^2 */
3453 gmp_lucas_step_k_2k (V
, Qk
, n
);
3455 /* A step k->k+1 is performed if the bit in $n$ is 1 */
3456 /* mpz_tstbit(n,bs) or the bit is 0 in $n$ but */
3457 /* should be 1 in $n+1$ (bs == b0) */
3458 if (b0
== bs
|| mpz_tstbit (n
, bs
))
3460 /* Q^{k+1} <- Q^k * Q */
3461 mpz_mul_si (Qk
, Qk
, Q
);
3462 /* U_{k+1} <- (U_k + V_k) / 2 */
3463 mpz_swap (U
, V
); /* Keep in V the old value of U_k */
3465 /* We have to compute U/2, so we need an even value, */
3466 /* equivalent (mod n) */
3469 mpz_tdiv_q_2exp (U
, U
, 1);
3470 /* V_{k+1} <-(D*U_k + V_k) / 2 =
3471 U_{k+1} + (D-1)/2*U_k = U_{k+1} - 2Q*U_k */
3472 mpz_mul_si (V
, V
, -2*Q
);
3474 mpz_tdiv_r (V
, V
, n
);
3476 mpz_tdiv_r (U
, U
, n
);
3479 res
= U
->_mp_size
== 0;
3484 /* Performs strong Lucas' test on x, with parameters suggested */
3485 /* for the BPSW test. Qk is only passed to recycle a variable. */
3486 /* Requires GCD (x,6) = 1.*/
3488 gmp_stronglucas (const mpz_t x
, mpz_t Qk
)
3492 mp_limb_t maxD
, D
; /* The absolute value is stored. */
3496 /* Test on the absolute value. */
3497 mpz_roinit_normal_n (n
, x
->_mp_d
, GMP_ABS (x
->_mp_size
));
3499 assert (mpz_odd_p (n
));
3500 /* assert (mpz_gcd_ui (NULL, n, 6) == 1); */
3501 if (mpz_root (Qk
, n
, 2))
3502 return 0; /* A square is composite. */
3504 /* Check Ds up to square root (in case, n is prime)
3505 or avoid overflows */
3506 maxD
= (Qk
->_mp_size
== 1) ? Qk
->_mp_d
[0] - 1 : GMP_LIMB_MAX
;
3509 /* Search a D such that (D/n) = -1 in the sequence 5,-7,9,-11,.. */
3510 /* For those Ds we have (D/n) = (n/|D|) */
3514 return 1 + (D
!= GMP_LIMB_MAX
); /* (1 + ! ~ D) */
3516 tl
= mpz_tdiv_ui (n
, D
);
3520 while (gmp_jacobi_coprime (tl
, D
) == 1);
3524 /* n-(D/n) = n+1 = d*2^{b0}, with d = (n>>b0) | 1 */
3525 b0
= mpz_scan0 (n
, 0);
3527 /* D= P^2 - 4Q; P = 1; Q = (1-D)/4 */
3528 Q
= (D
& 2) ? (long) (D
>> 2) + 1 : -(long) (D
>> 2);
3530 if (! gmp_lucas_mod (V
, Qk
, Q
, b0
, n
)) /* If Ud != 0 */
3531 while (V
->_mp_size
!= 0 && --b0
!= 0) /* while Vk != 0 */
3532 /* V <- V ^ 2 - 2Q^k */
3533 /* Q^{2k} = (Q^k)^2 */
3534 gmp_lucas_step_k_2k (V
, Qk
, n
);
3541 gmp_millerrabin (const mpz_t n
, const mpz_t nm1
, mpz_t y
,
3542 const mpz_t q
, mp_bitcnt_t k
)
3546 /* Caller must initialize y to the base. */
3547 mpz_powm (y
, y
, q
, n
);
3549 if (mpz_cmp_ui (y
, 1) == 0 || mpz_cmp (y
, nm1
) == 0)
3554 mpz_powm_ui (y
, y
, 2, n
);
3555 if (mpz_cmp (y
, nm1
) == 0)
3557 /* y == 1 means that the previous y was a non-trivial square root
3558 of 1 (mod n). y == 0 means that n is a power of the base.
3559 In either case, n is not prime. */
3560 if (mpz_cmp_ui (y
, 1) <= 0)
3566 /* This product is 0xc0cfd797, and fits in 32 bits. */
3567 #define GMP_PRIME_PRODUCT \
3568 (3UL*5UL*7UL*11UL*13UL*17UL*19UL*23UL*29UL)
3570 /* Bit (p+1)/2 is set, for each odd prime <= 61 */
3571 #define GMP_PRIME_MASK 0xc96996dcUL
3574 mpz_probab_prime_p (const mpz_t n
, int reps
)
3583 /* Note that we use the absolute value of n only, for compatibility
3584 with the real GMP. */
3586 return (mpz_cmpabs_ui (n
, 2) == 0) ? 2 : 0;
3588 /* Above test excludes n == 0 */
3589 assert (n
->_mp_size
!= 0);
3591 if (mpz_cmpabs_ui (n
, 64) < 0)
3592 return (GMP_PRIME_MASK
>> (n
->_mp_d
[0] >> 1)) & 2;
3594 if (mpz_gcd_ui (NULL
, n
, GMP_PRIME_PRODUCT
) != 1)
3597 /* All prime factors are >= 31. */
3598 if (mpz_cmpabs_ui (n
, 31*31) < 0)
3604 /* Find q and k, where q is odd and n = 1 + 2**k * q. */
3607 k
= mpz_scan1 (nm1
, 0);
3608 mpz_tdiv_q_2exp (q
, nm1
, k
);
3611 mpz_init_set_ui (y
, 2);
3612 is_prime
= gmp_millerrabin (n
, nm1
, y
, q
, k
) && gmp_stronglucas (n
, y
);
3613 reps
-= 24; /* skip the first 24 repetitions */
3615 /* Use Miller-Rabin, with a deterministic sequence of bases, a[j] =
3616 j^2 + j + 41 using Euler's polynomial. We potentially stop early,
3617 if a[j] >= n - 1. Since n >= 31*31, this can happen only if reps >
3618 30 (a[30] == 971 > 31*31 == 961). */
3620 for (j
= 0; is_prime
& (j
< reps
); j
++)
3622 mpz_set_ui (y
, (unsigned long) j
*j
+j
+41);
3623 if (mpz_cmp (y
, nm1
) >= 0)
3625 /* Don't try any further bases. This "early" break does not affect
3626 the result for any reasonable reps value (<=5000 was tested) */
3630 is_prime
= gmp_millerrabin (n
, nm1
, y
, q
, k
);
3640 /* Logical operations and bit manipulation. */
3642 /* Numbers are treated as if represented in two's complement (and
3643 infinitely sign extended). For a negative values we get the two's
3644 complement from -x = ~x + 1, where ~ is bitwise complement.
3653 where yyyy is the bitwise complement of xxxx. So least significant
3654 bits, up to and including the first one bit, are unchanged, and
3655 the more significant bits are all complemented.
3657 To change a bit from zero to one in a negative number, subtract the
3658 corresponding power of two from the absolute value. This can never
3659 underflow. To change a bit from one to zero, add the corresponding
3660 power of two, and this might overflow. E.g., if x = -001111, the
3661 two's complement is 110001. Clearing the least significant bit, we
3662 get two's complement 110000, and -010000. */
3665 mpz_tstbit (const mpz_t d
, mp_bitcnt_t bit_index
)
3667 mp_size_t limb_index
;
3676 limb_index
= bit_index
/ GMP_LIMB_BITS
;
3677 if (limb_index
>= dn
)
3680 shift
= bit_index
% GMP_LIMB_BITS
;
3681 w
= d
->_mp_d
[limb_index
];
3682 bit
= (w
>> shift
) & 1;
3686 /* d < 0. Check if any of the bits below is set: If so, our bit
3687 must be complemented. */
3688 if (shift
> 0 && (mp_limb_t
) (w
<< (GMP_LIMB_BITS
- shift
)) > 0)
3690 while (--limb_index
>= 0)
3691 if (d
->_mp_d
[limb_index
] > 0)
3698 mpz_abs_add_bit (mpz_t d
, mp_bitcnt_t bit_index
)
3700 mp_size_t dn
, limb_index
;
3704 dn
= GMP_ABS (d
->_mp_size
);
3706 limb_index
= bit_index
/ GMP_LIMB_BITS
;
3707 bit
= (mp_limb_t
) 1 << (bit_index
% GMP_LIMB_BITS
);
3709 if (limb_index
>= dn
)
3712 /* The bit should be set outside of the end of the number.
3713 We have to increase the size of the number. */
3714 dp
= MPZ_REALLOC (d
, limb_index
+ 1);
3716 dp
[limb_index
] = bit
;
3717 for (i
= dn
; i
< limb_index
; i
++)
3719 dn
= limb_index
+ 1;
3727 cy
= mpn_add_1 (dp
+ limb_index
, dp
+ limb_index
, dn
- limb_index
, bit
);
3730 dp
= MPZ_REALLOC (d
, dn
+ 1);
3735 d
->_mp_size
= (d
->_mp_size
< 0) ? - dn
: dn
;
3739 mpz_abs_sub_bit (mpz_t d
, mp_bitcnt_t bit_index
)
3741 mp_size_t dn
, limb_index
;
3745 dn
= GMP_ABS (d
->_mp_size
);
3748 limb_index
= bit_index
/ GMP_LIMB_BITS
;
3749 bit
= (mp_limb_t
) 1 << (bit_index
% GMP_LIMB_BITS
);
3751 assert (limb_index
< dn
);
3753 gmp_assert_nocarry (mpn_sub_1 (dp
+ limb_index
, dp
+ limb_index
,
3754 dn
- limb_index
, bit
));
3755 dn
= mpn_normalized_size (dp
, dn
);
3756 d
->_mp_size
= (d
->_mp_size
< 0) ? - dn
: dn
;
3760 mpz_setbit (mpz_t d
, mp_bitcnt_t bit_index
)
3762 if (!mpz_tstbit (d
, bit_index
))
3764 if (d
->_mp_size
>= 0)
3765 mpz_abs_add_bit (d
, bit_index
);
3767 mpz_abs_sub_bit (d
, bit_index
);
3772 mpz_clrbit (mpz_t d
, mp_bitcnt_t bit_index
)
3774 if (mpz_tstbit (d
, bit_index
))
3776 if (d
->_mp_size
>= 0)
3777 mpz_abs_sub_bit (d
, bit_index
);
3779 mpz_abs_add_bit (d
, bit_index
);
3784 mpz_combit (mpz_t d
, mp_bitcnt_t bit_index
)
3786 if (mpz_tstbit (d
, bit_index
) ^ (d
->_mp_size
< 0))
3787 mpz_abs_sub_bit (d
, bit_index
);
3789 mpz_abs_add_bit (d
, bit_index
);
3793 mpz_com (mpz_t r
, const mpz_t u
)
3795 mpz_add_ui (r
, u
, 1);
3800 mpz_and (mpz_t r
, const mpz_t u
, const mpz_t v
)
3802 mp_size_t un
, vn
, rn
, i
;
3805 mp_limb_t ux
, vx
, rx
;
3806 mp_limb_t uc
, vc
, rc
;
3807 mp_limb_t ul
, vl
, rl
;
3809 un
= GMP_ABS (u
->_mp_size
);
3810 vn
= GMP_ABS (v
->_mp_size
);
3813 MPZ_SRCPTR_SWAP (u
, v
);
3814 MP_SIZE_T_SWAP (un
, vn
);
3822 uc
= u
->_mp_size
< 0;
3823 vc
= v
->_mp_size
< 0;
3830 /* If the smaller input is positive, higher limbs don't matter. */
3833 rp
= MPZ_REALLOC (r
, rn
+ (mp_size_t
) rc
);
3841 ul
= (up
[i
] ^ ux
) + uc
;
3844 vl
= (vp
[i
] ^ vx
) + vc
;
3847 rl
= ( (ul
& vl
) ^ rx
) + rc
;
3856 ul
= (up
[i
] ^ ux
) + uc
;
3859 rl
= ( (ul
& vx
) ^ rx
) + rc
;
3866 rn
= mpn_normalized_size (rp
, rn
);
3868 r
->_mp_size
= rx
? -rn
: rn
;
3872 mpz_ior (mpz_t r
, const mpz_t u
, const mpz_t v
)
3874 mp_size_t un
, vn
, rn
, i
;
3877 mp_limb_t ux
, vx
, rx
;
3878 mp_limb_t uc
, vc
, rc
;
3879 mp_limb_t ul
, vl
, rl
;
3881 un
= GMP_ABS (u
->_mp_size
);
3882 vn
= GMP_ABS (v
->_mp_size
);
3885 MPZ_SRCPTR_SWAP (u
, v
);
3886 MP_SIZE_T_SWAP (un
, vn
);
3894 uc
= u
->_mp_size
< 0;
3895 vc
= v
->_mp_size
< 0;
3902 /* If the smaller input is negative, by sign extension higher limbs
3906 rp
= MPZ_REALLOC (r
, rn
+ (mp_size_t
) rc
);
3914 ul
= (up
[i
] ^ ux
) + uc
;
3917 vl
= (vp
[i
] ^ vx
) + vc
;
3920 rl
= ( (ul
| vl
) ^ rx
) + rc
;
3929 ul
= (up
[i
] ^ ux
) + uc
;
3932 rl
= ( (ul
| vx
) ^ rx
) + rc
;
3939 rn
= mpn_normalized_size (rp
, rn
);
3941 r
->_mp_size
= rx
? -rn
: rn
;
3945 mpz_xor (mpz_t r
, const mpz_t u
, const mpz_t v
)
3947 mp_size_t un
, vn
, i
;
3950 mp_limb_t ux
, vx
, rx
;
3951 mp_limb_t uc
, vc
, rc
;
3952 mp_limb_t ul
, vl
, rl
;
3954 un
= GMP_ABS (u
->_mp_size
);
3955 vn
= GMP_ABS (v
->_mp_size
);
3958 MPZ_SRCPTR_SWAP (u
, v
);
3959 MP_SIZE_T_SWAP (un
, vn
);
3967 uc
= u
->_mp_size
< 0;
3968 vc
= v
->_mp_size
< 0;
3975 rp
= MPZ_REALLOC (r
, un
+ (mp_size_t
) rc
);
3983 ul
= (up
[i
] ^ ux
) + uc
;
3986 vl
= (vp
[i
] ^ vx
) + vc
;
3989 rl
= (ul
^ vl
^ rx
) + rc
;
3998 ul
= (up
[i
] ^ ux
) + uc
;
4001 rl
= (ul
^ ux
) + rc
;
4008 un
= mpn_normalized_size (rp
, un
);
4010 r
->_mp_size
= rx
? -un
: un
;
4014 gmp_popcount_limb (mp_limb_t x
)
4018 /* Do 16 bits at a time, to avoid limb-sized constants. */
4019 int LOCAL_SHIFT_BITS
= 16;
4022 unsigned w
= x
- ((x
>> 1) & 0x5555);
4023 w
= ((w
>> 2) & 0x3333) + (w
& 0x3333);
4025 w
= ((w
>> 8) & 0x000f) + (w
& 0x000f);
4027 if (GMP_LIMB_BITS
> LOCAL_SHIFT_BITS
)
4028 x
>>= LOCAL_SHIFT_BITS
;
4036 mpn_popcount (mp_srcptr p
, mp_size_t n
)
4041 for (c
= 0, i
= 0; i
< n
; i
++)
4042 c
+= gmp_popcount_limb (p
[i
]);
4048 mpz_popcount (const mpz_t u
)
4055 return ~(mp_bitcnt_t
) 0;
4057 return mpn_popcount (u
->_mp_d
, un
);
4061 mpz_hamdist (const mpz_t u
, const mpz_t v
)
4063 mp_size_t un
, vn
, i
;
4064 mp_limb_t uc
, vc
, ul
, vl
, comp
;
4072 return ~(mp_bitcnt_t
) 0;
4074 comp
= - (uc
= vc
= (un
< 0));
4086 MPN_SRCPTR_SWAP (up
, un
, vp
, vn
);
4088 for (i
= 0, c
= 0; i
< vn
; i
++)
4090 ul
= (up
[i
] ^ comp
) + uc
;
4093 vl
= (vp
[i
] ^ comp
) + vc
;
4096 c
+= gmp_popcount_limb (ul
^ vl
);
4102 ul
= (up
[i
] ^ comp
) + uc
;
4105 c
+= gmp_popcount_limb (ul
^ comp
);
4112 mpz_scan1 (const mpz_t u
, mp_bitcnt_t starting_bit
)
4115 mp_size_t us
, un
, i
;
4120 i
= starting_bit
/ GMP_LIMB_BITS
;
4122 /* Past the end there's no 1 bits for u>=0, or an immediate 1 bit
4123 for u<0. Notice this test picks up any u==0 too. */
4125 return (us
>= 0 ? ~(mp_bitcnt_t
) 0 : starting_bit
);
4131 if (starting_bit
!= 0)
4135 ux
= mpn_zero_p (up
, i
);
4137 ux
= - (mp_limb_t
) (limb
>= ux
);
4140 /* Mask to 0 all bits before starting_bit, thus ignoring them. */
4141 limb
&= GMP_LIMB_MAX
<< (starting_bit
% GMP_LIMB_BITS
);
4144 return mpn_common_scan (limb
, i
, up
, un
, ux
);
4148 mpz_scan0 (const mpz_t u
, mp_bitcnt_t starting_bit
)
4151 mp_size_t us
, un
, i
;
4155 ux
= - (mp_limb_t
) (us
>= 0);
4157 i
= starting_bit
/ GMP_LIMB_BITS
;
4159 /* When past end, there's an immediate 0 bit for u>=0, or no 0 bits for
4160 u<0. Notice this test picks up all cases of u==0 too. */
4162 return (ux
? starting_bit
: ~(mp_bitcnt_t
) 0);
4168 limb
-= mpn_zero_p (up
, i
); /* limb = ~(~limb + zero_p) */
4170 /* Mask all bits before starting_bit, thus ignoring them. */
4171 limb
&= GMP_LIMB_MAX
<< (starting_bit
% GMP_LIMB_BITS
);
4173 return mpn_common_scan (limb
, i
, up
, un
, ux
);
4177 /* MPZ base conversion. */
4180 mpz_sizeinbase (const mpz_t u
, int base
)
4186 struct gmp_div_inverse bi
;
4190 assert (base
<= 62);
4192 un
= GMP_ABS (u
->_mp_size
);
4198 bits
= (un
- 1) * GMP_LIMB_BITS
+ mpn_limb_size_in_base_2 (up
[un
-1]);
4204 return (bits
+ 1) / 2;
4206 return (bits
+ 2) / 3;
4208 return (bits
+ 3) / 4;
4210 return (bits
+ 4) / 5;
4211 /* FIXME: Do something more clever for the common case of base
4215 tp
= gmp_alloc_limbs (un
);
4216 mpn_copyi (tp
, up
, un
);
4217 mpn_div_qr_1_invert (&bi
, base
);
4224 mpn_div_qr_1_preinv (tp
, tp
, tn
, &bi
);
4225 tn
-= (tp
[tn
-1] == 0);
4229 gmp_free_limbs (tp
, un
);
4234 mpz_get_str (char *sp
, int base
, const mpz_t u
)
4241 digits
= "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz";
4245 digits
= "0123456789abcdefghijklmnopqrstuvwxyz";
4249 else if (base
>= -1)
4258 sn
= 1 + mpz_sizeinbase (u
, base
);
4262 sp
= (char *) gmp_alloc (osn
);
4266 un
= GMP_ABS (u
->_mp_size
);
4277 if (u
->_mp_size
< 0)
4280 bits
= mpn_base_power_of_two_p (base
);
4283 /* Not modified in this case. */
4284 sn
= i
+ mpn_get_str_bits ((unsigned char *) sp
+ i
, bits
, u
->_mp_d
, un
);
4287 struct mpn_base_info info
;
4290 mpn_get_base_info (&info
, base
);
4291 tp
= gmp_alloc_limbs (un
);
4292 mpn_copyi (tp
, u
->_mp_d
, un
);
4294 sn
= i
+ mpn_get_str_other ((unsigned char *) sp
+ i
, base
, &info
, tp
, un
);
4295 gmp_free_limbs (tp
, un
);
4299 sp
[i
] = digits
[(unsigned char) sp
[i
]];
4303 if (osn
&& osn
!= sn
+ 1)
4304 sp
= gmp_realloc(sp
, osn
, sn
+ 1);
4309 mpz_set_str (mpz_t r
, const char *sp
, int base
)
4311 unsigned bits
, value_of_a
;
4312 mp_size_t rn
, alloc
;
4318 assert (base
== 0 || (base
>= 2 && base
<= 62));
4320 while (isspace( (unsigned char) *sp
))
4323 sign
= (*sp
== '-');
4330 if (sp
[1] == 'x' || sp
[1] == 'X')
4335 else if (sp
[1] == 'b' || sp
[1] == 'B')
4353 dp
= (unsigned char *) gmp_alloc (sn
);
4355 value_of_a
= (base
> 36) ? 36 : 10;
4356 for (dn
= 0; *sp
; sp
++)
4360 if (isspace ((unsigned char) *sp
))
4362 else if (*sp
>= '0' && *sp
<= '9')
4364 else if (*sp
>= 'a' && *sp
<= 'z')
4365 digit
= *sp
- 'a' + value_of_a
;
4366 else if (*sp
>= 'A' && *sp
<= 'Z')
4367 digit
= *sp
- 'A' + 10;
4369 digit
= base
; /* fail */
4371 if (digit
>= (unsigned) base
)
4387 bits
= mpn_base_power_of_two_p (base
);
4391 alloc
= (dn
* bits
+ GMP_LIMB_BITS
- 1) / GMP_LIMB_BITS
;
4392 rp
= MPZ_REALLOC (r
, alloc
);
4393 rn
= mpn_set_str_bits (rp
, dp
, dn
, bits
);
4397 struct mpn_base_info info
;
4398 mpn_get_base_info (&info
, base
);
4399 alloc
= (dn
+ info
.exp
- 1) / info
.exp
;
4400 rp
= MPZ_REALLOC (r
, alloc
);
4401 rn
= mpn_set_str_other (rp
, dp
, dn
, base
, &info
);
4402 /* Normalization, needed for all-zero input. */
4404 rn
-= rp
[rn
-1] == 0;
4406 assert (rn
<= alloc
);
4409 r
->_mp_size
= sign
? - rn
: rn
;
4415 mpz_init_set_str (mpz_t r
, const char *sp
, int base
)
4418 return mpz_set_str (r
, sp
, base
);
4422 mpz_out_str (FILE *stream
, int base
, const mpz_t x
)
4427 str
= mpz_get_str (NULL
, base
, x
);
4429 n
= fwrite (str
, 1, len
, stream
);
4430 gmp_free (str
, len
+ 1);
/* Detect the host byte order at run time.  Returns 1 on a big-endian
   machine (most significant byte first) and -1 on a little-endian
   machine, matching the `endian' argument convention of
   mpz_import/mpz_export.  */
static int
gmp_detect_endian (void)
{
  static const int i = 2;
  const unsigned char *p = (const unsigned char *) &i;
  /* Little-endian stores the value 2 in the first byte (*p == 2,
     result -1); big-endian stores 0 there (result 1).  */
  return 1 - *p;
}
4443 /* Import and export. Does not support nails. */
4445 mpz_import (mpz_t r
, size_t count
, int order
, size_t size
, int endian
,
4446 size_t nails
, const void *src
)
4448 const unsigned char *p
;
4449 ptrdiff_t word_step
;
4453 /* The current (partial) limb. */
4455 /* The number of bytes already copied to this limb (starting from
4458 /* The index where the limb should be stored, when completed. */
4462 gmp_die ("mpz_import: Nails not supported.");
4464 assert (order
== 1 || order
== -1);
4465 assert (endian
>= -1 && endian
<= 1);
4468 endian
= gmp_detect_endian ();
4470 p
= (unsigned char *) src
;
4472 word_step
= (order
!= endian
) ? 2 * size
: 0;
4474 /* Process bytes from the least significant end, so point p at the
4475 least significant word. */
4478 p
+= size
* (count
- 1);
4479 word_step
= - word_step
;
4482 /* And at least significant byte of that word. */
4486 rn
= (size
* count
+ sizeof(mp_limb_t
) - 1) / sizeof(mp_limb_t
);
4487 rp
= MPZ_REALLOC (r
, rn
);
4489 for (limb
= 0, bytes
= 0, i
= 0; count
> 0; count
--, p
+= word_step
)
4492 for (j
= 0; j
< size
; j
++, p
-= (ptrdiff_t) endian
)
4494 limb
|= (mp_limb_t
) *p
<< (bytes
++ * CHAR_BIT
);
4495 if (bytes
== sizeof(mp_limb_t
))
4503 assert (i
+ (bytes
> 0) == rn
);
4507 i
= mpn_normalized_size (rp
, i
);
4513 mpz_export (void *r
, size_t *countp
, int order
, size_t size
, int endian
,
4514 size_t nails
, const mpz_t u
)
4520 gmp_die ("mpz_import: Nails not supported.");
4522 assert (order
== 1 || order
== -1);
4523 assert (endian
>= -1 && endian
<= 1);
4524 assert (size
> 0 || u
->_mp_size
== 0);
4532 ptrdiff_t word_step
;
4533 /* The current (partial) limb. */
4535 /* The number of bytes left to do in this limb. */
4537 /* The index where the limb was read. */
4542 /* Count bytes in top limb. */
4543 limb
= u
->_mp_d
[un
-1];
4546 k
= (GMP_LIMB_BITS
<= CHAR_BIT
);
4550 int LOCAL_CHAR_BIT
= CHAR_BIT
;
4551 k
++; limb
>>= LOCAL_CHAR_BIT
;
4552 } while (limb
!= 0);
4554 /* else limb = 0; */
4556 count
= (k
+ (un
-1) * sizeof (mp_limb_t
) + size
- 1) / size
;
4559 r
= gmp_alloc (count
* size
);
4562 endian
= gmp_detect_endian ();
4564 p
= (unsigned char *) r
;
4566 word_step
= (order
!= endian
) ? 2 * size
: 0;
4568 /* Process bytes from the least significant end, so point p at the
4569 least significant word. */
4572 p
+= size
* (count
- 1);
4573 word_step
= - word_step
;
4576 /* And at least significant byte of that word. */
4580 for (bytes
= 0, i
= 0, k
= 0; k
< count
; k
++, p
+= word_step
)
4583 for (j
= 0; j
< size
; ++j
, p
-= (ptrdiff_t) endian
)
4585 if (sizeof (mp_limb_t
) == 1)
4594 int LOCAL_CHAR_BIT
= CHAR_BIT
;
4598 limb
= u
->_mp_d
[i
++];
4599 bytes
= sizeof (mp_limb_t
);
4602 limb
>>= LOCAL_CHAR_BIT
;
4608 assert (k
== count
);