1 /* Include file for internal GNU MP types and definitions.
3 Copyright (C) 1991, 1993, 1994 Free Software Foundation, Inc.
5 This file is part of the GNU MP Library.
7 The GNU MP Library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Library General Public License as published by
9 the Free Software Foundation; either version 2 of the License, or (at your
10 option) any later version.
12 The GNU MP Library is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public
15 License for more details.
17 You should have received a copy of the GNU Library General Public License
18 along with the GNU MP Library; see the file COPYING.LIB. If not, write to
19 the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
21 #if ! defined (alloca)
22 #if defined (__GNUC__)
23 #define alloca __builtin_alloca
27 #if ! defined (alloca)
28 #if defined (__sparc__) || defined (sparc) || defined (__sgi)
37 #if ! defined (__GNUC__)
38 #define inline /* Empty */
/* Absolute value of X.  Each use of the argument is parenthesized so that
   expression arguments expand correctly: the old form (x >= 0 ? x : -x)
   mis-parsed ABS(a - b) as (a - b >= 0 ? a - b : -a - b).
   NOTE: X is evaluated more than once -- do not pass arguments with
   side effects.  */
#define ABS(x) ((x) >= 0 ? (x) : -(x))
/* Minimum and maximum of two values.  Arguments may be evaluated twice.  */
#define MIN(l,o) ((l) < (o) ? (l) : (o))
#define MAX(h,i) ((h) > (i) ? (h) : (i))
46 #include "gmp-mparam.h"
47 /* #include "longlong.h" */
50 void *malloc (size_t);
51 void *realloc (void *, size_t);
54 extern void * (*_mp_allocate_func
) (size_t);
55 extern void * (*_mp_reallocate_func
) (void *, size_t, size_t);
56 extern void (*_mp_free_func
) (void *, size_t);
58 void *_mp_default_allocate (size_t);
59 void *_mp_default_reallocate (void *, size_t, size_t);
60 void _mp_default_free (void *, size_t);
64 #define const /* Empty */
65 #define signed /* Empty */
71 extern void * (*_mp_allocate_func
) ();
72 extern void * (*_mp_reallocate_func
) ();
73 extern void (*_mp_free_func
) ();
75 void *_mp_default_allocate ();
76 void *_mp_default_reallocate ();
77 void _mp_default_free ();
80 /* Copy NLIMBS *limbs* from SRC to DST. */
81 #define MPN_COPY_INCR(DST, SRC, NLIMBS) \
84 for (__i = 0; __i < (NLIMBS); __i++) \
85 (DST)[__i] = (SRC)[__i]; \
87 #define MPN_COPY_DECR(DST, SRC, NLIMBS) \
90 for (__i = (NLIMBS) - 1; __i >= 0; __i--) \
91 (DST)[__i] = (SRC)[__i]; \
93 #define MPN_COPY MPN_COPY_INCR
95 /* Zero NLIMBS *limbs* AT DST. */
96 #define MPN_ZERO(DST, NLIMBS) \
99 for (__i = 0; __i < (NLIMBS); __i++) \
103 #define MPN_NORMALIZE(DST, NLIMBS) \
107 if ((DST)[(NLIMBS) - 1] != 0) \
112 #define MPN_NORMALIZE_NOT_ZERO(DST, NLIMBS) \
116 if ((DST)[(NLIMBS) - 1] != 0) \
122 /* Swap (mp_ptr, mp_size_t) (U, UL) with (V, VL) */
123 #define MPN_SWAP(u, l, v, m) \
125 { mp_ptr _; _ = (u), (u) = (v), (v) = _;} \
126 { mp_size_t _; _ = (l), (l) = (m), (m) = _;} \
/* Nonzero iff the limb X has fewer significant bits than the limb Y.
   X < (X ^ Y) holds exactly when Y has a set bit above X's highest set
   bit; the X < Y test rejects the remaining equal-width cases.
   Both arguments are evaluated more than once.  */
#define MPN_LESS_BITS_LIMB(x,y) (((x) < ((x) ^ (y))) && ((x) < (y)))
132 /* Return true iff (mp_ptr, mp_size_t) (U, UL) has less bits than (V, VL). */
133 #define MPN_LESS_BITS(u, l, v, m) \
135 || ((l) == (m) && (l) != 0 && MPN_LESS_BITS_LIMB ((u)[(l - 1)], (v)[(l) - 1])))
/* Return true iff (mp_ptr, mp_size_t) (U, UL) has more bits than (V, VL).
   Implemented by invoking MPN_LESS_BITS with the two operand pairs
   swapped; evaluation-count caveats of MPN_LESS_BITS apply here too.  */
#define MPN_MORE_BITS(u, l, v, m) MPN_LESS_BITS (v, m, u, l)
140 /* Perform twos complement on (mp_ptr, mp_size_t) (U, UL),
141 putting result at (v, VL). Precondition: U[0] != 0. */
142 #define MPN_COMPL_INCR(u, v, l) \
149 #define MPN_COMPL MPN_COMPL_INCR
151 /* Initialize the MP_INT X with space for NLIMBS limbs.
152 X should be a temporary variable, and it will be automatically
153 cleared out when the running function returns.
154 We use __x here to make it possible to accept both mpz_ptr and mpz_t
156 #define MPZ_TMP_INIT(X, NLIMBS) \
159 __x->alloc = (NLIMBS); \
160 __x->d = (mp_ptr) alloca ((NLIMBS) * BYTES_PER_MP_LIMB); \
163 #define MPN_MUL_N_RECURSE(prodp, up, vp, size, tspace) \
165 if ((size) < KARATSUBA_THRESHOLD) \
166 ____mpn_mul_n_basecase (prodp, up, vp, size); \
168 ____mpn_mul_n (prodp, up, vp, size, tspace); \
170 #define MPN_SQR_N_RECURSE(prodp, up, size, tspace) \
172 if ((size) < KARATSUBA_THRESHOLD) \
173 ____mpn_sqr_n_basecase (prodp, up, size); \
175 ____mpn_sqr_n (prodp, up, size, tspace); \
178 /* Structure for conversion between internal binary format and
179 strings in base 2..36. */
182 /* Number of digits in the conversion base that always fits in
183 an mp_limb. For example, for base 10 this is 10, since
184 2**32 = 4294967296 has ten digits. */
187 /* log(2)/log(conversion_base) */
188 float chars_per_bit_exactly
;
190 /* big_base is conversion_base**chars_per_limb, i.e. the biggest
191 number that fits a word, built by factors of conversion_base.
192 Exception: For 2, 4, 8, etc, big_base is log2(base), i.e. the
193 number of bits used to represent each digit in the base. */
196 /* big_base_inverted is a BITS_PER_MP_LIMB bit approximation to
197 1/big_base, represented as a fixed-point number. Instead of
198 dividing by big_base an application can choose to multiply
199 by big_base_inverted. */
200 mp_limb big_base_inverted
;
203 extern const struct bases __mp_bases
[];
204 extern mp_size_t __gmp_default_fp_limb_precision
;
206 /* Divide the two-limb number in (NH,,NL) by D, with DI being the largest
207 limb not larger than (2**(2*BITS_PER_MP_LIMB))/D - (2**BITS_PER_MP_LIMB).
208 If this would yield overflow, DI should be the largest possible number
209 (i.e., only ones). For correct operation, the most significant bit of D
210 has to be set. Put the quotient in Q and the remainder in R. */
211 #define udiv_qrnnd_preinv(q, r, nh, nl, d, di) \
213 mp_limb _q, _ql, _r; \
215 umul_ppmm (_q, _ql, (nh), (di)); \
216 _q += (nh); /* DI is 2**BITS_PER_MP_LIMB too small */\
217 umul_ppmm (_xh, _xl, _q, (d)); \
218 sub_ddmmss (_xh, _r, (nh), (nl), _xh, _xl); \
221 sub_ddmmss (_xh, _r, _xh, _r, 0, (d)); \
225 sub_ddmmss (_xh, _r, _xh, _r, 0, (d)); \
/* Like udiv_qrnnd_preinv, but for any value D.  DNORM is D shifted left
   so that its most significant bit is set.  LGUP is ceil(log2(D)).  */
239 #define udiv_qrnnd_preinv2gen(q, r, nh, nl, d, di, dnorm, lgup) \
241 mp_limb n2, n10, n1, nadj, q1; \
243 n2 = ((nh) << (BITS_PER_MP_LIMB - (lgup))) + ((nl) >> 1 >> (l - 1));\
244 n10 = (nl) << (BITS_PER_MP_LIMB - (lgup)); \
245 n1 = ((mp_limb_signed) n10 >> (BITS_PER_MP_LIMB - 1)); \
246 nadj = n10 + (n1 & (dnorm)); \
247 umul_ppmm (_xh, _xl, di, n2 - n1); \
248 add_ssaaaa (_xh, _xl, _xh, _xl, 0, nadj); \
250 umul_ppmm (_xh, _xl, q1, d); \
251 add_ssaaaa (_xh, _xl, _xh, _xl, nh, nl); \
253 (r) = _xl + ((d) & _xh); \
256 /* Exactly like udiv_qrnnd_preinv, but branch-free. It is not clear which
258 #define udiv_qrnnd_preinv2norm(q, r, nh, nl, d, di) \
260 mp_limb n2, n10, n1, nadj, q1; \
264 n1 = ((mp_limb_signed) n10 >> (BITS_PER_MP_LIMB - 1)); \
265 nadj = n10 + (n1 & (d)); \
266 umul_ppmm (_xh, _xl, di, n2 - n1); \
267 add_ssaaaa (_xh, _xl, _xh, _xl, 0, nadj); \
269 umul_ppmm (_xh, _xl, q1, d); \
270 add_ssaaaa (_xh, _xl, _xh, _xl, nh, nl); \
272 (r) = _xl + ((d) & _xh); \
276 #if defined (__GNUC__)
277 /* Define stuff for longlong.h. */
278 typedef unsigned int UQItype
__attribute__ ((mode (QI
)));
279 typedef int SItype
__attribute__ ((mode (SI
)));
280 typedef unsigned int USItype
__attribute__ ((mode (SI
)));
281 typedef int DItype
__attribute__ ((mode (DI
)));
282 typedef unsigned int UDItype
__attribute__ ((mode (DI
)));
284 typedef unsigned char UQItype
;
286 typedef unsigned long USItype
;
289 typedef mp_limb UWtype
;
290 typedef unsigned int UHWtype
;
291 #define W_TYPE_SIZE BITS_PER_MP_LIMB
294 #ifndef IEEE_DOUBLE_BIG_ENDIAN
295 #define IEEE_DOUBLE_BIG_ENDIAN 1
298 #if IEEE_DOUBLE_BIG_ENDIAN
299 union ieee_double_extract
304 unsigned long exp
:11;
305 unsigned long manh
:20;
306 unsigned long manl
:32;
311 union ieee_double_extract
315 unsigned long manl
:32;
316 unsigned long manh
:20;
317 unsigned long exp
:11;