release/src/router/gmp/gmp-impl.h
1 /* Include file for internal GNU MP types and definitions.
3 THE CONTENTS OF THIS FILE ARE FOR INTERNAL USE AND ARE ALMOST CERTAIN TO
4 BE SUBJECT TO INCOMPATIBLE CHANGES IN FUTURE GNU MP RELEASES.
6 Copyright 1991, 1993-1997, 1999-2014 Free Software Foundation, Inc.
8 This file is part of the GNU MP Library.
10 The GNU MP Library is free software; you can redistribute it and/or modify
11 it under the terms of either:
13 * the GNU Lesser General Public License as published by the Free
14 Software Foundation; either version 3 of the License, or (at your
15 option) any later version.
19 * the GNU General Public License as published by the Free Software
20 Foundation; either version 2 of the License, or (at your option) any
21 later version.
23 or both in parallel, as here.
25 The GNU MP Library is distributed in the hope that it will be useful, but
26 WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
27 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
28 for more details.
30 You should have received copies of the GNU General Public License and the
31 GNU Lesser General Public License along with the GNU MP Library. If not,
32 see https://www.gnu.org/licenses/. */
35 /* __GMP_DECLSPEC must be given on any global data that will be accessed
36 from outside libgmp, meaning from the test or development programs, or
37 from libgmpxx. Failing to do this will result in an incorrect address
38 being used for the accesses. On functions __GMP_DECLSPEC makes calls
39 from outside libgmp more efficient, but they'll still work fine without
40 it. */
43 #ifndef __GMP_IMPL_H__
44 #define __GMP_IMPL_H__
46 #if defined _CRAY
47 #include <intrinsics.h> /* for _popcnt */
48 #endif
50 /* For INT_MAX, etc. We used to avoid it because of a bug (on solaris,
51 gcc 2.95 under -mcpu=ultrasparc in ABI=32 ends up getting wrong
52 values (the ABI=64 values)), but it should be safe now.
54 On Cray vector systems, however, we need the system limits.h since sizes
55 of signed and unsigned types can differ there, depending on compiler
56 options (eg. -hnofastmd), making our SHRT_MAX etc expressions fail. For
57 reference, int can be 46 or 64 bits, whereas uint is always 64 bits; and
58 short can be 24, 32, 46 or 64 bits, and different for ushort. */
60 #include <limits.h>
62 /* For fat.h and other fat binary stuff.
63 No need for __GMP_ATTRIBUTE_PURE or __GMP_NOTHROW, since functions
64 declared this way are only used to set function pointers in __gmpn_cpuvec,
65 they're not called directly. */
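/* Illustrative note (not part of GMP): each DECL_* macro just expands to a
   prototype with the corresponding mpn argument pattern, so fat.h can declare
   every cpu-specific entry point once.  With a hypothetical name,

       DECL_add_n (__gmpn_add_n_example_cpu);

   expands to

       __GMP_DECLSPEC mp_limb_t __gmpn_add_n_example_cpu (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t);  */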
66 #define DECL_add_n(name) \
67 __GMP_DECLSPEC mp_limb_t name (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t)
68 #define DECL_addlsh1_n(name) \
69 DECL_add_n (name)
70 #define DECL_addlsh2_n(name) \
71 DECL_add_n (name)
72 #define DECL_addmul_1(name) \
73 __GMP_DECLSPEC mp_limb_t name (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t)
74 #define DECL_addmul_2(name) \
75 __GMP_DECLSPEC mp_limb_t name (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr)
76 #define DECL_bdiv_dbm1c(name) \
77 __GMP_DECLSPEC mp_limb_t name (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t)
78 #define DECL_cnd_add_n(name) \
79 __GMP_DECLSPEC mp_limb_t name (mp_limb_t, mp_ptr, mp_srcptr, mp_srcptr, mp_size_t)
80 #define DECL_cnd_sub_n(name) \
81 __GMP_DECLSPEC mp_limb_t name (mp_limb_t, mp_ptr, mp_srcptr, mp_srcptr, mp_size_t)
82 #define DECL_com(name) \
83 __GMP_DECLSPEC void name (mp_ptr, mp_srcptr, mp_size_t)
84 #define DECL_copyd(name) \
85 __GMP_DECLSPEC void name (mp_ptr, mp_srcptr, mp_size_t)
86 #define DECL_copyi(name) \
87 DECL_copyd (name)
88 #define DECL_divexact_1(name) \
89 __GMP_DECLSPEC void name (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t)
90 #define DECL_divexact_by3c(name) \
91 __GMP_DECLSPEC mp_limb_t name (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t)
92 #define DECL_divrem_1(name) \
93 __GMP_DECLSPEC mp_limb_t name (mp_ptr, mp_size_t, mp_srcptr, mp_size_t, mp_limb_t)
94 #define DECL_gcd_1(name) \
95 __GMP_DECLSPEC mp_limb_t name (mp_srcptr, mp_size_t, mp_limb_t)
96 #define DECL_lshift(name) \
97 __GMP_DECLSPEC mp_limb_t name (mp_ptr, mp_srcptr, mp_size_t, unsigned)
98 #define DECL_lshiftc(name) \
99 DECL_lshift (name)
100 #define DECL_mod_1(name) \
101 __GMP_DECLSPEC mp_limb_t name (mp_srcptr, mp_size_t, mp_limb_t)
102 #define DECL_mod_1_1p(name) \
103 __GMP_DECLSPEC mp_limb_t name (mp_srcptr, mp_size_t, mp_limb_t, const mp_limb_t [])
104 #define DECL_mod_1_1p_cps(name) \
105 __GMP_DECLSPEC void name (mp_limb_t cps[], mp_limb_t b)
106 #define DECL_mod_1s_2p(name) \
107 DECL_mod_1_1p (name)
108 #define DECL_mod_1s_2p_cps(name) \
109 DECL_mod_1_1p_cps (name)
110 #define DECL_mod_1s_4p(name) \
111 DECL_mod_1_1p (name)
112 #define DECL_mod_1s_4p_cps(name) \
113 DECL_mod_1_1p_cps (name)
114 #define DECL_mod_34lsub1(name) \
115 __GMP_DECLSPEC mp_limb_t name (mp_srcptr, mp_size_t)
116 #define DECL_modexact_1c_odd(name) \
117 __GMP_DECLSPEC mp_limb_t name (mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t)
118 #define DECL_mul_1(name) \
119 DECL_addmul_1 (name)
120 #define DECL_mul_basecase(name) \
121 __GMP_DECLSPEC void name (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t)
122 #define DECL_mullo_basecase(name) \
123 __GMP_DECLSPEC void name (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t)
124 #define DECL_preinv_divrem_1(name) \
125 __GMP_DECLSPEC mp_limb_t name (mp_ptr, mp_size_t, mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t, int)
126 #define DECL_preinv_mod_1(name) \
127 __GMP_DECLSPEC mp_limb_t name (mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t)
128 #define DECL_redc_1(name) \
129 __GMP_DECLSPEC mp_limb_t name (mp_ptr, mp_ptr, mp_srcptr, mp_size_t, mp_limb_t)
130 #define DECL_redc_2(name) \
131 __GMP_DECLSPEC mp_limb_t name (mp_ptr, mp_ptr, mp_srcptr, mp_size_t, mp_srcptr)
132 #define DECL_rshift(name) \
133 DECL_lshift (name)
134 #define DECL_sqr_basecase(name) \
135 __GMP_DECLSPEC void name (mp_ptr, mp_srcptr, mp_size_t)
136 #define DECL_sub_n(name) \
137 DECL_add_n (name)
138 #define DECL_sublsh1_n(name) \
139 DECL_add_n (name)
140 #define DECL_submul_1(name) \
141 DECL_addmul_1 (name)
143 #if ! defined (__GMP_WITHIN_CONFIGURE)
144 #include "config.h"
145 #include "gmp-mparam.h"
146 #include "fib_table.h"
147 #include "fac_table.h"
148 #include "mp_bases.h"
149 #if WANT_FAT_BINARY
150 #include "fat.h"
151 #endif
152 #endif
154 #if HAVE_INTTYPES_H /* for uint_least32_t */
155 # include <inttypes.h>
156 #else
157 # if HAVE_STDINT_H
158 # include <stdint.h>
159 # endif
160 #endif
162 #ifdef __cplusplus
163 #include <cstring> /* for strlen */
164 #include <string> /* for std::string */
165 #endif
168 #ifndef WANT_TMP_DEBUG /* for TMP_ALLOC_LIMBS_2 and others */
169 #define WANT_TMP_DEBUG 0
170 #endif
172 /* The following tries to get a good version of alloca. The tests are
173 adapted from autoconf AC_FUNC_ALLOCA, with a couple of additions.
174 Whether this succeeds is tested by GMP_FUNC_ALLOCA and HAVE_ALLOCA will
175 be setup appropriately.
177 ifndef alloca - a cpp define might already exist.
178 glibc <stdlib.h> includes <alloca.h> which uses GCC __builtin_alloca.
179 HP cc +Olibcalls adds a #define of alloca to __builtin_alloca.
181 GCC __builtin_alloca - preferred whenever available.
183 _AIX pragma - IBM compilers need a #pragma in "each module that needs to
184 use alloca". Pragma indented to protect pre-ANSI cpp's. _IBMR2 was
185 used in past versions of GMP, retained still in case it matters.
187 The autoconf manual says this pragma needs to be at the start of a C
188 file, apart from comments and preprocessor directives. Is that true?
189 xlc on aix 4.xxx doesn't seem to mind it being after prototypes etc
190 from gmp.h.
191 */
193 #ifndef alloca
194 # ifdef __GNUC__
195 # define alloca __builtin_alloca
196 # else
197 # ifdef __DECC
198 # define alloca(x) __ALLOCA(x)
199 # else
200 # ifdef _MSC_VER
201 # include <malloc.h>
202 # define alloca _alloca
203 # else
204 # if HAVE_ALLOCA_H
205 # include <alloca.h>
206 # else
207 # if defined (_AIX) || defined (_IBMR2)
208 #pragma alloca
209 # else
210 char *alloca ();
211 # endif
212 # endif
213 # endif
214 # endif
215 # endif
216 #endif
219 /* if not provided by gmp-mparam.h */
220 #ifndef GMP_LIMB_BYTES
221 #define GMP_LIMB_BYTES SIZEOF_MP_LIMB_T
222 #endif
223 #ifndef GMP_LIMB_BITS
224 #define GMP_LIMB_BITS (8 * SIZEOF_MP_LIMB_T)
225 #endif
227 #define BITS_PER_ULONG (8 * SIZEOF_UNSIGNED_LONG)
230 /* gmp_uint_least32_t is an unsigned integer type with at least 32 bits. */
231 #if HAVE_UINT_LEAST32_T
232 typedef uint_least32_t gmp_uint_least32_t;
233 #else
234 #if SIZEOF_UNSIGNED_SHORT >= 4
235 typedef unsigned short gmp_uint_least32_t;
236 #else
237 #if SIZEOF_UNSIGNED >= 4
238 typedef unsigned gmp_uint_least32_t;
239 #else
240 typedef unsigned long gmp_uint_least32_t;
241 #endif
242 #endif
243 #endif
246 /* gmp_intptr_t, for pointer to integer casts */
247 #if HAVE_INTPTR_T
248 typedef intptr_t gmp_intptr_t;
249 #else /* fallback */
250 typedef size_t gmp_intptr_t;
251 #endif
254 /* pre-inverse types for truncating division and modulo */
255 typedef struct {mp_limb_t inv32;} gmp_pi1_t;
256 typedef struct {mp_limb_t inv21, inv32, inv53;} gmp_pi2_t;
259 /* "const" basically means a function does nothing but examine its arguments
260 and give a return value, it doesn't read or write any memory (neither
261 global nor pointed to by arguments), and has no other side-effects. This
262 is more restrictive than "pure". See info node "(gcc)Function
263 Attributes". __GMP_NO_ATTRIBUTE_CONST_PURE lets tune/common.c etc turn
264 this off when trying to write timing loops. */
265 #if HAVE_ATTRIBUTE_CONST && ! defined (__GMP_NO_ATTRIBUTE_CONST_PURE)
266 #define ATTRIBUTE_CONST __attribute__ ((const))
267 #else
268 #define ATTRIBUTE_CONST
269 #endif
271 #if HAVE_ATTRIBUTE_NORETURN
272 #define ATTRIBUTE_NORETURN __attribute__ ((noreturn))
273 #else
274 #define ATTRIBUTE_NORETURN
275 #endif
277 /* "malloc" means a function behaves like malloc in that the pointer it
278 returns doesn't alias anything. */
279 #if HAVE_ATTRIBUTE_MALLOC
280 #define ATTRIBUTE_MALLOC __attribute__ ((malloc))
281 #else
282 #define ATTRIBUTE_MALLOC
283 #endif
286 #if ! HAVE_STRCHR
287 #define strchr(s,c) index(s,c)
288 #endif
290 #if ! HAVE_MEMSET
291 #define memset(p, c, n) \
292 do { \
293 ASSERT ((n) >= 0); \
294 char *__memset__p = (p); \
295 int __i; \
296 for (__i = 0; __i < (n); __i++) \
297 __memset__p[__i] = (c); \
298 } while (0)
299 #endif
301 /* va_copy is standard in C99, and gcc provides __va_copy when in strict C89
302 mode. Falling back to a memcpy will give maximum portability, since it
303 works no matter whether va_list is a pointer, struct or array. */
304 #if ! defined (va_copy) && defined (__va_copy)
305 #define va_copy(dst,src) __va_copy(dst,src)
306 #endif
307 #if ! defined (va_copy)
308 #define va_copy(dst,src) \
309 do { memcpy (&(dst), &(src), sizeof (va_list)); } while (0)
310 #endif
313 /* HAVE_HOST_CPU_alpha_CIX is 1 on an alpha with the CIX instructions
314 (ie. ctlz, ctpop, cttz). */
315 #if HAVE_HOST_CPU_alphaev67 || HAVE_HOST_CPU_alphaev68 \
316 || HAVE_HOST_CPU_alphaev7
317 #define HAVE_HOST_CPU_alpha_CIX 1
318 #endif
321 #if defined (__cplusplus)
322 extern "C" {
323 #endif
326 /* Usage: TMP_DECL;
327 TMP_MARK;
328 ptr = TMP_ALLOC (bytes);
329 TMP_FREE;
331 Small allocations should use TMP_SALLOC, big allocations should use
332 TMP_BALLOC. Allocations that might be small or big should use TMP_ALLOC.
334 Functions that use just TMP_SALLOC should use TMP_SDECL, TMP_SMARK, and
335 TMP_SFREE.
337 TMP_DECL just declares a variable, but might be empty and so must be last
338 in a list of variables. TMP_MARK must be done before any TMP_ALLOC.
339 TMP_ALLOC(0) is not allowed. TMP_FREE doesn't need to be done if a
340 TMP_MARK was made, but then no TMP_ALLOCs. */
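/* A minimal usage sketch (illustrative only, not part of GMP; assumes a
   nonzero limb count n, since TMP_ALLOC(0) is not allowed, and uses
   TMP_ALLOC_LIMBS from further down in this file):  */
#if 0
static void
example_tmp_user (mp_ptr rp, mp_srcptr ap, mp_size_t n)
{
  mp_ptr  scratch;
  TMP_DECL;                       /* must come last among the declarations */

  TMP_MARK;                       /* before any TMP_ALLOC */
  scratch = TMP_ALLOC_LIMBS (n);  /* n limbs of temporary space */
  mpn_copyi (scratch, ap, n);     /* ... work in the scratch space ... */
  mpn_copyi (rp, scratch, n);
  TMP_FREE;                       /* releases everything since TMP_MARK */
}
#endif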
342 /* The alignment in bytes, used for TMP_ALLOCed blocks, when alloca or
343 __gmp_allocate_func doesn't already determine it. Currently TMP_ALLOC
344 isn't used for "double"s, so that's not in the union. */
345 union tmp_align_t {
346 mp_limb_t l;
347 char *p;
348 };
349 #define __TMP_ALIGN sizeof (union tmp_align_t)
351 /* Return "a" rounded upwards to a multiple of "m", if it isn't already.
352 "a" must be an unsigned type.
353 This is designed for use with a compile-time constant "m".
354 The POW2 case is expected to be usual, and gcc 3.0 and up recognises
355 "(-(8*n))%8" or the like is always zero, which means the rounding up in
356 the WANT_TMP_NOTREENTRANT version of TMP_ALLOC below will be a noop. */
357 #define ROUND_UP_MULTIPLE(a,m) \
358 (POW2_P(m) ? (a) + (-(a))%(m) \
359 : (a)+(m)-1 - (((a)+(m)-1) % (m)))
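/* For instance (illustrative, "a" being unsigned as required): with m=8 the
   POW2_P branch applies and ROUND_UP_MULTIPLE (13, 8) = 13 + ((-13) % 8)
   = 13 + 3 = 16 (the unsigned negation wraps, leaving remainder 3), while
   ROUND_UP_MULTIPLE (16, 8) stays 16.  For a non-power-of-2 modulus,
   ROUND_UP_MULTIPLE (13, 12) = 13+12-1 - ((13+12-1) % 12) = 24.  */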
361 #if defined (WANT_TMP_ALLOCA) || defined (WANT_TMP_REENTRANT)
362 struct tmp_reentrant_t {
363 struct tmp_reentrant_t *next;
364 size_t size; /* bytes, including header */
365 };
366 __GMP_DECLSPEC void *__gmp_tmp_reentrant_alloc (struct tmp_reentrant_t **, size_t) ATTRIBUTE_MALLOC;
367 __GMP_DECLSPEC void __gmp_tmp_reentrant_free (struct tmp_reentrant_t *);
368 #endif
370 #if WANT_TMP_ALLOCA
371 #define TMP_SDECL
372 #define TMP_DECL struct tmp_reentrant_t *__tmp_marker
373 #define TMP_SMARK
374 #define TMP_MARK __tmp_marker = 0
375 #define TMP_SALLOC(n) alloca(n)
376 #define TMP_BALLOC(n) __gmp_tmp_reentrant_alloc (&__tmp_marker, n)
377 #define TMP_ALLOC(n) \
378 (LIKELY ((n) < 65536) ? TMP_SALLOC(n) : TMP_BALLOC(n))
379 #define TMP_SFREE
380 #define TMP_FREE \
381 do { \
382 if (UNLIKELY (__tmp_marker != 0)) \
383 __gmp_tmp_reentrant_free (__tmp_marker); \
384 } while (0)
385 #endif
387 #if WANT_TMP_REENTRANT
388 #define TMP_SDECL TMP_DECL
389 #define TMP_DECL struct tmp_reentrant_t *__tmp_marker
390 #define TMP_SMARK TMP_MARK
391 #define TMP_MARK __tmp_marker = 0
392 #define TMP_SALLOC(n) TMP_ALLOC(n)
393 #define TMP_BALLOC(n) TMP_ALLOC(n)
394 #define TMP_ALLOC(n) __gmp_tmp_reentrant_alloc (&__tmp_marker, n)
395 #define TMP_SFREE TMP_FREE
396 #define TMP_FREE __gmp_tmp_reentrant_free (__tmp_marker)
397 #endif
399 #if WANT_TMP_NOTREENTRANT
400 struct tmp_marker
401 {
402 struct tmp_stack *which_chunk;
403 void *alloc_point;
404 };
405 __GMP_DECLSPEC void *__gmp_tmp_alloc (unsigned long) ATTRIBUTE_MALLOC;
406 __GMP_DECLSPEC void __gmp_tmp_mark (struct tmp_marker *);
407 __GMP_DECLSPEC void __gmp_tmp_free (struct tmp_marker *);
408 #define TMP_SDECL TMP_DECL
409 #define TMP_DECL struct tmp_marker __tmp_marker
410 #define TMP_SMARK TMP_MARK
411 #define TMP_MARK __gmp_tmp_mark (&__tmp_marker)
412 #define TMP_SALLOC(n) TMP_ALLOC(n)
413 #define TMP_BALLOC(n) TMP_ALLOC(n)
414 #define TMP_ALLOC(n) \
415 __gmp_tmp_alloc (ROUND_UP_MULTIPLE ((unsigned long) (n), __TMP_ALIGN))
416 #define TMP_SFREE TMP_FREE
417 #define TMP_FREE __gmp_tmp_free (&__tmp_marker)
418 #endif
420 #if WANT_TMP_DEBUG
421 /* See tal-debug.c for some comments. */
422 struct tmp_debug_t {
423 struct tmp_debug_entry_t *list;
424 const char *file;
425 int line;
426 };
427 struct tmp_debug_entry_t {
428 struct tmp_debug_entry_t *next;
429 char *block;
430 size_t size;
431 };
432 __GMP_DECLSPEC void __gmp_tmp_debug_mark (const char *, int, struct tmp_debug_t **,
433 struct tmp_debug_t *,
434 const char *, const char *);
435 __GMP_DECLSPEC void *__gmp_tmp_debug_alloc (const char *, int, int,
436 struct tmp_debug_t **, const char *,
437 size_t) ATTRIBUTE_MALLOC;
438 __GMP_DECLSPEC void __gmp_tmp_debug_free (const char *, int, int,
439 struct tmp_debug_t **,
440 const char *, const char *);
441 #define TMP_SDECL TMP_DECL_NAME(__tmp_xmarker, "__tmp_marker")
442 #define TMP_DECL TMP_DECL_NAME(__tmp_xmarker, "__tmp_marker")
443 #define TMP_SMARK TMP_MARK_NAME(__tmp_xmarker, "__tmp_marker")
444 #define TMP_MARK TMP_MARK_NAME(__tmp_xmarker, "__tmp_marker")
445 #define TMP_SFREE TMP_FREE_NAME(__tmp_xmarker, "__tmp_marker")
446 #define TMP_FREE TMP_FREE_NAME(__tmp_xmarker, "__tmp_marker")
447 /* The marker variable is designed to provoke an uninitialized variable
448 warning from the compiler if TMP_FREE is used without a TMP_MARK.
449 __tmp_marker_inscope does the same for TMP_ALLOC. Runtime tests pick
450 these things up too. */
451 #define TMP_DECL_NAME(marker, marker_name) \
452 int marker; \
453 int __tmp_marker_inscope; \
454 const char *__tmp_marker_name = marker_name; \
455 struct tmp_debug_t __tmp_marker_struct; \
456 /* don't demand NULL, just cast a zero */ \
457 struct tmp_debug_t *__tmp_marker = (struct tmp_debug_t *) 0
458 #define TMP_MARK_NAME(marker, marker_name) \
459 do { \
460 marker = 1; \
461 __tmp_marker_inscope = 1; \
462 __gmp_tmp_debug_mark (ASSERT_FILE, ASSERT_LINE, \
463 &__tmp_marker, &__tmp_marker_struct, \
464 __tmp_marker_name, marker_name); \
465 } while (0)
466 #define TMP_SALLOC(n) TMP_ALLOC(n)
467 #define TMP_BALLOC(n) TMP_ALLOC(n)
468 #define TMP_ALLOC(size) \
469 __gmp_tmp_debug_alloc (ASSERT_FILE, ASSERT_LINE, \
470 __tmp_marker_inscope, \
471 &__tmp_marker, __tmp_marker_name, size)
472 #define TMP_FREE_NAME(marker, marker_name) \
473 do { \
474 __gmp_tmp_debug_free (ASSERT_FILE, ASSERT_LINE, \
475 marker, &__tmp_marker, \
476 __tmp_marker_name, marker_name); \
477 } while (0)
478 #endif /* WANT_TMP_DEBUG */
481 /* Allocating various types. */
482 #define TMP_ALLOC_TYPE(n,type) ((type *) TMP_ALLOC ((n) * sizeof (type)))
483 #define TMP_SALLOC_TYPE(n,type) ((type *) TMP_SALLOC ((n) * sizeof (type)))
484 #define TMP_BALLOC_TYPE(n,type) ((type *) TMP_BALLOC ((n) * sizeof (type)))
485 #define TMP_ALLOC_LIMBS(n) TMP_ALLOC_TYPE(n,mp_limb_t)
486 #define TMP_SALLOC_LIMBS(n) TMP_SALLOC_TYPE(n,mp_limb_t)
487 #define TMP_BALLOC_LIMBS(n) TMP_BALLOC_TYPE(n,mp_limb_t)
488 #define TMP_ALLOC_MP_PTRS(n) TMP_ALLOC_TYPE(n,mp_ptr)
489 #define TMP_SALLOC_MP_PTRS(n) TMP_SALLOC_TYPE(n,mp_ptr)
490 #define TMP_BALLOC_MP_PTRS(n) TMP_BALLOC_TYPE(n,mp_ptr)
492 /* It's more efficient to allocate one block than two. This is certainly
493 true of the malloc methods, but it can even be true of alloca if that
494 involves copying a chunk of stack (various RISCs), or a call to a stack
495 bounds check (mingw). In any case, when debugging keep separate blocks
496 so a redzoning malloc debugger can protect each individually. */
497 #define TMP_ALLOC_LIMBS_2(xp,xsize, yp,ysize) \
498 do { \
499 if (WANT_TMP_DEBUG) \
500 { \
501 (xp) = TMP_ALLOC_LIMBS (xsize); \
502 (yp) = TMP_ALLOC_LIMBS (ysize); \
503 } \
504 else \
505 { \
506 (xp) = TMP_ALLOC_LIMBS ((xsize) + (ysize)); \
507 (yp) = (xp) + (xsize); \
508 } \
509 } while (0)
512 /* From gmp.h, nicer names for internal use. */
513 #define CRAY_Pragma(str) __GMP_CRAY_Pragma(str)
514 #define MPN_CMP(result, xp, yp, size) __GMPN_CMP(result, xp, yp, size)
515 #define LIKELY(cond) __GMP_LIKELY(cond)
516 #define UNLIKELY(cond) __GMP_UNLIKELY(cond)
518 #define ABS(x) ((x) >= 0 ? (x) : -(x))
519 #define NEG_CAST(T,x) (- (__GMP_CAST (T, (x) + 1) - 1))
520 #define ABS_CAST(T,x) ((x) >= 0 ? __GMP_CAST (T, x) : NEG_CAST (T,x))
521 #undef MIN
522 #define MIN(l,o) ((l) < (o) ? (l) : (o))
523 #undef MAX
524 #define MAX(h,i) ((h) > (i) ? (h) : (i))
525 #define numberof(x) (sizeof (x) / sizeof ((x)[0]))
527 /* Field access macros. */
528 #define SIZ(x) ((x)->_mp_size)
529 #define ABSIZ(x) ABS (SIZ (x))
530 #define PTR(x) ((x)->_mp_d)
531 #define EXP(x) ((x)->_mp_exp)
532 #define PREC(x) ((x)->_mp_prec)
533 #define ALLOC(x) ((x)->_mp_alloc)
534 #define NUM(x) mpq_numref(x)
535 #define DEN(x) mpq_denref(x)
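/* Illustrative sketch (not part of GMP) of the field macros on an mpz_t:  */
#if 0
static void
example_dump_mpz (mpz_srcptr z)
{
  /* PTR is the limb array (least significant limb first), ABSIZ the limb
     count with the sign stripped, SIZ the signed size field, ALLOC the
     number of limbs currently allocated.  */
  mpn_dump (PTR (z), ABSIZ (z));        /* mpn_dump is declared below */
}
#endif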
537 /* n-1 inverts any low zeros and the lowest one bit. If n&(n-1) leaves zero
538 then that lowest one bit must have been the only bit set. n==0 will
539 return true though, so avoid that. */
540 #define POW2_P(n) (((n) & ((n) - 1)) == 0)
542 /* This is intended for constant THRESHOLDs only, where the compiler
543 can completely fold the result. */
544 #define LOG2C(n) \
545 (((n) >= 0x1) + ((n) >= 0x2) + ((n) >= 0x4) + ((n) >= 0x8) + \
546 ((n) >= 0x10) + ((n) >= 0x20) + ((n) >= 0x40) + ((n) >= 0x80) + \
547 ((n) >= 0x100) + ((n) >= 0x200) + ((n) >= 0x400) + ((n) >= 0x800) + \
548 ((n) >= 0x1000) + ((n) >= 0x2000) + ((n) >= 0x4000) + ((n) >= 0x8000))
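/* For reference (illustrative values): on these constants LOG2C(n) is the bit
   length of n, e.g. LOG2C(0)=0, LOG2C(1)=1, LOG2C(7)=3, LOG2C(8)=4,
   LOG2C(0x8000)=16.  */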
550 /* The "short" defines are a bit different because shorts are promoted to
551 ints by ~ or >> etc.
553 #ifndef's are used since on some systems (HP?) header files other than
554 limits.h setup these defines. We could forcibly #undef in that case, but
555 there seems no need to worry about that.
557 Now that we include <limits.h> we should be able to remove all this. */
559 #ifndef ULONG_MAX
560 #define ULONG_MAX __GMP_ULONG_MAX
561 #endif
562 #ifndef UINT_MAX
563 #define UINT_MAX __GMP_UINT_MAX
564 #endif
565 #ifndef USHRT_MAX
566 #define USHRT_MAX __GMP_USHRT_MAX
567 #endif
568 #define MP_LIMB_T_MAX (~ (mp_limb_t) 0)
570 /* Must cast ULONG_MAX etc to unsigned long etc, since they might not be
571 unsigned on a K&R compiler. In particular the HP-UX 10 bundled K&R cc
572 treats the plain decimal values in <limits.h> as signed. */
573 #define ULONG_HIGHBIT (ULONG_MAX ^ ((unsigned long) ULONG_MAX >> 1))
574 #define UINT_HIGHBIT (UINT_MAX ^ ((unsigned) UINT_MAX >> 1))
575 #define USHRT_HIGHBIT (USHRT_MAX ^ ((unsigned short) USHRT_MAX >> 1))
576 #define GMP_LIMB_HIGHBIT (MP_LIMB_T_MAX ^ (MP_LIMB_T_MAX >> 1))
578 #ifndef LONG_MIN
579 #define LONG_MIN ((long) ULONG_HIGHBIT)
580 #endif
581 #ifndef LONG_MAX
582 #define LONG_MAX (-(LONG_MIN+1))
583 #endif
585 #ifndef INT_MIN
586 #define INT_MIN ((int) UINT_HIGHBIT)
587 #endif
588 #ifndef INT_MAX
589 #define INT_MAX (-(INT_MIN+1))
590 #endif
592 #ifndef SHRT_MIN
593 #define SHRT_MIN ((int) (short) USHRT_HIGHBIT)
594 #endif
595 #ifndef SHRT_MAX
596 #define SHRT_MAX (-(SHRT_MIN+1))
597 #endif
599 #if __GMP_MP_SIZE_T_INT
600 #define MP_SIZE_T_MAX INT_MAX
601 #define MP_SIZE_T_MIN INT_MIN
602 #else
603 #define MP_SIZE_T_MAX LONG_MAX
604 #define MP_SIZE_T_MIN LONG_MIN
605 #endif
607 /* mp_exp_t is the same as mp_size_t */
608 #define MP_EXP_T_MAX MP_SIZE_T_MAX
609 #define MP_EXP_T_MIN MP_SIZE_T_MIN
611 #define LONG_HIGHBIT LONG_MIN
612 #define INT_HIGHBIT INT_MIN
613 #define SHRT_HIGHBIT SHRT_MIN
616 #define GMP_NUMB_HIGHBIT (CNST_LIMB(1) << (GMP_NUMB_BITS-1))
618 #if GMP_NAIL_BITS == 0
619 #define GMP_NAIL_LOWBIT CNST_LIMB(0)
620 #else
621 #define GMP_NAIL_LOWBIT (CNST_LIMB(1) << GMP_NUMB_BITS)
622 #endif
624 #if GMP_NAIL_BITS != 0
625 /* Set various *_THRESHOLD values to be used for nails. Thus we avoid using
626 code that has not yet been qualified. */
628 #undef DC_DIV_QR_THRESHOLD
629 #define DC_DIV_QR_THRESHOLD 50
631 #undef DIVREM_1_NORM_THRESHOLD
632 #undef DIVREM_1_UNNORM_THRESHOLD
633 #undef MOD_1_NORM_THRESHOLD
634 #undef MOD_1_UNNORM_THRESHOLD
635 #undef USE_PREINV_DIVREM_1
636 #undef DIVREM_2_THRESHOLD
637 #undef DIVEXACT_1_THRESHOLD
638 #define DIVREM_1_NORM_THRESHOLD MP_SIZE_T_MAX /* no preinv */
639 #define DIVREM_1_UNNORM_THRESHOLD MP_SIZE_T_MAX /* no preinv */
640 #define MOD_1_NORM_THRESHOLD MP_SIZE_T_MAX /* no preinv */
641 #define MOD_1_UNNORM_THRESHOLD MP_SIZE_T_MAX /* no preinv */
642 #define USE_PREINV_DIVREM_1 0 /* no preinv */
643 #define DIVREM_2_THRESHOLD MP_SIZE_T_MAX /* no preinv */
645 /* mpn/generic/mul_fft.c is not nails-capable. */
646 #undef MUL_FFT_THRESHOLD
647 #undef SQR_FFT_THRESHOLD
648 #define MUL_FFT_THRESHOLD MP_SIZE_T_MAX
649 #define SQR_FFT_THRESHOLD MP_SIZE_T_MAX
650 #endif
652 /* Swap macros. */
654 #define MP_LIMB_T_SWAP(x, y) \
655 do { \
656 mp_limb_t __mp_limb_t_swap__tmp = (x); \
657 (x) = (y); \
658 (y) = __mp_limb_t_swap__tmp; \
659 } while (0)
660 #define MP_SIZE_T_SWAP(x, y) \
661 do { \
662 mp_size_t __mp_size_t_swap__tmp = (x); \
663 (x) = (y); \
664 (y) = __mp_size_t_swap__tmp; \
665 } while (0)
667 #define MP_PTR_SWAP(x, y) \
668 do { \
669 mp_ptr __mp_ptr_swap__tmp = (x); \
670 (x) = (y); \
671 (y) = __mp_ptr_swap__tmp; \
672 } while (0)
673 #define MP_SRCPTR_SWAP(x, y) \
674 do { \
675 mp_srcptr __mp_srcptr_swap__tmp = (x); \
676 (x) = (y); \
677 (y) = __mp_srcptr_swap__tmp; \
678 } while (0)
680 #define MPN_PTR_SWAP(xp,xs, yp,ys) \
681 do { \
682 MP_PTR_SWAP (xp, yp); \
683 MP_SIZE_T_SWAP (xs, ys); \
684 } while(0)
685 #define MPN_SRCPTR_SWAP(xp,xs, yp,ys) \
686 do { \
687 MP_SRCPTR_SWAP (xp, yp); \
688 MP_SIZE_T_SWAP (xs, ys); \
689 } while(0)
691 #define MPZ_PTR_SWAP(x, y) \
692 do { \
693 mpz_ptr __mpz_ptr_swap__tmp = (x); \
694 (x) = (y); \
695 (y) = __mpz_ptr_swap__tmp; \
696 } while (0)
697 #define MPZ_SRCPTR_SWAP(x, y) \
698 do { \
699 mpz_srcptr __mpz_srcptr_swap__tmp = (x); \
700 (x) = (y); \
701 (y) = __mpz_srcptr_swap__tmp; \
702 } while (0)
705 /* Enhancement: __gmp_allocate_func could have "__attribute__ ((malloc))",
706 but current gcc (3.0) doesn't seem to support that. */
707 __GMP_DECLSPEC extern void * (*__gmp_allocate_func) (size_t);
708 __GMP_DECLSPEC extern void * (*__gmp_reallocate_func) (void *, size_t, size_t);
709 __GMP_DECLSPEC extern void (*__gmp_free_func) (void *, size_t);
711 __GMP_DECLSPEC void *__gmp_default_allocate (size_t);
712 __GMP_DECLSPEC void *__gmp_default_reallocate (void *, size_t, size_t);
713 __GMP_DECLSPEC void __gmp_default_free (void *, size_t);
715 #define __GMP_ALLOCATE_FUNC_TYPE(n,type) \
716 ((type *) (*__gmp_allocate_func) ((n) * sizeof (type)))
717 #define __GMP_ALLOCATE_FUNC_LIMBS(n) __GMP_ALLOCATE_FUNC_TYPE (n, mp_limb_t)
719 #define __GMP_REALLOCATE_FUNC_TYPE(p, old_size, new_size, type) \
720 ((type *) (*__gmp_reallocate_func) \
721 (p, (old_size) * sizeof (type), (new_size) * sizeof (type)))
722 #define __GMP_REALLOCATE_FUNC_LIMBS(p, old_size, new_size) \
723 __GMP_REALLOCATE_FUNC_TYPE(p, old_size, new_size, mp_limb_t)
725 #define __GMP_FREE_FUNC_TYPE(p,n,type) (*__gmp_free_func) (p, (n) * sizeof (type))
726 #define __GMP_FREE_FUNC_LIMBS(p,n) __GMP_FREE_FUNC_TYPE (p, n, mp_limb_t)
728 #define __GMP_REALLOCATE_FUNC_MAYBE(ptr, oldsize, newsize) \
729 do { \
730 if ((oldsize) != (newsize)) \
731 (ptr) = (*__gmp_reallocate_func) (ptr, oldsize, newsize); \
732 } while (0)
734 #define __GMP_REALLOCATE_FUNC_MAYBE_TYPE(ptr, oldsize, newsize, type) \
735 do { \
736 if ((oldsize) != (newsize)) \
737 (ptr) = (type *) (*__gmp_reallocate_func) \
738 (ptr, (oldsize) * sizeof (type), (newsize) * sizeof (type)); \
739 } while (0)
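/* Illustrative sketch (not part of GMP): growing and freeing a limb buffer
   through the user-installable memory functions.  The helper name is
   hypothetical.  */
#if 0
static mp_ptr
example_grow_limbs (mp_ptr p, mp_size_t old_n, mp_size_t new_n)
{
  if (p == NULL)
    return __GMP_ALLOCATE_FUNC_LIMBS (new_n);
  return __GMP_REALLOCATE_FUNC_LIMBS (p, old_n, new_n);
}
/* ... and eventually:  __GMP_FREE_FUNC_LIMBS (p, new_n);  */
#endif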
742 /* Dummy for non-gcc, code involving it will go dead. */
743 #if ! defined (__GNUC__) || __GNUC__ < 2
744 #define __builtin_constant_p(x) 0
745 #endif
748 /* In gcc 2.96 and up on i386, tail calls are optimized to jumps if the
749 stack usage is compatible. __attribute__ ((regparm (N))) helps by
750 putting leading parameters in registers, avoiding extra stack.
752 regparm cannot be used with calls going through the PLT, because the
753 binding code there may clobber the registers (%eax, %edx, %ecx) used for
754 the regparm parameters. Calls to local (ie. static) functions could
755 still use this, if we cared to differentiate locals and globals.
757 On athlon-unknown-freebsd4.9 with gcc 3.3.3, regparm cannot be used with
758 -p or -pg profiling, since that version of gcc doesn't realize the
759 .mcount calls will clobber the parameter registers. Other systems are
760 ok, like debian with glibc 2.3.2 (mcount doesn't clobber), but we don't
761 bother to try to detect this. regparm is only an optimization so we just
762 disable it when profiling (profiling being a slowdown anyway). */
764 #if HAVE_HOST_CPU_FAMILY_x86 && __GMP_GNUC_PREREQ (2,96) && ! defined (PIC) \
765 && ! WANT_PROFILING_PROF && ! WANT_PROFILING_GPROF
766 #define USE_LEADING_REGPARM 1
767 #else
768 #define USE_LEADING_REGPARM 0
769 #endif
771 /* Macros for altering parameter order according to regparm usage. */
772 #if USE_LEADING_REGPARM
773 #define REGPARM_2_1(a,b,x) x,a,b
774 #define REGPARM_3_1(a,b,c,x) x,a,b,c
775 #define REGPARM_ATTR(n) __attribute__ ((regparm (n)))
776 #else
777 #define REGPARM_2_1(a,b,x) a,b,x
778 #define REGPARM_3_1(a,b,c,x) a,b,c,x
779 #define REGPARM_ATTR(n)
780 #endif
783 /* ASM_L gives a local label for a gcc asm block, for use when temporary
784 local labels like "1:" might not be available, which is the case for
785 instance on the x86s (the SCO assembler doesn't support them).
787 The label generated is made unique by including "%=" which is a unique
788 number for each insn. This ensures the same name can be used in multiple
789 asm blocks, perhaps via a macro. Since jumps between asm blocks are not
790 allowed there's no need for a label to be usable outside a single
791 block. */
793 #define ASM_L(name) LSYM_PREFIX "asm_%=_" #name
796 #if defined (__GNUC__) && HAVE_HOST_CPU_FAMILY_x86
797 #if 0
798 /* FIXME: Check that these actually improve things.
799 FIXME: Need a cld after each std.
800 FIXME: Can't have inputs in clobbered registers, must describe them as
801 dummy outputs, and add volatile. */
802 #define MPN_COPY_INCR(DST, SRC, N) \
803 __asm__ ("cld\n\trep\n\tmovsl" : : \
804 "D" (DST), "S" (SRC), "c" (N) : \
805 "cx", "di", "si", "memory")
806 #define MPN_COPY_DECR(DST, SRC, N) \
807 __asm__ ("std\n\trep\n\tmovsl" : : \
808 "D" ((DST) + (N) - 1), "S" ((SRC) + (N) - 1), "c" (N) : \
809 "cx", "di", "si", "memory")
810 #endif
811 #endif
814 __GMP_DECLSPEC void __gmpz_aorsmul_1 (REGPARM_3_1 (mpz_ptr, mpz_srcptr, mp_limb_t, mp_size_t)) REGPARM_ATTR(1);
815 #define mpz_aorsmul_1(w,u,v,sub) __gmpz_aorsmul_1 (REGPARM_3_1 (w, u, v, sub))
817 #define mpz_n_pow_ui __gmpz_n_pow_ui
818 __GMP_DECLSPEC void mpz_n_pow_ui (mpz_ptr, mp_srcptr, mp_size_t, unsigned long);
821 #define mpn_addmul_1c __MPN(addmul_1c)
822 __GMP_DECLSPEC mp_limb_t mpn_addmul_1c (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t);
824 #ifndef mpn_addmul_2 /* if not done with cpuvec in a fat binary */
825 #define mpn_addmul_2 __MPN(addmul_2)
826 __GMP_DECLSPEC mp_limb_t mpn_addmul_2 (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr);
827 #endif
829 #define mpn_addmul_3 __MPN(addmul_3)
830 __GMP_DECLSPEC mp_limb_t mpn_addmul_3 (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr);
832 #define mpn_addmul_4 __MPN(addmul_4)
833 __GMP_DECLSPEC mp_limb_t mpn_addmul_4 (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr);
835 #define mpn_addmul_5 __MPN(addmul_5)
836 __GMP_DECLSPEC mp_limb_t mpn_addmul_5 (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr);
838 #define mpn_addmul_6 __MPN(addmul_6)
839 __GMP_DECLSPEC mp_limb_t mpn_addmul_6 (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr);
841 #define mpn_addmul_7 __MPN(addmul_7)
842 __GMP_DECLSPEC mp_limb_t mpn_addmul_7 (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr);
844 #define mpn_addmul_8 __MPN(addmul_8)
845 __GMP_DECLSPEC mp_limb_t mpn_addmul_8 (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr);
847 /* Alternative entry point in mpn_addmul_2 for the benefit of mpn_sqr_basecase. */
848 #define mpn_addmul_2s __MPN(addmul_2s)
849 __GMP_DECLSPEC mp_limb_t mpn_addmul_2s (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr);
851 /* mpn_addlsh1_n(c,a,b,n), when it exists, sets {c,n} to {a,n}+2*{b,n}, and
852 returns the carry out (0, 1 or 2). Use _ip1 when a=c. */
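/* E.g. (illustrative, n==1): with a[0]=5 and b[0]=GMP_NUMB_MAX the sum
   5 + 2*GMP_NUMB_MAX leaves c[0]=3 with a carry out of 2, which is why the
   return value can reach 2.  */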
853 #ifndef mpn_addlsh1_n /* if not done with cpuvec in a fat binary */
854 #define mpn_addlsh1_n __MPN(addlsh1_n)
855 __GMP_DECLSPEC mp_limb_t mpn_addlsh1_n (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t);
856 #endif
857 #define mpn_addlsh1_nc __MPN(addlsh1_nc)
858 __GMP_DECLSPEC mp_limb_t mpn_addlsh1_nc (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t, mp_limb_t);
859 #if HAVE_NATIVE_mpn_addlsh1_n && ! HAVE_NATIVE_mpn_addlsh1_n_ip1
860 #define mpn_addlsh1_n_ip1(dst,src,n) mpn_addlsh1_n(dst,dst,src,n)
861 #define HAVE_NATIVE_mpn_addlsh1_n_ip1 1
862 #else
863 #define mpn_addlsh1_n_ip1 __MPN(addlsh1_n_ip1)
864 __GMP_DECLSPEC mp_limb_t mpn_addlsh1_n_ip1 (mp_ptr, mp_srcptr, mp_size_t);
865 #endif
866 #if HAVE_NATIVE_mpn_addlsh1_nc && ! HAVE_NATIVE_mpn_addlsh1_nc_ip1
867 #define mpn_addlsh1_nc_ip1(dst,src,n,c) mpn_addlsh1_nc(dst,dst,src,n,c)
868 #define HAVE_NATIVE_mpn_addlsh1_nc_ip1 1
869 #else
870 #define mpn_addlsh1_nc_ip1 __MPN(addlsh1_nc_ip1)
871 __GMP_DECLSPEC mp_limb_t mpn_addlsh1_nc_ip1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t);
872 #endif
874 #ifndef mpn_addlsh2_n /* if not done with cpuvec in a fat binary */
875 /* mpn_addlsh2_n(c,a,b,n), when it exists, sets {c,n} to {a,n}+4*{b,n}, and
876 returns the carry out (0, ..., 4). Use _ip1 when a=c. */
877 #define mpn_addlsh2_n __MPN(addlsh2_n)
878 __GMP_DECLSPEC mp_limb_t mpn_addlsh2_n (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t);
879 #endif
880 #define mpn_addlsh2_nc __MPN(addlsh2_nc)
881 __GMP_DECLSPEC mp_limb_t mpn_addlsh2_nc (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t, mp_limb_t);
882 #if HAVE_NATIVE_mpn_addlsh2_n && ! HAVE_NATIVE_mpn_addlsh2_n_ip1
883 #define mpn_addlsh2_n_ip1(dst,src,n) mpn_addlsh2_n(dst,dst,src,n)
884 #define HAVE_NATIVE_mpn_addlsh2_n_ip1 1
885 #else
886 #define mpn_addlsh2_n_ip1 __MPN(addlsh2_n_ip1)
887 __GMP_DECLSPEC mp_limb_t mpn_addlsh2_n_ip1 (mp_ptr, mp_srcptr, mp_size_t);
888 #endif
889 #if HAVE_NATIVE_mpn_addlsh2_nc && ! HAVE_NATIVE_mpn_addlsh2_nc_ip1
890 #define mpn_addlsh2_nc_ip1(dst,src,n,c) mpn_addlsh2_nc(dst,dst,src,n,c)
891 #define HAVE_NATIVE_mpn_addlsh2_nc_ip1 1
892 #else
893 #define mpn_addlsh2_nc_ip1 __MPN(addlsh2_nc_ip1)
894 __GMP_DECLSPEC mp_limb_t mpn_addlsh2_nc_ip1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t);
895 #endif
897 /* mpn_addlsh_n(c,a,b,n,k), when it exists, sets {c,n} to {a,n}+2^k*{b,n}, and
898 returns the carry out (0, ..., 2^k). Use _ip1 when a=c. */
899 #define mpn_addlsh_n __MPN(addlsh_n)
900 __GMP_DECLSPEC mp_limb_t mpn_addlsh_n (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t, unsigned int);
901 #define mpn_addlsh_nc __MPN(addlsh_nc)
902 __GMP_DECLSPEC mp_limb_t mpn_addlsh_nc (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t, unsigned int, mp_limb_t);
903 #if HAVE_NATIVE_mpn_addlsh_n && ! HAVE_NATIVE_mpn_addlsh_n_ip1
904 #define mpn_addlsh_n_ip1(dst,src,n,s) mpn_addlsh_n(dst,dst,src,n,s)
905 #define HAVE_NATIVE_mpn_addlsh_n_ip1 1
906 #else
907 #define mpn_addlsh_n_ip1 __MPN(addlsh_n_ip1)
908 __GMP_DECLSPEC mp_limb_t mpn_addlsh_n_ip1 (mp_ptr, mp_srcptr, mp_size_t, unsigned int);
909 #endif
910 #if HAVE_NATIVE_mpn_addlsh_nc && ! HAVE_NATIVE_mpn_addlsh_nc_ip1
911 #define mpn_addlsh_nc_ip1(dst,src,n,s,c) mpn_addlsh_nc(dst,dst,src,n,s,c)
912 #define HAVE_NATIVE_mpn_addlsh_nc_ip1 1
913 #else
914 #define mpn_addlsh_nc_ip1 __MPN(addlsh_nc_ip1)
915 __GMP_DECLSPEC mp_limb_t mpn_addlsh_nc_ip1 (mp_ptr, mp_srcptr, mp_size_t, unsigned int, mp_limb_t);
916 #endif
918 #ifndef mpn_sublsh1_n /* if not done with cpuvec in a fat binary */
919 /* mpn_sublsh1_n(c,a,b,n), when it exists, sets {c,n} to {a,n}-2*{b,n}, and
920 returns the borrow out (0, 1 or 2). Use _ip1 when a=c. */
921 #define mpn_sublsh1_n __MPN(sublsh1_n)
922 __GMP_DECLSPEC mp_limb_t mpn_sublsh1_n (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t);
923 #endif
924 #define mpn_sublsh1_nc __MPN(sublsh1_nc)
925 __GMP_DECLSPEC mp_limb_t mpn_sublsh1_nc (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t, mp_limb_t);
926 #if HAVE_NATIVE_mpn_sublsh1_n && ! HAVE_NATIVE_mpn_sublsh1_n_ip1
927 #define mpn_sublsh1_n_ip1(dst,src,n) mpn_sublsh1_n(dst,dst,src,n)
928 #define HAVE_NATIVE_mpn_sublsh1_n_ip1 1
929 #else
930 #define mpn_sublsh1_n_ip1 __MPN(sublsh1_n_ip1)
931 __GMP_DECLSPEC mp_limb_t mpn_sublsh1_n_ip1 (mp_ptr, mp_srcptr, mp_size_t);
932 #endif
933 #if HAVE_NATIVE_mpn_sublsh1_nc && ! HAVE_NATIVE_mpn_sublsh1_nc_ip1
934 #define mpn_sublsh1_nc_ip1(dst,src,n,c) mpn_sublsh1_nc(dst,dst,src,n,c)
935 #define HAVE_NATIVE_mpn_sublsh1_nc_ip1 1
936 #else
937 #define mpn_sublsh1_nc_ip1 __MPN(sublsh1_nc_ip1)
938 __GMP_DECLSPEC mp_limb_t mpn_sublsh1_nc_ip1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t);
939 #endif
941 /* mpn_rsblsh1_n(c,a,b,n), when it exists, sets {c,n} to 2*{b,n}-{a,n}, and
942 returns the carry out (-1, 0, 1). */
943 #define mpn_rsblsh1_n __MPN(rsblsh1_n)
944 __GMP_DECLSPEC mp_limb_signed_t mpn_rsblsh1_n (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t);
945 #define mpn_rsblsh1_nc __MPN(rsblsh1_nc)
946 __GMP_DECLSPEC mp_limb_signed_t mpn_rsblsh1_nc (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t, mp_limb_t);
948 /* mpn_sublsh2_n(c,a,b,n), when it exists, sets {c,n} to {a,n}-4*{b,n}, and
949 returns the borrow out (0, ..., 4). Use _ip1 when a=c. */
950 #define mpn_sublsh2_n __MPN(sublsh2_n)
951 __GMP_DECLSPEC mp_limb_t mpn_sublsh2_n (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t);
952 #define mpn_sublsh2_nc __MPN(sublsh2_nc)
953 __GMP_DECLSPEC mp_limb_t mpn_sublsh2_nc (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t, mp_limb_t);
954 #if HAVE_NATIVE_mpn_sublsh2_n && ! HAVE_NATIVE_mpn_sublsh2_n_ip1
955 #define mpn_sublsh2_n_ip1(dst,src,n) mpn_sublsh2_n(dst,dst,src,n)
956 #define HAVE_NATIVE_mpn_sublsh2_n_ip1 1
957 #else
958 #define mpn_sublsh2_n_ip1 __MPN(sublsh2_n_ip1)
959 __GMP_DECLSPEC mp_limb_t mpn_sublsh2_n_ip1 (mp_ptr, mp_srcptr, mp_size_t);
960 #endif
961 #if HAVE_NATIVE_mpn_sublsh2_nc && ! HAVE_NATIVE_mpn_sublsh2_nc_ip1
962 #define mpn_sublsh2_nc_ip1(dst,src,n,c) mpn_sublsh2_nc(dst,dst,src,n,c)
963 #define HAVE_NATIVE_mpn_sublsh2_nc_ip1 1
964 #else
965 #define mpn_sublsh2_nc_ip1 __MPN(sublsh2_nc_ip1)
966 __GMP_DECLSPEC mp_limb_t mpn_sublsh2_nc_ip1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t);
967 #endif
969 /* mpn_sublsh_n(c,a,b,n,k), when it exists, sets {c,n} to {a,n}-2^k*{b,n}, and
970 returns the carry out (0, ..., 2^k). Use _ip1 when a=c. */
971 #define mpn_sublsh_n __MPN(sublsh_n)
972 __GMP_DECLSPEC mp_limb_t mpn_sublsh_n (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t, unsigned int);
973 #if HAVE_NATIVE_mpn_sublsh_n && ! HAVE_NATIVE_mpn_sublsh_n_ip1
974 #define mpn_sublsh_n_ip1(dst,src,n,s) mpn_sublsh_n(dst,dst,src,n,s)
975 #define HAVE_NATIVE_mpn_sublsh_n_ip1 1
976 #else
977 #define mpn_sublsh_n_ip1 __MPN(sublsh_n_ip1)
978 __GMP_DECLSPEC mp_limb_t mpn_sublsh_n_ip1 (mp_ptr, mp_srcptr, mp_size_t, unsigned int);
979 #endif
980 #if HAVE_NATIVE_mpn_sublsh_nc && ! HAVE_NATIVE_mpn_sublsh_nc_ip1
981 #define mpn_sublsh_nc_ip1(dst,src,n,s,c) mpn_sublsh_nc(dst,dst,src,n,s,c)
982 #define HAVE_NATIVE_mpn_sublsh_nc_ip1 1
983 #else
984 #define mpn_sublsh_nc_ip1 __MPN(sublsh_nc_ip1)
985 __GMP_DECLSPEC mp_limb_t mpn_sublsh_nc_ip1 (mp_ptr, mp_srcptr, mp_size_t, unsigned int, mp_limb_t);
986 #endif
988 /* mpn_rsblsh2_n(c,a,b,n), when it exists, sets {c,n} to 4*{b,n}-{a,n}, and
989 returns the carry out (-1, ..., 3). */
990 #define mpn_rsblsh2_n __MPN(rsblsh2_n)
991 __GMP_DECLSPEC mp_limb_signed_t mpn_rsblsh2_n (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t);
992 #define mpn_rsblsh2_nc __MPN(rsblsh2_nc)
993 __GMP_DECLSPEC mp_limb_signed_t mpn_rsblsh2_nc (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t, mp_limb_t);
995 /* mpn_rsblsh_n(c,a,b,n,k), when it exists, sets {c,n} to 2^k*{b,n}-{a,n}, and
996 returns the carry out (-1, 0, ..., 2^k-1). */
997 #define mpn_rsblsh_n __MPN(rsblsh_n)
998 __GMP_DECLSPEC mp_limb_signed_t mpn_rsblsh_n (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t, unsigned int);
999 #define mpn_rsblsh_nc __MPN(rsblsh_nc)
1000 __GMP_DECLSPEC mp_limb_signed_t mpn_rsblsh_nc (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t, unsigned int, mp_limb_t);
1002 /* mpn_rsh1add_n(c,a,b,n), when it exists, sets {c,n} to ({a,n} + {b,n}) >> 1,
1003 and returns the bit rshifted out (0 or 1). */
1004 #define mpn_rsh1add_n __MPN(rsh1add_n)
1005 __GMP_DECLSPEC mp_limb_t mpn_rsh1add_n (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t);
1006 #define mpn_rsh1add_nc __MPN(rsh1add_nc)
1007 __GMP_DECLSPEC mp_limb_t mpn_rsh1add_nc (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t, mp_limb_t);
1009 /* mpn_rsh1sub_n(c,a,b,n), when it exists, sets {c,n} to ({a,n} - {b,n}) >> 1,
1010 and returns the bit rshifted out (0 or 1). If there's a borrow from the
1011 subtract, it's stored as a 1 in the high bit of c[n-1], like a twos
1012 complement negative. */
1013 #define mpn_rsh1sub_n __MPN(rsh1sub_n)
1014 __GMP_DECLSPEC mp_limb_t mpn_rsh1sub_n (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t);
1015 #define mpn_rsh1sub_nc __MPN(rsh1sub_nc)
1016 __GMP_DECLSPEC mp_limb_t mpn_rsh1sub_nc (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t, mp_limb_t);
1018 #ifndef mpn_lshiftc /* if not done with cpuvec in a fat binary */
1019 #define mpn_lshiftc __MPN(lshiftc)
1020 __GMP_DECLSPEC mp_limb_t mpn_lshiftc (mp_ptr, mp_srcptr, mp_size_t, unsigned int);
1021 #endif
1023 #define mpn_add_err1_n __MPN(add_err1_n)
1024 __GMP_DECLSPEC mp_limb_t mpn_add_err1_n (mp_ptr, mp_srcptr, mp_srcptr, mp_ptr, mp_srcptr, mp_size_t, mp_limb_t);
1026 #define mpn_add_err2_n __MPN(add_err2_n)
1027 __GMP_DECLSPEC mp_limb_t mpn_add_err2_n (mp_ptr, mp_srcptr, mp_srcptr, mp_ptr, mp_srcptr, mp_srcptr, mp_size_t, mp_limb_t);
1029 #define mpn_add_err3_n __MPN(add_err3_n)
1030 __GMP_DECLSPEC mp_limb_t mpn_add_err3_n (mp_ptr, mp_srcptr, mp_srcptr, mp_ptr, mp_srcptr, mp_srcptr, mp_srcptr, mp_size_t, mp_limb_t);
1032 #define mpn_sub_err1_n __MPN(sub_err1_n)
1033 __GMP_DECLSPEC mp_limb_t mpn_sub_err1_n (mp_ptr, mp_srcptr, mp_srcptr, mp_ptr, mp_srcptr, mp_size_t, mp_limb_t);
1035 #define mpn_sub_err2_n __MPN(sub_err2_n)
1036 __GMP_DECLSPEC mp_limb_t mpn_sub_err2_n (mp_ptr, mp_srcptr, mp_srcptr, mp_ptr, mp_srcptr, mp_srcptr, mp_size_t, mp_limb_t);
1038 #define mpn_sub_err3_n __MPN(sub_err3_n)
1039 __GMP_DECLSPEC mp_limb_t mpn_sub_err3_n (mp_ptr, mp_srcptr, mp_srcptr, mp_ptr, mp_srcptr, mp_srcptr, mp_srcptr, mp_size_t, mp_limb_t);
1041 #define mpn_add_n_sub_n __MPN(add_n_sub_n)
1042 __GMP_DECLSPEC mp_limb_t mpn_add_n_sub_n (mp_ptr, mp_ptr, mp_srcptr, mp_srcptr, mp_size_t);
1044 #define mpn_add_n_sub_nc __MPN(add_n_sub_nc)
1045 __GMP_DECLSPEC mp_limb_t mpn_add_n_sub_nc (mp_ptr, mp_ptr, mp_srcptr, mp_srcptr, mp_size_t, mp_limb_t);
1047 #define mpn_addaddmul_1msb0 __MPN(addaddmul_1msb0)
1048 __GMP_DECLSPEC mp_limb_t mpn_addaddmul_1msb0 (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t);
1050 #define mpn_divrem_1c __MPN(divrem_1c)
1051 __GMP_DECLSPEC mp_limb_t mpn_divrem_1c (mp_ptr, mp_size_t, mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t);
1053 #define mpn_dump __MPN(dump)
1054 __GMP_DECLSPEC void mpn_dump (mp_srcptr, mp_size_t);
1056 #define mpn_fib2_ui __MPN(fib2_ui)
1057 __GMP_DECLSPEC mp_size_t mpn_fib2_ui (mp_ptr, mp_ptr, unsigned long);
1059 /* Remap names of internal mpn functions. */
1060 #define __clz_tab __MPN(clz_tab)
1061 #define mpn_udiv_w_sdiv __MPN(udiv_w_sdiv)
1063 #define mpn_jacobi_base __MPN(jacobi_base)
1064 __GMP_DECLSPEC int mpn_jacobi_base (mp_limb_t, mp_limb_t, int) ATTRIBUTE_CONST;
1066 #define mpn_jacobi_2 __MPN(jacobi_2)
1067 __GMP_DECLSPEC int mpn_jacobi_2 (mp_srcptr, mp_srcptr, unsigned);
1069 #define mpn_jacobi_n __MPN(jacobi_n)
1070 __GMP_DECLSPEC int mpn_jacobi_n (mp_ptr, mp_ptr, mp_size_t, unsigned);
1072 #define mpn_mod_1c __MPN(mod_1c)
1073 __GMP_DECLSPEC mp_limb_t mpn_mod_1c (mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t) __GMP_ATTRIBUTE_PURE;
1075 #define mpn_mul_1c __MPN(mul_1c)
1076 __GMP_DECLSPEC mp_limb_t mpn_mul_1c (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t);
1078 #define mpn_mul_2 __MPN(mul_2)
1079 __GMP_DECLSPEC mp_limb_t mpn_mul_2 (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr);
1081 #define mpn_mul_3 __MPN(mul_3)
1082 __GMP_DECLSPEC mp_limb_t mpn_mul_3 (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr);
1084 #define mpn_mul_4 __MPN(mul_4)
1085 __GMP_DECLSPEC mp_limb_t mpn_mul_4 (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr);
1087 #define mpn_mul_5 __MPN(mul_5)
1088 __GMP_DECLSPEC mp_limb_t mpn_mul_5 (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr);
1090 #define mpn_mul_6 __MPN(mul_6)
1091 __GMP_DECLSPEC mp_limb_t mpn_mul_6 (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr);
1093 #ifndef mpn_mul_basecase /* if not done with cpuvec in a fat binary */
1094 #define mpn_mul_basecase __MPN(mul_basecase)
1095 __GMP_DECLSPEC void mpn_mul_basecase (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t);
1096 #endif
1098 #define mpn_mullo_n __MPN(mullo_n)
1099 __GMP_DECLSPEC void mpn_mullo_n (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t);
1101 #ifndef mpn_mullo_basecase /* if not done with cpuvec in a fat binary */
1102 #define mpn_mullo_basecase __MPN(mullo_basecase)
1103 __GMP_DECLSPEC void mpn_mullo_basecase (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t);
1104 #endif
1106 #ifndef mpn_sqr_basecase /* if not done with cpuvec in a fat binary */
1107 #define mpn_sqr_basecase __MPN(sqr_basecase)
1108 __GMP_DECLSPEC void mpn_sqr_basecase (mp_ptr, mp_srcptr, mp_size_t);
1109 #endif
1111 #define mpn_mulmid_basecase __MPN(mulmid_basecase)
1112 __GMP_DECLSPEC void mpn_mulmid_basecase (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t);
1114 #define mpn_mulmid_n __MPN(mulmid_n)
1115 __GMP_DECLSPEC void mpn_mulmid_n (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t);
1117 #define mpn_mulmid __MPN(mulmid)
1118 __GMP_DECLSPEC void mpn_mulmid (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t);
1120 #define mpn_submul_1c __MPN(submul_1c)
1121 __GMP_DECLSPEC mp_limb_t mpn_submul_1c (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t);
1123 #ifndef mpn_redc_1 /* if not done with cpuvec in a fat binary */
1124 #define mpn_redc_1 __MPN(redc_1)
1125 __GMP_DECLSPEC mp_limb_t mpn_redc_1 (mp_ptr, mp_ptr, mp_srcptr, mp_size_t, mp_limb_t);
1126 #endif
1128 #ifndef mpn_redc_2 /* if not done with cpuvec in a fat binary */
1129 #define mpn_redc_2 __MPN(redc_2)
1130 __GMP_DECLSPEC mp_limb_t mpn_redc_2 (mp_ptr, mp_ptr, mp_srcptr, mp_size_t, mp_srcptr);
1131 #endif
1133 #define mpn_redc_n __MPN(redc_n)
1134 __GMP_DECLSPEC void mpn_redc_n (mp_ptr, mp_ptr, mp_srcptr, mp_size_t, mp_srcptr);
1137 #ifndef mpn_mod_1_1p_cps /* if not done with cpuvec in a fat binary */
1138 #define mpn_mod_1_1p_cps __MPN(mod_1_1p_cps)
1139 __GMP_DECLSPEC void mpn_mod_1_1p_cps (mp_limb_t [4], mp_limb_t);
1140 #endif
1141 #ifndef mpn_mod_1_1p /* if not done with cpuvec in a fat binary */
1142 #define mpn_mod_1_1p __MPN(mod_1_1p)
1143 __GMP_DECLSPEC mp_limb_t mpn_mod_1_1p (mp_srcptr, mp_size_t, mp_limb_t, const mp_limb_t [4]) __GMP_ATTRIBUTE_PURE;
1144 #endif
1146 #ifndef mpn_mod_1s_2p_cps /* if not done with cpuvec in a fat binary */
1147 #define mpn_mod_1s_2p_cps __MPN(mod_1s_2p_cps)
1148 __GMP_DECLSPEC void mpn_mod_1s_2p_cps (mp_limb_t [5], mp_limb_t);
1149 #endif
1150 #ifndef mpn_mod_1s_2p /* if not done with cpuvec in a fat binary */
1151 #define mpn_mod_1s_2p __MPN(mod_1s_2p)
1152 __GMP_DECLSPEC mp_limb_t mpn_mod_1s_2p (mp_srcptr, mp_size_t, mp_limb_t, const mp_limb_t [5]) __GMP_ATTRIBUTE_PURE;
1153 #endif
1155 #ifndef mpn_mod_1s_3p_cps /* if not done with cpuvec in a fat binary */
1156 #define mpn_mod_1s_3p_cps __MPN(mod_1s_3p_cps)
1157 __GMP_DECLSPEC void mpn_mod_1s_3p_cps (mp_limb_t [6], mp_limb_t);
1158 #endif
1159 #ifndef mpn_mod_1s_3p /* if not done with cpuvec in a fat binary */
1160 #define mpn_mod_1s_3p __MPN(mod_1s_3p)
1161 __GMP_DECLSPEC mp_limb_t mpn_mod_1s_3p (mp_srcptr, mp_size_t, mp_limb_t, const mp_limb_t [6]) __GMP_ATTRIBUTE_PURE;
1162 #endif
1164 #ifndef mpn_mod_1s_4p_cps /* if not done with cpuvec in a fat binary */
1165 #define mpn_mod_1s_4p_cps __MPN(mod_1s_4p_cps)
1166 __GMP_DECLSPEC void mpn_mod_1s_4p_cps (mp_limb_t [7], mp_limb_t);
1167 #endif
1168 #ifndef mpn_mod_1s_4p /* if not done with cpuvec in a fat binary */
1169 #define mpn_mod_1s_4p __MPN(mod_1s_4p)
1170 __GMP_DECLSPEC mp_limb_t mpn_mod_1s_4p (mp_srcptr, mp_size_t, mp_limb_t, const mp_limb_t [7]) __GMP_ATTRIBUTE_PURE;
1171 #endif
1173 #define mpn_bc_mulmod_bnm1 __MPN(bc_mulmod_bnm1)
1174 __GMP_DECLSPEC void mpn_bc_mulmod_bnm1 (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t, mp_ptr);
1175 #define mpn_mulmod_bnm1 __MPN(mulmod_bnm1)
1176 __GMP_DECLSPEC void mpn_mulmod_bnm1 (mp_ptr, mp_size_t, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr);
1177 #define mpn_mulmod_bnm1_next_size __MPN(mulmod_bnm1_next_size)
1178 __GMP_DECLSPEC mp_size_t mpn_mulmod_bnm1_next_size (mp_size_t) ATTRIBUTE_CONST;
1179 static inline mp_size_t
1180 mpn_mulmod_bnm1_itch (mp_size_t rn, mp_size_t an, mp_size_t bn) {
1181 mp_size_t n, itch;
1182 n = rn >> 1;
1183 itch = rn + 4 +
1184 (an > n ? (bn > n ? rn : n) : 0);
1185 return itch;
1186 }
1188 #define mpn_sqrmod_bnm1 __MPN(sqrmod_bnm1)
1189 __GMP_DECLSPEC void mpn_sqrmod_bnm1 (mp_ptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr);
1190 #define mpn_sqrmod_bnm1_next_size __MPN(sqrmod_bnm1_next_size)
1191 __GMP_DECLSPEC mp_size_t mpn_sqrmod_bnm1_next_size (mp_size_t) ATTRIBUTE_CONST;
1192 static inline mp_size_t
1193 mpn_sqrmod_bnm1_itch (mp_size_t rn, mp_size_t an) {
1194 mp_size_t n, itch;
1195 n = rn >> 1;
1196 itch = rn + 3 +
1197 (an > n ? an : 0);
1198 return itch;
1199 }
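/* Rough usage sketch (illustrative only, assuming the usual preconditions of
   mpn/generic/mulmod_bnm1.c: an >= bn > 0, an + bn >= rn, and rn obtained
   from mpn_mulmod_bnm1_next_size):  */
#if 0
static void
example_mulmod_bnm1 (mp_ptr rp, mp_size_t rn, mp_srcptr ap, mp_size_t an,
                     mp_srcptr bp, mp_size_t bn)
{
  mp_ptr  tp;
  TMP_DECL;
  TMP_MARK;
  tp = TMP_ALLOC_LIMBS (mpn_mulmod_bnm1_itch (rn, an, bn));
  mpn_mulmod_bnm1 (rp, rn, ap, an, bp, bn, tp);  /* {rp,rn} = a*b mod (B^rn-1) */
  TMP_FREE;
}
#endif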
1201 typedef __gmp_randstate_struct *gmp_randstate_ptr;
1202 typedef const __gmp_randstate_struct *gmp_randstate_srcptr;
1204 /* Pseudo-random number generator function pointers structure. */
1205 typedef struct {
1206 void (*randseed_fn) (gmp_randstate_t, mpz_srcptr);
1207 void (*randget_fn) (gmp_randstate_t, mp_ptr, unsigned long int);
1208 void (*randclear_fn) (gmp_randstate_t);
1209 void (*randiset_fn) (gmp_randstate_ptr, gmp_randstate_srcptr);
1210 } gmp_randfnptr_t;
1212 /* Macro to obtain a void pointer to the function pointers structure. */
1213 #define RNG_FNPTR(rstate) ((rstate)->_mp_algdata._mp_lc)
1215 /* Macro to obtain a pointer to the generator's state.
1216 When used as a lvalue the rvalue needs to be cast to mp_ptr. */
1217 #define RNG_STATE(rstate) ((rstate)->_mp_seed->_mp_d)
1219 /* Write a given number of random bits to rp. */
1220 #define _gmp_rand(rp, state, bits) \
1221 do { \
1222 gmp_randstate_ptr __rstate = (state); \
1223 (*((gmp_randfnptr_t *) RNG_FNPTR (__rstate))->randget_fn) \
1224 (__rstate, rp, bits); \
1225 } while (0)
1227 __GMP_DECLSPEC void __gmp_randinit_mt_noseed (gmp_randstate_t);
1230 /* __gmp_rands is the global state for the old-style random functions, and
1231 is also used in the test programs (hence the __GMP_DECLSPEC).
1233 There's no seeding here, so mpz_random etc will generate the same
1234 sequence every time. This is not unlike the C library random functions
1235 if you don't seed them, so perhaps it's acceptable. Digging up a seed
1236 from /dev/random or the like would work on many systems, but might
1237 encourage a false confidence, since it'd be pretty much impossible to do
1238 something that would work reliably everywhere. In any case the new style
1239 functions are recommended to applications which care about randomness, so
1240 the old functions aren't too important. */
1242 __GMP_DECLSPEC extern char __gmp_rands_initialized;
1243 __GMP_DECLSPEC extern gmp_randstate_t __gmp_rands;
1245 #define RANDS \
1246 ((__gmp_rands_initialized ? 0 \
1247 : (__gmp_rands_initialized = 1, \
1248 __gmp_randinit_mt_noseed (__gmp_rands), 0)), \
1249 __gmp_rands)
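/* Illustrative sketch (not part of GMP): filling a limb buffer with random
   bits from the shared __gmp_rands state.  */
#if 0
static void
example_random_limbs (mp_ptr rp, mp_size_t n)
{
  _gmp_rand (rp, RANDS, n * GMP_NUMB_BITS);   /* n full limbs of random bits */
}
#endif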
1251 /* this is used by the test programs, to free memory */
1252 #define RANDS_CLEAR() \
1253 do { \
1254 if (__gmp_rands_initialized) \
1255 { \
1256 __gmp_rands_initialized = 0; \
1257 gmp_randclear (__gmp_rands); \
1258 } \
1259 } while (0)
1262 /* For a threshold between algorithms A and B, size>=thresh is where B
1263 should be used. Special value MP_SIZE_T_MAX means only ever use A, or
1264 value 0 means only ever use B. The tests for these special values will
1265 be compile-time constants, so the compiler should be able to eliminate
1266 the code for the unwanted algorithm. */
1268 #if ! defined (__GNUC__) || __GNUC__ < 2
1269 #define ABOVE_THRESHOLD(size,thresh) \
1270 ((thresh) == 0 \
1271 || ((thresh) != MP_SIZE_T_MAX \
1272 && (size) >= (thresh)))
1273 #else
1274 #define ABOVE_THRESHOLD(size,thresh) \
1275 ((__builtin_constant_p (thresh) && (thresh) == 0) \
1276 || (!(__builtin_constant_p (thresh) && (thresh) == MP_SIZE_T_MAX) \
1277 && (size) >= (thresh)))
1278 #endif
1279 #define BELOW_THRESHOLD(size,thresh) (! ABOVE_THRESHOLD (size, thresh))
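/* Illustrative sketch (not GMP's actual dispatch code): how a threshold from
   gmp-mparam.h is typically consulted.  */
#if 0
static void
example_mul_n (mp_ptr prodp, mp_srcptr up, mp_srcptr vp, mp_size_t n)
{
  if (BELOW_THRESHOLD (n, MUL_TOOM22_THRESHOLD))
    mpn_mul_basecase (prodp, up, n, vp, n);
  else
    mpn_mul_n (prodp, up, vp, n);       /* let the real dispatcher choose */
}
#endif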
1281 #define MPN_TOOM22_MUL_MINSIZE 4
1282 #define MPN_TOOM2_SQR_MINSIZE 4
1284 #define MPN_TOOM33_MUL_MINSIZE 17
1285 #define MPN_TOOM3_SQR_MINSIZE 17
1287 #define MPN_TOOM44_MUL_MINSIZE 30
1288 #define MPN_TOOM4_SQR_MINSIZE 30
1290 #define MPN_TOOM6H_MUL_MINSIZE 46
1291 #define MPN_TOOM6_SQR_MINSIZE 46
1293 #define MPN_TOOM8H_MUL_MINSIZE 86
1294 #define MPN_TOOM8_SQR_MINSIZE 86
1296 #define MPN_TOOM32_MUL_MINSIZE 10
1297 #define MPN_TOOM42_MUL_MINSIZE 10
1298 #define MPN_TOOM43_MUL_MINSIZE 25
1299 #define MPN_TOOM53_MUL_MINSIZE 17
1300 #define MPN_TOOM54_MUL_MINSIZE 31
1301 #define MPN_TOOM63_MUL_MINSIZE 49
1303 #define MPN_TOOM42_MULMID_MINSIZE 4
1305 #define mpn_sqr_diagonal __MPN(sqr_diagonal)
1306 __GMP_DECLSPEC void mpn_sqr_diagonal (mp_ptr, mp_srcptr, mp_size_t);
1308 #define mpn_sqr_diag_addlsh1 __MPN(sqr_diag_addlsh1)
1309 __GMP_DECLSPEC void mpn_sqr_diag_addlsh1 (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t);
1311 #define mpn_toom_interpolate_5pts __MPN(toom_interpolate_5pts)
1312 __GMP_DECLSPEC void mpn_toom_interpolate_5pts (mp_ptr, mp_ptr, mp_ptr, mp_size_t, mp_size_t, int, mp_limb_t);
1314 enum toom6_flags {toom6_all_pos = 0, toom6_vm1_neg = 1, toom6_vm2_neg = 2};
1315 #define mpn_toom_interpolate_6pts __MPN(toom_interpolate_6pts)
1316 __GMP_DECLSPEC void mpn_toom_interpolate_6pts (mp_ptr, mp_size_t, enum toom6_flags, mp_ptr, mp_ptr, mp_ptr, mp_size_t);
1318 enum toom7_flags { toom7_w1_neg = 1, toom7_w3_neg = 2 };
1319 #define mpn_toom_interpolate_7pts __MPN(toom_interpolate_7pts)
1320 __GMP_DECLSPEC void mpn_toom_interpolate_7pts (mp_ptr, mp_size_t, enum toom7_flags, mp_ptr, mp_ptr, mp_ptr, mp_ptr, mp_size_t, mp_ptr);
1322 #define mpn_toom_interpolate_8pts __MPN(toom_interpolate_8pts)
1323 __GMP_DECLSPEC void mpn_toom_interpolate_8pts (mp_ptr, mp_size_t, mp_ptr, mp_ptr, mp_size_t, mp_ptr);
1325 #define mpn_toom_interpolate_12pts __MPN(toom_interpolate_12pts)
1326 __GMP_DECLSPEC void mpn_toom_interpolate_12pts (mp_ptr, mp_ptr, mp_ptr, mp_ptr, mp_size_t, mp_size_t, int, mp_ptr);
1328 #define mpn_toom_interpolate_16pts __MPN(toom_interpolate_16pts)
1329 __GMP_DECLSPEC void mpn_toom_interpolate_16pts (mp_ptr, mp_ptr, mp_ptr, mp_ptr, mp_ptr, mp_size_t, mp_size_t, int, mp_ptr);
1331 #define mpn_toom_couple_handling __MPN(toom_couple_handling)
1332 __GMP_DECLSPEC void mpn_toom_couple_handling (mp_ptr, mp_size_t, mp_ptr, int, mp_size_t, int, int);
1334 #define mpn_toom_eval_dgr3_pm1 __MPN(toom_eval_dgr3_pm1)
1335 __GMP_DECLSPEC int mpn_toom_eval_dgr3_pm1 (mp_ptr, mp_ptr, mp_srcptr, mp_size_t, mp_size_t, mp_ptr);
1337 #define mpn_toom_eval_dgr3_pm2 __MPN(toom_eval_dgr3_pm2)
1338 __GMP_DECLSPEC int mpn_toom_eval_dgr3_pm2 (mp_ptr, mp_ptr, mp_srcptr, mp_size_t, mp_size_t, mp_ptr);
1340 #define mpn_toom_eval_pm1 __MPN(toom_eval_pm1)
1341 __GMP_DECLSPEC int mpn_toom_eval_pm1 (mp_ptr, mp_ptr, unsigned, mp_srcptr, mp_size_t, mp_size_t, mp_ptr);
1343 #define mpn_toom_eval_pm2 __MPN(toom_eval_pm2)
1344 __GMP_DECLSPEC int mpn_toom_eval_pm2 (mp_ptr, mp_ptr, unsigned, mp_srcptr, mp_size_t, mp_size_t, mp_ptr);
1346 #define mpn_toom_eval_pm2exp __MPN(toom_eval_pm2exp)
1347 __GMP_DECLSPEC int mpn_toom_eval_pm2exp (mp_ptr, mp_ptr, unsigned, mp_srcptr, mp_size_t, mp_size_t, unsigned, mp_ptr);
1349 #define mpn_toom_eval_pm2rexp __MPN(toom_eval_pm2rexp)
1350 __GMP_DECLSPEC int mpn_toom_eval_pm2rexp (mp_ptr, mp_ptr, unsigned, mp_srcptr, mp_size_t, mp_size_t, unsigned, mp_ptr);
1352 #define mpn_toom22_mul __MPN(toom22_mul)
1353 __GMP_DECLSPEC void mpn_toom22_mul (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr);
1355 #define mpn_toom32_mul __MPN(toom32_mul)
1356 __GMP_DECLSPEC void mpn_toom32_mul (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr);
1358 #define mpn_toom42_mul __MPN(toom42_mul)
1359 __GMP_DECLSPEC void mpn_toom42_mul (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr);
1361 #define mpn_toom52_mul __MPN(toom52_mul)
1362 __GMP_DECLSPEC void mpn_toom52_mul (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr);
1364 #define mpn_toom62_mul __MPN(toom62_mul)
1365 __GMP_DECLSPEC void mpn_toom62_mul (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr);
1367 #define mpn_toom2_sqr __MPN(toom2_sqr)
1368 __GMP_DECLSPEC void mpn_toom2_sqr (mp_ptr, mp_srcptr, mp_size_t, mp_ptr);
1370 #define mpn_toom33_mul __MPN(toom33_mul)
1371 __GMP_DECLSPEC void mpn_toom33_mul (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr);
1373 #define mpn_toom43_mul __MPN(toom43_mul)
1374 __GMP_DECLSPEC void mpn_toom43_mul (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr);
1376 #define mpn_toom53_mul __MPN(toom53_mul)
1377 __GMP_DECLSPEC void mpn_toom53_mul (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr);
1379 #define mpn_toom54_mul __MPN(toom54_mul)
1380 __GMP_DECLSPEC void mpn_toom54_mul (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr);
1382 #define mpn_toom63_mul __MPN(toom63_mul)
1383 __GMP_DECLSPEC void mpn_toom63_mul (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr);
1385 #define mpn_toom3_sqr __MPN(toom3_sqr)
1386 __GMP_DECLSPEC void mpn_toom3_sqr (mp_ptr, mp_srcptr, mp_size_t, mp_ptr);
1388 #define mpn_toom44_mul __MPN(toom44_mul)
1389 __GMP_DECLSPEC void mpn_toom44_mul (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr);
1391 #define mpn_toom4_sqr __MPN(toom4_sqr)
1392 __GMP_DECLSPEC void mpn_toom4_sqr (mp_ptr, mp_srcptr, mp_size_t, mp_ptr);
1394 #define mpn_toom6h_mul __MPN(toom6h_mul)
1395 __GMP_DECLSPEC void mpn_toom6h_mul (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr);
1397 #define mpn_toom6_sqr __MPN(toom6_sqr)
1398 __GMP_DECLSPEC void mpn_toom6_sqr (mp_ptr, mp_srcptr, mp_size_t, mp_ptr);
1400 #define mpn_toom8h_mul __MPN(toom8h_mul)
1401 __GMP_DECLSPEC void mpn_toom8h_mul (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr);
1403 #define mpn_toom8_sqr __MPN(toom8_sqr)
1404 __GMP_DECLSPEC void mpn_toom8_sqr (mp_ptr, mp_srcptr, mp_size_t, mp_ptr);
1406 #define mpn_toom42_mulmid __MPN(toom42_mulmid)
1407 __GMP_DECLSPEC void mpn_toom42_mulmid (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t, mp_ptr);
1409 #define mpn_fft_best_k __MPN(fft_best_k)
1410 __GMP_DECLSPEC int mpn_fft_best_k (mp_size_t, int) ATTRIBUTE_CONST;
1412 #define mpn_mul_fft __MPN(mul_fft)
1413 __GMP_DECLSPEC mp_limb_t mpn_mul_fft (mp_ptr, mp_size_t, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, int);
1415 #define mpn_mul_fft_full __MPN(mul_fft_full)
1416 __GMP_DECLSPEC void mpn_mul_fft_full (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t);
1418 #define mpn_nussbaumer_mul __MPN(nussbaumer_mul)
1419 __GMP_DECLSPEC void mpn_nussbaumer_mul (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t);
1421 #define mpn_fft_next_size __MPN(fft_next_size)
1422 __GMP_DECLSPEC mp_size_t mpn_fft_next_size (mp_size_t, int) ATTRIBUTE_CONST;
1424 #define mpn_div_qr_1n_pi1 __MPN(div_qr_1n_pi1)
1425 __GMP_DECLSPEC mp_limb_t mpn_div_qr_1n_pi1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t, mp_limb_t);
1427 #define mpn_div_qr_2n_pi1 __MPN(div_qr_2n_pi1)
1428 __GMP_DECLSPEC mp_limb_t mpn_div_qr_2n_pi1 (mp_ptr, mp_ptr, mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t, mp_limb_t);
1430 #define mpn_div_qr_2u_pi1 __MPN(div_qr_2u_pi1)
1431 __GMP_DECLSPEC mp_limb_t mpn_div_qr_2u_pi1 (mp_ptr, mp_ptr, mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t, int, mp_limb_t);
1433 #define mpn_sbpi1_div_qr __MPN(sbpi1_div_qr)
1434 __GMP_DECLSPEC mp_limb_t mpn_sbpi1_div_qr (mp_ptr, mp_ptr, mp_size_t, mp_srcptr, mp_size_t, mp_limb_t);
1436 #define mpn_sbpi1_div_q __MPN(sbpi1_div_q)
1437 __GMP_DECLSPEC mp_limb_t mpn_sbpi1_div_q (mp_ptr, mp_ptr, mp_size_t, mp_srcptr, mp_size_t, mp_limb_t);
1439 #define mpn_sbpi1_divappr_q __MPN(sbpi1_divappr_q)
1440 __GMP_DECLSPEC mp_limb_t mpn_sbpi1_divappr_q (mp_ptr, mp_ptr, mp_size_t, mp_srcptr, mp_size_t, mp_limb_t);
1442 #define mpn_dcpi1_div_qr __MPN(dcpi1_div_qr)
1443 __GMP_DECLSPEC mp_limb_t mpn_dcpi1_div_qr (mp_ptr, mp_ptr, mp_size_t, mp_srcptr, mp_size_t, gmp_pi1_t *);
1444 #define mpn_dcpi1_div_qr_n __MPN(dcpi1_div_qr_n)
1445 __GMP_DECLSPEC mp_limb_t mpn_dcpi1_div_qr_n (mp_ptr, mp_ptr, mp_srcptr, mp_size_t, gmp_pi1_t *, mp_ptr);
1447 #define mpn_dcpi1_div_q __MPN(dcpi1_div_q)
1448 __GMP_DECLSPEC mp_limb_t mpn_dcpi1_div_q (mp_ptr, mp_ptr, mp_size_t, mp_srcptr, mp_size_t, gmp_pi1_t *);
1450 #define mpn_dcpi1_divappr_q __MPN(dcpi1_divappr_q)
1451 __GMP_DECLSPEC mp_limb_t mpn_dcpi1_divappr_q (mp_ptr, mp_ptr, mp_size_t, mp_srcptr, mp_size_t, gmp_pi1_t *);
1452 #define mpn_dcpi1_divappr_q_n __MPN(dcpi1_divappr_q_n)
1453 __GMP_DECLSPEC mp_limb_t mpn_dcpi1_divappr_q_n (mp_ptr, mp_ptr, mp_srcptr, mp_size_t, gmp_pi1_t *, mp_ptr);
1455 #define mpn_mu_div_qr __MPN(mu_div_qr)
1456 __GMP_DECLSPEC mp_limb_t mpn_mu_div_qr (mp_ptr, mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr);
1457 #define mpn_mu_div_qr_itch __MPN(mu_div_qr_itch)
1458 __GMP_DECLSPEC mp_size_t mpn_mu_div_qr_itch (mp_size_t, mp_size_t, int) ATTRIBUTE_CONST;
1459 #define mpn_mu_div_qr_choose_in __MPN(mu_div_qr_choose_in)
1460 __GMP_DECLSPEC mp_size_t mpn_mu_div_qr_choose_in (mp_size_t, mp_size_t, int);
1462 #define mpn_preinv_mu_div_qr __MPN(preinv_mu_div_qr)
1463 __GMP_DECLSPEC mp_limb_t mpn_preinv_mu_div_qr (mp_ptr, mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr);
1464 #define mpn_preinv_mu_div_qr_itch __MPN(preinv_mu_div_qr_itch)
1465 __GMP_DECLSPEC mp_size_t mpn_preinv_mu_div_qr_itch (mp_size_t, mp_size_t, mp_size_t) ATTRIBUTE_CONST;
1467 #define mpn_mu_divappr_q __MPN(mu_divappr_q)
1468 __GMP_DECLSPEC mp_limb_t mpn_mu_divappr_q (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr);
1469 #define mpn_mu_divappr_q_itch __MPN(mu_divappr_q_itch)
1470 __GMP_DECLSPEC mp_size_t mpn_mu_divappr_q_itch (mp_size_t, mp_size_t, int) ATTRIBUTE_CONST;
1471 #define mpn_mu_divappr_q_choose_in __MPN(mu_divappr_q_choose_in)
1472 __GMP_DECLSPEC mp_size_t mpn_mu_divappr_q_choose_in (mp_size_t, mp_size_t, int);
1474 #define mpn_preinv_mu_divappr_q __MPN(preinv_mu_divappr_q)
1475 __GMP_DECLSPEC mp_limb_t mpn_preinv_mu_divappr_q (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr);
1477 #define mpn_mu_div_q __MPN(mu_div_q)
1478 __GMP_DECLSPEC mp_limb_t mpn_mu_div_q (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr);
1479 #define mpn_mu_div_q_itch __MPN(mu_div_q_itch)
1480 __GMP_DECLSPEC mp_size_t mpn_mu_div_q_itch (mp_size_t, mp_size_t, int) ATTRIBUTE_CONST;
1482 #define mpn_div_q __MPN(div_q)
1483 __GMP_DECLSPEC void mpn_div_q (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr);
1485 #define mpn_invert __MPN(invert)
1486 __GMP_DECLSPEC void mpn_invert (mp_ptr, mp_srcptr, mp_size_t, mp_ptr);
1487 #define mpn_invert_itch(n) mpn_invertappr_itch(n)
1489 #define mpn_ni_invertappr __MPN(ni_invertappr)
1490 __GMP_DECLSPEC mp_limb_t mpn_ni_invertappr (mp_ptr, mp_srcptr, mp_size_t, mp_ptr);
1491 #define mpn_invertappr __MPN(invertappr)
1492 __GMP_DECLSPEC mp_limb_t mpn_invertappr (mp_ptr, mp_srcptr, mp_size_t, mp_ptr);
1493 #define mpn_invertappr_itch(n) (3 * (n) + 2)
1495 #define mpn_binvert __MPN(binvert)
1496 __GMP_DECLSPEC void mpn_binvert (mp_ptr, mp_srcptr, mp_size_t, mp_ptr);
1497 #define mpn_binvert_itch __MPN(binvert_itch)
1498 __GMP_DECLSPEC mp_size_t mpn_binvert_itch (mp_size_t) ATTRIBUTE_CONST;
1500 #define mpn_bdiv_q_1 __MPN(bdiv_q_1)
1501 __GMP_DECLSPEC mp_limb_t mpn_bdiv_q_1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t);
1503 #define mpn_pi1_bdiv_q_1 __MPN(pi1_bdiv_q_1)
1504 __GMP_DECLSPEC mp_limb_t mpn_pi1_bdiv_q_1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t, int);
1506 #define mpn_sbpi1_bdiv_qr __MPN(sbpi1_bdiv_qr)
1507 __GMP_DECLSPEC mp_limb_t mpn_sbpi1_bdiv_qr (mp_ptr, mp_ptr, mp_size_t, mp_srcptr, mp_size_t, mp_limb_t);
1509 #define mpn_sbpi1_bdiv_q __MPN(sbpi1_bdiv_q)
1510 __GMP_DECLSPEC void mpn_sbpi1_bdiv_q (mp_ptr, mp_ptr, mp_size_t, mp_srcptr, mp_size_t, mp_limb_t);
1512 #define mpn_dcpi1_bdiv_qr __MPN(dcpi1_bdiv_qr)
1513 __GMP_DECLSPEC mp_limb_t mpn_dcpi1_bdiv_qr (mp_ptr, mp_ptr, mp_size_t, mp_srcptr, mp_size_t, mp_limb_t);
1514 #define mpn_dcpi1_bdiv_qr_n_itch __MPN(dcpi1_bdiv_qr_n_itch)
1515 __GMP_DECLSPEC mp_size_t mpn_dcpi1_bdiv_qr_n_itch (mp_size_t) ATTRIBUTE_CONST;
1517 #define mpn_dcpi1_bdiv_qr_n __MPN(dcpi1_bdiv_qr_n)
1518 __GMP_DECLSPEC mp_limb_t mpn_dcpi1_bdiv_qr_n (mp_ptr, mp_ptr, mp_srcptr, mp_size_t, mp_limb_t, mp_ptr);
1519 #define mpn_dcpi1_bdiv_q __MPN(dcpi1_bdiv_q)
1520 __GMP_DECLSPEC void mpn_dcpi1_bdiv_q (mp_ptr, mp_ptr, mp_size_t, mp_srcptr, mp_size_t, mp_limb_t);
1522 #define mpn_dcpi1_bdiv_q_n __MPN(dcpi1_bdiv_q_n)
1523 __GMP_DECLSPEC void mpn_dcpi1_bdiv_q_n (mp_ptr, mp_ptr, mp_srcptr, mp_size_t, mp_limb_t, mp_ptr);
1524 #define mpn_dcpi1_bdiv_q_n_itch __MPN(dcpi1_bdiv_q_n_itch)
1525 __GMP_DECLSPEC mp_size_t mpn_dcpi1_bdiv_q_n_itch (mp_size_t) ATTRIBUTE_CONST;
1527 #define mpn_mu_bdiv_qr __MPN(mu_bdiv_qr)
1528 __GMP_DECLSPEC mp_limb_t mpn_mu_bdiv_qr (mp_ptr, mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr);
1529 #define mpn_mu_bdiv_qr_itch __MPN(mu_bdiv_qr_itch)
1530 __GMP_DECLSPEC mp_size_t mpn_mu_bdiv_qr_itch (mp_size_t, mp_size_t) ATTRIBUTE_CONST;
1532 #define mpn_mu_bdiv_q __MPN(mu_bdiv_q)
1533 __GMP_DECLSPEC void mpn_mu_bdiv_q (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr);
1534 #define mpn_mu_bdiv_q_itch __MPN(mu_bdiv_q_itch)
1535 __GMP_DECLSPEC mp_size_t mpn_mu_bdiv_q_itch (mp_size_t, mp_size_t) ATTRIBUTE_CONST;
1537 #define mpn_bdiv_qr __MPN(bdiv_qr)
1538 __GMP_DECLSPEC mp_limb_t mpn_bdiv_qr (mp_ptr, mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr);
1539 #define mpn_bdiv_qr_itch __MPN(bdiv_qr_itch)
1540 __GMP_DECLSPEC mp_size_t mpn_bdiv_qr_itch (mp_size_t, mp_size_t) ATTRIBUTE_CONST;
1542 #define mpn_bdiv_q __MPN(bdiv_q)
1543 __GMP_DECLSPEC void mpn_bdiv_q (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr);
1544 #define mpn_bdiv_q_itch __MPN(bdiv_q_itch)
1545 __GMP_DECLSPEC mp_size_t mpn_bdiv_q_itch (mp_size_t, mp_size_t) ATTRIBUTE_CONST;
1547 #define mpn_divexact __MPN(divexact)
1548 __GMP_DECLSPEC void mpn_divexact (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t);
1549 #define mpn_divexact_itch __MPN(divexact_itch)
1550 __GMP_DECLSPEC mp_size_t mpn_divexact_itch (mp_size_t, mp_size_t) ATTRIBUTE_CONST;
1552 #ifndef mpn_bdiv_dbm1c /* if not done with cpuvec in a fat binary */
1553 #define mpn_bdiv_dbm1c __MPN(bdiv_dbm1c)
1554 __GMP_DECLSPEC mp_limb_t mpn_bdiv_dbm1c (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t);
1555 #endif
1557 #define mpn_bdiv_dbm1(dst, src, size, divisor) \
1558 mpn_bdiv_dbm1c (dst, src, size, divisor, __GMP_CAST (mp_limb_t, 0))
1560 #define mpn_powm __MPN(powm)
1561 __GMP_DECLSPEC void mpn_powm (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr);
1562 #define mpn_powlo __MPN(powlo)
1563 __GMP_DECLSPEC void mpn_powlo (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t, mp_size_t, mp_ptr);
1565 #define mpn_sec_pi1_div_qr __MPN(sec_pi1_div_qr)
1566 __GMP_DECLSPEC mp_limb_t mpn_sec_pi1_div_qr (mp_ptr, mp_ptr, mp_size_t, mp_srcptr, mp_size_t, mp_limb_t, mp_ptr);
1567 #define mpn_sec_pi1_div_r __MPN(sec_pi1_div_r)
1568 __GMP_DECLSPEC void mpn_sec_pi1_div_r (mp_ptr, mp_size_t, mp_srcptr, mp_size_t, mp_limb_t, mp_ptr);
1571 /* Override mpn_addlsh1_n, mpn_addlsh2_n, mpn_sublsh1_n, etc with mpn_addlsh_n,
1572 etc when !HAVE_NATIVE the former but HAVE_NATIVE_ the latter. We then lie
1573 and say these macros represent native functions, but leave a trace by using
1574 the value 2 rather than 1. */
1576 #if HAVE_NATIVE_mpn_addlsh_n && ! HAVE_NATIVE_mpn_addlsh1_n
1577 #undef mpn_addlsh1_n
1578 #define mpn_addlsh1_n(a,b,c,d) mpn_addlsh_n(a,b,c,d,1)
1579 #define HAVE_NATIVE_mpn_addlsh1_n 2
1580 #endif
1582 #if HAVE_NATIVE_mpn_addlsh_n && ! HAVE_NATIVE_mpn_addlsh2_n
1583 #undef mpn_addlsh2_n
1584 #define mpn_addlsh2_n(a,b,c,d) mpn_addlsh_n(a,b,c,d,2)
1585 #define HAVE_NATIVE_mpn_addlsh2_n 2
1586 #endif
1588 #if HAVE_NATIVE_mpn_sublsh_n && ! HAVE_NATIVE_mpn_sublsh1_n
1589 #undef mpn_sublsh1_n
1590 #define mpn_sublsh1_n(a,b,c,d) mpn_sublsh_n(a,b,c,d,1)
1591 #define HAVE_NATIVE_mpn_sublsh1_n 2
1592 #endif
1594 #if HAVE_NATIVE_mpn_sublsh_n && ! HAVE_NATIVE_mpn_sublsh2_n
1595 #undef mpn_sublsh2_n
1596 #define mpn_sublsh2_n(a,b,c,d) mpn_sublsh_n(a,b,c,d,2)
1597 #define HAVE_NATIVE_mpn_sublsh2_n 2
1598 #endif
1600 #if HAVE_NATIVE_mpn_rsblsh_n && ! HAVE_NATIVE_mpn_rsblsh1_n
1601 #undef mpn_rsblsh1_n
1602 #define mpn_rsblsh1_n(a,b,c,d) mpn_rsblsh_n(a,b,c,d,1)
1603 #define HAVE_NATIVE_mpn_rsblsh1_n 2
1604 #endif
1606 #if HAVE_NATIVE_mpn_rsblsh_n && ! HAVE_NATIVE_mpn_rsblsh2_n
1607 #undef mpn_rsblsh2_n
1608 #define mpn_rsblsh2_n(a,b,c,d) mpn_rsblsh_n(a,b,c,d,2)
1609 #define HAVE_NATIVE_mpn_rsblsh2_n 2
1610 #endif
1613 #ifndef DIVEXACT_BY3_METHOD
1614 #if GMP_NUMB_BITS % 2 == 0 && ! defined (HAVE_NATIVE_mpn_divexact_by3c)
1615 #define DIVEXACT_BY3_METHOD 0 /* default to using mpn_bdiv_dbm1c */
1616 #else
1617 #define DIVEXACT_BY3_METHOD 1
1618 #endif
1619 #endif
1621 #if DIVEXACT_BY3_METHOD == 0
1622 #undef mpn_divexact_by3
1623 #define mpn_divexact_by3(dst,src,size) \
1624 (3 & mpn_bdiv_dbm1 (dst, src, size, __GMP_CAST (mp_limb_t, GMP_NUMB_MASK / 3)))
1625 /* override mpn_divexact_by3c defined in gmp.h */
1627 #undef mpn_divexact_by3c
1628 #define mpn_divexact_by3c(dst,src,size,cy) \
1629   (3 & mpn_bdiv_dbm1c (dst, src, size, __GMP_CAST (mp_limb_t, GMP_NUMB_MASK / 3), GMP_NUMB_MASK / 3 * cy))
1631 #endif
1633 #if GMP_NUMB_BITS % 4 == 0
1634 #define mpn_divexact_by5(dst,src,size) \
1635 (7 & 3 * mpn_bdiv_dbm1 (dst, src, size, __GMP_CAST (mp_limb_t, GMP_NUMB_MASK / 5)))
1636 #endif
1638 #if GMP_NUMB_BITS % 3 == 0
1639 #define mpn_divexact_by7(dst,src,size) \
1640 (7 & 1 * mpn_bdiv_dbm1 (dst, src, size, __GMP_CAST (mp_limb_t, GMP_NUMB_MASK / 7)))
1641 #endif
1643 #if GMP_NUMB_BITS % 6 == 0
1644 #define mpn_divexact_by9(dst,src,size) \
1645 (15 & 7 * mpn_bdiv_dbm1 (dst, src, size, __GMP_CAST (mp_limb_t, GMP_NUMB_MASK / 9)))
1646 #endif
1648 #if GMP_NUMB_BITS % 10 == 0
1649 #define mpn_divexact_by11(dst,src,size) \
1650 (15 & 5 * mpn_bdiv_dbm1 (dst, src, size, __GMP_CAST (mp_limb_t, GMP_NUMB_MASK / 11)))
1651 #endif
1653 #if GMP_NUMB_BITS % 12 == 0
1654 #define mpn_divexact_by13(dst,src,size) \
1655 (15 & 3 * mpn_bdiv_dbm1 (dst, src, size, __GMP_CAST (mp_limb_t, GMP_NUMB_MASK / 13)))
1656 #endif
1658 #if GMP_NUMB_BITS % 4 == 0
1659 #define mpn_divexact_by15(dst,src,size) \
1660 (15 & 1 * mpn_bdiv_dbm1 (dst, src, size, __GMP_CAST (mp_limb_t, GMP_NUMB_MASK / 15)))
1661 #endif
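/* A note on the conditions above: mpn_bdiv_dbm1 works with a divisor of
   B - 1 (B = 2^GMP_NUMB_BITS, the limb base), so dividing exactly by a small
   odd d this way needs d to divide B - 1, i.e. 2^GMP_NUMB_BITS == 1 (mod d).
   Roughly sketched, that is what each GMP_NUMB_BITS % k test checks:
       2^2  = 4    == 1 (mod 3)        hence % 2 for 3
       2^4  = 16   == 1 (mod 5, 15)    hence % 4 for 5 and 15
       2^3  = 8    == 1 (mod 7)        hence % 3 for 7
       2^6  = 64   == 1 (mod 9)        hence % 6 for 9
       2^10 == 1 (mod 11), 2^12 == 1 (mod 13)
   and the constant passed is (B-1)/d; for instance, with a nail-free 64-bit
   limb GMP_NUMB_MASK / 3 is 0x5555555555555555.  */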
1663 #define mpz_divexact_gcd __gmpz_divexact_gcd
1664 __GMP_DECLSPEC void mpz_divexact_gcd (mpz_ptr, mpz_srcptr, mpz_srcptr);
1666 #define mpz_prodlimbs __gmpz_prodlimbs
1667 __GMP_DECLSPEC mp_size_t mpz_prodlimbs (mpz_ptr, mp_ptr, mp_size_t);
1669 #define mpz_oddfac_1 __gmpz_oddfac_1
1670 __GMP_DECLSPEC void mpz_oddfac_1 (mpz_ptr, mp_limb_t, unsigned);
1672 #define mpz_inp_str_nowhite __gmpz_inp_str_nowhite
1673 #ifdef _GMP_H_HAVE_FILE
1674 __GMP_DECLSPEC size_t mpz_inp_str_nowhite (mpz_ptr, FILE *, int, int, size_t);
1675 #endif
1677 #define mpn_divisible_p __MPN(divisible_p)
1678 __GMP_DECLSPEC int mpn_divisible_p (mp_srcptr, mp_size_t, mp_srcptr, mp_size_t) __GMP_ATTRIBUTE_PURE;
1680 #define mpn_rootrem __MPN(rootrem)
1681 __GMP_DECLSPEC mp_size_t mpn_rootrem (mp_ptr, mp_ptr, mp_srcptr, mp_size_t, mp_limb_t);
1683 #define mpn_broot __MPN(broot)
1684 __GMP_DECLSPEC void mpn_broot (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t);
1686 #define mpn_broot_invm1 __MPN(broot_invm1)
1687 __GMP_DECLSPEC void mpn_broot_invm1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t);
1689 #define mpn_brootinv __MPN(brootinv)
1690 __GMP_DECLSPEC void mpn_brootinv (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t, mp_ptr);
1692 #define mpn_bsqrt __MPN(bsqrt)
1693 __GMP_DECLSPEC void mpn_bsqrt (mp_ptr, mp_srcptr, mp_bitcnt_t, mp_ptr);
1695 #define mpn_bsqrtinv __MPN(bsqrtinv)
1696 __GMP_DECLSPEC int mpn_bsqrtinv (mp_ptr, mp_srcptr, mp_bitcnt_t, mp_ptr);
1698 #if defined (_CRAY)
1699 #define MPN_COPY_INCR(dst, src, n) \
1700 do { \
1701 int __i; /* Faster on some Crays with plain int */ \
1702 _Pragma ("_CRI ivdep"); \
1703 for (__i = 0; __i < (n); __i++) \
1704 (dst)[__i] = (src)[__i]; \
1705 } while (0)
1706 #endif
1708 /* used by test programs, hence __GMP_DECLSPEC */
1709 #ifndef mpn_copyi /* if not done with cpuvec in a fat binary */
1710 #define mpn_copyi __MPN(copyi)
1711 __GMP_DECLSPEC void mpn_copyi (mp_ptr, mp_srcptr, mp_size_t);
1712 #endif
1714 #if ! defined (MPN_COPY_INCR) && HAVE_NATIVE_mpn_copyi
1715 #define MPN_COPY_INCR(dst, src, size) \
1716 do { \
1717 ASSERT ((size) >= 0); \
1718 ASSERT (MPN_SAME_OR_INCR_P (dst, src, size)); \
1719 mpn_copyi (dst, src, size); \
1720 } while (0)
1721 #endif
1723 /* Copy N limbs from SRC to DST incrementing, N==0 allowed. */
1724 #if ! defined (MPN_COPY_INCR)
1725 #define MPN_COPY_INCR(dst, src, n) \
1726 do { \
1727 ASSERT ((n) >= 0); \
1728 ASSERT (MPN_SAME_OR_INCR_P (dst, src, n)); \
1729     if ((n) != 0)                                              \
1730       {                                                        \
1731         mp_size_t __n = (n) - 1;                               \
1732         mp_ptr __dst = (dst);                                  \
1733         mp_srcptr __src = (src);                               \
1734         mp_limb_t __x;                                         \
1735         __x = *__src++;                                        \
1736         if (__n != 0)                                          \
1737           {                                                    \
1738             do                                                 \
1739               {                                                \
1740                 *__dst++ = __x;                                \
1741                 __x = *__src++;                                \
1742               }                                                \
1743             while (--__n);                                     \
1744           }                                                    \
1745         *__dst++ = __x;                                        \
1746       }                                                        \
1747   } while (0)
1748 #endif
1751 #if defined (_CRAY)
1752 #define MPN_COPY_DECR(dst, src, n) \
1753 do { \
1754 int __i; /* Faster on some Crays with plain int */ \
1755 _Pragma ("_CRI ivdep"); \
1756 for (__i = (n) - 1; __i >= 0; __i--) \
1757 (dst)[__i] = (src)[__i]; \
1758 } while (0)
1759 #endif
1761 /* used by test programs, hence __GMP_DECLSPEC */
1762 #ifndef mpn_copyd /* if not done with cpuvec in a fat binary */
1763 #define mpn_copyd __MPN(copyd)
1764 __GMP_DECLSPEC void mpn_copyd (mp_ptr, mp_srcptr, mp_size_t);
1765 #endif
1767 #if ! defined (MPN_COPY_DECR) && HAVE_NATIVE_mpn_copyd
1768 #define MPN_COPY_DECR(dst, src, size) \
1769 do { \
1770 ASSERT ((size) >= 0); \
1771 ASSERT (MPN_SAME_OR_DECR_P (dst, src, size)); \
1772 mpn_copyd (dst, src, size); \
1773 } while (0)
1774 #endif
1776 /* Copy N limbs from SRC to DST decrementing, N==0 allowed. */
1777 #if ! defined (MPN_COPY_DECR)
1778 #define MPN_COPY_DECR(dst, src, n) \
1779 do { \
1780 ASSERT ((n) >= 0); \
1781 ASSERT (MPN_SAME_OR_DECR_P (dst, src, n)); \
1782     if ((n) != 0)                                              \
1783       {                                                        \
1784         mp_size_t __n = (n) - 1;                               \
1785         mp_ptr __dst = (dst) + __n;                            \
1786         mp_srcptr __src = (src) + __n;                         \
1787         mp_limb_t __x;                                         \
1788         __x = *__src--;                                        \
1789         if (__n != 0)                                          \
1790           {                                                    \
1791             do                                                 \
1792               {                                                \
1793                 *__dst-- = __x;                                \
1794                 __x = *__src--;                                \
1795               }                                                \
1796             while (--__n);                                     \
1797           }                                                    \
1798         *__dst-- = __x;                                        \
1799       }                                                        \
1800   } while (0)
1801 #endif
1804 #ifndef MPN_COPY
1805 #define MPN_COPY(d,s,n) \
1806 do { \
1807 ASSERT (MPN_SAME_OR_SEPARATE_P (d, s, n)); \
1808 MPN_COPY_INCR (d, s, n); \
1809 } while (0)
1810 #endif
1813 /* Set {dst,size} to the limbs of {src,size} in reverse order. */
1814 #define MPN_REVERSE(dst, src, size) \
1815 do { \
1816 mp_ptr __dst = (dst); \
1817 mp_size_t __size = (size); \
1818 mp_srcptr __src = (src) + __size - 1; \
1819 mp_size_t __i; \
1820 ASSERT ((size) >= 0); \
1821 ASSERT (! MPN_OVERLAP_P (dst, size, src, size)); \
1822 CRAY_Pragma ("_CRI ivdep"); \
1823     for (__i = 0; __i < __size; __i++)                         \
1824       {                                                        \
1825         *__dst = *__src;                                       \
1826         __dst++;                                               \
1827         __src--;                                               \
1828       }                                                        \
1829   } while (0)
1832 /* Zero n limbs at dst.
1834 For power and powerpc we want an inline stu/bdnz loop for zeroing. On
1835 ppc630 for instance this is optimal since it can sustain only 1 store per
1836 cycle.
1838 gcc 2.95.x (for powerpc64 -maix64, or powerpc32) doesn't recognise the
1839 "for" loop in the generic code below can become stu/bdnz. The do/while
1840 here helps it get to that. The same caveat about plain -mpowerpc64 mode
1841 applies here as to __GMPN_COPY_INCR in gmp.h.
1843 xlc 3.1 already generates stu/bdnz from the generic C, and does so from
1844 this loop too.
1846 Enhancement: GLIBC does some trickery with dcbz to zero whole cache lines
1847 at a time. MPN_ZERO isn't all that important in GMP, so it might be more
1848 trouble than it's worth to do the same, though perhaps a call to memset
1849 would be good when on a GNU system. */
1851 #if HAVE_HOST_CPU_FAMILY_power || HAVE_HOST_CPU_FAMILY_powerpc
1852 #define MPN_ZERO(dst, n) \
1853 do { \
1854 ASSERT ((n) >= 0); \
1855     if ((n) != 0)                                              \
1856       {                                                        \
1857         mp_ptr __dst = (dst) - 1;                              \
1858         mp_size_t __n = (n);                                   \
1859         do                                                     \
1860           *++__dst = 0;                                        \
1861         while (--__n);                                         \
1862       }                                                        \
1863   } while (0)
1864 #endif
1866 #ifndef MPN_ZERO
1867 #define MPN_ZERO(dst, n) \
1868 do { \
1869 ASSERT ((n) >= 0); \
1870     if ((n) != 0)                                              \
1871       {                                                        \
1872         mp_ptr __dst = (dst);                                  \
1873         mp_size_t __n = (n);                                   \
1874         do                                                     \
1875           *__dst++ = 0;                                        \
1876         while (--__n);                                         \
1877       }                                                        \
1878   } while (0)
1879 #endif
1882 /* On the x86s repe/scasl doesn't seem useful, since it takes many cycles to
1883 start up and would need to strip a lot of zeros before it'd be faster
1884 than a simple cmpl loop. Here are some times in cycles for
1885 std/repe/scasl/cld and cld/repe/scasl (the latter would be for stripping
1886 low zeros).
1888 std cld
1889 P5 18 16
1890 P6 46 38
1891 K6 36 13
1892    K7    21    20
1893 */
1894 #ifndef MPN_NORMALIZE
1895 #define MPN_NORMALIZE(DST, NLIMBS) \
1896 do { \
1897     while ((NLIMBS) > 0)                                       \
1898       {                                                        \
1899         if ((DST)[(NLIMBS) - 1] != 0)                          \
1900           break;                                               \
1901         (NLIMBS)--;                                            \
1902       }                                                        \
1903   } while (0)
1904 #endif
1905 #ifndef MPN_NORMALIZE_NOT_ZERO
1906 #define MPN_NORMALIZE_NOT_ZERO(DST, NLIMBS) \
1907 do { \
1908     while (1)                                                  \
1909       {                                                        \
1910         ASSERT ((NLIMBS) >= 1);                                \
1911         if ((DST)[(NLIMBS) - 1] != 0)                          \
1912           break;                                               \
1913         (NLIMBS)--;                                            \
1914       }                                                        \
1915   } while (0)
1916 #endif
1918 /* Strip least significant zero limbs from {ptr,size} by incrementing ptr
1919 and decrementing size. low should be ptr[0], and will be the new ptr[0]
1920 on returning. The number in {ptr,size} must be non-zero, ie. size!=0 and
1921 somewhere a non-zero limb. */
1922 #define MPN_STRIP_LOW_ZEROS_NOT_ZERO(ptr, size, low) \
1923 do { \
1924 ASSERT ((size) >= 1); \
1925 ASSERT ((low) == (ptr)[0]); \
1927     while ((low) == 0)                                         \
1928       {                                                        \
1929         (size)--;                                              \
1930         ASSERT ((size) >= 1);                                  \
1931         (ptr)++;                                               \
1932         (low) = *(ptr);                                        \
1933       }                                                        \
1934   } while (0)
1936 /* Initialize X of type mpz_t with space for NLIMBS limbs. X should be a
1937 temporary variable; it will be automatically cleared out at function
1938 return. We use __x here to make it possible to accept both mpz_ptr and
1939 mpz_t arguments. */
1940 #define MPZ_TMP_INIT(X, NLIMBS) \
1941 do { \
1942 mpz_ptr __x = (X); \
1943 ASSERT ((NLIMBS) >= 1); \
1944 __x->_mp_alloc = (NLIMBS); \
1945 __x->_mp_d = TMP_ALLOC_LIMBS (NLIMBS); \
1946 } while (0)
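/* A rough sketch of typical use (the surrounding function body is
   hypothetical; TMP_DECL/TMP_MARK/TMP_FREE are the usual gmp-impl
   temporary allocation macros):

       void foo (mpz_srcptr a)
       {
         mpz_t t;
         TMP_DECL;
         TMP_MARK;
         MPZ_TMP_INIT (t, ABSIZ (a) + 1);   /- room for |a| plus one limb -/
         ... use t like any mpz_t, but do not mpz_clear it ...
         TMP_FREE;
       }
*/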
1948 #if WANT_ASSERT
1949 static inline void *
1950 _mpz_newalloc (mpz_ptr z, mp_size_t n)
1951 {
1952   void * res = _mpz_realloc(z,n);
1953   /* If we are checking the code, force a random change to limbs. */
1954   ((mp_ptr) res)[0] = ~ ((mp_ptr) res)[ALLOC (z) - 1];
1955   return res;
1956 }
1957 #else
1958 #define _mpz_newalloc _mpz_realloc
1959 #endif
1960 /* Realloc for an mpz_t WHAT if it has less than NEEDED limbs. */
1961 #define MPZ_REALLOC(z,n) (UNLIKELY ((n) > ALLOC(z)) \
1962 ? (mp_ptr) _mpz_realloc(z,n) \
1963 : PTR(z))
1964 #define MPZ_NEWALLOC(z,n) (UNLIKELY ((n) > ALLOC(z)) \
1965 ? (mp_ptr) _mpz_newalloc(z,n) \
1966 : PTR(z))
1968 #define MPZ_EQUAL_1_P(z) (SIZ(z)==1 && PTR(z)[0] == 1)
1971 /* MPN_FIB2_SIZE(n) is the size in limbs required by mpn_fib2_ui for fp and
1972 f1p.
1974 From Knuth vol 1 section 1.2.8, F[n] = phi^n/sqrt(5) rounded to the
1975 nearest integer, where phi=(1+sqrt(5))/2 is the golden ratio. So the
1976 number of bits required is n*log_2((1+sqrt(5))/2) = n*0.6942419.
1978 The multiplier used is 23/32=0.71875 for efficient calculation on CPUs
1979 without good floating point. There's +2 for rounding up, and a further
1980 +2 since at the last step x limbs are doubled into a 2x+1 limb region
1981 whereas the actual F[2k] value might be only 2x-1 limbs.
1983 Note that a division is done first, since on a 32-bit system it's at
1984 least conceivable to go right up to n==ULONG_MAX. (F[2^32-1] would be
1985 about 380Mbytes, plus temporary workspace of about 1.2Gbytes here and
1986 whatever a multiply of two 190Mbyte numbers takes.)
1988 Enhancement: When GMP_NUMB_BITS is not a power of 2 the division could be
1989 worked into the multiplier. */
1991 #define MPN_FIB2_SIZE(n) \
1992 ((mp_size_t) ((n) / 32 * 23 / GMP_NUMB_BITS) + 4)
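/* A quick numeric check of the bound, assuming GMP_NUMB_BITS == 64:
   MPN_FIB2_SIZE (1000) = 1000/32*23/64 + 4 = 31*23/64 + 4 = 11 + 4 = 15
   limbs, while F[1000] itself needs about 1000*0.6942 = 695 bits, i.e. 11
   limbs, so the extra room is the safety margin described above.  */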
1995 /* FIB_TABLE(n) returns the Fibonacci number F[n]. Must have n in the range
1996 -1 <= n <= FIB_TABLE_LIMIT (that constant in fib_table.h).
1998 FIB_TABLE_LUCNUM_LIMIT (in fib_table.h) is the largest n for which L[n] =
1999 F[n] + 2*F[n-1] fits in a limb. */
2001 __GMP_DECLSPEC extern const mp_limb_t __gmp_fib_table[];
2002 #define FIB_TABLE(n) (__gmp_fib_table[(n)+1])
2004 extern const mp_limb_t __gmp_oddfac_table[];
2005 extern const mp_limb_t __gmp_odd2fac_table[];
2006 extern const unsigned char __gmp_fac2cnt_table[];
2007 extern const mp_limb_t __gmp_limbroots_table[];
2009 /* n^log <= GMP_NUMB_MAX, a limb can store log factors less than n */
2010 static inline unsigned
2011 log_n_max (mp_limb_t n)
2012 {
2013   unsigned log;
2014   for (log = 8; n > __gmp_limbroots_table[log - 1]; log--);
2015   return log;
2016 }
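/* For instance, with a 64-bit nail-free limb log_n_max (1000) comes out as 6:
   1000^6 = 10^18 still fits in a limb while 1000^7 = 10^21 does not (a
   sketch; the exact cutoffs are whatever __gmp_limbroots_table holds).  */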
2018 #define SIEVESIZE 512 /* FIXME: Allow gmp_init_primesieve to choose */
2019 typedef struct
2020 {
2021   unsigned long d;                 /* current index in s[] */
2022 unsigned long s0; /* number corresponding to s[0] */
2023 unsigned long sqrt_s0; /* misnomer for sqrt(s[SIEVESIZE-1]) */
2024 unsigned char s[SIEVESIZE + 1]; /* sieve table */
2025 } gmp_primesieve_t;
2027 #define gmp_init_primesieve __gmp_init_primesieve
2028 __GMP_DECLSPEC void gmp_init_primesieve (gmp_primesieve_t *);
2030 #define gmp_nextprime __gmp_nextprime
2031 __GMP_DECLSPEC unsigned long int gmp_nextprime (gmp_primesieve_t *);
2033 #define gmp_primesieve __gmp_primesieve
2034 __GMP_DECLSPEC mp_limb_t gmp_primesieve (mp_ptr, mp_limb_t);
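/* Rough usage sketch (illustrative only; the exact first prime returned is
   an implementation detail):

       gmp_primesieve_t ps;
       unsigned long p;
       gmp_init_primesieve (&ps);
       p = gmp_nextprime (&ps);    successive calls yield successive
       p = gmp_nextprime (&ps);    small primes in increasing order
*/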
2037 #ifndef MUL_TOOM22_THRESHOLD
2038 #define MUL_TOOM22_THRESHOLD 30
2039 #endif
2041 #ifndef MUL_TOOM33_THRESHOLD
2042 #define MUL_TOOM33_THRESHOLD 100
2043 #endif
2045 #ifndef MUL_TOOM44_THRESHOLD
2046 #define MUL_TOOM44_THRESHOLD 300
2047 #endif
2049 #ifndef MUL_TOOM6H_THRESHOLD
2050 #define MUL_TOOM6H_THRESHOLD 350
2051 #endif
2053 #ifndef SQR_TOOM6_THRESHOLD
2054 #define SQR_TOOM6_THRESHOLD MUL_TOOM6H_THRESHOLD
2055 #endif
2057 #ifndef MUL_TOOM8H_THRESHOLD
2058 #define MUL_TOOM8H_THRESHOLD 450
2059 #endif
2061 #ifndef SQR_TOOM8_THRESHOLD
2062 #define SQR_TOOM8_THRESHOLD MUL_TOOM8H_THRESHOLD
2063 #endif
2065 #ifndef MUL_TOOM32_TO_TOOM43_THRESHOLD
2066 #define MUL_TOOM32_TO_TOOM43_THRESHOLD 100
2067 #endif
2069 #ifndef MUL_TOOM32_TO_TOOM53_THRESHOLD
2070 #define MUL_TOOM32_TO_TOOM53_THRESHOLD 110
2071 #endif
2073 #ifndef MUL_TOOM42_TO_TOOM53_THRESHOLD
2074 #define MUL_TOOM42_TO_TOOM53_THRESHOLD 100
2075 #endif
2077 #ifndef MUL_TOOM42_TO_TOOM63_THRESHOLD
2078 #define MUL_TOOM42_TO_TOOM63_THRESHOLD 110
2079 #endif
2081 #ifndef MUL_TOOM43_TO_TOOM54_THRESHOLD
2082 #define MUL_TOOM43_TO_TOOM54_THRESHOLD 150
2083 #endif
2085 /* MUL_TOOM22_THRESHOLD_LIMIT is the maximum for MUL_TOOM22_THRESHOLD. In a
2086 normal build MUL_TOOM22_THRESHOLD is a constant and we use that. In a fat
2087 binary or tune program build MUL_TOOM22_THRESHOLD is a variable and a
2088 separate hard limit will have been defined. Similarly for TOOM3. */
2089 #ifndef MUL_TOOM22_THRESHOLD_LIMIT
2090 #define MUL_TOOM22_THRESHOLD_LIMIT MUL_TOOM22_THRESHOLD
2091 #endif
2092 #ifndef MUL_TOOM33_THRESHOLD_LIMIT
2093 #define MUL_TOOM33_THRESHOLD_LIMIT MUL_TOOM33_THRESHOLD
2094 #endif
2095 #ifndef MULLO_BASECASE_THRESHOLD_LIMIT
2096 #define MULLO_BASECASE_THRESHOLD_LIMIT MULLO_BASECASE_THRESHOLD
2097 #endif
2099 /* SQR_BASECASE_THRESHOLD is where mpn_sqr_basecase should take over from
2100 mpn_mul_basecase. Default is to use mpn_sqr_basecase from 0. (Note that we
2101 certainly always want it if there's a native assembler mpn_sqr_basecase.)
2103 If it turns out that mpn_toom2_sqr becomes faster than mpn_mul_basecase
2104 before mpn_sqr_basecase does, then SQR_BASECASE_THRESHOLD is the toom2
2105 threshold and SQR_TOOM2_THRESHOLD is 0. This oddity arises more or less
2106 because SQR_TOOM2_THRESHOLD represents the size up to which mpn_sqr_basecase
2107 should be used, and that may be never. */
2109 #ifndef SQR_BASECASE_THRESHOLD
2110 #define SQR_BASECASE_THRESHOLD 0
2111 #endif
2113 #ifndef SQR_TOOM2_THRESHOLD
2114 #define SQR_TOOM2_THRESHOLD 50
2115 #endif
2117 #ifndef SQR_TOOM3_THRESHOLD
2118 #define SQR_TOOM3_THRESHOLD 120
2119 #endif
2121 #ifndef SQR_TOOM4_THRESHOLD
2122 #define SQR_TOOM4_THRESHOLD 400
2123 #endif
2125 /* See comments above about MUL_TOOM33_THRESHOLD_LIMIT. */
2126 #ifndef SQR_TOOM3_THRESHOLD_LIMIT
2127 #define SQR_TOOM3_THRESHOLD_LIMIT SQR_TOOM3_THRESHOLD
2128 #endif
2130 #ifndef MULMID_TOOM42_THRESHOLD
2131 #define MULMID_TOOM42_THRESHOLD MUL_TOOM22_THRESHOLD
2132 #endif
2134 #ifndef DC_DIV_QR_THRESHOLD
2135 #define DC_DIV_QR_THRESHOLD 50
2136 #endif
2138 #ifndef DC_DIVAPPR_Q_THRESHOLD
2139 #define DC_DIVAPPR_Q_THRESHOLD 200
2140 #endif
2142 #ifndef DC_BDIV_QR_THRESHOLD
2143 #define DC_BDIV_QR_THRESHOLD 50
2144 #endif
2146 #ifndef DC_BDIV_Q_THRESHOLD
2147 #define DC_BDIV_Q_THRESHOLD 180
2148 #endif
2150 #ifndef DIVEXACT_JEB_THRESHOLD
2151 #define DIVEXACT_JEB_THRESHOLD 25
2152 #endif
2154 #ifndef INV_MULMOD_BNM1_THRESHOLD
2155 #define INV_MULMOD_BNM1_THRESHOLD (5*MULMOD_BNM1_THRESHOLD)
2156 #endif
2158 #ifndef INV_APPR_THRESHOLD
2159 #define INV_APPR_THRESHOLD INV_NEWTON_THRESHOLD
2160 #endif
2162 #ifndef INV_NEWTON_THRESHOLD
2163 #define INV_NEWTON_THRESHOLD 200
2164 #endif
2166 #ifndef BINV_NEWTON_THRESHOLD
2167 #define BINV_NEWTON_THRESHOLD 300
2168 #endif
2170 #ifndef MU_DIVAPPR_Q_THRESHOLD
2171 #define MU_DIVAPPR_Q_THRESHOLD 2000
2172 #endif
2174 #ifndef MU_DIV_QR_THRESHOLD
2175 #define MU_DIV_QR_THRESHOLD 2000
2176 #endif
2178 #ifndef MUPI_DIV_QR_THRESHOLD
2179 #define MUPI_DIV_QR_THRESHOLD 200
2180 #endif
2182 #ifndef MU_BDIV_Q_THRESHOLD
2183 #define MU_BDIV_Q_THRESHOLD 2000
2184 #endif
2186 #ifndef MU_BDIV_QR_THRESHOLD
2187 #define MU_BDIV_QR_THRESHOLD 2000
2188 #endif
2190 #ifndef MULMOD_BNM1_THRESHOLD
2191 #define MULMOD_BNM1_THRESHOLD 16
2192 #endif
2194 #ifndef SQRMOD_BNM1_THRESHOLD
2195 #define SQRMOD_BNM1_THRESHOLD 16
2196 #endif
2198 #ifndef MUL_TO_MULMOD_BNM1_FOR_2NXN_THRESHOLD
2199 #define MUL_TO_MULMOD_BNM1_FOR_2NXN_THRESHOLD (INV_MULMOD_BNM1_THRESHOLD/2)
2200 #endif
2202 #if HAVE_NATIVE_mpn_addmul_2 || HAVE_NATIVE_mpn_redc_2
2204 #ifndef REDC_1_TO_REDC_2_THRESHOLD
2205 #define REDC_1_TO_REDC_2_THRESHOLD 15
2206 #endif
2207 #ifndef REDC_2_TO_REDC_N_THRESHOLD
2208 #define REDC_2_TO_REDC_N_THRESHOLD 100
2209 #endif
2211 #else
2213 #ifndef REDC_1_TO_REDC_N_THRESHOLD
2214 #define REDC_1_TO_REDC_N_THRESHOLD 100
2215 #endif
2217 #endif /* HAVE_NATIVE_mpn_addmul_2 || HAVE_NATIVE_mpn_redc_2 */
2220 /* First k to use for an FFT modF multiply. A modF FFT is an order
2221 log(2^k)/log(2^(k-1)) algorithm, so k=3 is merely 1.5 like karatsuba,
2222 whereas k=4 is 1.33 which is faster than toom3 at 1.485. */
2223 #define FFT_FIRST_K 4
2225 /* Threshold at which FFT should be used to do a modF NxN -> N multiply. */
2226 #ifndef MUL_FFT_MODF_THRESHOLD
2227 #define MUL_FFT_MODF_THRESHOLD (MUL_TOOM33_THRESHOLD * 3)
2228 #endif
2229 #ifndef SQR_FFT_MODF_THRESHOLD
2230 #define SQR_FFT_MODF_THRESHOLD (SQR_TOOM3_THRESHOLD * 3)
2231 #endif
2233 /* Threshold at which FFT should be used to do an NxN -> 2N multiply. This
2234 will be a size where FFT is using k=7 or k=8, since an FFT-k used for an
2235 NxN->2N multiply and not recursing into itself is an order
2236 log(2^k)/log(2^(k-2)) algorithm, so it'll be at least k=7 at 1.39 which
2237 is the first better than toom3. */
2238 #ifndef MUL_FFT_THRESHOLD
2239 #define MUL_FFT_THRESHOLD (MUL_FFT_MODF_THRESHOLD * 10)
2240 #endif
2241 #ifndef SQR_FFT_THRESHOLD
2242 #define SQR_FFT_THRESHOLD (SQR_FFT_MODF_THRESHOLD * 10)
2243 #endif
2245 /* Table of thresholds for successive modF FFT "k"s. The first entry is
2246 where FFT_FIRST_K+1 should be used, the second FFT_FIRST_K+2,
2247 etc. See mpn_fft_best_k(). */
2248 #ifndef MUL_FFT_TABLE
2249 #define MUL_FFT_TABLE \
2250 { MUL_TOOM33_THRESHOLD * 4, /* k=5 */ \
2251 MUL_TOOM33_THRESHOLD * 8, /* k=6 */ \
2252 MUL_TOOM33_THRESHOLD * 16, /* k=7 */ \
2253 MUL_TOOM33_THRESHOLD * 32, /* k=8 */ \
2254 MUL_TOOM33_THRESHOLD * 96, /* k=9 */ \
2255 MUL_TOOM33_THRESHOLD * 288, /* k=10 */ \
2256     0 }
2257 #endif
2258 #ifndef SQR_FFT_TABLE
2259 #define SQR_FFT_TABLE \
2260 { SQR_TOOM3_THRESHOLD * 4, /* k=5 */ \
2261 SQR_TOOM3_THRESHOLD * 8, /* k=6 */ \
2262 SQR_TOOM3_THRESHOLD * 16, /* k=7 */ \
2263 SQR_TOOM3_THRESHOLD * 32, /* k=8 */ \
2264 SQR_TOOM3_THRESHOLD * 96, /* k=9 */ \
2265 SQR_TOOM3_THRESHOLD * 288, /* k=10 */ \
2266     0 }
2267 #endif
2269 struct fft_table_nk
2270 {
2271   unsigned int n:27;
2272   unsigned int k:5;
2273 };
2275 #ifndef FFT_TABLE_ATTRS
2276 #define FFT_TABLE_ATTRS static const
2277 #endif
2279 #define MPN_FFT_TABLE_SIZE 16
2282 #ifndef DC_DIV_QR_THRESHOLD
2283 #define DC_DIV_QR_THRESHOLD (3 * MUL_TOOM22_THRESHOLD)
2284 #endif
2286 #ifndef GET_STR_DC_THRESHOLD
2287 #define GET_STR_DC_THRESHOLD 18
2288 #endif
2290 #ifndef GET_STR_PRECOMPUTE_THRESHOLD
2291 #define GET_STR_PRECOMPUTE_THRESHOLD 35
2292 #endif
2294 #ifndef SET_STR_DC_THRESHOLD
2295 #define SET_STR_DC_THRESHOLD 750
2296 #endif
2298 #ifndef SET_STR_PRECOMPUTE_THRESHOLD
2299 #define SET_STR_PRECOMPUTE_THRESHOLD 2000
2300 #endif
2302 #ifndef FAC_ODD_THRESHOLD
2303 #define FAC_ODD_THRESHOLD 35
2304 #endif
2306 #ifndef FAC_DSC_THRESHOLD
2307 #define FAC_DSC_THRESHOLD 400
2308 #endif
2310 /* Return non-zero if xp,xsize and yp,ysize overlap.
2311 If xp+xsize<=yp there's no overlap, or if yp+ysize<=xp there's no
2312 overlap. If both these are false, there's an overlap. */
2313 #define MPN_OVERLAP_P(xp, xsize, yp, ysize) \
2314 ((xp) + (xsize) > (yp) && (yp) + (ysize) > (xp))
2315 #define MEM_OVERLAP_P(xp, xsize, yp, ysize) \
2316 ( (char *) (xp) + (xsize) > (char *) (yp) \
2317 && (char *) (yp) + (ysize) > (char *) (xp))
2319 /* Return non-zero if xp,xsize and yp,ysize are either identical or not
2320 overlapping. Return zero if they're partially overlapping. */
2321 #define MPN_SAME_OR_SEPARATE_P(xp, yp, size) \
2322 MPN_SAME_OR_SEPARATE2_P(xp, size, yp, size)
2323 #define MPN_SAME_OR_SEPARATE2_P(xp, xsize, yp, ysize) \
2324 ((xp) == (yp) || ! MPN_OVERLAP_P (xp, xsize, yp, ysize))
2326 /* Return non-zero if dst,dsize and src,ssize are either identical or
2327 overlapping in a way suitable for an incrementing/decrementing algorithm.
2328 Return zero if they're partially overlapping in an unsuitable fashion. */
2329 #define MPN_SAME_OR_INCR2_P(dst, dsize, src, ssize) \
2330 ((dst) <= (src) || ! MPN_OVERLAP_P (dst, dsize, src, ssize))
2331 #define MPN_SAME_OR_INCR_P(dst, src, size) \
2332 MPN_SAME_OR_INCR2_P(dst, size, src, size)
2333 #define MPN_SAME_OR_DECR2_P(dst, dsize, src, ssize) \
2334 ((dst) >= (src) || ! MPN_OVERLAP_P (dst, dsize, src, ssize))
2335 #define MPN_SAME_OR_DECR_P(dst, src, size) \
2336 MPN_SAME_OR_DECR2_P(dst, size, src, size)
2339 /* ASSERT() is a private assertion checking scheme, similar to <assert.h>.
2340 ASSERT() does the check only if WANT_ASSERT is selected, ASSERT_ALWAYS()
2341 does it always. Generally assertions are meant for development, but
2342 might help when looking for a problem later too.
2344 Note that strings shouldn't be used within the ASSERT expression,
2345 eg. ASSERT(strcmp(s,"notgood")!=0), since the quotes upset the "expr"
2346 used in the !HAVE_STRINGIZE case (ie. K&R). */
2348 #ifdef __LINE__
2349 #define ASSERT_LINE __LINE__
2350 #else
2351 #define ASSERT_LINE -1
2352 #endif
2354 #ifdef __FILE__
2355 #define ASSERT_FILE __FILE__
2356 #else
2357 #define ASSERT_FILE ""
2358 #endif
2360 __GMP_DECLSPEC void __gmp_assert_header (const char *, int);
2361 __GMP_DECLSPEC void __gmp_assert_fail (const char *, int, const char *) ATTRIBUTE_NORETURN;
2363 #if HAVE_STRINGIZE
2364 #define ASSERT_FAIL(expr) __gmp_assert_fail (ASSERT_FILE, ASSERT_LINE, #expr)
2365 #else
2366 #define ASSERT_FAIL(expr) __gmp_assert_fail (ASSERT_FILE, ASSERT_LINE, "expr")
2367 #endif
2369 #define ASSERT_ALWAYS(expr) \
2370 do { \
2371 if (UNLIKELY (!(expr))) \
2372 ASSERT_FAIL (expr); \
2373 } while (0)
2375 #if WANT_ASSERT
2376 #define ASSERT(expr) ASSERT_ALWAYS (expr)
2377 #else
2378 #define ASSERT(expr) do {} while (0)
2379 #endif
2382 /* ASSERT_CARRY checks the expression is non-zero, and ASSERT_NOCARRY checks
2383 that it's zero. In both cases if assertion checking is disabled the
2384 expression is still evaluated. These macros are meant for use with
2385 routines like mpn_add_n() where the return value represents a carry or
2386 whatever that should or shouldn't occur in some context. For example,
2387 ASSERT_NOCARRY (mpn_add_n (rp, s1p, s2p, size)); */
2388 #if WANT_ASSERT
2389 #define ASSERT_CARRY(expr) ASSERT_ALWAYS ((expr) != 0)
2390 #define ASSERT_NOCARRY(expr) ASSERT_ALWAYS ((expr) == 0)
2391 #else
2392 #define ASSERT_CARRY(expr) (expr)
2393 #define ASSERT_NOCARRY(expr) (expr)
2394 #endif
2397 /* ASSERT_CODE includes code when assertion checking is wanted. This is the
2398 same as writing "#if WANT_ASSERT", but more compact. */
2399 #if WANT_ASSERT
2400 #define ASSERT_CODE(expr) expr
2401 #else
2402 #define ASSERT_CODE(expr)
2403 #endif
2406 /* Test that an mpq_t is in fully canonical form. This can be used as
2407 protection on routines like mpq_equal which give wrong results on
2408 non-canonical inputs. */
2409 #if WANT_ASSERT
2410 #define ASSERT_MPQ_CANONICAL(q) \
2411 do { \
2412 ASSERT (q->_mp_den._mp_size > 0); \
2413     if (q->_mp_num._mp_size == 0)                              \
2414       {                                                        \
2415         /* zero should be 0/1 */                               \
2416         ASSERT (mpz_cmp_ui (mpq_denref(q), 1L) == 0);          \
2417       }                                                        \
2418     else                                                       \
2419       {                                                        \
2420         /* no common factors */                                \
2421         mpz_t __g;                                             \
2422         mpz_init (__g);                                        \
2423         mpz_gcd (__g, mpq_numref(q), mpq_denref(q));           \
2424         ASSERT (mpz_cmp_ui (__g, 1) == 0);                     \
2425         mpz_clear (__g);                                       \
2426       }                                                        \
2427   } while (0)
2428 #else
2429 #define ASSERT_MPQ_CANONICAL(q) do {} while (0)
2430 #endif
2432 /* Check that the nail parts are zero. */
2433 #define ASSERT_ALWAYS_LIMB(limb) \
2434 do { \
2435 mp_limb_t __nail = (limb) & GMP_NAIL_MASK; \
2436 ASSERT_ALWAYS (__nail == 0); \
2437 } while (0)
2438 #define ASSERT_ALWAYS_MPN(ptr, size) \
2439 do { \
2440 /* let whole loop go dead when no nails */ \
2441     if (GMP_NAIL_BITS != 0)                                    \
2442       {                                                        \
2443         mp_size_t __i;                                         \
2444         for (__i = 0; __i < (size); __i++)                     \
2445           ASSERT_ALWAYS_LIMB ((ptr)[__i]);                     \
2446       }                                                        \
2447   } while (0)
2448 #if WANT_ASSERT
2449 #define ASSERT_LIMB(limb) ASSERT_ALWAYS_LIMB (limb)
2450 #define ASSERT_MPN(ptr, size) ASSERT_ALWAYS_MPN (ptr, size)
2451 #else
2452 #define ASSERT_LIMB(limb) do {} while (0)
2453 #define ASSERT_MPN(ptr, size) do {} while (0)
2454 #endif
2457 /* Assert that an mpn region {ptr,size} is zero, or non-zero.
2458 size==0 is allowed, and in that case {ptr,size} considered to be zero. */
2459 #if WANT_ASSERT
2460 #define ASSERT_MPN_ZERO_P(ptr,size) \
2461 do { \
2462 mp_size_t __i; \
2463 ASSERT ((size) >= 0); \
2464 for (__i = 0; __i < (size); __i++) \
2465 ASSERT ((ptr)[__i] == 0); \
2466 } while (0)
2467 #define ASSERT_MPN_NONZERO_P(ptr,size) \
2468 do { \
2469 mp_size_t __i; \
2470 int __nonzero = 0; \
2471 ASSERT ((size) >= 0); \
2472 for (__i = 0; __i < (size); __i++) \
2473       if ((ptr)[__i] != 0)                                     \
2474         {                                                      \
2475           __nonzero = 1;                                       \
2476           break;                                               \
2477         }                                                      \
2478     ASSERT (__nonzero);                                        \
2479 } while (0)
2480 #else
2481 #define ASSERT_MPN_ZERO_P(ptr,size) do {} while (0)
2482 #define ASSERT_MPN_NONZERO_P(ptr,size) do {} while (0)
2483 #endif
2486 #if ! HAVE_NATIVE_mpn_com
2487 #undef mpn_com
2488 #define mpn_com(d,s,n) \
2489 do { \
2490 mp_ptr __d = (d); \
2491 mp_srcptr __s = (s); \
2492 mp_size_t __n = (n); \
2493 ASSERT (__n >= 1); \
2494 ASSERT (MPN_SAME_OR_SEPARATE_P (__d, __s, __n)); \
2495 do \
2496 *__d++ = (~ *__s++) & GMP_NUMB_MASK; \
2497 while (--__n); \
2498 } while (0)
2499 #endif
2501 #define MPN_LOGOPS_N_INLINE(rp, up, vp, n, operation) \
2502 do { \
2503 mp_srcptr __up = (up); \
2504 mp_srcptr __vp = (vp); \
2505 mp_ptr __rp = (rp); \
2506 mp_size_t __n = (n); \
2507 mp_limb_t __a, __b; \
2508 ASSERT (__n > 0); \
2509 ASSERT (MPN_SAME_OR_SEPARATE_P (__rp, __up, __n)); \
2510 ASSERT (MPN_SAME_OR_SEPARATE_P (__rp, __vp, __n)); \
2511 __up += __n; \
2512 __vp += __n; \
2513 __rp += __n; \
2514 __n = -__n; \
2515 do { \
2516 __a = __up[__n]; \
2517 __b = __vp[__n]; \
2518 __rp[__n] = operation; \
2519 } while (++__n); \
2520 } while (0)
2523 #if ! HAVE_NATIVE_mpn_and_n
2524 #undef mpn_and_n
2525 #define mpn_and_n(rp, up, vp, n) \
2526 MPN_LOGOPS_N_INLINE (rp, up, vp, n, __a & __b)
2527 #endif
2529 #if ! HAVE_NATIVE_mpn_andn_n
2530 #undef mpn_andn_n
2531 #define mpn_andn_n(rp, up, vp, n) \
2532 MPN_LOGOPS_N_INLINE (rp, up, vp, n, __a & ~__b)
2533 #endif
2535 #if ! HAVE_NATIVE_mpn_nand_n
2536 #undef mpn_nand_n
2537 #define mpn_nand_n(rp, up, vp, n) \
2538 MPN_LOGOPS_N_INLINE (rp, up, vp, n, ~(__a & __b) & GMP_NUMB_MASK)
2539 #endif
2541 #if ! HAVE_NATIVE_mpn_ior_n
2542 #undef mpn_ior_n
2543 #define mpn_ior_n(rp, up, vp, n) \
2544 MPN_LOGOPS_N_INLINE (rp, up, vp, n, __a | __b)
2545 #endif
2547 #if ! HAVE_NATIVE_mpn_iorn_n
2548 #undef mpn_iorn_n
2549 #define mpn_iorn_n(rp, up, vp, n) \
2550 MPN_LOGOPS_N_INLINE (rp, up, vp, n, (__a | ~__b) & GMP_NUMB_MASK)
2551 #endif
2553 #if ! HAVE_NATIVE_mpn_nior_n
2554 #undef mpn_nior_n
2555 #define mpn_nior_n(rp, up, vp, n) \
2556 MPN_LOGOPS_N_INLINE (rp, up, vp, n, ~(__a | __b) & GMP_NUMB_MASK)
2557 #endif
2559 #if ! HAVE_NATIVE_mpn_xor_n
2560 #undef mpn_xor_n
2561 #define mpn_xor_n(rp, up, vp, n) \
2562 MPN_LOGOPS_N_INLINE (rp, up, vp, n, __a ^ __b)
2563 #endif
2565 #if ! HAVE_NATIVE_mpn_xnor_n
2566 #undef mpn_xnor_n
2567 #define mpn_xnor_n(rp, up, vp, n) \
2568 MPN_LOGOPS_N_INLINE (rp, up, vp, n, ~(__a ^ __b) & GMP_NUMB_MASK)
2569 #endif
2571 #define mpn_trialdiv __MPN(trialdiv)
2572 __GMP_DECLSPEC mp_limb_t mpn_trialdiv (mp_srcptr, mp_size_t, mp_size_t, int *);
2574 #define mpn_remove __MPN(remove)
2575 __GMP_DECLSPEC mp_bitcnt_t mpn_remove (mp_ptr, mp_size_t *, mp_ptr, mp_size_t, mp_ptr, mp_size_t, mp_bitcnt_t);
2578 /* ADDC_LIMB sets w=x+y and cout to 0 or 1 for a carry from that addition. */
2579 #if GMP_NAIL_BITS == 0
2580 #define ADDC_LIMB(cout, w, x, y) \
2581 do { \
2582 mp_limb_t __x = (x); \
2583 mp_limb_t __y = (y); \
2584 mp_limb_t __w = __x + __y; \
2585 (w) = __w; \
2586 (cout) = __w < __x; \
2587 } while (0)
2588 #else
2589 #define ADDC_LIMB(cout, w, x, y) \
2590 do { \
2591 mp_limb_t __w; \
2592 ASSERT_LIMB (x); \
2593 ASSERT_LIMB (y); \
2594 __w = (x) + (y); \
2595 (w) = __w & GMP_NUMB_MASK; \
2596 (cout) = __w >> GMP_NUMB_BITS; \
2597 } while (0)
2598 #endif
2600 /* SUBC_LIMB sets w=x-y and cout to 0 or 1 for a borrow from that
2601 subtract. */
2602 #if GMP_NAIL_BITS == 0
2603 #define SUBC_LIMB(cout, w, x, y) \
2604 do { \
2605 mp_limb_t __x = (x); \
2606 mp_limb_t __y = (y); \
2607 mp_limb_t __w = __x - __y; \
2608 (w) = __w; \
2609 (cout) = __w > __x; \
2610 } while (0)
2611 #else
2612 #define SUBC_LIMB(cout, w, x, y) \
2613 do { \
2614 mp_limb_t __w = (x) - (y); \
2615 (w) = __w & GMP_NUMB_MASK; \
2616 (cout) = __w >> (GMP_LIMB_BITS-1); \
2617 } while (0)
2618 #endif
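/* The GMP_NAIL_BITS == 0 tests above rely on unsigned wraparound: with
   B = 2^GMP_LIMB_BITS, x + y overflows exactly when the truncated sum is
   smaller than x, and x - y borrows exactly when the truncated difference
   is larger than x.  E.g. on a toy 4-bit limb, 9 + 12 = 21 truncates to 5
   and 5 < 9 flags the carry, while 9 + 3 = 12 >= 9 does not.  */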
2621 /* MPN_INCR_U does {ptr,size} += n, MPN_DECR_U does {ptr,size} -= n, both
2622 expecting no carry (or borrow) from that.
2624 The size parameter is only for the benefit of assertion checking. In a
2625 normal build it's unused and the carry/borrow is just propagated as far
2626 as it needs to go.
2628 On random data, usually only one or two limbs of {ptr,size} get updated,
2629 so there's no need for any sophisticated looping, just something compact
2630 and sensible.
2632 FIXME: Switch all code from mpn_{incr,decr}_u to MPN_{INCR,DECR}_U,
2633 declaring their operand sizes, then remove the former. This is purely
2634 for the benefit of assertion checking. */
2636 #if defined (__GNUC__) && GMP_NAIL_BITS == 0 && ! defined (NO_ASM) \
2637 && (defined(HAVE_HOST_CPU_FAMILY_x86) || defined(HAVE_HOST_CPU_FAMILY_x86_64)) \
2638 && ! WANT_ASSERT
2639 /* Better flags handling than the generic C gives on i386, saving a few
2640 bytes of code and maybe a cycle or two. */
2642 #define MPN_IORD_U(ptr, incr, aors) \
2643 do { \
2644 mp_ptr __ptr_dummy; \
2645     if (__builtin_constant_p (incr) && (incr) == 0)            \
2646       {                                                        \
2647       }                                                        \
2648     else if (__builtin_constant_p (incr) && (incr) == 1)       \
2649       {                                                        \
2650 __asm__ __volatile__ \
2651 ("\n" ASM_L(top) ":\n" \
2652 "\t" aors "\t$1, (%0)\n" \
2653 "\tlea\t%c2(%0), %0\n" \
2654 "\tjc\t" ASM_L(top) \
2655 : "=r" (__ptr_dummy) \
2656 : "0" (ptr), "n" (sizeof(mp_limb_t)) \
2657          : "memory");                                          \
2658       }                                                        \
2659     else                                                       \
2660       {                                                        \
2661         __asm__ __volatile__                                   \
2662 ( aors "\t%2, (%0)\n" \
2663 "\tjnc\t" ASM_L(done) "\n" \
2664 ASM_L(top) ":\n" \
2665 "\t" aors "\t$1, %c3(%0)\n" \
2666 "\tlea\t%c3(%0), %0\n" \
2667 "\tjc\t" ASM_L(top) "\n" \
2668 ASM_L(done) ":\n" \
2669 : "=r" (__ptr_dummy) \
2670 : "0" (ptr), \
2671 "ri" ((mp_limb_t) (incr)), "n" (sizeof(mp_limb_t)) \
2672          : "memory");                                          \
2673       }                                                        \
2674   } while (0)
2676 #if GMP_LIMB_BITS == 32
2677 #define MPN_INCR_U(ptr, size, incr) MPN_IORD_U (ptr, incr, "addl")
2678 #define MPN_DECR_U(ptr, size, incr) MPN_IORD_U (ptr, incr, "subl")
2679 #endif
2680 #if GMP_LIMB_BITS == 64
2681 #define MPN_INCR_U(ptr, size, incr) MPN_IORD_U (ptr, incr, "addq")
2682 #define MPN_DECR_U(ptr, size, incr) MPN_IORD_U (ptr, incr, "subq")
2683 #endif
2684 #define mpn_incr_u(ptr, incr) MPN_INCR_U (ptr, 0, incr)
2685 #define mpn_decr_u(ptr, incr) MPN_DECR_U (ptr, 0, incr)
2686 #endif
2688 #if GMP_NAIL_BITS == 0
2689 #ifndef mpn_incr_u
2690 #define mpn_incr_u(p,incr) \
2691 do { \
2692 mp_limb_t __x; \
2693 mp_ptr __p = (p); \
2694     if (__builtin_constant_p (incr) && (incr) == 1)            \
2695       {                                                        \
2696         while (++(*(__p++)) == 0)                              \
2697           ;                                                    \
2698       }                                                        \
2699     else                                                       \
2700       {                                                        \
2701         __x = *__p + (incr);                                   \
2702         *__p = __x;                                            \
2703         if (__x < (incr))                                      \
2704           while (++(*(++__p)) == 0)                            \
2705             ;                                                  \
2706       }                                                        \
2707   } while (0)
2708 #endif
2709 #ifndef mpn_decr_u
2710 #define mpn_decr_u(p,incr) \
2711 do { \
2712 mp_limb_t __x; \
2713 mp_ptr __p = (p); \
2714     if (__builtin_constant_p (incr) && (incr) == 1)            \
2715       {                                                        \
2716         while ((*(__p++))-- == 0)                              \
2717           ;                                                    \
2718       }                                                        \
2719     else                                                       \
2720       {                                                        \
2721         __x = *__p;                                            \
2722         *__p = __x - (incr);                                   \
2723         if (__x < (incr))                                      \
2724           while ((*(++__p))-- == 0)                            \
2725             ;                                                  \
2726       }                                                        \
2727   } while (0)
2728 #endif
2729 #endif
2731 #if GMP_NAIL_BITS >= 1
2732 #ifndef mpn_incr_u
2733 #define mpn_incr_u(p,incr) \
2734 do { \
2735 mp_limb_t __x; \
2736 mp_ptr __p = (p); \
2737     if (__builtin_constant_p (incr) && (incr) == 1)            \
2738       {                                                        \
2739         do                                                     \
2740           {                                                    \
2741             __x = (*__p + 1) & GMP_NUMB_MASK;                  \
2742             *__p++ = __x;                                      \
2743           }                                                    \
2744         while (__x == 0);                                      \
2745       }                                                        \
2746     else                                                       \
2747       {                                                        \
2748         __x = (*__p + (incr));                                 \
2749         *__p++ = __x & GMP_NUMB_MASK;                          \
2750         if (__x >> GMP_NUMB_BITS != 0)                         \
2751           {                                                    \
2752             do                                                 \
2753               {                                                \
2754                 __x = (*__p + 1) & GMP_NUMB_MASK;              \
2755                 *__p++ = __x;                                  \
2756               }                                                \
2757             while (__x == 0);                                  \
2758           }                                                    \
2759       }                                                        \
2760   } while (0)
2761 #endif
2762 #ifndef mpn_decr_u
2763 #define mpn_decr_u(p,incr) \
2764 do { \
2765 mp_limb_t __x; \
2766 mp_ptr __p = (p); \
2767     if (__builtin_constant_p (incr) && (incr) == 1)            \
2768       {                                                        \
2769         do                                                     \
2770           {                                                    \
2771             __x = *__p;                                        \
2772             *__p++ = (__x - 1) & GMP_NUMB_MASK;                \
2773           }                                                    \
2774         while (__x == 0);                                      \
2775       }                                                        \
2776     else                                                       \
2777       {                                                        \
2778         __x = *__p - (incr);                                   \
2779         *__p++ = __x & GMP_NUMB_MASK;                          \
2780         if (__x >> GMP_NUMB_BITS != 0)                         \
2781           {                                                    \
2782             do                                                 \
2783               {                                                \
2784                 __x = *__p;                                    \
2785                 *__p++ = (__x - 1) & GMP_NUMB_MASK;            \
2786               }                                                \
2787             while (__x == 0);                                  \
2788           }                                                    \
2789       }                                                        \
2790   } while (0)
2791 #endif
2792 #endif
2794 #ifndef MPN_INCR_U
2795 #if WANT_ASSERT
2796 #define MPN_INCR_U(ptr, size, n) \
2797 do { \
2798 ASSERT ((size) >= 1); \
2799 ASSERT_NOCARRY (mpn_add_1 (ptr, ptr, size, n)); \
2800 } while (0)
2801 #else
2802 #define MPN_INCR_U(ptr, size, n) mpn_incr_u (ptr, n)
2803 #endif
2804 #endif
2806 #ifndef MPN_DECR_U
2807 #if WANT_ASSERT
2808 #define MPN_DECR_U(ptr, size, n) \
2809 do { \
2810 ASSERT ((size) >= 1); \
2811 ASSERT_NOCARRY (mpn_sub_1 (ptr, ptr, size, n)); \
2812 } while (0)
2813 #else
2814 #define MPN_DECR_U(ptr, size, n) mpn_decr_u (ptr, n)
2815 #endif
2816 #endif
2819 /* Structure for conversion between internal binary format and strings. */
2820 struct bases
2821 {
2822 /* Number of digits in the conversion base that always fits in an mp_limb_t.
2823 For example, for base 10 on a machine where a mp_limb_t has 32 bits this
2824    is 9, since 10**9 is the largest power of 10 that fits into an mp_limb_t. */
2825 int chars_per_limb;
2827 /* log(2)/log(conversion_base) */
2828 mp_limb_t logb2;
2830 /* log(conversion_base)/log(2) */
2831 mp_limb_t log2b;
2833 /* base**chars_per_limb, i.e. the biggest number that fits a word, built by
2834 factors of base. Exception: For 2, 4, 8, etc, big_base is log2(base),
2835 i.e. the number of bits used to represent each digit in the base. */
2836 mp_limb_t big_base;
2838 /* A GMP_LIMB_BITS bit approximation to 1/big_base, represented as a
2839 fixed-point number. Instead of dividing by big_base an application can
2840 choose to multiply by big_base_inverted. */
2841   mp_limb_t big_base_inverted;
2842 };
2844 #define mp_bases __MPN(bases)
2845 __GMP_DECLSPEC extern const struct bases mp_bases[257];
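/* To give a feel for the table (values sketched from the definitions above,
   not read out of mp_bases[]): with a nail-free 32-bit limb, mp_bases[10]
   has chars_per_limb = 9 and big_base = 1000000000; with a 64-bit limb it
   has chars_per_limb = 19 and big_base = 10^19, the largest power of 10
   below 2^64.  For base 16, big_base is simply 4, the bits per digit.  */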
2848 /* Compute the number of digits in base for nbits bits, making sure the result
2849 is never too small. The two variants of the macro implement the same
2850 function; the GT2 variant below works just for bases > 2. */
2851 #define DIGITS_IN_BASE_FROM_BITS(res, nbits, b) \
2852 do { \
2853 mp_limb_t _ph, _dummy; \
2854 size_t _nbits = (nbits); \
2855 umul_ppmm (_ph, _dummy, mp_bases[b].logb2, _nbits); \
2856 _ph += (_dummy + _nbits < _dummy); \
2857 res = _ph + 1; \
2858 } while (0)
2859 #define DIGITS_IN_BASEGT2_FROM_BITS(res, nbits, b) \
2860 do { \
2861 mp_limb_t _ph, _dummy; \
2862 size_t _nbits = (nbits); \
2863 umul_ppmm (_ph, _dummy, mp_bases[b].logb2 + 1, _nbits); \
2864 res = _ph + 1; \
2865 } while (0)
2867 /* For power of 2 bases this is exact. For other bases the result is either
2868 exact or one too big.
2870 To be exact always it'd be necessary to examine all the limbs of the
2871 operand, since numbers like 100..000 and 99...999 generally differ only
2872 in the lowest limb. It'd be possible to examine just a couple of high
2873 limbs to increase the probability of being exact, but that doesn't seem
2874 worth bothering with. */
2876 #define MPN_SIZEINBASE(result, ptr, size, base) \
2877 do { \
2878 int __lb_base, __cnt; \
2879 size_t __totbits; \
2881 ASSERT ((size) >= 0); \
2882 ASSERT ((base) >= 2); \
2883 ASSERT ((base) < numberof (mp_bases)); \
2885 /* Special case for X == 0. */ \
2886 if ((size) == 0) \
2887 (result) = 1; \
2888     else                                                               \
2889       {                                                                \
2890         /* Calculate the total number of significant bits of X. */     \
2891         count_leading_zeros (__cnt, (ptr)[(size)-1]);                  \
2892         __totbits = (size_t) (size) * GMP_NUMB_BITS - (__cnt - GMP_NAIL_BITS);\
2894         if (POW2_P (base))                                             \
2895           {                                                            \
2896             __lb_base = mp_bases[base].big_base;                       \
2897             (result) = (__totbits + __lb_base - 1) / __lb_base;        \
2898           }                                                            \
2899         else                                                           \
2900           {                                                            \
2901             DIGITS_IN_BASEGT2_FROM_BITS (result, __totbits, base);     \
2902           }                                                            \
2903       }                                                                \
2904   } while (0)
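/* Small worked example of MPN_SIZEINBASE, assuming a nail-free 64-bit limb:
   for a one-limb operand 0x3ff (10 significant bits), base 16 takes the
   power-of-2 branch and gives (10 + 4 - 1) / 4 = 3 digits, which is exact
   ("3ff"); base 10 goes through DIGITS_IN_BASEGT2_FROM_BITS and gives 4 for
   the true value 1023, exact here, though in general it may be one too big. */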
2906 #define MPN_SIZEINBASE_2EXP(result, ptr, size, base2exp) \
2907 do { \
2908 int __cnt; \
2909 mp_bitcnt_t __totbits; \
2910 ASSERT ((size) > 0); \
2911 ASSERT ((ptr)[(size)-1] != 0); \
2912 count_leading_zeros (__cnt, (ptr)[(size)-1]); \
2913 __totbits = (mp_bitcnt_t) (size) * GMP_NUMB_BITS - (__cnt - GMP_NAIL_BITS); \
2914 (result) = (__totbits + (base2exp)-1) / (base2exp); \
2915 } while (0)
2918 /* bit count to limb count, rounding up */
2919 #define BITS_TO_LIMBS(n) (((n) + (GMP_NUMB_BITS - 1)) / GMP_NUMB_BITS)
2921 /* MPN_SET_UI sets an mpn (ptr, cnt) to given ui. MPZ_FAKE_UI creates fake
2922 mpz_t from ui. The zp argument must have room for LIMBS_PER_ULONG limbs
2923 in both cases (LIMBS_PER_ULONG is also defined here.) */
2924 #if BITS_PER_ULONG <= GMP_NUMB_BITS /* need one limb per ulong */
2926 #define LIMBS_PER_ULONG 1
2927 #define MPN_SET_UI(zp, zn, u) \
2928 (zp)[0] = (u); \
2929 (zn) = ((zp)[0] != 0);
2930 #define MPZ_FAKE_UI(z, zp, u) \
2931 (zp)[0] = (u); \
2932 PTR (z) = (zp); \
2933 SIZ (z) = ((zp)[0] != 0); \
2934 ASSERT_CODE (ALLOC (z) = 1);
2936 #else /* need two limbs per ulong */
2938 #define LIMBS_PER_ULONG 2
2939 #define MPN_SET_UI(zp, zn, u) \
2940 (zp)[0] = (u) & GMP_NUMB_MASK; \
2941 (zp)[1] = (u) >> GMP_NUMB_BITS; \
2942 (zn) = ((zp)[1] != 0 ? 2 : (zp)[0] != 0 ? 1 : 0);
2943 #define MPZ_FAKE_UI(z, zp, u) \
2944 (zp)[0] = (u) & GMP_NUMB_MASK; \
2945 (zp)[1] = (u) >> GMP_NUMB_BITS; \
2946 SIZ (z) = ((zp)[1] != 0 ? 2 : (zp)[0] != 0 ? 1 : 0); \
2947 PTR (z) = (zp); \
2948 ASSERT_CODE (ALLOC (z) = 2);
2950 #endif
2953 #if HAVE_HOST_CPU_FAMILY_x86
2954 #define TARGET_REGISTER_STARVED 1
2955 #else
2956 #define TARGET_REGISTER_STARVED 0
2957 #endif
2960 /* LIMB_HIGHBIT_TO_MASK(n) examines the high bit of a limb value and turns 1
2961 or 0 there into a limb 0xFF..FF or 0 respectively.
2963 On most CPUs this is just an arithmetic right shift by GMP_LIMB_BITS-1,
2964 but C99 doesn't guarantee signed right shifts are arithmetic, so we have
2965 a little compile-time test and a fallback to a "? :" form. The latter is
2966 necessary for instance on Cray vector systems.
2968 Recent versions of gcc (eg. 3.3) will in fact optimize a "? :" like this
2969 to an arithmetic right shift anyway, but it's good to get the desired
2970 shift on past versions too (in particular since an important use of
2971 LIMB_HIGHBIT_TO_MASK is in udiv_qrnnd_preinv). */
2973 #define LIMB_HIGHBIT_TO_MASK(n) \
2974 (((mp_limb_signed_t) -1 >> 1) < 0 \
2975 ? (mp_limb_signed_t) (n) >> (GMP_LIMB_BITS - 1) \
2976 : (n) & GMP_LIMB_HIGHBIT ? MP_LIMB_T_MAX : CNST_LIMB(0))
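/* For example, with 64-bit limbs LIMB_HIGHBIT_TO_MASK (CNST_LIMB(1) << 63)
   is the all-ones limb and LIMB_HIGHBIT_TO_MASK (1) is 0, whichever of the
   two branches the compile-time test selects.  */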
2979 /* Use a library function for invert_limb, if available. */
2980 #define mpn_invert_limb __MPN(invert_limb)
2981 __GMP_DECLSPEC mp_limb_t mpn_invert_limb (mp_limb_t) ATTRIBUTE_CONST;
2982 #if ! defined (invert_limb) && HAVE_NATIVE_mpn_invert_limb
2983 #define invert_limb(invxl,xl) \
2984 do { \
2985 (invxl) = mpn_invert_limb (xl); \
2986 } while (0)
2987 #endif
2989 #ifndef invert_limb
2990 #define invert_limb(invxl,xl) \
2991 do { \
2992 mp_limb_t _dummy; \
2993 ASSERT ((xl) != 0); \
2994 udiv_qrnnd (invxl, _dummy, ~(xl), ~CNST_LIMB(0), xl); \
2995 } while (0)
2996 #endif
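/* Spelling out what the generic fallback computes: with B = 2^GMP_LIMB_BITS
   the dividend formed above is ~(xl)*B + (B-1) = B^2 - 1 - xl*B, so

       invxl = floor ((B^2 - 1 - xl*B) / xl) = floor ((B^2 - 1) / xl) - B,

   for xl as used here, i.e. with its high bit set.  E.g. xl = B/2 gives
   2*B - 1 - B = B - 1, the all-ones limb.  */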
2998 #define invert_pi1(dinv, d1, d0) \
2999 do { \
3000 mp_limb_t _v, _p, _t1, _t0, _mask; \
3001 invert_limb (_v, d1); \
3002 _p = (d1) * _v; \
3003 _p += (d0); \
3004     if (_p < (d0))                                             \
3005       {                                                        \
3006         _v--;                                                  \
3007         _mask = -(mp_limb_t) (_p >= (d1));                     \
3008         _p -= (d1);                                            \
3009         _v += _mask;                                           \
3010         _p -= _mask & (d1);                                    \
3011       }                                                        \
3012     umul_ppmm (_t1, _t0, d0, _v);                              \
3013     _p += _t1;                                                 \
3014     if (_p < _t1)                                              \
3015       {                                                        \
3016         _v--;                                                  \
3017         if (UNLIKELY (_p >= (d1)))                             \
3018           {                                                    \
3019             if (_p > (d1) || _t0 >= (d0))                      \
3020               _v--;                                            \
3021           }                                                    \
3022       }                                                        \
3023     (dinv).inv32 = _v;                                         \
3024 } while (0)
3027 /* udiv_qrnnd_preinv -- Based on work by Niels Möller and Torbjörn Granlund.
3028 We write things strangely below, to help gcc. A more straightforward
3029 version:
3030 _r = (nl) - _qh * (d);
3031 _t = _r + (d);
3032        if (_r >= _ql)
3033          {
3034            _qh--;
3035            _r = _t;
3036          }
3037    For one operation shorter critical path, one may want to use this form:
3038        _p = _qh * (d)
3039        _s = (nl) + (d);
3040        _r = (nl) - _p;
3041        _t = _s - _p;
3042        if (_r >= _ql)
3043          {
3044            _qh--;
3045            _r = _t;
3046          }
3047 */
3048 #define udiv_qrnnd_preinv(q, r, nh, nl, d, di) \
3049 do { \
3050 mp_limb_t _qh, _ql, _r, _mask; \
3051 umul_ppmm (_qh, _ql, (nh), (di)); \
3052     if (__builtin_constant_p (nl) && (nl) == 0)                        \
3053       {                                                                \
3054         _qh += (nh) + 1;                                               \
3055         _r = - _qh * (d);                                              \
3056         _mask = -(mp_limb_t) (_r > _ql); /* both > and >= are OK */    \
3057         _qh += _mask;                                                  \
3058         _r += _mask & (d);                                             \
3059       }                                                                \
3060     else                                                               \
3061       {                                                                \
3062         add_ssaaaa (_qh, _ql, _qh, _ql, (nh) + 1, (nl));               \
3063         _r = (nl) - _qh * (d);                                         \
3064         _mask = -(mp_limb_t) (_r > _ql); /* both > and >= are OK */    \
3065         _qh += _mask;                                                  \
3066         _r += _mask & (d);                                             \
3067         if (UNLIKELY (_r >= (d)))                                      \
3068           {                                                            \
3069             _r -= (d);                                                 \
3070             _qh++;                                                     \
3071           }                                                            \
3072       }                                                                \
3073     (r) = _r;                                                          \
3074 (q) = _qh; \
3075 } while (0)
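/* Typical use, as a sketch: for a normalized divisor d (high bit set),
   compute di once with invert_limb (di, d), then each call
       udiv_qrnnd_preinv (q, r, nh, nl, d, di)
   with nh < d delivers nh*B + nl = q*d + r, 0 <= r < d, replacing the
   hardware division by a couple of multiplies and adjustments.  */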
3077 /* Dividing (NH, NL) by D, returning the remainder only. Unlike
3078 udiv_qrnnd_preinv, works also for the case NH == D, where the
3079 quotient doesn't quite fit in a single limb. */
3080 #define udiv_rnnd_preinv(r, nh, nl, d, di) \
3081 do { \
3082 mp_limb_t _qh, _ql, _r, _mask; \
3083 umul_ppmm (_qh, _ql, (nh), (di)); \
3084     if (__builtin_constant_p (nl) && (nl) == 0)                        \
3085       {                                                                \
3086         _r = ~(_qh + (nh)) * (d);                                      \
3087         _mask = -(mp_limb_t) (_r > _ql); /* both > and >= are OK */    \
3088         _r += _mask & (d);                                             \
3089       }                                                                \
3090     else                                                               \
3091       {                                                                \
3092         add_ssaaaa (_qh, _ql, _qh, _ql, (nh) + 1, (nl));               \
3093         _r = (nl) - _qh * (d);                                         \
3094         _mask = -(mp_limb_t) (_r > _ql); /* both > and >= are OK */    \
3095         _r += _mask & (d);                                             \
3096         if (UNLIKELY (_r >= (d)))                                      \
3097           _r -= (d);                                                   \
3098       }                                                                \
3099     (r) = _r;                                                          \
3100 } while (0)
3102 /* Compute the quotient and remainder for n / d. Requires d
3103    >= B^2 / 2 and n < d B. di is the inverse
3105      floor ((B^3 - 1) / (d0 + d1 B)) - B.
3107    NOTE: Output variables are updated multiple times. Only some inputs
3108    and outputs may overlap.
3109 */
3110 #define udiv_qr_3by2(q, r1, r0, n2, n1, n0, d1, d0, dinv) \
3111 do { \
3112 mp_limb_t _q0, _t1, _t0, _mask; \
3113 umul_ppmm ((q), _q0, (n2), (dinv)); \
3114 add_ssaaaa ((q), _q0, (q), _q0, (n2), (n1)); \
3116 /* Compute the two most significant limbs of n - q'd */ \
3117 (r1) = (n1) - (d1) * (q); \
3118 sub_ddmmss ((r1), (r0), (r1), (n0), (d1), (d0)); \
3119 umul_ppmm (_t1, _t0, (d0), (q)); \
3120 sub_ddmmss ((r1), (r0), (r1), (r0), _t1, _t0); \
3121 (q)++; \
3123 /* Conditionally adjust q and the remainders */ \
3124 _mask = - (mp_limb_t) ((r1) >= _q0); \
3125 (q) += _mask; \
3126 add_ssaaaa ((r1), (r0), (r1), (r0), _mask & (d1), _mask & (d0)); \
3127     if (UNLIKELY ((r1) >= (d1)))                               \
3128       {                                                        \
3129         if ((r1) > (d1) || (r0) >= (d0))                       \
3130           {                                                    \
3131             (q)++;                                             \
3132             sub_ddmmss ((r1), (r0), (r1), (r0), (d1), (d0));   \
3133           }                                                    \
3134       }                                                        \
3135   } while (0)
3137 #ifndef mpn_preinv_divrem_1 /* if not done with cpuvec in a fat binary */
3138 #define mpn_preinv_divrem_1 __MPN(preinv_divrem_1)
3139 __GMP_DECLSPEC mp_limb_t mpn_preinv_divrem_1 (mp_ptr, mp_size_t, mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t, int);
3140 #endif
3143 /* USE_PREINV_DIVREM_1 is whether to use mpn_preinv_divrem_1, as opposed to the
3144 plain mpn_divrem_1. The default is yes, since the few CISC chips where
3145 preinv is not good have defines saying so. */
3146 #ifndef USE_PREINV_DIVREM_1
3147 #define USE_PREINV_DIVREM_1 1
3148 #endif
3150 #if USE_PREINV_DIVREM_1
3151 #define MPN_DIVREM_OR_PREINV_DIVREM_1(qp,xsize,ap,size,d,dinv,shift) \
3152 mpn_preinv_divrem_1 (qp, xsize, ap, size, d, dinv, shift)
3153 #else
3154 #define MPN_DIVREM_OR_PREINV_DIVREM_1(qp,xsize,ap,size,d,dinv,shift) \
3155 mpn_divrem_1 (qp, xsize, ap, size, d)
3156 #endif
3158 #ifndef PREINV_MOD_1_TO_MOD_1_THRESHOLD
3159 #define PREINV_MOD_1_TO_MOD_1_THRESHOLD 10
3160 #endif
3162 /* This selection may seem backwards. The reason mpn_mod_1 typically takes
3163 over for larger sizes is that it uses the mod_1_1 function. */
3164 #define MPN_MOD_OR_PREINV_MOD_1(src,size,divisor,inverse) \
3165 (BELOW_THRESHOLD (size, PREINV_MOD_1_TO_MOD_1_THRESHOLD) \
3166 ? mpn_preinv_mod_1 (src, size, divisor, inverse) \
3167 : mpn_mod_1 (src, size, divisor))
3170 #ifndef mpn_mod_34lsub1 /* if not done with cpuvec in a fat binary */
3171 #define mpn_mod_34lsub1 __MPN(mod_34lsub1)
3172 __GMP_DECLSPEC mp_limb_t mpn_mod_34lsub1 (mp_srcptr, mp_size_t) __GMP_ATTRIBUTE_PURE;
3173 #endif
3176 /* DIVEXACT_1_THRESHOLD is at what size to use mpn_divexact_1, as opposed to
3177 plain mpn_divrem_1. Likewise BMOD_1_TO_MOD_1_THRESHOLD for
3178 mpn_modexact_1_odd against plain mpn_mod_1. On most CPUs divexact and
3179 modexact are faster at all sizes, so the defaults are 0. Those CPUs
3180 where this is not right have a tuned threshold. */
3181 #ifndef DIVEXACT_1_THRESHOLD
3182 #define DIVEXACT_1_THRESHOLD 0
3183 #endif
3184 #ifndef BMOD_1_TO_MOD_1_THRESHOLD
3185 #define BMOD_1_TO_MOD_1_THRESHOLD 10
3186 #endif
3188 #ifndef mpn_divexact_1 /* if not done with cpuvec in a fat binary */
3189 #define mpn_divexact_1 __MPN(divexact_1)
3190 __GMP_DECLSPEC void mpn_divexact_1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t);
3191 #endif
3193 #define MPN_DIVREM_OR_DIVEXACT_1(rp, up, n, d) \
3194 do { \
3195 if (BELOW_THRESHOLD (n, DIVEXACT_1_THRESHOLD)) \
3196 ASSERT_NOCARRY (mpn_divrem_1 (rp, (mp_size_t) 0, up, n, d)); \
3197 else \
3198 { \
3199 ASSERT (mpn_mod_1 (up, n, d) == 0); \
3200 mpn_divexact_1 (rp, up, n, d); \
3201 } \
3202 } while (0)
3204 #ifndef mpn_modexact_1c_odd /* if not done with cpuvec in a fat binary */
3205 #define mpn_modexact_1c_odd __MPN(modexact_1c_odd)
3206 __GMP_DECLSPEC mp_limb_t mpn_modexact_1c_odd (mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t) __GMP_ATTRIBUTE_PURE;
3207 #endif
3209 #if HAVE_NATIVE_mpn_modexact_1_odd
3210 #define mpn_modexact_1_odd __MPN(modexact_1_odd)
3211 __GMP_DECLSPEC mp_limb_t mpn_modexact_1_odd (mp_srcptr, mp_size_t, mp_limb_t) __GMP_ATTRIBUTE_PURE;
3212 #else
3213 #define mpn_modexact_1_odd(src,size,divisor) \
3214 mpn_modexact_1c_odd (src, size, divisor, CNST_LIMB(0))
3215 #endif
3217 #define MPN_MOD_OR_MODEXACT_1_ODD(src,size,divisor) \
3218 (BELOW_THRESHOLD (size, BMOD_1_TO_MOD_1_THRESHOLD) \
3219 ? mpn_modexact_1_odd (src, size, divisor) \
3220 : mpn_mod_1 (src, size, divisor))
3222 /* binvert_limb() sets inv to the multiplicative inverse of n modulo
3223 2^GMP_NUMB_BITS, ie. satisfying inv*n == 1 mod 2^GMP_NUMB_BITS.
3224 n must be odd (otherwise such an inverse doesn't exist).
3226 This is not to be confused with invert_limb(), which is completely
3227 different.
3229 The table lookup gives an inverse with the low 8 bits valid, and each
3230 multiply step doubles the number of bits. See Jebelean "An algorithm for
3231 exact division" end of section 4 (reference in gmp.texi).
3233 Possible enhancement: Could use UHWtype until the last step, if half-size
3234 multiplies are faster (might help under _LONG_LONG_LIMB).
3236 Alternative: As noted in Granlund and Montgomery "Division by Invariant
3237 Integers using Multiplication" (reference in gmp.texi), n itself gives a
3238 3-bit inverse immediately, and could be used instead of a table lookup.
3239 A 4-bit inverse can be obtained effectively from xoring bits 1 and 2 into
3240 bit 3, for instance with (((n + 2) & 4) << 1) ^ n. */
3242 #define binvert_limb_table __gmp_binvert_limb_table
3243 __GMP_DECLSPEC extern const unsigned char binvert_limb_table[128];
3245 #define binvert_limb(inv,n) \
3246 do { \
3247 mp_limb_t __n = (n); \
3248 mp_limb_t __inv; \
3249 ASSERT ((__n & 1) == 1); \
3251 __inv = binvert_limb_table[(__n/2) & 0x7F]; /* 8 */ \
3252 if (GMP_NUMB_BITS > 8) __inv = 2 * __inv - __inv * __inv * __n; \
3253 if (GMP_NUMB_BITS > 16) __inv = 2 * __inv - __inv * __inv * __n; \
3254 if (GMP_NUMB_BITS > 32) __inv = 2 * __inv - __inv * __inv * __n; \
3256 if (GMP_NUMB_BITS > 64) \
3257 { \
3258 int __invbits = 64; \
3259 do { \
3260 __inv = 2 * __inv - __inv * __inv * __n; \
3261 __invbits *= 2; \
3262 } while (__invbits < GMP_NUMB_BITS); \
3263 } \
3264 \
3265 ASSERT ((__inv * __n & GMP_NUMB_MASK) == 1); \
3266 (inv) = __inv & GMP_NUMB_MASK; \
3267 } while (0)
3268 #define modlimb_invert binvert_limb /* backward compatibility */
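#if 0
/* Editorial sketch (not part of GMP): the same Newton/Hensel iteration as a
   plain 64-bit function, seeded with the 3-bit inverse n itself (n*n == 1
   mod 8 for any odd n) instead of the table lookup; each step doubles the
   number of correct low bits.  */
static unsigned long long
binvert_u64_sketch (unsigned long long n)   /* n must be odd */
{
  unsigned long long inv = n;               /*  3 correct bits */
  inv = 2 * inv - inv * inv * n;            /*  6 bits */
  inv = 2 * inv - inv * inv * n;            /* 12 bits */
  inv = 2 * inv - inv * inv * n;            /* 24 bits */
  inv = 2 * inv - inv * inv * n;            /* 48 bits */
  inv = 2 * inv - inv * inv * n;            /* 96 >= 64 bits */
  return inv;                               /* inv * n == 1 mod 2^64 */
}
#endif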
3270 /* Multiplicative inverse of 3, modulo 2^GMP_NUMB_BITS.
3271 Eg. 0xAAAAAAAB for 32 bits, 0xAAAAAAAAAAAAAAAB for 64 bits.
3272 GMP_NUMB_MAX/3*2+1 is right when GMP_NUMB_BITS is even, but when it's odd
3273 we need to start from GMP_NUMB_MAX>>1. */
3274 #define MODLIMB_INVERSE_3 (((GMP_NUMB_MAX >> (GMP_NUMB_BITS % 2)) / 3) * 2 + 1)
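/* Editorial check (not part of GMP): with GMP_NUMB_BITS == 32 the formula
   gives 0xAAAAAAAB, and 3 * 0xAAAAAAAB = 0x200000001 == 1 mod 2^32; the
   64-bit value 0xAAAAAAAAAAAAAAAB works the same way mod 2^64.  */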
3276 /* ceil(GMP_NUMB_MAX/3) and ceil(2*GMP_NUMB_MAX/3).
3277 These expressions work because GMP_NUMB_MAX%3 != 0 for all GMP_NUMB_BITS. */
3278 #define GMP_NUMB_CEIL_MAX_DIV3 (GMP_NUMB_MAX / 3 + 1)
3279 #define GMP_NUMB_CEIL_2MAX_DIV3 ((GMP_NUMB_MAX>>1) / 3 + 1 + GMP_NUMB_HIGHBIT)
3282 /* Set r to -a mod d. a>=d is allowed. Can give r>d. All should be limbs.
3284 It's not clear whether this is the best way to do this calculation.
3285 Anything congruent to -a would be fine for the one limb congruence
3286 tests. */
3288 #define NEG_MOD(r, a, d) \
3289 do { \
3290 ASSERT ((d) != 0); \
3291 ASSERT_LIMB (a); \
3292 ASSERT_LIMB (d); \
3294 if ((a) <= (d)) \
3295 { \
3296 /* small a is reasonably likely */ \
3297 (r) = (d) - (a); \
3298 } \
3299 else \
3300 { \
3301 unsigned __twos; \
3302 mp_limb_t __dnorm; \
3303 count_leading_zeros (__twos, d); \
3304 __twos -= GMP_NAIL_BITS; \
3305 __dnorm = (d) << __twos; \
3306 (r) = ((a) <= __dnorm ? __dnorm : 2*__dnorm) - (a); \
3307 } \
3308 \
3309 ASSERT_LIMB (r); \
3310 } while (0)
3312 /* A bit mask of all the least significant zero bits of n, or -1 if n==0. */
3313 #define LOW_ZEROS_MASK(n) (((n) & -(n)) - 1)
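/* Editorial example (not part of GMP): for n = 0x18 (binary 11000),
   n & -n = 0x8 and LOW_ZEROS_MASK(n) = 0x7, covering the three low zero
   bits; for n == 0 the result is all ones.  */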
3316 /* ULONG_PARITY sets "p" to 1 if there's an odd number of 1 bits in "n", or
3317 to 0 if there's an even number. "n" should be an unsigned long and "p"
3318 an int. */
3320 #if defined (__GNUC__) && ! defined (NO_ASM) && HAVE_HOST_CPU_alpha_CIX
3321 #define ULONG_PARITY(p, n) \
3322 do { \
3323 int __p; \
3324 __asm__ ("ctpop %1, %0" : "=r" (__p) : "r" (n)); \
3325 (p) = __p & 1; \
3326 } while (0)
3327 #endif
3329 /* Cray intrinsic _popcnt. */
3330 #ifdef _CRAY
3331 #define ULONG_PARITY(p, n) \
3332 do { \
3333 (p) = _popcnt (n) & 1; \
3334 } while (0)
3335 #endif
3337 #if defined (__GNUC__) && ! defined (__INTEL_COMPILER) \
3338 && ! defined (NO_ASM) && defined (__ia64)
3339 /* unsigned long is either 32 or 64 bits depending on the ABI, zero extend
3340 to a 64 bit unsigned long long for popcnt */
3341 #define ULONG_PARITY(p, n) \
3342 do { \
3343 unsigned long long __n = (unsigned long) (n); \
3344 int __p; \
3345 __asm__ ("popcnt %0 = %1" : "=r" (__p) : "r" (__n)); \
3346 (p) = __p & 1; \
3347 } while (0)
3348 #endif
3350 #if defined (__GNUC__) && ! defined (__INTEL_COMPILER) \
3351 && ! defined (NO_ASM) && HAVE_HOST_CPU_FAMILY_x86
3352 #if __GMP_GNUC_PREREQ (3,1)
3353 #define __GMP_qm "=Qm"
3354 #define __GMP_q "=Q"
3355 #else
3356 #define __GMP_qm "=qm"
3357 #define __GMP_q "=q"
3358 #endif
3359 #define ULONG_PARITY(p, n) \
3360 do { \
3361 char __p; \
3362 unsigned long __n = (n); \
3363 __n ^= (__n >> 16); \
3364 __asm__ ("xorb %h1, %b1\n\t" \
3365 "setpo %0" \
3366 : __GMP_qm (__p), __GMP_q (__n) \
3367 : "1" (__n)); \
3368 (p) = __p; \
3369 } while (0)
3370 #endif
3372 #if ! defined (ULONG_PARITY)
3373 #define ULONG_PARITY(p, n) \
3374 do { \
3375 unsigned long __n = (n); \
3376 int __p = 0; \
3377 do \
3378 { \
3379 __p ^= 0x96696996L >> (__n & 0x1F); \
3380 __n >>= 5; \
3381 } \
3382 while (__n != 0); \
3384 (p) = __p & 1; \
3385 } while (0)
3386 #endif
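/* Editorial note (not part of GMP): in the generic version above, bit i of
   the constant 0x96696996 is the parity of the 5-bit index i, so each pass
   folds in the parity of the low five bits of __n and the final "& 1"
   keeps only the accumulated parity bit.  */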
3389 /* 3 cycles on 604 or 750 since shifts and rlwimi's can pair. gcc (as of
3390 version 3.1 at least) doesn't seem to know how to generate rlwimi for
3391 anything other than bit-fields, so use "asm". */
3392 #if defined (__GNUC__) && ! defined (NO_ASM) \
3393 && HAVE_HOST_CPU_FAMILY_powerpc && GMP_LIMB_BITS == 32
3394 #define BSWAP_LIMB(dst, src) \
3395 do { \
3396 mp_limb_t __bswapl_src = (src); \
3397 mp_limb_t __tmp1 = __bswapl_src >> 24; /* low byte */ \
3398 mp_limb_t __tmp2 = __bswapl_src << 24; /* high byte */ \
3399 __asm__ ("rlwimi %0, %2, 24, 16, 23" /* 2nd low */ \
3400 : "=r" (__tmp1) : "0" (__tmp1), "r" (__bswapl_src)); \
3401 __asm__ ("rlwimi %0, %2, 8, 8, 15" /* 3nd high */ \
3402 : "=r" (__tmp2) : "0" (__tmp2), "r" (__bswapl_src)); \
3403 (dst) = __tmp1 | __tmp2; /* whole */ \
3404 } while (0)
3405 #endif
3407 /* bswap is available on i486 and up and is fast. A combination rorw $8 /
3408 roll $16 / rorw $8 is used in glibc for plain i386 (and in the linux
3409 kernel with xchgb instead of rorw), but this is not done here, because
3410 i386 means generic x86 and mixing word and dword operations will cause
3411 partial register stalls on P6 chips. */
3412 #if defined (__GNUC__) && ! defined (NO_ASM) \
3413 && HAVE_HOST_CPU_FAMILY_x86 && ! HAVE_HOST_CPU_i386 \
3414 && GMP_LIMB_BITS == 32
3415 #define BSWAP_LIMB(dst, src) \
3416 do { \
3417 __asm__ ("bswap %0" : "=r" (dst) : "0" (src)); \
3418 } while (0)
3419 #endif
3421 #if defined (__GNUC__) && ! defined (NO_ASM) \
3422 && defined (__amd64__) && GMP_LIMB_BITS == 64
3423 #define BSWAP_LIMB(dst, src) \
3424 do { \
3425 __asm__ ("bswap %q0" : "=r" (dst) : "0" (src)); \
3426 } while (0)
3427 #endif
3429 #if defined (__GNUC__) && ! defined (__INTEL_COMPILER) \
3430 && ! defined (NO_ASM) && defined (__ia64) && GMP_LIMB_BITS == 64
3431 #define BSWAP_LIMB(dst, src) \
3432 do { \
3433 __asm__ ("mux1 %0 = %1, @rev" : "=r" (dst) : "r" (src)); \
3434 } while (0)
3435 #endif
3437 /* As per glibc. */
3438 #if defined (__GNUC__) && ! defined (NO_ASM) \
3439 && HAVE_HOST_CPU_FAMILY_m68k && GMP_LIMB_BITS == 32
3440 #define BSWAP_LIMB(dst, src) \
3441 do { \
3442 mp_limb_t __bswapl_src = (src); \
3443 __asm__ ("ror%.w %#8, %0\n\t" \
3444 "swap %0\n\t" \
3445 "ror%.w %#8, %0" \
3446 : "=d" (dst) \
3447 : "0" (__bswapl_src)); \
3448 } while (0)
3449 #endif
3451 #if ! defined (BSWAP_LIMB)
3452 #if GMP_LIMB_BITS == 8
3453 #define BSWAP_LIMB(dst, src) \
3454 do { (dst) = (src); } while (0)
3455 #endif
3456 #if GMP_LIMB_BITS == 16
3457 #define BSWAP_LIMB(dst, src) \
3458 do { \
3459 (dst) = ((src) << 8) + ((src) >> 8); \
3460 } while (0)
3461 #endif
3462 #if GMP_LIMB_BITS == 32
3463 #define BSWAP_LIMB(dst, src) \
3464 do { \
3465 (dst) = \
3466 ((src) << 24) \
3467 + (((src) & 0xFF00) << 8) \
3468 + (((src) >> 8) & 0xFF00) \
3469 + ((src) >> 24); \
3470 } while (0)
3471 #endif
3472 #if GMP_LIMB_BITS == 64
3473 #define BSWAP_LIMB(dst, src) \
3474 do { \
3475 (dst) = \
3476 ((src) << 56) \
3477 + (((src) & 0xFF00) << 40) \
3478 + (((src) & 0xFF0000) << 24) \
3479 + (((src) & 0xFF000000) << 8) \
3480 + (((src) >> 8) & 0xFF000000) \
3481 + (((src) >> 24) & 0xFF0000) \
3482 + (((src) >> 40) & 0xFF00) \
3483 + ((src) >> 56); \
3484 } while (0)
3485 #endif
3486 #endif
3488 #if ! defined (BSWAP_LIMB)
3489 #define BSWAP_LIMB(dst, src) \
3490 do { \
3491 mp_limb_t __bswapl_src = (src); \
3492 mp_limb_t __dstl = 0; \
3493 int __i; \
3494 for (__i = 0; __i < GMP_LIMB_BYTES; __i++) \
3495 { \
3496 __dstl = (__dstl << 8) | (__bswapl_src & 0xFF); \
3497 __bswapl_src >>= 8; \
3498 } \
3499 (dst) = __dstl; \
3500 } while (0)
3501 #endif
3504 /* Apparently lwbrx might be slow on some PowerPC chips, so restrict it to
3505 those we know are fast. */
3506 #if defined (__GNUC__) && ! defined (NO_ASM) \
3507 && GMP_LIMB_BITS == 32 && HAVE_LIMB_BIG_ENDIAN \
3508 && (HAVE_HOST_CPU_powerpc604 \
3509 || HAVE_HOST_CPU_powerpc604e \
3510 || HAVE_HOST_CPU_powerpc750 \
3511 || HAVE_HOST_CPU_powerpc7400)
3512 #define BSWAP_LIMB_FETCH(limb, src) \
3513 do { \
3514 mp_srcptr __blf_src = (src); \
3515 mp_limb_t __limb; \
3516 __asm__ ("lwbrx %0, 0, %1" \
3517 : "=r" (__limb) \
3518 : "r" (__blf_src), \
3519 "m" (*__blf_src)); \
3520 (limb) = __limb; \
3521 } while (0)
3522 #endif
3524 #if ! defined (BSWAP_LIMB_FETCH)
3525 #define BSWAP_LIMB_FETCH(limb, src) BSWAP_LIMB (limb, *(src))
3526 #endif
3529 /* On the same basis that lwbrx might be slow, restrict stwbrx to those we
3530 know are fast. FIXME: Is this necessary? */
3531 #if defined (__GNUC__) && ! defined (NO_ASM) \
3532 && GMP_LIMB_BITS == 32 && HAVE_LIMB_BIG_ENDIAN \
3533 && (HAVE_HOST_CPU_powerpc604 \
3534 || HAVE_HOST_CPU_powerpc604e \
3535 || HAVE_HOST_CPU_powerpc750 \
3536 || HAVE_HOST_CPU_powerpc7400)
3537 #define BSWAP_LIMB_STORE(dst, limb) \
3538 do { \
3539 mp_ptr __dst = (dst); \
3540 mp_limb_t __limb = (limb); \
3541 __asm__ ("stwbrx %1, 0, %2" \
3542 : "=m" (*__dst) \
3543 : "r" (__limb), \
3544 "r" (__dst)); \
3545 } while (0)
3546 #endif
3548 #if ! defined (BSWAP_LIMB_STORE)
3549 #define BSWAP_LIMB_STORE(dst, limb) BSWAP_LIMB (*(dst), limb)
3550 #endif
3553 /* Byte swap limbs from {src,size} and store at {dst,size}. */
3554 #define MPN_BSWAP(dst, src, size) \
3555 do { \
3556 mp_ptr __dst = (dst); \
3557 mp_srcptr __src = (src); \
3558 mp_size_t __size = (size); \
3559 mp_size_t __i; \
3560 ASSERT ((size) >= 0); \
3561 ASSERT (MPN_SAME_OR_SEPARATE_P (dst, src, size)); \
3562 CRAY_Pragma ("_CRI ivdep"); \
3563 for (__i = 0; __i < __size; __i++) \
3564 { \
3565 BSWAP_LIMB_FETCH (*__dst, __src); \
3566 __dst++; \
3567 __src++; \
3568 } \
3569 } while (0)
3571 /* Byte swap limbs from {src,size} and store in reverse order at {dst,size}. */
3572 #define MPN_BSWAP_REVERSE(dst, src, size) \
3573 do { \
3574 mp_ptr __dst = (dst); \
3575 mp_size_t __size = (size); \
3576 mp_srcptr __src = (src) + __size - 1; \
3577 mp_size_t __i; \
3578 ASSERT ((size) >= 0); \
3579 ASSERT (! MPN_OVERLAP_P (dst, size, src, size)); \
3580 CRAY_Pragma ("_CRI ivdep"); \
3581 for (__i = 0; __i < __size; __i++) \
3582 { \
3583 BSWAP_LIMB_FETCH (*__dst, __src); \
3584 __dst++; \
3585 __src--; \
3586 } \
3587 } while (0)
3590 /* No processor claiming to be SPARC v9 compliant seems to
3591 implement the POPC instruction. Disable pattern for now. */
3592 #if 0
3593 #if defined __GNUC__ && defined __sparc_v9__ && GMP_LIMB_BITS == 64
3594 #define popc_limb(result, input) \
3595 do { \
3596 DItype __res; \
3597 __asm__ ("popc %1,%0" : "=r" (result) : "rI" (input)); \
3598 } while (0)
3599 #endif
3600 #endif
3602 #if defined (__GNUC__) && ! defined (NO_ASM) && HAVE_HOST_CPU_alpha_CIX
3603 #define popc_limb(result, input) \
3604 do { \
3605 __asm__ ("ctpop %1, %0" : "=r" (result) : "r" (input)); \
3606 } while (0)
3607 #endif
3609 /* Cray intrinsic. */
3610 #ifdef _CRAY
3611 #define popc_limb(result, input) \
3612 do { \
3613 (result) = _popcnt (input); \
3614 } while (0)
3615 #endif
3617 #if defined (__GNUC__) && ! defined (__INTEL_COMPILER) \
3618 && ! defined (NO_ASM) && defined (__ia64) && GMP_LIMB_BITS == 64
3619 #define popc_limb(result, input) \
3620 do { \
3621 __asm__ ("popcnt %0 = %1" : "=r" (result) : "r" (input)); \
3622 } while (0)
3623 #endif
3625 /* Cool population count of an mp_limb_t.
3626 You have to figure out how this works; we won't tell you!
3628 The constants could also be expressed as:
3629 0x55... = [2^N / 3] = [(2^N-1)/3]
3630 0x33... = [2^N / 5] = [(2^N-1)/5]
3631 0x0f... = [2^N / 17] = [(2^N-1)/17]
3632 (N is GMP_LIMB_BITS, [] denotes truncation.) */
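/* Editorial note (not part of GMP): the definitions below are the standard
   SWAR population count; the 0x55.. step leaves a 2-bit count per bit pair,
   the 0x33.. step sums pairs into nibbles, the 0x0f.. step sums nibbles
   into bytes, and the remaining shifts fold the byte counts together.  */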
3634 #if ! defined (popc_limb) && GMP_LIMB_BITS == 8
3635 #define popc_limb(result, input) \
3636 do { \
3637 mp_limb_t __x = (input); \
3638 __x -= (__x >> 1) & MP_LIMB_T_MAX/3; \
3639 __x = ((__x >> 2) & MP_LIMB_T_MAX/5) + (__x & MP_LIMB_T_MAX/5); \
3640 __x = ((__x >> 4) + __x); \
3641 (result) = __x & 0x0f; \
3642 } while (0)
3643 #endif
3645 #if ! defined (popc_limb) && GMP_LIMB_BITS == 16
3646 #define popc_limb(result, input) \
3647 do { \
3648 mp_limb_t __x = (input); \
3649 __x -= (__x >> 1) & MP_LIMB_T_MAX/3; \
3650 __x = ((__x >> 2) & MP_LIMB_T_MAX/5) + (__x & MP_LIMB_T_MAX/5); \
3651 __x = ((__x >> 4) + __x) & MP_LIMB_T_MAX/17; \
3652 __x = ((__x >> 8) + __x); \
3653 (result) = __x & 0xff; \
3654 } while (0)
3655 #endif
3657 #if ! defined (popc_limb) && GMP_LIMB_BITS == 32
3658 #define popc_limb(result, input) \
3659 do { \
3660 mp_limb_t __x = (input); \
3661 __x -= (__x >> 1) & MP_LIMB_T_MAX/3; \
3662 __x = ((__x >> 2) & MP_LIMB_T_MAX/5) + (__x & MP_LIMB_T_MAX/5); \
3663 __x = ((__x >> 4) + __x) & MP_LIMB_T_MAX/17; \
3664 __x = ((__x >> 8) + __x); \
3665 __x = ((__x >> 16) + __x); \
3666 (result) = __x & 0xff; \
3667 } while (0)
3668 #endif
3670 #if ! defined (popc_limb) && GMP_LIMB_BITS == 64
3671 #define popc_limb(result, input) \
3672 do { \
3673 mp_limb_t __x = (input); \
3674 __x -= (__x >> 1) & MP_LIMB_T_MAX/3; \
3675 __x = ((__x >> 2) & MP_LIMB_T_MAX/5) + (__x & MP_LIMB_T_MAX/5); \
3676 __x = ((__x >> 4) + __x) & MP_LIMB_T_MAX/17; \
3677 __x = ((__x >> 8) + __x); \
3678 __x = ((__x >> 16) + __x); \
3679 __x = ((__x >> 32) + __x); \
3680 (result) = __x & 0xff; \
3681 } while (0)
3682 #endif
3685 /* Define stuff for longlong.h. */
3686 #if HAVE_ATTRIBUTE_MODE
3687 typedef unsigned int UQItype __attribute__ ((mode (QI)));
3688 typedef int SItype __attribute__ ((mode (SI)));
3689 typedef unsigned int USItype __attribute__ ((mode (SI)));
3690 typedef int DItype __attribute__ ((mode (DI)));
3691 typedef unsigned int UDItype __attribute__ ((mode (DI)));
3692 #else
3693 typedef unsigned char UQItype;
3694 typedef long SItype;
3695 typedef unsigned long USItype;
3696 #if HAVE_LONG_LONG
3697 typedef long long int DItype;
3698 typedef unsigned long long int UDItype;
3699 #else /* Assume `long' gives us a wide enough type. Needed for hppa2.0w. */
3700 typedef long int DItype;
3701 typedef unsigned long int UDItype;
3702 #endif
3703 #endif
3705 typedef mp_limb_t UWtype;
3706 typedef unsigned int UHWtype;
3707 #define W_TYPE_SIZE GMP_LIMB_BITS
3709 /* Define ieee_double_extract and _GMP_IEEE_FLOATS.
3711 Bit field packing is "implementation defined" according to C99, which
3712 leaves us at the compiler's mercy here. For some systems packing is
3713 defined in the ABI (eg. x86). In any case so far it seems universal that
3714 little endian systems pack from low to high, and big endian from high to
3715 low within the given type.
3717 Within the fields we rely on the integer endianness being the same as the
3718 float endianness, this is true everywhere we know of and it'd be a fairly
3719 strange system that did anything else. */
3721 #if HAVE_DOUBLE_IEEE_LITTLE_SWAPPED
3722 #define _GMP_IEEE_FLOATS 1
3723 union ieee_double_extract
3724 {
3725 struct
3726 {
3727 gmp_uint_least32_t manh:20;
3728 gmp_uint_least32_t exp:11;
3729 gmp_uint_least32_t sig:1;
3730 gmp_uint_least32_t manl:32;
3731 } s;
3732 double d;
3733 };
3734 #endif
3736 #if HAVE_DOUBLE_IEEE_LITTLE_ENDIAN
3737 #define _GMP_IEEE_FLOATS 1
3738 union ieee_double_extract
3739 {
3740 struct
3741 {
3742 gmp_uint_least32_t manl:32;
3743 gmp_uint_least32_t manh:20;
3744 gmp_uint_least32_t exp:11;
3745 gmp_uint_least32_t sig:1;
3746 } s;
3747 double d;
3748 };
3749 #endif
3751 #if HAVE_DOUBLE_IEEE_BIG_ENDIAN
3752 #define _GMP_IEEE_FLOATS 1
3753 union ieee_double_extract
3754 {
3755 struct
3756 {
3757 gmp_uint_least32_t sig:1;
3758 gmp_uint_least32_t exp:11;
3759 gmp_uint_least32_t manh:20;
3760 gmp_uint_least32_t manl:32;
3761 } s;
3762 double d;
3763 };
3764 #endif
3766 #if HAVE_DOUBLE_VAX_D
3767 union double_extract
3768 {
3769 struct
3770 {
3771 gmp_uint_least32_t man3:7; /* highest 7 bits */
3772 gmp_uint_least32_t exp:8; /* excess-128 exponent */
3773 gmp_uint_least32_t sig:1;
3774 gmp_uint_least32_t man2:16;
3775 gmp_uint_least32_t man1:16;
3776 gmp_uint_least32_t man0:16; /* lowest 16 bits */
3777 } s;
3778 double d;
3779 };
3780 #endif
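#if 0
/* Editorial example (not part of GMP): with any of the IEEE layouts above,
   decomposing 1.0 yields a zero sign and mantissa and the biased exponent
   1023.  */
static void
example_extract_one (void)
{
  union ieee_double_extract u;
  u.d = 1.0;
  ASSERT (u.s.sig == 0);
  ASSERT (u.s.exp == 1023);
  ASSERT (u.s.manh == 0 && u.s.manl == 0);
}
#endif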
3782 /* Use (4.0 * ...) instead of (2.0 * ...) to work around buggy compilers
3783 that don't convert ulong->double correctly (eg. SunOS 4 native cc). */
3784 #define MP_BASE_AS_DOUBLE (4.0 * ((mp_limb_t) 1 << (GMP_NUMB_BITS - 2)))
3785 /* Maximum number of limbs it will take to store any `double'.
3786 We assume doubles have 53 mantissa bits. */
3787 #define LIMBS_PER_DOUBLE ((53 + GMP_NUMB_BITS - 2) / GMP_NUMB_BITS + 1)
3789 __GMP_DECLSPEC int __gmp_extract_double (mp_ptr, double);
3791 #define mpn_get_d __gmpn_get_d
3792 __GMP_DECLSPEC double mpn_get_d (mp_srcptr, mp_size_t, mp_size_t, long) __GMP_ATTRIBUTE_PURE;
3795 /* DOUBLE_NAN_INF_ACTION executes code a_nan if x is a NaN, or executes
3796 a_inf if x is an infinity. Both are considered unlikely values, for
3797 branch prediction. */
3799 #if _GMP_IEEE_FLOATS
3800 #define DOUBLE_NAN_INF_ACTION(x, a_nan, a_inf) \
3801 do { \
3802 union ieee_double_extract u; \
3803 u.d = (x); \
3804 if (UNLIKELY (u.s.exp == 0x7FF)) \
3805 { \
3806 if (u.s.manl == 0 && u.s.manh == 0) \
3807 { a_inf; } \
3808 else \
3809 { a_nan; } \
3810 } \
3811 } while (0)
3812 #endif
3814 #if HAVE_DOUBLE_VAX_D || HAVE_DOUBLE_VAX_G || HAVE_DOUBLE_CRAY_CFP
3815 /* no nans or infs in these formats */
3816 #define DOUBLE_NAN_INF_ACTION(x, a_nan, a_inf) \
3817 do { } while (0)
3818 #endif
3820 #ifndef DOUBLE_NAN_INF_ACTION
3821 /* Unknown format, try something generic.
3822 NaN should be "unordered", so x!=x.
3823 Inf should be bigger than DBL_MAX. */
3824 #define DOUBLE_NAN_INF_ACTION(x, a_nan, a_inf) \
3825 do { \
3827 if (UNLIKELY ((x) != (x))) \
3828 { a_nan; } \
3829 else if (UNLIKELY ((x) > DBL_MAX || (x) < -DBL_MAX)) \
3830 { a_inf; } \
3832 } while (0)
3833 #endif
3835 /* On m68k, x86 and amd64, gcc (and maybe other compilers) can hold doubles
3836 in the coprocessor, which means a bigger exponent range than normal, and
3837 depending on the rounding mode, a bigger mantissa than normal. (See
3838 "Disappointments" in the gcc manual.) FORCE_DOUBLE stores and fetches
3839 "d" through memory to force any rounding and overflows to occur.
3841 On amd64, and on x86s with SSE2, gcc (depending on options) uses the xmm
3842 registers, where there's no such extra precision and no need for the
3843 FORCE_DOUBLE. We don't bother to detect this since the present uses for
3844 FORCE_DOUBLE are only in test programs and default generic C code.
3846 Not quite sure that an "automatic volatile" will use memory, but it does
3847 in gcc. An asm("":"=m"(d):"0"(d)) can't be used to trick gcc, since
3848 apparently matching operands like "0" are only allowed on a register
3849 output. gcc 3.4 warns about this, though in fact it and past versions
3850 seem to put the operand through memory as hoped. */
3852 #if (HAVE_HOST_CPU_FAMILY_m68k || HAVE_HOST_CPU_FAMILY_x86 \
3853 || defined (__amd64__))
3854 #define FORCE_DOUBLE(d) \
3855 do { volatile double __gmp_force = (d); (d) = __gmp_force; } while (0)
3856 #else
3857 #define FORCE_DOUBLE(d) do { } while (0)
3858 #endif
3861 __GMP_DECLSPEC extern const unsigned char __gmp_digit_value_tab[];
3863 __GMP_DECLSPEC extern int __gmp_junk;
3864 __GMP_DECLSPEC extern const int __gmp_0;
3865 __GMP_DECLSPEC void __gmp_exception (int) ATTRIBUTE_NORETURN;
3866 __GMP_DECLSPEC void __gmp_divide_by_zero (void) ATTRIBUTE_NORETURN;
3867 __GMP_DECLSPEC void __gmp_sqrt_of_negative (void) ATTRIBUTE_NORETURN;
3868 __GMP_DECLSPEC void __gmp_invalid_operation (void) ATTRIBUTE_NORETURN;
3869 #define GMP_ERROR(code) __gmp_exception (code)
3870 #define DIVIDE_BY_ZERO __gmp_divide_by_zero ()
3871 #define SQRT_OF_NEGATIVE __gmp_sqrt_of_negative ()
3873 #if defined _LONG_LONG_LIMB
3874 #define CNST_LIMB(C) ((mp_limb_t) C##LL)
3875 #else /* not _LONG_LONG_LIMB */
3876 #define CNST_LIMB(C) ((mp_limb_t) C##L)
3877 #endif /* _LONG_LONG_LIMB */
3879 /* Stuff used by mpn/generic/perfsqr.c and mpz/prime_p.c */
3880 #if GMP_NUMB_BITS == 2
3881 #define PP 0x3 /* 3 */
3882 #define PP_FIRST_OMITTED 5
3883 #endif
3884 #if GMP_NUMB_BITS == 4
3885 #define PP 0xF /* 3 x 5 */
3886 #define PP_FIRST_OMITTED 7
3887 #endif
3888 #if GMP_NUMB_BITS == 8
3889 #define PP 0x69 /* 3 x 5 x 7 */
3890 #define PP_FIRST_OMITTED 11
3891 #endif
3892 #if GMP_NUMB_BITS == 16
3893 #define PP 0x3AA7 /* 3 x 5 x 7 x 11 x 13 */
3894 #define PP_FIRST_OMITTED 17
3895 #endif
3896 #if GMP_NUMB_BITS == 32
3897 #define PP 0xC0CFD797L /* 3 x 5 x 7 x 11 x ... x 29 */
3898 #define PP_INVERTED 0x53E5645CL
3899 #define PP_FIRST_OMITTED 31
3900 #endif
3901 #if GMP_NUMB_BITS == 64
3902 #define PP CNST_LIMB(0xE221F97C30E94E1D) /* 3 x 5 x 7 x 11 x ... x 53 */
3903 #define PP_INVERTED CNST_LIMB(0x21CFE6CFC938B36B)
3904 #define PP_FIRST_OMITTED 59
3905 #endif
3906 #ifndef PP_FIRST_OMITTED
3907 #define PP_FIRST_OMITTED 3
3908 #endif
3910 /* BIT1 means a result value in bit 1 (second least significant bit), with a
3911 zero bit representing +1 and a one bit representing -1. Bits other than
3912 bit 1 are garbage. These are meant to be kept in "int"s, and casts are
3913 used to ensure the expressions are "int"s even if a and/or b might be
3914 other types.
3916 JACOBI_TWOS_U_BIT1 and JACOBI_RECIP_UU_BIT1 are used in mpn_jacobi_base
3917 and their speed is important. Expressions are used rather than
3918 conditionals to accumulate sign changes, which effectively means XORs
3919 instead of conditional JUMPs. */
3921 /* (a/0), with a signed; is 1 if a=+/-1, 0 otherwise */
3922 #define JACOBI_S0(a) (((a) == 1) | ((a) == -1))
3924 /* (a/0), with a unsigned; is 1 if a=+/-1, 0 otherwise */
3925 #define JACOBI_U0(a) ((a) == 1)
3927 /* FIXME: JACOBI_LS0 and JACOBI_0LS are the same, so delete one and
3928 come up with a better name. */
3930 /* (a/0), with a given by low and size;
3931 is 1 if a=+/-1, 0 otherwise */
3932 #define JACOBI_LS0(alow,asize) \
3933 (((asize) == 1 || (asize) == -1) && (alow) == 1)
3935 /* (a/0), with a an mpz_t;
3936 fetch of low limb always valid, even if size is zero */
3937 #define JACOBI_Z0(a) JACOBI_LS0 (PTR(a)[0], SIZ(a))
3939 /* (0/b), with b unsigned; is 1 if b=1, 0 otherwise */
3940 #define JACOBI_0U(b) ((b) == 1)
3942 /* (0/b), with b unsigned; is 1 if b=+/-1, 0 otherwise */
3943 #define JACOBI_0S(b) ((b) == 1 || (b) == -1)
3945 /* (0/b), with b given by low and size; is 1 if b=+/-1, 0 otherwise */
3946 #define JACOBI_0LS(blow,bsize) \
3947 (((bsize) == 1 || (bsize) == -1) && (blow) == 1)
3949 /* Convert a bit1 to +1 or -1. */
3950 #define JACOBI_BIT1_TO_PN(result_bit1) \
3951 (1 - ((int) (result_bit1) & 2))
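/* Editorial example (not part of GMP): in the bit 1 convention a clear bit
   means +1 and a set bit means -1, so JACOBI_BIT1_TO_PN(0) == 1 and
   JACOBI_BIT1_TO_PN(2) == -1; all other bits are ignored.  */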
3953 /* (2/b), with b unsigned and odd;
3954 is (-1)^((b^2-1)/8) which is 1 if b==1,7mod8 or -1 if b==3,5mod8 and
3955 hence obtained from (b>>1)^b */
3956 #define JACOBI_TWO_U_BIT1(b) \
3957 ((int) (((b) >> 1) ^ (b)))
3959 /* (2/b)^twos, with b unsigned and odd */
3960 #define JACOBI_TWOS_U_BIT1(twos, b) \
3961 ((int) ((twos) << 1) & JACOBI_TWO_U_BIT1 (b))
3963 /* (2/b)^twos, with b unsigned and odd */
3964 #define JACOBI_TWOS_U(twos, b) \
3965 (JACOBI_BIT1_TO_PN (JACOBI_TWOS_U_BIT1 (twos, b)))
3967 /* (-1/b), with b odd (signed or unsigned);
3968 is (-1)^((b-1)/2) */
3969 #define JACOBI_N1B_BIT1(b) \
3970 ((int) (b))
3972 /* (a/b) effect due to sign of a: signed/unsigned, b odd;
3973 is (-1/b) if a<0, or +1 if a>=0 */
3974 #define JACOBI_ASGN_SU_BIT1(a, b) \
3975 ((((a) < 0) << 1) & JACOBI_N1B_BIT1(b))
3977 /* (a/b) effect due to sign of b: signed/signed;
3978 is -1 if a and b both negative, +1 otherwise */
3979 #define JACOBI_BSGN_SS_BIT1(a, b) \
3980 ((((a)<0) & ((b)<0)) << 1)
3982 /* (a/b) effect due to sign of b: signed/mpz;
3983 is -1 if a and b both negative, +1 otherwise */
3984 #define JACOBI_BSGN_SZ_BIT1(a, b) \
3985 JACOBI_BSGN_SS_BIT1 (a, SIZ(b))
3987 /* (a/b) effect due to sign of b: mpz/signed;
3988 is -1 if a and b both negative, +1 otherwise */
3989 #define JACOBI_BSGN_ZS_BIT1(a, b) \
3990 JACOBI_BSGN_SZ_BIT1 (b, a)
3992 /* (a/b) reciprocity to switch to (b/a), a,b both unsigned and odd;
3993 is (-1)^((a-1)*(b-1)/4), which means +1 if either a,b==1mod4, or -1 if
3994 both a,b==3mod4, achieved in bit 1 by a&b. No ASSERT()s about a,b odd
3995 because this is used in a couple of places with only bit 1 of a or b
3996 valid. */
3997 #define JACOBI_RECIP_UU_BIT1(a, b) \
3998 ((int) ((a) & (b)))
4000 /* Strip low zero limbs from {b_ptr,b_size} by incrementing b_ptr and
4001 decrementing b_size. b_low should be b_ptr[0] on entry, and will be
4002 updated for the new b_ptr. result_bit1 is updated according to the
4003 factors of 2 stripped, as per (a/2). */
4004 #define JACOBI_STRIP_LOW_ZEROS(result_bit1, a, b_ptr, b_size, b_low) \
4005 do { \
4006 ASSERT ((b_size) >= 1); \
4007 ASSERT ((b_low) == (b_ptr)[0]); \
4009 while (UNLIKELY ((b_low) == 0)) \
4010 { \
4011 (b_size)--; \
4012 ASSERT ((b_size) >= 1); \
4013 (b_ptr)++; \
4014 (b_low) = *(b_ptr); \
4015 \
4016 ASSERT (((a) & 1) != 0); \
4017 if ((GMP_NUMB_BITS % 2) == 1) \
4018 (result_bit1) ^= JACOBI_TWO_U_BIT1(a); \
4019 } \
4020 } while (0)
4022 /* Set a_rem to {a_ptr,a_size} reduced modulo b, either using mod_1 or
4023 modexact_1_odd, but in either case leaving a_rem<b. b must be odd and
4024 unsigned. modexact_1_odd effectively calculates -a mod b, and
4025 result_bit1 is adjusted for the factor of -1.
4027 The way mpn_modexact_1_odd sometimes bases its remainder on a_size and
4028 sometimes on a_size-1 means if GMP_NUMB_BITS is odd we can't know what
4029 factor to introduce into result_bit1, so for that case use mpn_mod_1
4030 unconditionally.
4032 FIXME: mpn_modexact_1_odd is more efficient, so some way to get it used
4033 for odd GMP_NUMB_BITS would be good. Perhaps it could mung its result,
4034 or not skip a divide step, or something. */
4036 #define JACOBI_MOD_OR_MODEXACT_1_ODD(result_bit1, a_rem, a_ptr, a_size, b) \
4037 do { \
4038 mp_srcptr __a_ptr = (a_ptr); \
4039 mp_size_t __a_size = (a_size); \
4040 mp_limb_t __b = (b); \
4042 ASSERT (__a_size >= 1); \
4043 ASSERT (__b & 1); \
4045 if ((GMP_NUMB_BITS % 2) != 0 \
4046 || ABOVE_THRESHOLD (__a_size, BMOD_1_TO_MOD_1_THRESHOLD)) \
4047 { \
4048 (a_rem) = mpn_mod_1 (__a_ptr, __a_size, __b); \
4049 } \
4050 else \
4051 { \
4052 (result_bit1) ^= JACOBI_N1B_BIT1 (__b); \
4053 (a_rem) = mpn_modexact_1_odd (__a_ptr, __a_size, __b); \
4054 } \
4055 } while (0)
4057 /* State for the Jacobi computation using Lehmer. */
4058 #define jacobi_table __gmp_jacobi_table
4059 __GMP_DECLSPEC extern const unsigned char jacobi_table[208];
4061 /* Bit layout for the initial state. b must be odd.
4063 3 2 1 0
4064 +--+--+--+--+
4065 |a1|a0|b1| s|
4066 +--+--+--+--+
4067 */
4069 static inline unsigned
4070 mpn_jacobi_init (unsigned a, unsigned b, unsigned s)
4071 {
4072 ASSERT (b & 1);
4073 ASSERT (s <= 1);
4074 return ((a & 3) << 2) + (b & 2) + s;
4075 }
4077 static inline int
4078 mpn_jacobi_finish (unsigned bits)
4079 {
4080 /* (a, b) = (1,0) or (0,1) */
4081 ASSERT ( (bits & 14) == 0);
4083 return 1-2*(bits & 1);
4084 }
4086 static inline unsigned
4087 mpn_jacobi_update (unsigned bits, unsigned denominator, unsigned q)
4088 {
4089 /* FIXME: Could halve table size by not including the e bit in the
4090  * index, and instead xor when updating. Then the lookup would be
4091  * like
4092  *
4093  * bits ^= table[((bits & 30) << 2) + (denominator << 2) + q];
4094  */
4096 ASSERT (bits < 26);
4097 ASSERT (denominator < 2);
4098 ASSERT (q < 4);
4100 /* For almost all calls, denominator is constant and quite often q
4101 is constant too. So use addition rather than or, so the compiler
4102 can put the constant part into the offset of an indexed
4103 addressing instruction.
4105 With constant denominator, the below table lookup is compiled to
4107 C Constant q = 1, constant denominator = 1
4108 movzbl table+5(%eax,8), %eax
4110 or
4112 C q in %edx, constant denominator = 1
4113 movzbl table+4(%edx,%eax,8), %eax
4115 One could maintain the state preshifted 3 bits, to save a shift
4116 here, but at least on x86, that's no real saving.
4117 */
4118 return bits = jacobi_table[(bits << 3) + (denominator << 2) + q];
4119 }
4121 /* Matrix multiplication */
4122 #define mpn_matrix22_mul __MPN(matrix22_mul)
4123 __GMP_DECLSPEC void mpn_matrix22_mul (mp_ptr, mp_ptr, mp_ptr, mp_ptr, mp_size_t, mp_srcptr, mp_srcptr, mp_srcptr, mp_srcptr, mp_size_t, mp_ptr);
4124 #define mpn_matrix22_mul_strassen __MPN(matrix22_mul_strassen)
4125 __GMP_DECLSPEC void mpn_matrix22_mul_strassen (mp_ptr, mp_ptr, mp_ptr, mp_ptr, mp_size_t, mp_srcptr, mp_srcptr, mp_srcptr, mp_srcptr, mp_size_t, mp_ptr);
4126 #define mpn_matrix22_mul_itch __MPN(matrix22_mul_itch)
4127 __GMP_DECLSPEC mp_size_t mpn_matrix22_mul_itch (mp_size_t, mp_size_t) ATTRIBUTE_CONST;
4129 #ifndef MATRIX22_STRASSEN_THRESHOLD
4130 #define MATRIX22_STRASSEN_THRESHOLD 30
4131 #endif
4133 /* HGCD definitions */
4135 /* Extract one numb, shifting count bits left
4136 ________ ________
4137 |___xh___||___xl___|
4138 |____r____|
4139 >count <
4141 The count includes any nail bits, so it should work fine if count
4142 is computed using count_leading_zeros. If GMP_NAIL_BITS > 0, all of
4143 xh, xl and r include nail bits. Must have 0 < count < GMP_LIMB_BITS.
4145 FIXME: Omit masking with GMP_NUMB_MASK, and let callers do that for
4146 those calls where the count high bits of xh may be non-zero.
4147 */
4149 #define MPN_EXTRACT_NUMB(count, xh, xl) \
4150 ((((xh) << ((count) - GMP_NAIL_BITS)) & GMP_NUMB_MASK) | \
4151 ((xl) >> (GMP_LIMB_BITS - (count))))
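/* Editorial example (not part of GMP): with 64-bit limbs and no nails,
   MPN_EXTRACT_NUMB(8, xh, xl) gives (xh << 8) | (xl >> 56), i.e. the
   64-bit window starting 8 bits below the top of xh.  */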
4154 /* The matrix non-negative M = (u, u'; v,v') keeps track of the
4155 reduction (a;b) = M (alpha; beta) where alpha, beta are smaller
4156 than a, b. The determinant must always be one, so that M has an
4157 inverse (v', -u'; -v, u). Elements always fit in GMP_NUMB_BITS - 1
4158 bits. */
4159 struct hgcd_matrix1
4160 {
4161 mp_limb_t u[2][2];
4162 };
4164 #define mpn_hgcd2 __MPN (hgcd2)
4165 __GMP_DECLSPEC int mpn_hgcd2 (mp_limb_t, mp_limb_t, mp_limb_t, mp_limb_t, struct hgcd_matrix1 *);
4167 #define mpn_hgcd_mul_matrix1_vector __MPN (hgcd_mul_matrix1_vector)
4168 __GMP_DECLSPEC mp_size_t mpn_hgcd_mul_matrix1_vector (const struct hgcd_matrix1 *, mp_ptr, mp_srcptr, mp_ptr, mp_size_t);
4170 #define mpn_matrix22_mul1_inverse_vector __MPN (matrix22_mul1_inverse_vector)
4171 __GMP_DECLSPEC mp_size_t mpn_matrix22_mul1_inverse_vector (const struct hgcd_matrix1 *, mp_ptr, mp_srcptr, mp_ptr, mp_size_t);
4173 #define mpn_hgcd2_jacobi __MPN (hgcd2_jacobi)
4174 __GMP_DECLSPEC int mpn_hgcd2_jacobi (mp_limb_t, mp_limb_t, mp_limb_t, mp_limb_t, struct hgcd_matrix1 *, unsigned *);
4176 struct hgcd_matrix
4177 {
4178 mp_size_t alloc; /* for sanity checking only */
4179 mp_size_t n;
4180 mp_ptr p[2][2];
4181 };
4183 #define MPN_HGCD_MATRIX_INIT_ITCH(n) (4 * ((n+1)/2 + 1))
4185 #define mpn_hgcd_matrix_init __MPN (hgcd_matrix_init)
4186 __GMP_DECLSPEC void mpn_hgcd_matrix_init (struct hgcd_matrix *, mp_size_t, mp_ptr);
4188 #define mpn_hgcd_matrix_update_q __MPN (hgcd_matrix_update_q)
4189 __GMP_DECLSPEC void mpn_hgcd_matrix_update_q (struct hgcd_matrix *, mp_srcptr, mp_size_t, unsigned, mp_ptr);
4191 #define mpn_hgcd_matrix_mul_1 __MPN (hgcd_matrix_mul_1)
4192 __GMP_DECLSPEC void mpn_hgcd_matrix_mul_1 (struct hgcd_matrix *, const struct hgcd_matrix1 *, mp_ptr);
4194 #define mpn_hgcd_matrix_mul __MPN (hgcd_matrix_mul)
4195 __GMP_DECLSPEC void mpn_hgcd_matrix_mul (struct hgcd_matrix *, const struct hgcd_matrix *, mp_ptr);
4197 #define mpn_hgcd_matrix_adjust __MPN (hgcd_matrix_adjust)
4198 __GMP_DECLSPEC mp_size_t mpn_hgcd_matrix_adjust (const struct hgcd_matrix *, mp_size_t, mp_ptr, mp_ptr, mp_size_t, mp_ptr);
4200 #define mpn_hgcd_step __MPN(hgcd_step)
4201 __GMP_DECLSPEC mp_size_t mpn_hgcd_step (mp_size_t, mp_ptr, mp_ptr, mp_size_t, struct hgcd_matrix *, mp_ptr);
4203 #define mpn_hgcd_reduce __MPN(hgcd_reduce)
4204 __GMP_DECLSPEC mp_size_t mpn_hgcd_reduce (struct hgcd_matrix *, mp_ptr, mp_ptr, mp_size_t, mp_size_t, mp_ptr);
4206 #define mpn_hgcd_reduce_itch __MPN(hgcd_reduce_itch)
4207 __GMP_DECLSPEC mp_size_t mpn_hgcd_reduce_itch (mp_size_t, mp_size_t) ATTRIBUTE_CONST;
4209 #define mpn_hgcd_itch __MPN (hgcd_itch)
4210 __GMP_DECLSPEC mp_size_t mpn_hgcd_itch (mp_size_t) ATTRIBUTE_CONST;
4212 #define mpn_hgcd __MPN (hgcd)
4213 __GMP_DECLSPEC mp_size_t mpn_hgcd (mp_ptr, mp_ptr, mp_size_t, struct hgcd_matrix *, mp_ptr);
4215 #define mpn_hgcd_appr_itch __MPN (hgcd_appr_itch)
4216 __GMP_DECLSPEC mp_size_t mpn_hgcd_appr_itch (mp_size_t) ATTRIBUTE_CONST;
4218 #define mpn_hgcd_appr __MPN (hgcd_appr)
4219 __GMP_DECLSPEC int mpn_hgcd_appr (mp_ptr, mp_ptr, mp_size_t, struct hgcd_matrix *, mp_ptr);
4221 #define mpn_hgcd_jacobi __MPN (hgcd_jacobi)
4222 __GMP_DECLSPEC mp_size_t mpn_hgcd_jacobi (mp_ptr, mp_ptr, mp_size_t, struct hgcd_matrix *, unsigned *, mp_ptr);
4224 typedef void gcd_subdiv_step_hook(void *, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, int);
4226 /* Needs storage for the quotient */
4227 #define MPN_GCD_SUBDIV_STEP_ITCH(n) (n)
4229 #define mpn_gcd_subdiv_step __MPN(gcd_subdiv_step)
4230 __GMP_DECLSPEC mp_size_t mpn_gcd_subdiv_step (mp_ptr, mp_ptr, mp_size_t, mp_size_t, gcd_subdiv_step_hook *, void *, mp_ptr);
4232 struct gcdext_ctx
4233 {
4234 /* Result parameters. */
4235 mp_ptr gp;
4236 mp_size_t gn;
4237 mp_ptr up;
4238 mp_size_t *usize;
4240 /* Cofactors updated in each step. */
4241 mp_size_t un;
4242 mp_ptr u0, u1, tp;
4243 };
4245 #define mpn_gcdext_hook __MPN (gcdext_hook)
4246 gcd_subdiv_step_hook mpn_gcdext_hook;
4248 #define MPN_GCDEXT_LEHMER_N_ITCH(n) (4*(n) + 3)
4250 #define mpn_gcdext_lehmer_n __MPN(gcdext_lehmer_n)
4251 __GMP_DECLSPEC mp_size_t mpn_gcdext_lehmer_n (mp_ptr, mp_ptr, mp_size_t *, mp_ptr, mp_ptr, mp_size_t, mp_ptr);
4253 /* 4*(an + 1) + 4*(bn + 1) + an */
4254 #define MPN_GCDEXT_LEHMER_ITCH(an, bn) (5*(an) + 4*(bn) + 8)
4256 #ifndef HGCD_THRESHOLD
4257 #define HGCD_THRESHOLD 400
4258 #endif
4260 #ifndef HGCD_APPR_THRESHOLD
4261 #define HGCD_APPR_THRESHOLD 400
4262 #endif
4264 #ifndef HGCD_REDUCE_THRESHOLD
4265 #define HGCD_REDUCE_THRESHOLD 1000
4266 #endif
4268 #ifndef GCD_DC_THRESHOLD
4269 #define GCD_DC_THRESHOLD 1000
4270 #endif
4272 #ifndef GCDEXT_DC_THRESHOLD
4273 #define GCDEXT_DC_THRESHOLD 600
4274 #endif
4276 /* Definitions for mpn_set_str and mpn_get_str */
4277 struct powers
4278 {
4279 mp_ptr p; /* actual power value */
4280 mp_size_t n; /* # of limbs at p */
4281 mp_size_t shift; /* weight of lowest limb, in limb base B */
4282 size_t digits_in_base; /* number of corresponding digits */
4283 int base;
4284 };
4285 typedef struct powers powers_t;
4286 #define mpn_dc_set_str_powtab_alloc(n) ((n) + GMP_LIMB_BITS)
4287 #define mpn_dc_set_str_itch(n) ((n) + GMP_LIMB_BITS)
4288 #define mpn_dc_get_str_powtab_alloc(n) ((n) + 2 * GMP_LIMB_BITS)
4289 #define mpn_dc_get_str_itch(n) ((n) + GMP_LIMB_BITS)
4291 #define mpn_dc_set_str __MPN(dc_set_str)
4292 __GMP_DECLSPEC mp_size_t mpn_dc_set_str (mp_ptr, const unsigned char *, size_t, const powers_t *, mp_ptr);
4293 #define mpn_bc_set_str __MPN(bc_set_str)
4294 __GMP_DECLSPEC mp_size_t mpn_bc_set_str (mp_ptr, const unsigned char *, size_t, int);
4295 #define mpn_set_str_compute_powtab __MPN(set_str_compute_powtab)
4296 __GMP_DECLSPEC void mpn_set_str_compute_powtab (powers_t *, mp_ptr, mp_size_t, int);
4299 /* __GMPF_BITS_TO_PREC applies a minimum 53 bits, rounds upwards to a whole
4300 limb and adds an extra limb. __GMPF_PREC_TO_BITS drops that extra limb,
4301 hence giving back the user's size in bits rounded up. Notice that
4302 converting prec->bits->prec gives an unchanged value. */
4303 #define __GMPF_BITS_TO_PREC(n) \
4304 ((mp_size_t) ((__GMP_MAX (53, n) + 2 * GMP_NUMB_BITS - 1) / GMP_NUMB_BITS))
4305 #define __GMPF_PREC_TO_BITS(n) \
4306 ((mp_bitcnt_t) (n) * GMP_NUMB_BITS - GMP_NUMB_BITS)
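/* Editorial example (not part of GMP): with 64-bit limbs,
   __GMPF_BITS_TO_PREC(100) = (100 + 127) / 64 = 3 limbs, and
   __GMPF_PREC_TO_BITS(3) = 3*64 - 64 = 128 bits, i.e. the requested 100
   bits rounded up to a whole limb with the extra guard limb dropped.  */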
4308 __GMP_DECLSPEC extern mp_size_t __gmp_default_fp_limb_precision;
4310 /* Compute the number of base-b digits corresponding to nlimbs limbs, rounding
4311 down. */
4312 #define DIGITS_IN_BASE_PER_LIMB(res, nlimbs, b) \
4313 do { \
4314 mp_limb_t _ph, _pl; \
4315 umul_ppmm (_ph, _pl, \
4316 mp_bases[b].logb2, GMP_NUMB_BITS * (mp_limb_t) (nlimbs));\
4317 res = _ph; \
4318 } while (0)
4320 /* Compute the number of limbs corresponding to ndigits base-b digits, rounding
4321 up. */
4322 #define LIMBS_PER_DIGIT_IN_BASE(res, ndigits, b) \
4323 do { \
4324 mp_limb_t _ph, _dummy; \
4325 umul_ppmm (_ph, _dummy, mp_bases[b].log2b, (mp_limb_t) (ndigits)); \
4326 res = 8 * _ph / GMP_NUMB_BITS + 2; \
4327 } while (0)
4330 /* Set n to the number of significant digits of an mpf with the given _mp_prec
4331 field, in the given base. This is a rounded up value, designed to ensure
4332 there's enough digits to reproduce all the guaranteed part of the value.
4334 There are prec many limbs, but the high might be only "1" so forget it
4335 and just count prec-1 limbs into chars. +1 rounds that upwards, and a
4336 further +1 is because the limbs usually won't fall on digit boundaries.
4338 FIXME: If base is a power of 2 and the bits per digit divides
4339 GMP_LIMB_BITS then the +2 is unnecessary. This happens always for
4340 base==2, and in base==16 with the current 32 or 64 bit limb sizes. */
4342 #define MPF_SIGNIFICANT_DIGITS(n, base, prec) \
4343 do { \
4344 size_t rawn; \
4345 ASSERT (base >= 2 && base < numberof (mp_bases)); \
4346 DIGITS_IN_BASE_PER_LIMB (rawn, (prec) - 1, base); \
4347 n = rawn + 2; \
4348 } while (0)
4351 /* Decimal point string, from the current C locale. Needs <langinfo.h> for
4352 nl_langinfo and constants, preferably with _GNU_SOURCE defined to get
4353 DECIMAL_POINT from glibc, and needs <locale.h> for localeconv, each under
4354 their respective #if HAVE_FOO_H.
4356 GLIBC recommends nl_langinfo because getting only one facet can be
4357 faster, apparently. */
4359 /* DECIMAL_POINT seems to need _GNU_SOURCE defined to get it from glibc. */
4360 #if HAVE_NL_LANGINFO && defined (DECIMAL_POINT)
4361 #define GMP_DECIMAL_POINT (nl_langinfo (DECIMAL_POINT))
4362 #endif
4363 /* RADIXCHAR is deprecated, still in unix98 or some such. */
4364 #if HAVE_NL_LANGINFO && defined (RADIXCHAR) && ! defined (GMP_DECIMAL_POINT)
4365 #define GMP_DECIMAL_POINT (nl_langinfo (RADIXCHAR))
4366 #endif
4367 /* localeconv is slower since it returns all locale stuff */
4368 #if HAVE_LOCALECONV && ! defined (GMP_DECIMAL_POINT)
4369 #define GMP_DECIMAL_POINT (localeconv()->decimal_point)
4370 #endif
4371 #if ! defined (GMP_DECIMAL_POINT)
4372 #define GMP_DECIMAL_POINT (".")
4373 #endif
4376 #define DOPRNT_CONV_FIXED 1
4377 #define DOPRNT_CONV_SCIENTIFIC 2
4378 #define DOPRNT_CONV_GENERAL 3
4380 #define DOPRNT_JUSTIFY_NONE 0
4381 #define DOPRNT_JUSTIFY_LEFT 1
4382 #define DOPRNT_JUSTIFY_RIGHT 2
4383 #define DOPRNT_JUSTIFY_INTERNAL 3
4385 #define DOPRNT_SHOWBASE_YES 1
4386 #define DOPRNT_SHOWBASE_NO 2
4387 #define DOPRNT_SHOWBASE_NONZERO 3
4389 struct doprnt_params_t {
4390 int base; /* negative for upper case */
4391 int conv; /* choices above */
4392 const char *expfmt; /* exponent format */
4393 int exptimes4; /* exponent multiply by 4 */
4394 char fill; /* character */
4395 int justify; /* choices above */
4396 int prec; /* prec field, or -1 for all digits */
4397 int showbase; /* choices above */
4398 int showpoint; /* if radix point always shown */
4399 int showtrailing; /* if trailing zeros wanted */
4400 char sign; /* '+', ' ', or '\0' */
4401 int width; /* width field */
4402 };
4404 #if _GMP_H_HAVE_VA_LIST
4406 typedef int (*doprnt_format_t) (void *, const char *, va_list);
4407 typedef int (*doprnt_memory_t) (void *, const char *, size_t);
4408 typedef int (*doprnt_reps_t) (void *, int, int);
4409 typedef int (*doprnt_final_t) (void *);
4411 struct doprnt_funs_t {
4412 doprnt_format_t format;
4413 doprnt_memory_t memory;
4414 doprnt_reps_t reps;
4415 doprnt_final_t final; /* NULL if not required */
4416 };
4418 extern const struct doprnt_funs_t __gmp_fprintf_funs;
4419 extern const struct doprnt_funs_t __gmp_sprintf_funs;
4420 extern const struct doprnt_funs_t __gmp_snprintf_funs;
4421 extern const struct doprnt_funs_t __gmp_obstack_printf_funs;
4422 extern const struct doprnt_funs_t __gmp_ostream_funs;
4424 /* "buf" is a __gmp_allocate_func block of "alloc" many bytes. The first
4425 "size" of these have been written. "alloc > size" is maintained, so
4426 there's room to store a '\0' at the end. "result" is where the
4427 application wants the final block pointer. */
4428 struct gmp_asprintf_t {
4429 char **result;
4430 char *buf;
4431 size_t size;
4432 size_t alloc;
4433 };
4435 #define GMP_ASPRINTF_T_INIT(d, output) \
4436 do { \
4437 (d).result = (output); \
4438 (d).alloc = 256; \
4439 (d).buf = (char *) (*__gmp_allocate_func) ((d).alloc); \
4440 (d).size = 0; \
4441 } while (0)
4443 /* If a realloc is necessary, use twice the size actually required, so as to
4444 avoid repeated small reallocs. */
4445 #define GMP_ASPRINTF_T_NEED(d, n) \
4446 do { \
4447 size_t alloc, newsize, newalloc; \
4448 ASSERT ((d)->alloc >= (d)->size + 1); \
4450 alloc = (d)->alloc; \
4451 newsize = (d)->size + (n); \
4452 if (alloc <= newsize) \
4453 { \
4454 newalloc = 2*newsize; \
4455 (d)->alloc = newalloc; \
4456 (d)->buf = __GMP_REALLOCATE_FUNC_TYPE ((d)->buf, \
4457 alloc, newalloc, char); \
4458 } \
4459 } while (0)
4461 __GMP_DECLSPEC int __gmp_asprintf_memory (struct gmp_asprintf_t *, const char *, size_t);
4462 __GMP_DECLSPEC int __gmp_asprintf_reps (struct gmp_asprintf_t *, int, int);
4463 __GMP_DECLSPEC int __gmp_asprintf_final (struct gmp_asprintf_t *);
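#if 0
/* Editorial usage sketch (not part of GMP): the asprintf helpers accumulate
   output in a __gmp_allocate_func block and hand the final buffer back
   through "result"; the exact behaviour of __gmp_asprintf_final is assumed
   from the struct comment above.  */
static void
example_asprintf (char **result)
{
  struct gmp_asprintf_t d;
  GMP_ASPRINTF_T_INIT (d, result);
  __gmp_asprintf_memory (&d, "hello", 5);
  __gmp_asprintf_final (&d);
}
#endif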
4465 /* buf is where to write the next output, and size is how much space is left
4466 there. If the application passed size==0 then that's what we'll have
4467 here, and nothing at all should be written. */
4468 struct gmp_snprintf_t {
4469 char *buf;
4470 size_t size;
4471 };
4473 /* Add the bytes printed by the call to the total retval, or bail out on an
4474 error. */
4475 #define DOPRNT_ACCUMULATE(call) \
4476 do { \
4477 int __ret; \
4478 __ret = call; \
4479 if (__ret == -1) \
4480 goto error; \
4481 retval += __ret; \
4482 } while (0)
4483 #define DOPRNT_ACCUMULATE_FUN(fun, params) \
4484 do { \
4485 ASSERT ((fun) != NULL); \
4486 DOPRNT_ACCUMULATE ((*(fun)) params); \
4487 } while (0)
4489 #define DOPRNT_FORMAT(fmt, ap) \
4490 DOPRNT_ACCUMULATE_FUN (funs->format, (data, fmt, ap))
4491 #define DOPRNT_MEMORY(ptr, len) \
4492 DOPRNT_ACCUMULATE_FUN (funs->memory, (data, ptr, len))
4493 #define DOPRNT_REPS(c, n) \
4494 DOPRNT_ACCUMULATE_FUN (funs->reps, (data, c, n))
4496 #define DOPRNT_STRING(str) DOPRNT_MEMORY (str, strlen (str))
4498 #define DOPRNT_REPS_MAYBE(c, n) \
4499 do { \
4500 if ((n) != 0) \
4501 DOPRNT_REPS (c, n); \
4502 } while (0)
4503 #define DOPRNT_MEMORY_MAYBE(ptr, len) \
4504 do { \
4505 if ((len) != 0) \
4506 DOPRNT_MEMORY (ptr, len); \
4507 } while (0)
4509 __GMP_DECLSPEC int __gmp_doprnt (const struct doprnt_funs_t *, void *, const char *, va_list);
4510 __GMP_DECLSPEC int __gmp_doprnt_integer (const struct doprnt_funs_t *, void *, const struct doprnt_params_t *, const char *);
4512 #define __gmp_doprnt_mpf __gmp_doprnt_mpf2
4513 __GMP_DECLSPEC int __gmp_doprnt_mpf (const struct doprnt_funs_t *, void *, const struct doprnt_params_t *, const char *, mpf_srcptr);
4515 __GMP_DECLSPEC int __gmp_replacement_vsnprintf (char *, size_t, const char *, va_list);
4516 #endif /* _GMP_H_HAVE_VA_LIST */
4519 typedef int (*gmp_doscan_scan_t) (void *, const char *, ...);
4520 typedef void *(*gmp_doscan_step_t) (void *, int);
4521 typedef int (*gmp_doscan_get_t) (void *);
4522 typedef int (*gmp_doscan_unget_t) (int, void *);
4524 struct gmp_doscan_funs_t {
4525 gmp_doscan_scan_t scan;
4526 gmp_doscan_step_t step;
4527 gmp_doscan_get_t get;
4528 gmp_doscan_unget_t unget;
4529 };
4530 extern const struct gmp_doscan_funs_t __gmp_fscanf_funs;
4531 extern const struct gmp_doscan_funs_t __gmp_sscanf_funs;
4533 #if _GMP_H_HAVE_VA_LIST
4534 __GMP_DECLSPEC int __gmp_doscan (const struct gmp_doscan_funs_t *, void *, const char *, va_list);
4535 #endif
4538 /* For testing and debugging. */
4539 #define MPZ_CHECK_FORMAT(z) \
4540 do { \
4541 ASSERT_ALWAYS (SIZ(z) == 0 || PTR(z)[ABSIZ(z) - 1] != 0); \
4542 ASSERT_ALWAYS (ALLOC(z) >= ABSIZ(z)); \
4543 ASSERT_ALWAYS_MPN (PTR(z), ABSIZ(z)); \
4544 } while (0)
4546 #define MPQ_CHECK_FORMAT(q) \
4547 do { \
4548 MPZ_CHECK_FORMAT (mpq_numref (q)); \
4549 MPZ_CHECK_FORMAT (mpq_denref (q)); \
4550 ASSERT_ALWAYS (SIZ(mpq_denref(q)) >= 1); \
4552 if (SIZ(mpq_numref(q)) == 0) \
4553 { \
4554 /* should have zero as 0/1 */ \
4555 ASSERT_ALWAYS (SIZ(mpq_denref(q)) == 1 \
4556 && PTR(mpq_denref(q))[0] == 1); \
4557 } \
4558 else \
4559 { \
4560 /* should have no common factors */ \
4561 mpz_t g; \
4562 mpz_init (g); \
4563 mpz_gcd (g, mpq_numref(q), mpq_denref(q)); \
4564 ASSERT_ALWAYS (mpz_cmp_ui (g, 1) == 0); \
4565 mpz_clear (g); \
4566 } \
4567 } while (0)
4569 #define MPF_CHECK_FORMAT(f) \
4570 do { \
4571 ASSERT_ALWAYS (PREC(f) >= __GMPF_BITS_TO_PREC(53)); \
4572 ASSERT_ALWAYS (ABSIZ(f) <= PREC(f)+1); \
4573 if (SIZ(f) == 0) \
4574 ASSERT_ALWAYS (EXP(f) == 0); \
4575 if (SIZ(f) != 0) \
4576 ASSERT_ALWAYS (PTR(f)[ABSIZ(f) - 1] != 0); \
4577 } while (0)
4580 /* Enhancement: The "mod" and "gcd_1" functions below could have
4581 __GMP_ATTRIBUTE_PURE, but currently (gcc 3.3) that's not supported on
4582 function pointers, only actual functions. It probably doesn't make much
4583 difference to the gmp code, since hopefully we arrange calls so there's
4584 no great need for the compiler to move things around. */
4586 #if WANT_FAT_BINARY && (HAVE_HOST_CPU_FAMILY_x86 || HAVE_HOST_CPU_FAMILY_x86_64)
4587 /* NOTE: The function pointers in this struct are also in CPUVEC_FUNCS_LIST
4588 in mpn/x86/x86-defs.m4 and mpn/x86_64/x86_64-defs.m4. Be sure to update
4589 those when changing here. */
4590 struct cpuvec_t {
4591 DECL_add_n ((*add_n));
4592 DECL_addlsh1_n ((*addlsh1_n));
4593 DECL_addlsh2_n ((*addlsh2_n));
4594 DECL_addmul_1 ((*addmul_1));
4595 DECL_addmul_2 ((*addmul_2));
4596 DECL_bdiv_dbm1c ((*bdiv_dbm1c));
4597 DECL_cnd_add_n ((*cnd_add_n));
4598 DECL_cnd_sub_n ((*cnd_sub_n));
4599 DECL_com ((*com));
4600 DECL_copyd ((*copyd));
4601 DECL_copyi ((*copyi));
4602 DECL_divexact_1 ((*divexact_1));
4603 DECL_divrem_1 ((*divrem_1));
4604 DECL_gcd_1 ((*gcd_1));
4605 DECL_lshift ((*lshift));
4606 DECL_lshiftc ((*lshiftc));
4607 DECL_mod_1 ((*mod_1));
4608 DECL_mod_1_1p ((*mod_1_1p));
4609 DECL_mod_1_1p_cps ((*mod_1_1p_cps));
4610 DECL_mod_1s_2p ((*mod_1s_2p));
4611 DECL_mod_1s_2p_cps ((*mod_1s_2p_cps));
4612 DECL_mod_1s_4p ((*mod_1s_4p));
4613 DECL_mod_1s_4p_cps ((*mod_1s_4p_cps));
4614 DECL_mod_34lsub1 ((*mod_34lsub1));
4615 DECL_modexact_1c_odd ((*modexact_1c_odd));
4616 DECL_mul_1 ((*mul_1));
4617 DECL_mul_basecase ((*mul_basecase));
4618 DECL_mullo_basecase ((*mullo_basecase));
4619 DECL_preinv_divrem_1 ((*preinv_divrem_1));
4620 DECL_preinv_mod_1 ((*preinv_mod_1));
4621 DECL_redc_1 ((*redc_1));
4622 DECL_redc_2 ((*redc_2));
4623 DECL_rshift ((*rshift));
4624 DECL_sqr_basecase ((*sqr_basecase));
4625 DECL_sub_n ((*sub_n));
4626 DECL_sublsh1_n ((*sublsh1_n));
4627 DECL_submul_1 ((*submul_1));
4628 mp_size_t mul_toom22_threshold;
4629 mp_size_t mul_toom33_threshold;
4630 mp_size_t sqr_toom2_threshold;
4631 mp_size_t sqr_toom3_threshold;
4632 mp_size_t bmod_1_to_mod_1_threshold;
4633 };
4634 __GMP_DECLSPEC extern struct cpuvec_t __gmpn_cpuvec;
4635 __GMP_DECLSPEC extern int __gmpn_cpuvec_initialized;
4636 #endif /* x86 fat binary */
4638 __GMP_DECLSPEC void __gmpn_cpuvec_init (void);
4640 /* Get a threshold "field" from __gmpn_cpuvec, running __gmpn_cpuvec_init()
4641 if that hasn't yet been done (to establish the right values). */
4642 #define CPUVEC_THRESHOLD(field) \
4643 ((LIKELY (__gmpn_cpuvec_initialized) ? 0 : (__gmpn_cpuvec_init (), 0)), \
4644 __gmpn_cpuvec.field)
4647 #if HAVE_NATIVE_mpn_add_nc
4648 #define mpn_add_nc __MPN(add_nc)
4649 __GMP_DECLSPEC mp_limb_t mpn_add_nc (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t, mp_limb_t);
4650 #else
4651 static inline
4652 mp_limb_t
4653 mpn_add_nc (mp_ptr rp, mp_srcptr up, mp_srcptr vp, mp_size_t n, mp_limb_t ci)
4654 {
4655 mp_limb_t co;
4656 co = mpn_add_n (rp, up, vp, n);
4657 co += mpn_add_1 (rp, rp, n, ci);
4658 return co;
4659 }
4660 #endif
4662 #if HAVE_NATIVE_mpn_sub_nc
4663 #define mpn_sub_nc __MPN(sub_nc)
4664 __GMP_DECLSPEC mp_limb_t mpn_sub_nc (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t, mp_limb_t);
4665 #else
4666 static inline mp_limb_t
4667 mpn_sub_nc (mp_ptr rp, mp_srcptr up, mp_srcptr vp, mp_size_t n, mp_limb_t ci)
4668 {
4669 mp_limb_t co;
4670 co = mpn_sub_n (rp, up, vp, n);
4671 co += mpn_sub_1 (rp, rp, n, ci);
4672 return co;
4673 }
4674 #endif
4676 static inline int
4677 mpn_zero_p (mp_srcptr ap, mp_size_t n)
4678 {
4679 while (--n >= 0)
4680 {
4681 if (ap[n] != 0)
4682 return 0;
4683 }
4684 return 1;
4685 }
4687 #if TUNE_PROGRAM_BUILD
4688 /* Some extras wanted when recompiling some .c files for use by the tune
4689 program. Not part of a normal build.
4691 It's necessary to keep these thresholds as #defines (just to an
4692 identically named variable), since various defaults are established based
4693 on #ifdef in the .c files. For some this is not so (the defaults are
4694 instead established above), but all are done this way for consistency. */
4696 #undef MUL_TOOM22_THRESHOLD
4697 #define MUL_TOOM22_THRESHOLD mul_toom22_threshold
4698 extern mp_size_t mul_toom22_threshold;
4700 #undef MUL_TOOM33_THRESHOLD
4701 #define MUL_TOOM33_THRESHOLD mul_toom33_threshold
4702 extern mp_size_t mul_toom33_threshold;
4704 #undef MUL_TOOM44_THRESHOLD
4705 #define MUL_TOOM44_THRESHOLD mul_toom44_threshold
4706 extern mp_size_t mul_toom44_threshold;
4708 #undef MUL_TOOM6H_THRESHOLD
4709 #define MUL_TOOM6H_THRESHOLD mul_toom6h_threshold
4710 extern mp_size_t mul_toom6h_threshold;
4712 #undef MUL_TOOM8H_THRESHOLD
4713 #define MUL_TOOM8H_THRESHOLD mul_toom8h_threshold
4714 extern mp_size_t mul_toom8h_threshold;
4716 #undef MUL_TOOM32_TO_TOOM43_THRESHOLD
4717 #define MUL_TOOM32_TO_TOOM43_THRESHOLD mul_toom32_to_toom43_threshold
4718 extern mp_size_t mul_toom32_to_toom43_threshold;
4720 #undef MUL_TOOM32_TO_TOOM53_THRESHOLD
4721 #define MUL_TOOM32_TO_TOOM53_THRESHOLD mul_toom32_to_toom53_threshold
4722 extern mp_size_t mul_toom32_to_toom53_threshold;
4724 #undef MUL_TOOM42_TO_TOOM53_THRESHOLD
4725 #define MUL_TOOM42_TO_TOOM53_THRESHOLD mul_toom42_to_toom53_threshold
4726 extern mp_size_t mul_toom42_to_toom53_threshold;
4728 #undef MUL_TOOM42_TO_TOOM63_THRESHOLD
4729 #define MUL_TOOM42_TO_TOOM63_THRESHOLD mul_toom42_to_toom63_threshold
4730 extern mp_size_t mul_toom42_to_toom63_threshold;
4732 #undef MUL_TOOM43_TO_TOOM54_THRESHOLD
4733 #define MUL_TOOM43_TO_TOOM54_THRESHOLD mul_toom43_to_toom54_threshold
4734 extern mp_size_t mul_toom43_to_toom54_threshold;
4736 #undef MUL_FFT_THRESHOLD
4737 #define MUL_FFT_THRESHOLD mul_fft_threshold
4738 extern mp_size_t mul_fft_threshold;
4740 #undef MUL_FFT_MODF_THRESHOLD
4741 #define MUL_FFT_MODF_THRESHOLD mul_fft_modf_threshold
4742 extern mp_size_t mul_fft_modf_threshold;
4744 #undef MUL_FFT_TABLE
4745 #define MUL_FFT_TABLE { 0 }
4747 #undef MUL_FFT_TABLE3
4748 #define MUL_FFT_TABLE3 { {0,0} }
4750 /* A native mpn_sqr_basecase is not tuned and SQR_BASECASE_THRESHOLD should
4751 remain as zero (always use it). */
4752 #if ! HAVE_NATIVE_mpn_sqr_basecase
4753 #undef SQR_BASECASE_THRESHOLD
4754 #define SQR_BASECASE_THRESHOLD sqr_basecase_threshold
4755 extern mp_size_t sqr_basecase_threshold;
4756 #endif
4758 #if TUNE_PROGRAM_BUILD_SQR
4759 #undef SQR_TOOM2_THRESHOLD
4760 #define SQR_TOOM2_THRESHOLD SQR_TOOM2_MAX_GENERIC
4761 #else
4762 #undef SQR_TOOM2_THRESHOLD
4763 #define SQR_TOOM2_THRESHOLD sqr_toom2_threshold
4764 extern mp_size_t sqr_toom2_threshold;
4765 #endif
4767 #undef SQR_TOOM3_THRESHOLD
4768 #define SQR_TOOM3_THRESHOLD sqr_toom3_threshold
4769 extern mp_size_t sqr_toom3_threshold;
4771 #undef SQR_TOOM4_THRESHOLD
4772 #define SQR_TOOM4_THRESHOLD sqr_toom4_threshold
4773 extern mp_size_t sqr_toom4_threshold;
4775 #undef SQR_TOOM6_THRESHOLD
4776 #define SQR_TOOM6_THRESHOLD sqr_toom6_threshold
4777 extern mp_size_t sqr_toom6_threshold;
4779 #undef SQR_TOOM8_THRESHOLD
4780 #define SQR_TOOM8_THRESHOLD sqr_toom8_threshold
4781 extern mp_size_t sqr_toom8_threshold;
4783 #undef SQR_FFT_THRESHOLD
4784 #define SQR_FFT_THRESHOLD sqr_fft_threshold
4785 extern mp_size_t sqr_fft_threshold;
4787 #undef SQR_FFT_MODF_THRESHOLD
4788 #define SQR_FFT_MODF_THRESHOLD sqr_fft_modf_threshold
4789 extern mp_size_t sqr_fft_modf_threshold;
4791 #undef SQR_FFT_TABLE
4792 #define SQR_FFT_TABLE { 0 }
4794 #undef SQR_FFT_TABLE3
4795 #define SQR_FFT_TABLE3 { {0,0} }
4797 #undef MULLO_BASECASE_THRESHOLD
4798 #define MULLO_BASECASE_THRESHOLD mullo_basecase_threshold
4799 extern mp_size_t mullo_basecase_threshold;
4801 #undef MULLO_DC_THRESHOLD
4802 #define MULLO_DC_THRESHOLD mullo_dc_threshold
4803 extern mp_size_t mullo_dc_threshold;
4805 #undef MULLO_MUL_N_THRESHOLD
4806 #define MULLO_MUL_N_THRESHOLD mullo_mul_n_threshold
4807 extern mp_size_t mullo_mul_n_threshold;
4809 #undef MULMID_TOOM42_THRESHOLD
4810 #define MULMID_TOOM42_THRESHOLD mulmid_toom42_threshold
4811 extern mp_size_t mulmid_toom42_threshold;
4813 #undef DIV_QR_2_PI2_THRESHOLD
4814 #define DIV_QR_2_PI2_THRESHOLD div_qr_2_pi2_threshold
4815 extern mp_size_t div_qr_2_pi2_threshold;
4817 #undef DC_DIV_QR_THRESHOLD
4818 #define DC_DIV_QR_THRESHOLD dc_div_qr_threshold
4819 extern mp_size_t dc_div_qr_threshold;
4821 #undef DC_DIVAPPR_Q_THRESHOLD
4822 #define DC_DIVAPPR_Q_THRESHOLD dc_divappr_q_threshold
4823 extern mp_size_t dc_divappr_q_threshold;
4825 #undef DC_BDIV_Q_THRESHOLD
4826 #define DC_BDIV_Q_THRESHOLD dc_bdiv_q_threshold
4827 extern mp_size_t dc_bdiv_q_threshold;
4829 #undef DC_BDIV_QR_THRESHOLD
4830 #define DC_BDIV_QR_THRESHOLD dc_bdiv_qr_threshold
4831 extern mp_size_t dc_bdiv_qr_threshold;
4833 #undef MU_DIV_QR_THRESHOLD
4834 #define MU_DIV_QR_THRESHOLD mu_div_qr_threshold
4835 extern mp_size_t mu_div_qr_threshold;
4837 #undef MU_DIVAPPR_Q_THRESHOLD
4838 #define MU_DIVAPPR_Q_THRESHOLD mu_divappr_q_threshold
4839 extern mp_size_t mu_divappr_q_threshold;
4841 #undef MUPI_DIV_QR_THRESHOLD
4842 #define MUPI_DIV_QR_THRESHOLD mupi_div_qr_threshold
4843 extern mp_size_t mupi_div_qr_threshold;
4845 #undef MU_BDIV_QR_THRESHOLD
4846 #define MU_BDIV_QR_THRESHOLD mu_bdiv_qr_threshold
4847 extern mp_size_t mu_bdiv_qr_threshold;
4849 #undef MU_BDIV_Q_THRESHOLD
4850 #define MU_BDIV_Q_THRESHOLD mu_bdiv_q_threshold
4851 extern mp_size_t mu_bdiv_q_threshold;
4853 #undef INV_MULMOD_BNM1_THRESHOLD
4854 #define INV_MULMOD_BNM1_THRESHOLD inv_mulmod_bnm1_threshold
4855 extern mp_size_t inv_mulmod_bnm1_threshold;
4857 #undef INV_NEWTON_THRESHOLD
4858 #define INV_NEWTON_THRESHOLD inv_newton_threshold
4859 extern mp_size_t inv_newton_threshold;
4861 #undef INV_APPR_THRESHOLD
4862 #define INV_APPR_THRESHOLD inv_appr_threshold
4863 extern mp_size_t inv_appr_threshold;
4865 #undef BINV_NEWTON_THRESHOLD
4866 #define BINV_NEWTON_THRESHOLD binv_newton_threshold
4867 extern mp_size_t binv_newton_threshold;
4869 #undef REDC_1_TO_REDC_2_THRESHOLD
4870 #define REDC_1_TO_REDC_2_THRESHOLD redc_1_to_redc_2_threshold
4871 extern mp_size_t redc_1_to_redc_2_threshold;
4873 #undef REDC_2_TO_REDC_N_THRESHOLD
4874 #define REDC_2_TO_REDC_N_THRESHOLD redc_2_to_redc_n_threshold
4875 extern mp_size_t redc_2_to_redc_n_threshold;
4877 #undef REDC_1_TO_REDC_N_THRESHOLD
4878 #define REDC_1_TO_REDC_N_THRESHOLD redc_1_to_redc_n_threshold
4879 extern mp_size_t redc_1_to_redc_n_threshold;
4881 #undef MATRIX22_STRASSEN_THRESHOLD
4882 #define MATRIX22_STRASSEN_THRESHOLD matrix22_strassen_threshold
4883 extern mp_size_t matrix22_strassen_threshold;
4885 #undef HGCD_THRESHOLD
4886 #define HGCD_THRESHOLD hgcd_threshold
4887 extern mp_size_t hgcd_threshold;
4889 #undef HGCD_APPR_THRESHOLD
4890 #define HGCD_APPR_THRESHOLD hgcd_appr_threshold
4891 extern mp_size_t hgcd_appr_threshold;
4893 #undef HGCD_REDUCE_THRESHOLD
4894 #define HGCD_REDUCE_THRESHOLD hgcd_reduce_threshold
4895 extern mp_size_t hgcd_reduce_threshold;
4897 #undef GCD_DC_THRESHOLD
4898 #define GCD_DC_THRESHOLD gcd_dc_threshold
4899 extern mp_size_t gcd_dc_threshold;
4901 #undef GCDEXT_DC_THRESHOLD
4902 #define GCDEXT_DC_THRESHOLD gcdext_dc_threshold
4903 extern mp_size_t gcdext_dc_threshold;
4905 #undef DIV_QR_1N_PI1_METHOD
4906 #define DIV_QR_1N_PI1_METHOD div_qr_1n_pi1_method
4907 extern int div_qr_1n_pi1_method;
4909 #undef DIV_QR_1_NORM_THRESHOLD
4910 #define DIV_QR_1_NORM_THRESHOLD div_qr_1_norm_threshold
4911 extern mp_size_t div_qr_1_norm_threshold;
4913 #undef DIV_QR_1_UNNORM_THRESHOLD
4914 #define DIV_QR_1_UNNORM_THRESHOLD div_qr_1_unnorm_threshold
4915 extern mp_size_t div_qr_1_unnorm_threshold;
4917 #undef DIVREM_1_NORM_THRESHOLD
4918 #define DIVREM_1_NORM_THRESHOLD divrem_1_norm_threshold
4919 extern mp_size_t divrem_1_norm_threshold;
4921 #undef DIVREM_1_UNNORM_THRESHOLD
4922 #define DIVREM_1_UNNORM_THRESHOLD divrem_1_unnorm_threshold
4923 extern mp_size_t divrem_1_unnorm_threshold;
4925 #undef MOD_1_NORM_THRESHOLD
4926 #define MOD_1_NORM_THRESHOLD mod_1_norm_threshold
4927 extern mp_size_t mod_1_norm_threshold;
4929 #undef MOD_1_UNNORM_THRESHOLD
4930 #define MOD_1_UNNORM_THRESHOLD mod_1_unnorm_threshold
4931 extern mp_size_t mod_1_unnorm_threshold;
4933 #undef MOD_1_1P_METHOD
4934 #define MOD_1_1P_METHOD mod_1_1p_method
4935 extern int mod_1_1p_method;
4937 #undef MOD_1N_TO_MOD_1_1_THRESHOLD
4938 #define MOD_1N_TO_MOD_1_1_THRESHOLD mod_1n_to_mod_1_1_threshold
4939 extern mp_size_t mod_1n_to_mod_1_1_threshold;
4941 #undef MOD_1U_TO_MOD_1_1_THRESHOLD
4942 #define MOD_1U_TO_MOD_1_1_THRESHOLD mod_1u_to_mod_1_1_threshold
4943 extern mp_size_t mod_1u_to_mod_1_1_threshold;
4945 #undef MOD_1_1_TO_MOD_1_2_THRESHOLD
4946 #define MOD_1_1_TO_MOD_1_2_THRESHOLD mod_1_1_to_mod_1_2_threshold
4947 extern mp_size_t mod_1_1_to_mod_1_2_threshold;
4949 #undef MOD_1_2_TO_MOD_1_4_THRESHOLD
4950 #define MOD_1_2_TO_MOD_1_4_THRESHOLD mod_1_2_to_mod_1_4_threshold
4951 extern mp_size_t mod_1_2_to_mod_1_4_threshold;
4953 #undef PREINV_MOD_1_TO_MOD_1_THRESHOLD
4954 #define PREINV_MOD_1_TO_MOD_1_THRESHOLD preinv_mod_1_to_mod_1_threshold
4955 extern mp_size_t preinv_mod_1_to_mod_1_threshold;
4957 #if ! UDIV_PREINV_ALWAYS
4958 #undef DIVREM_2_THRESHOLD
4959 #define DIVREM_2_THRESHOLD divrem_2_threshold
4960 extern mp_size_t divrem_2_threshold;
4961 #endif
4963 #undef MULMOD_BNM1_THRESHOLD
4964 #define MULMOD_BNM1_THRESHOLD mulmod_bnm1_threshold
4965 extern mp_size_t mulmod_bnm1_threshold;
4967 #undef SQRMOD_BNM1_THRESHOLD
4968 #define SQRMOD_BNM1_THRESHOLD sqrmod_bnm1_threshold
4969 extern mp_size_t sqrmod_bnm1_threshold;
4971 #undef GET_STR_DC_THRESHOLD
4972 #define GET_STR_DC_THRESHOLD get_str_dc_threshold
4973 extern mp_size_t get_str_dc_threshold;
4975 #undef GET_STR_PRECOMPUTE_THRESHOLD
4976 #define GET_STR_PRECOMPUTE_THRESHOLD get_str_precompute_threshold
4977 extern mp_size_t get_str_precompute_threshold;
4979 #undef SET_STR_DC_THRESHOLD
4980 #define SET_STR_DC_THRESHOLD set_str_dc_threshold
4981 extern mp_size_t set_str_dc_threshold;
4983 #undef SET_STR_PRECOMPUTE_THRESHOLD
4984 #define SET_STR_PRECOMPUTE_THRESHOLD set_str_precompute_threshold
4985 extern mp_size_t set_str_precompute_threshold;
4987 #undef FAC_ODD_THRESHOLD
4988 #define FAC_ODD_THRESHOLD fac_odd_threshold
4989 extern mp_size_t fac_odd_threshold;
4991 #undef FAC_DSC_THRESHOLD
4992 #define FAC_DSC_THRESHOLD fac_dsc_threshold
4993 extern mp_size_t fac_dsc_threshold;
4995 #undef FFT_TABLE_ATTRS
4996 #define FFT_TABLE_ATTRS
4997 extern mp_size_t mpn_fft_table[2][MPN_FFT_TABLE_SIZE];
4998 #define FFT_TABLE3_SIZE 2000 /* generous space for tuning */
4999 extern struct fft_table_nk mpn_fft_table3[2][FFT_TABLE3_SIZE];
5001 /* Sizes the tune program tests up to, used in a couple of recompilations. */
5002 #undef MUL_TOOM22_THRESHOLD_LIMIT
5003 #undef MUL_TOOM33_THRESHOLD_LIMIT
5004 #undef MULLO_BASECASE_THRESHOLD_LIMIT
5005 #undef SQR_TOOM3_THRESHOLD_LIMIT
5006 #define SQR_TOOM2_MAX_GENERIC 200
5007 #define MUL_TOOM22_THRESHOLD_LIMIT 700
5008 #define MUL_TOOM33_THRESHOLD_LIMIT 700
5009 #define SQR_TOOM3_THRESHOLD_LIMIT 400
5010 #define MUL_TOOM44_THRESHOLD_LIMIT 1000
5011 #define SQR_TOOM4_THRESHOLD_LIMIT 1000
5012 #define MUL_TOOM6H_THRESHOLD_LIMIT 1100
5013 #define SQR_TOOM6_THRESHOLD_LIMIT 1100
5014 #define MUL_TOOM8H_THRESHOLD_LIMIT 1200
5015 #define SQR_TOOM8_THRESHOLD_LIMIT 1200
5016 #define MULLO_BASECASE_THRESHOLD_LIMIT 200
5017 #define GET_STR_THRESHOLD_LIMIT 150
5018 #define FAC_DSC_THRESHOLD_LIMIT 2048
5020 #endif /* TUNE_PROGRAM_BUILD */
5022 #if defined (__cplusplus)
5023 }
5024 #endif
5026 /* FIXME: Make these itch functions less conservative. Also consider making
5027 them dependent on just 'an', and compute the allocation directly from 'an'
5028 instead of via n. */
5030 /* toom22/toom2: Scratch need is 2*(an + k), k is the recursion depth.
5031 k is the smallest k such that
5032 ceil(an/2^k) < MUL_TOOM22_THRESHOLD,
5033 which implies that
5034 k = bitsize of floor ((an-1)/(MUL_TOOM22_THRESHOLD-1))
5035 = 1 + floor (log_2 (floor ((an-1)/(MUL_TOOM22_THRESHOLD-1))))
5036 */
5037 #define mpn_toom22_mul_itch(an, bn) \
5038 (2 * ((an) + GMP_NUMB_BITS))
5039 #define mpn_toom2_sqr_itch(an) \
5040 (2 * ((an) + GMP_NUMB_BITS))
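/* For illustration (the threshold value is assumed here, not taken from this
file): with an = 1000 and MUL_TOOM22_THRESHOLD = 30, floor ((an-1)/(THRESHOLD-1))
= floor (999/29) = 34, whose bitsize is 6, so k = 6 and the exact need is
2*(1000 + 6) = 2012 limbs. Since k is bounded by the bitsize of an,
GMP_NUMB_BITS comfortably over-estimates it for any feasible operand size,
which is what the two macros above rely on. */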
5042 /* toom33/toom3: Scratch need is 5an/2 + 10k, k is the recursion depth.
5043 We use 3an + C, so that we can use a smaller constant.
5044 */
5045 #define mpn_toom33_mul_itch(an, bn) \
5046 (3 * (an) + GMP_NUMB_BITS)
5047 #define mpn_toom3_sqr_itch(an) \
5048 (3 * (an) + GMP_NUMB_BITS)
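/* For illustration (threshold assumed, not taken from this file): with
an = 1000 and MUL_TOOM33_THRESHOLD = 100 the recursion depth is k = 3
(1000 -> 334 -> 112 -> 38), giving roughly 5*1000/2 + 10*3 = 2530 limbs,
comfortably below the 3*1000 + GMP_NUMB_BITS allowed by the macros above. */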
5050 /* toom44/toom4: Scratch need is 8an/3 + 13k, k is the recursion depth.
5051 We use 3an + C, so that we can use a smaller constant.
5052 */
5053 #define mpn_toom44_mul_itch(an, bn) \
5054 (3 * (an) + GMP_NUMB_BITS)
5055 #define mpn_toom4_sqr_itch(an) \
5056 (3 * (an) + GMP_NUMB_BITS)
5058 #define mpn_toom6_sqr_itch(n) \
5059 (((n) - SQR_TOOM6_THRESHOLD)*2 + \
5060 MAX(SQR_TOOM6_THRESHOLD*2 + GMP_NUMB_BITS*6, \
5061 mpn_toom4_sqr_itch(SQR_TOOM6_THRESHOLD)))
5063 #define MUL_TOOM6H_MIN \
5064 ((MUL_TOOM6H_THRESHOLD > MUL_TOOM44_THRESHOLD) ? \
5065 MUL_TOOM6H_THRESHOLD : MUL_TOOM44_THRESHOLD)
5066 #define mpn_toom6_mul_n_itch(n) \
5067 (((n) - MUL_TOOM6H_MIN)*2 + \
5068 MAX(MUL_TOOM6H_MIN*2 + GMP_NUMB_BITS*6, \
5069 mpn_toom44_mul_itch(MUL_TOOM6H_MIN,MUL_TOOM6H_MIN)))
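/* Reading the toom6 itch macros above roughly: scratch is budgeted as 2 limbs
per limb above the starting size (SQR_TOOM6_THRESHOLD resp. MUL_TOOM6H_MIN),
plus the larger of a fixed allowance at that starting size and the toom4 itch
there, presumably so the estimate also covers the point where toom6 hands off
to toom4. */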
5071 static inline mp_size_t
5072 mpn_toom6h_mul_itch (mp_size_t an, mp_size_t bn) {
5073 mp_size_t estimatedN;
5074 estimatedN = (an + bn) / (size_t) 10 + 1;
5075 return mpn_toom6_mul_n_itch (estimatedN * 6);
5076 }
5078 #define mpn_toom8_sqr_itch(n) \
5079 ((((n)*15)>>3) - ((SQR_TOOM8_THRESHOLD*15)>>3) + \
5080 MAX(((SQR_TOOM8_THRESHOLD*15)>>3) + GMP_NUMB_BITS*6, \
5081 mpn_toom6_sqr_itch(SQR_TOOM8_THRESHOLD)))
5083 #define MUL_TOOM8H_MIN \
5084 ((MUL_TOOM8H_THRESHOLD > MUL_TOOM6H_MIN) ? \
5085 MUL_TOOM8H_THRESHOLD : MUL_TOOM6H_MIN)
5086 #define mpn_toom8_mul_n_itch(n) \
5087 ((((n)*15)>>3) - ((MUL_TOOM8H_MIN*15)>>3) + \
5088 MAX(((MUL_TOOM8H_MIN*15)>>3) + GMP_NUMB_BITS*6, \
5089 mpn_toom6_mul_n_itch(MUL_TOOM8H_MIN)))
5091 static inline mp_size_t
5092 mpn_toom8h_mul_itch (mp_size_t an, mp_size_t bn) {
5093 mp_size_t estimatedN;
5094 estimatedN = (an + bn) / (size_t) 14 + 1;
5095 return mpn_toom8_mul_n_itch (estimatedN * 8);
5096 }
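/* The two unbalanced entry points (mpn_toom6h_mul_itch and mpn_toom8h_mul_itch)
reduce to the balanced estimates above by converting the combined operand size
into an equivalent balanced size: roughly 6*(an + bn)/10 limbs for toom6h and
8*(an + bn)/14 for toom8h, rounded up through the "+ 1". */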
5098 static inline mp_size_t
5099 mpn_toom32_mul_itch (mp_size_t an, mp_size_t bn)
5100 {
5101 mp_size_t n = 1 + (2 * an >= 3 * bn ? (an - 1) / (size_t) 3 : (bn - 1) >> 1);
5102 mp_size_t itch = 2 * n + 1;
5104 return itch;
5105 }
5107 static inline mp_size_t
5108 mpn_toom42_mul_itch (mp_size_t an, mp_size_t bn)
5109 {
5110 mp_size_t n = an >= 2 * bn ? (an + 3) >> 2 : (bn + 1) >> 1;
5111 return 6 * n + 3;
5112 }
5114 static inline mp_size_t
5115 mpn_toom43_mul_itch (mp_size_t an, mp_size_t bn)
5116 {
5117 mp_size_t n = 1 + (3 * an >= 4 * bn ? (an - 1) >> 2 : (bn - 1) / (size_t) 3);
5119 return 6*n + 4;
5120 }
5122 static inline mp_size_t
5123 mpn_toom52_mul_itch (mp_size_t an, mp_size_t bn)
5124 {
5125 mp_size_t n = 1 + (2 * an >= 5 * bn ? (an - 1) / (size_t) 5 : (bn - 1) >> 1);
5126 return 6*n + 4;
5127 }
5129 static inline mp_size_t
5130 mpn_toom53_mul_itch (mp_size_t an, mp_size_t bn)
5131 {
5132 mp_size_t n = 1 + (3 * an >= 5 * bn ? (an - 1) / (size_t) 5 : (bn - 1) / (size_t) 3);
5133 return 10 * n + 10;
5134 }
5136 static inline mp_size_t
5137 mpn_toom62_mul_itch (mp_size_t an, mp_size_t bn)
5138 {
5139 mp_size_t n = 1 + (an >= 3 * bn ? (an - 1) / (size_t) 6 : (bn - 1) >> 1);
5140 return 10 * n + 10;
5141 }
5143 static inline mp_size_t
5144 mpn_toom63_mul_itch (mp_size_t an, mp_size_t bn)
5145 {
5146 mp_size_t n = 1 + (an >= 2 * bn ? (an - 1) / (size_t) 6 : (bn - 1) / (size_t) 3);
5147 return 9 * n + 3;
5148 }
5150 static inline mp_size_t
5151 mpn_toom54_mul_itch (mp_size_t an, mp_size_t bn)
5152 {
5153 mp_size_t n = 1 + (4 * an >= 5 * bn ? (an - 1) / (size_t) 5 : (bn - 1) / (size_t) 4);
5154 return 9 * n + 3;
5155 }
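/* Each helper above computes n = ceil (max (an/p, bn/q)) for its toom(p,q)
splitting and returns a small linear function of n. Worked example for
mpn_toom32_mul_itch with an = 100, bn = 50: 2*an = 200 >= 3*bn = 150, so
n = 1 + (an - 1)/3 = 34 = ceil(100/3) and the reported need is 2*34 + 1 = 69
limbs. */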
5157 /* let S(n) = space required for input size n,
5158 then S(n) = 3 floor(n/2) + 1 + S(floor(n/2)). */
5159 #define mpn_toom42_mulmid_itch(n) \
5160 (3 * (n) + GMP_NUMB_BITS)
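/* Unrolling that recursion gives S(n) <= 3*(n/2 + n/4 + ...) + log2(n)
< 3*n + log2(n); since log2(n) stays below GMP_NUMB_BITS for any size that
fits in memory, the 3*(n) + GMP_NUMB_BITS above is a safe allowance. */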
5162 #if 0
5163 #define mpn_fft_mul mpn_mul_fft_full
5164 #else
5165 #define mpn_fft_mul mpn_nussbaumer_mul
5166 #endif
5168 #ifdef __cplusplus
5170 /* A little helper for a null-terminated __gmp_allocate_func string.
5171 The destructor ensures it's freed even if an exception is thrown.
5172 The len field is needed by the destructor, and can be used by anyone else
5173 to avoid a second strlen pass over the data.
5175 Since our input is a C string, using strlen is correct. Perhaps it'd be
5176 more C++-ish style to use std::char_traits<char>::length, but char_traits
5177 isn't available in gcc 2.95.4. */
5179 class gmp_allocated_string {
5180 public:
5181 char *str;
5182 size_t len;
5183 gmp_allocated_string(char *arg)
5184 {
5185 str = arg;
5186 len = std::strlen (str);
5187 }
5188 ~gmp_allocated_string()
5189 {
5190 (*__gmp_free_func) (str, len+1);
5191 }
5192 };
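/* Illustrative use (a hypothetical helper, not part of the library): wrap a
string obtained from GMP's allocator, for instance mpz_get_str (NULL, 10, z),
so that it is handed back to __gmp_free_func even if the stream insertion
throws:

std::ostream &
print_decimal (std::ostream &o, mpz_srcptr z)
{
gmp_allocated_string s (mpz_get_str (NULL, 10, z));
return o << s.str;
}
*/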
5194 std::istream &__gmpz_operator_in_nowhite (std::istream &, mpz_ptr, char);
5195 int __gmp_istream_set_base (std::istream &, char &, bool &, bool &);
5196 void __gmp_istream_set_digits (std::string &, std::istream &, char &, bool &, int);
5197 void __gmp_doprnt_params_from_ios (struct doprnt_params_t *, std::ios &);
5198 std::ostream& __gmp_doprnt_integer_ostream (std::ostream &, struct doprnt_params_t *, char *);
5199 extern const struct doprnt_funs_t __gmp_asprintf_funs_noformat;
5201 #endif /* __cplusplus */
5203 #endif /* __GMP_IMPL_H__ */